| repo_name (5-104 chars) | path (4-248 chars) | content (102-99.9k chars) |
|---|---|---|
eoinof/stem
|
stem/descriptor/networkstatus.py
|
"""
Parsing for Tor network status documents. This supports both the v2 and v3
dir-spec. Documents can be obtained from a few sources...
* the 'cached-consensus' file in tor's data directory
* tor metrics, at https://metrics.torproject.org/data.html
* directory authorities and mirrors via their DirPort
... and contain the following sections...
* document header
* list of :class:`stem.descriptor.networkstatus.DirectoryAuthority`
* list of :class:`stem.descriptor.router_status_entry.RouterStatusEntry`
* document footer
Of these, the router status entry section can be quite large (on the order of
hundreds of kilobytes). As such we provide a couple of methods for reading
network status documents...
* :class:`stem.descriptor.networkstatus.NetworkStatusDocumentV3` constructor
If read time and memory aren't a concern then you can simply use the document
constructor. Router entries are assigned to its 'routers' attribute...
::
from stem.descriptor.networkstatus import NetworkStatusDocumentV3
# Reads the full consensus into memory twice (both for the parsed and
# unparsed contents).
consensus_file = open('.tor/cached-consensus', 'r')
consensus = NetworkStatusDocumentV3(consensus_file.read())
consensus_file.close()
for router in consensus.routers:
print router.nickname
* :func:`stem.descriptor.networkstatus.parse_file`
Alternatively, the :func:`~stem.descriptor.networkstatus.parse_file` function
provides an iterator for a document's routers. Those routers refer to a 'thin'
document, which doesn't have a 'routers' attribute. This allows for lower
memory usage and less upfront runtime.
::
from stem.descriptor.networkstatus import parse_file
with open('.tor/cached-consensus', 'r') as consensus_file:
# Processes the routers as we read them in. The routers refer to a document
# with an unset 'routers' attribute.
for router in parse_file(consensus_file):
print router.nickname
**Module Overview:**
::
parse_file - parses a network status file, providing an iterator for its routers
NetworkStatusDocument - Network status document
|- NetworkStatusDocumentV2 - Version 2 network status document
+- NetworkStatusDocumentV3 - Version 3 network status document
DocumentSignature - Signature of a document by a directory authority
DirectoryAuthority - Directory authority as defined in a v3 network status document
"""
import datetime
import StringIO
import stem.descriptor
import stem.descriptor.router_status_entry
import stem.version
import stem.util.connection
import stem.util.tor_tools
# Version 2 network status document fields, tuples of the form...
# (keyword, is_mandatory)
NETWORK_STATUS_V2_FIELDS = (
("network-status-version", True),
("dir-source", True),
("fingerprint", True),
("contact", True),
("dir-signing-key", True),
("client-versions", False),
("server-versions", False),
("published", True),
("dir-options", False),
("directory-signature", True),
)
# Network status documents are either a 'vote' or a 'consensus', with different
# mandatory fields for each. Both, though, require that their fields appear in a
# specific order. This is an ordered listing of the following...
#
# (field, in_votes, in_consensus, is_mandatory)
HEADER_STATUS_DOCUMENT_FIELDS = (
("network-status-version", True, True, True),
("vote-status", True, True, True),
("consensus-methods", True, False, False),
("consensus-method", False, True, False),
("published", True, False, True),
("valid-after", True, True, True),
("fresh-until", True, True, True),
("valid-until", True, True, True),
("voting-delay", True, True, True),
("client-versions", True, True, False),
("server-versions", True, True, False),
("known-flags", True, True, True),
("params", True, True, False),
)
FOOTER_STATUS_DOCUMENT_FIELDS = (
("directory-footer", True, True, True),
("bandwidth-weights", False, True, False),
("directory-signature", True, True, True),
)
HEADER_FIELDS = [attr[0] for attr in HEADER_STATUS_DOCUMENT_FIELDS]
FOOTER_FIELDS = [attr[0] for attr in FOOTER_STATUS_DOCUMENT_FIELDS]
AUTH_START = "dir-source"
ROUTERS_START = "r"
FOOTER_START = "directory-footer"
V2_FOOTER_START = "directory-signature"
DEFAULT_PARAMS = {
"bwweightscale": 10000,
"cbtdisabled": 0,
"cbtnummodes": 3,
"cbtrecentcount": 20,
"cbtmaxtimeouts": 18,
"cbtmincircs": 100,
"cbtquantile": 80,
"cbtclosequantile": 95,
"cbttestfreq": 60,
"cbtmintimeout": 2000,
"cbtinitialtimeout": 60000,
}
# KeyCertificate fields, tuple is of the form...
# (keyword, is_mandatory)
KEY_CERTIFICATE_PARAMS = (
('dir-key-certificate-version', True),
('dir-address', False),
('fingerprint', True),
('dir-identity-key', True),
('dir-key-published', True),
('dir-key-expires', True),
('dir-signing-key', True),
('dir-key-crosscert', False),
('dir-key-certification', True),
)
BANDWIDTH_WEIGHT_ENTRIES = (
"Wbd", "Wbe", "Wbg", "Wbm",
"Wdb",
"Web", "Wed", "Wee", "Weg", "Wem",
"Wgb", "Wgd", "Wgg", "Wgm",
"Wmb", "Wmd", "Wme", "Wmg", "Wmm",
)
def parse_file(document_file, validate = True, is_microdescriptor = False, document_version = 3):
"""
  Parses a network status document and iterates over the RouterStatusEntry
  instances in it. The document that these entries reference has an empty
  'routers' attribute to allow for limited memory usage.
:param file document_file: file with network status document content
:param bool validate: checks the validity of the document's contents if
**True**, skips these checks otherwise
:param bool is_microdescriptor: **True** if this is for a microdescriptor
consensus, **False** otherwise
:param int document_version: network status document version
  :returns: iterator over the :class:`stem.descriptor.router_status_entry.RouterStatusEntry` instances in the document
:raises:
    * **ValueError** if the document_version is unrecognized or the content is
      malformed and validate is **True**
* **IOError** if the file can't be read
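  A brief example of this (for illustration only - the 'cached-microdesc-consensus'
  filename is an assumption about what's in tor's data directory)...
  ::
    from stem.descriptor.networkstatus import parse_file
    with open('.tor/cached-microdesc-consensus', 'r') as consensus_file:
      for router in parse_file(consensus_file, is_microdescriptor = True):
        print router.nickname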
"""
# getting the document without the routers section
header = stem.descriptor._read_until_keywords((ROUTERS_START, FOOTER_START, V2_FOOTER_START), document_file)
routers_start = document_file.tell()
stem.descriptor._read_until_keywords((FOOTER_START, V2_FOOTER_START), document_file, skip = True)
routers_end = document_file.tell()
footer = document_file.readlines()
document_content = "".join(header + footer)
if document_version == 2:
document_type = NetworkStatusDocumentV2
    router_type = stem.descriptor.router_status_entry.RouterStatusEntryV2
elif document_version == 3:
document_type = NetworkStatusDocumentV3
if not is_microdescriptor:
router_type = stem.descriptor.router_status_entry.RouterStatusEntryV3
else:
router_type = stem.descriptor.router_status_entry.RouterStatusEntryMicroV3
else:
raise ValueError("Document version %i isn't recognized (only able to parse v2 or v3)" % document_version)
desc_iterator = stem.descriptor.router_status_entry.parse_file(
document_file,
validate,
entry_class = router_type,
entry_keyword = ROUTERS_START,
start_position = routers_start,
end_position = routers_end,
extra_args = (document_type(document_content, validate),),
)
for desc in desc_iterator:
yield desc
class NetworkStatusDocument(stem.descriptor.Descriptor):
"""
Common parent for network status documents.
"""
def __init__(self, raw_content):
super(NetworkStatusDocument, self).__init__(raw_content)
self._unrecognized_lines = []
def get_unrecognized_lines(self):
return list(self._unrecognized_lines)
class NetworkStatusDocumentV2(NetworkStatusDocument):
"""
Version 2 network status document. These have been deprecated and are no
longer generated by Tor.
:var tuple routers: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV2`
contained in the document
:var int version: **\*** document version
:var str hostname: **\*** hostname of the authority
:var str address: **\*** authority's IP address
:var int dir_port: **\*** authority's DirPort
:var str fingerprint: **\*** authority's fingerprint
:var str contact: **\*** authority's contact information
:var str signing_key: **\*** authority's public signing key
:var list client_versions: list of recommended client tor version strings
:var list server_versions: list of recommended server tor version strings
:var datetime published: **\*** time when the document was published
:var list options: **\*** list of things that this authority decides
:var str signing_authority: **\*** name of the authority signing the document
:var str signature: **\*** authority's signature for the document
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
"""
def __init__(self, raw_content, validate = True):
super(NetworkStatusDocumentV2, self).__init__(raw_content)
self.version = None
self.hostname = None
self.address = None
self.dir_port = None
self.fingerprint = None
self.contact = None
self.signing_key = None
self.client_versions = []
self.server_versions = []
self.published = None
self.options = []
self.signing_authority = None
    self.signature = None
# Splitting the document from the routers. Unlike v3 documents we're not
# bending over backwards on the validation by checking the field order or
# that header/footer attributes aren't in the wrong section. This is a
# deprecated descriptor type - patches welcome if you want those checks.
document_file = StringIO.StringIO(raw_content)
document_content = "".join(stem.descriptor._read_until_keywords((ROUTERS_START, V2_FOOTER_START), document_file))
self.routers = tuple(stem.descriptor.router_status_entry.parse_file(
document_file,
validate,
entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV2,
entry_keyword = ROUTERS_START,
section_end_keywords = V2_FOOTER_START,
extra_args = (self,),
))
document_content += "\n" + document_file.read()
entries = stem.descriptor._get_descriptor_components(document_content, validate)
if validate: self._check_constraints(entries)
self._parse(entries, validate)
def _parse(self, entries, validate):
for keyword, values in entries.items():
value, block_contents = values[0]
line = "%s %s" % (keyword, value) # original line
if block_contents: line += "\n%s" % block_contents
if keyword == "network-status-version":
if not value.isdigit():
if not validate: continue
raise ValueError("Network status document has a non-numeric version: %s" % line)
self.version = int(value)
if validate and self.version != 2:
raise ValueError("Expected a version 2 network status document, got version '%s' instead" % self.version)
elif keyword == "dir-source":
dir_source_comp = value.split()
if len(dir_source_comp) < 3:
if not validate: continue
raise ValueError("The 'dir-source' line of a v2 network status document must have three values: %s" % line)
if validate:
if not dir_source_comp[0]:
# https://trac.torproject.org/7055
raise ValueError("Authority's hostname can't be blank: %s" % line)
elif not stem.util.connection.is_valid_ip_address(dir_source_comp[1]):
raise ValueError("Authority's address isn't a valid IPv4 address: %s" % dir_source_comp[1])
elif not stem.util.connection.is_valid_port(dir_source_comp[2], allow_zero = True):
raise ValueError("Authority's DirPort is invalid: %s" % dir_source_comp[2])
elif not dir_source_comp[2].isdigit():
continue
self.hostname = dir_source_comp[0]
self.address = dir_source_comp[1]
self.dir_port = None if dir_source_comp[2] == '0' else int(dir_source_comp[2])
elif keyword == "fingerprint":
if validate and not stem.util.tor_tools.is_valid_fingerprint(value):
raise ValueError("Authority's fingerprint in a v2 network status document is malformed: %s" % line)
self.fingerprint = value
elif keyword == "contact":
self.contact = value
elif keyword == "dir-signing-key":
self.signing_key = block_contents
elif keyword in ("client-versions", "server-versions"):
# v2 documents existed while there were tor versions using the 'old'
# style, hence we aren't attempting to parse them
for version_str in value.split(","):
if keyword == 'client-versions':
self.client_versions.append(version_str)
elif keyword == 'server-versions':
self.server_versions.append(version_str)
elif keyword == "published":
try:
self.published = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
except ValueError:
if validate:
raise ValueError("Version 2 network status document's 'published' time wasn't parseable: %s" % value)
elif keyword == "dir-options":
self.options = value.split()
elif keyword == "directory-signature":
self.signing_authority = value
self.signature = block_contents
else:
self._unrecognized_lines.append(line)
# 'client-versions' and 'server-versions' are only required if "Versions"
# is among the options
if validate and "Versions" in self.options:
if not ('client-versions' in entries and 'server-versions' in entries):
raise ValueError("Version 2 network status documents must have a 'client-versions' and 'server-versions' when 'Versions' is listed among its dir-options:\n%s" % str(self))
def _check_constraints(self, entries):
required_fields = [field for (field, is_mandatory) in NETWORK_STATUS_V2_FIELDS if is_mandatory]
for keyword in required_fields:
if not keyword in entries:
raise ValueError("Network status document (v2) must have a '%s' line:\n%s" % (keyword, str(self)))
# all recognized fields can only appear once
single_fields = [field for (field, _) in NETWORK_STATUS_V2_FIELDS]
for keyword in single_fields:
if keyword in entries and len(entries[keyword]) > 1:
raise ValueError("Network status document (v2) can only have a single '%s' line, got %i:\n%s" % (keyword, len(entries[keyword]), str(self)))
if 'network-status-version' != entries.keys()[0]:
raise ValueError("Network status document (v2) are expected to start with a 'network-status-version' line:\n%s" % str(self))
class NetworkStatusDocumentV3(NetworkStatusDocument):
"""
Version 3 network status document. This could be either a vote or consensus.
:var tuple routers: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3`
contained in the document
:var int version: **\*** document version
:var str version_flavor: **\*** flavor associated with the document (such as 'microdesc')
:var bool is_consensus: **\*** **True** if the document is a consensus
:var bool is_vote: **\*** **True** if the document is a vote
:var bool is_microdescriptor: **\*** **True** if this is a microdescriptor
flavored document, **False** otherwise
:var datetime valid_after: **\*** time when the consensus became valid
:var datetime fresh_until: **\*** time when the next consensus should be produced
:var datetime valid_until: **\*** time when this consensus becomes obsolete
:var int vote_delay: **\*** number of seconds allowed for collecting votes
from all authorities
:var int dist_delay: **\*** number of seconds allowed for collecting
signatures from all authorities
:var list client_versions: list of recommended client tor versions
:var list server_versions: list of recommended server tor versions
:var list known_flags: **\*** list of known router flags
  :var dict params: **\*** dict of parameter(**str**) => value(**int**) mappings
  :var tuple directory_authorities: **\*** :class:`~stem.descriptor.networkstatus.DirectoryAuthority`
    objects that have generated this document
:var list signatures: **\*** :class:`~stem.descriptor.networkstatus.DocumentSignature`
of the authorities that have signed the document
**Consensus Attributes:**
:var int consensus_method: method version used to generate this consensus
:var dict bandwidth_weights: dict of weight(str) => value(int) mappings
**Vote Attributes:**
:var list consensus_methods: list of ints for the supported method versions
:var datetime published: time when the document was published
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as None if undefined
"""
def __init__(self, raw_content, validate = True, default_params = True):
"""
Parse a v3 network status document.
:param str raw_content: raw network status document data
:param bool validate: **True** if the document is to be validated, **False** otherwise
:param bool default_params: includes defaults in our params dict, otherwise
it just contains values from the document
:raises: **ValueError** if the document is invalid
"""
super(NetworkStatusDocumentV3, self).__init__(raw_content)
document_file = StringIO.StringIO(raw_content)
self._header = _DocumentHeader(document_file, validate, default_params)
# merge header attributes into us
for attr, value in vars(self._header).items():
if attr != "_unrecognized_lines":
setattr(self, attr, value)
else:
self._unrecognized_lines += value
self.directory_authorities = tuple(stem.descriptor.router_status_entry.parse_file(
document_file,
validate,
entry_class = DirectoryAuthority,
entry_keyword = AUTH_START,
section_end_keywords = (ROUTERS_START, FOOTER_START),
extra_args = (self._header.is_vote,),
))
if not self._header.is_microdescriptor:
router_type = stem.descriptor.router_status_entry.RouterStatusEntryV3
else:
router_type = stem.descriptor.router_status_entry.RouterStatusEntryMicroV3
self.routers = tuple(stem.descriptor.router_status_entry.parse_file(
document_file,
validate,
entry_class = router_type,
entry_keyword = ROUTERS_START,
section_end_keywords = FOOTER_START,
extra_args = (self,),
))
self._footer = _DocumentFooter(document_file, validate, self._header)
    # merge footer attributes into us
for attr, value in vars(self._footer).items():
if attr != "_unrecognized_lines":
setattr(self, attr, value)
else:
self._unrecognized_lines += value
def meets_consensus_method(self, method):
"""
Checks if we meet the given consensus-method. This works for both votes and
consensuses, checking our 'consensus-method' and 'consensus-methods'
entries.
:param int method: consensus-method to check for
:returns: **True** if we meet the given consensus-method, and **False** otherwise
"""
return self._header.meets_consensus_method(method)
def __cmp__(self, other):
if not isinstance(other, NetworkStatusDocumentV3):
return 1
return str(self) > str(other)
class _DocumentHeader(object):
def __init__(self, document_file, validate, default_params):
self.version = None
self.version_flavor = None
self.is_consensus = True
self.is_vote = False
self.is_microdescriptor = False
self.consensus_methods = []
self.published = None
self.consensus_method = None
self.valid_after = None
self.fresh_until = None
self.valid_until = None
self.vote_delay = None
self.dist_delay = None
self.client_versions = []
self.server_versions = []
self.known_flags = []
self.params = dict(DEFAULT_PARAMS) if default_params else {}
self._unrecognized_lines = []
content = "".join(stem.descriptor._read_until_keywords((AUTH_START, ROUTERS_START, FOOTER_START), document_file))
entries = stem.descriptor._get_descriptor_components(content, validate)
self._parse(entries, validate)
# doing this validation afterward so we know our 'is_consensus' and
# 'is_vote' attributes
if validate:
_check_for_missing_and_disallowed_fields(self, entries, HEADER_STATUS_DOCUMENT_FIELDS)
_check_for_misordered_fields(entries, HEADER_FIELDS)
def meets_consensus_method(self, method):
return bool(self.consensus_method >= method or filter(lambda x: x >= method, self.consensus_methods))
def _parse(self, entries, validate):
for keyword, values in entries.items():
value, _ = values[0]
line = "%s %s" % (keyword, value)
      # all known header fields can only appear once
if validate and len(values) > 1 and keyword in HEADER_FIELDS:
raise ValueError("Network status documents can only have a single '%s' line, got %i" % (keyword, len(values)))
if keyword == 'network-status-version':
# "network-status-version" version
if ' ' in value:
version, flavor = value.split(' ', 1)
else:
version, flavor = value, None
if not version.isdigit():
if not validate: continue
raise ValueError("Network status document has a non-numeric version: %s" % line)
self.version = int(version)
self.version_flavor = flavor
self.is_microdescriptor = flavor == 'microdesc'
if validate and self.version != 3:
raise ValueError("Expected a version 3 network status document, got version '%s' instead" % self.version)
elif keyword == 'vote-status':
# "vote-status" type
#
# The consensus-method and consensus-methods fields are optional since
# they weren't included in version 1. Setting a default now that we
# know if we're a vote or not.
if value == 'consensus':
self.is_consensus, self.is_vote = True, False
self.consensus_method = 1
elif value == 'vote':
self.is_consensus, self.is_vote = False, True
self.consensus_methods = [1]
elif validate:
raise ValueError("A network status document's vote-status line can only be 'consensus' or 'vote', got '%s' instead" % value)
elif keyword == 'consensus-methods':
# "consensus-methods" IntegerList
consensus_methods = []
for entry in value.split(" "):
if entry.isdigit():
consensus_methods.append(int(entry))
elif validate:
raise ValueError("A network status document's consensus-methods must be a list of integer values, but was '%s'" % value)
self.consensus_methods = consensus_methods
if validate and not (1 in self.consensus_methods):
raise ValueError("Network status votes must include consensus-method version 1")
elif keyword == 'consensus-method':
# "consensus-method" Integer
if value.isdigit():
self.consensus_method = int(value)
elif validate:
raise ValueError("A network status document's consensus-method must be an integer, but was '%s'" % value)
elif keyword in ('published', 'valid-after', 'fresh-until', 'valid-until'):
try:
date_value = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
if keyword == 'published':
self.published = date_value
elif keyword == 'valid-after':
self.valid_after = date_value
elif keyword == 'fresh-until':
self.fresh_until = date_value
elif keyword == 'valid-until':
self.valid_until = date_value
except ValueError:
if validate:
raise ValueError("Network status document's '%s' time wasn't parseable: %s" % (keyword, value))
elif keyword == "voting-delay":
# "voting-delay" VoteSeconds DistSeconds
value_comp = value.split(' ')
if len(value_comp) == 2 and value_comp[0].isdigit() and value_comp[1].isdigit():
self.vote_delay = int(value_comp[0])
self.dist_delay = int(value_comp[1])
elif validate:
raise ValueError("A network status document's 'voting-delay' line must be a pair of integer values, but was '%s'" % value)
elif keyword in ("client-versions", "server-versions"):
for entry in value.split(","):
try:
version_value = stem.version.Version(entry)
if keyword == 'client-versions':
self.client_versions.append(version_value)
elif keyword == 'server-versions':
self.server_versions.append(version_value)
except ValueError:
if validate:
raise ValueError("Network status document's '%s' line had '%s', which isn't a parseable tor version: %s" % (keyword, entry, line))
elif keyword == "known-flags":
# "known-flags" FlagList
# simply fetches the entries, excluding empty strings
self.known_flags = [entry for entry in value.split(" ") if entry]
elif keyword == "params":
# "params" [Parameters]
# Parameter ::= Keyword '=' Int32
# Int32 ::= A decimal integer between -2147483648 and 2147483647.
# Parameters ::= Parameter | Parameters SP Parameter
# should only appear in consensus-method 7 or later
if validate and not self.meets_consensus_method(7):
raise ValueError("A network status document's 'params' line should only appear in consensus-method 7 or later")
# skip if this is a blank line
if value == "": continue
self.params.update(_parse_int_mappings(keyword, value, validate))
if validate:
self._check_params_constraints()
else:
self._unrecognized_lines.append(line)
def _check_params_constraints(self):
"""
Checks that the params we know about are within their documented ranges.
"""
for key, value in self.params.items():
# all parameters are constrained to int32 range
minimum, maximum = -2147483648, 2147483647
if key == "circwindow":
minimum, maximum = 100, 1000
elif key == "CircuitPriorityHalflifeMsec":
minimum = -1
elif key in ("perconnbwrate", "perconnbwburst"):
minimum = 1
elif key == "refuseunknownexits":
minimum, maximum = 0, 1
elif key == "bwweightscale":
minimum = 1
elif key == "cbtdisabled":
minimum, maximum = 0, 1
elif key == "cbtnummodes":
minimum, maximum = 1, 20
elif key == "cbtrecentcount":
minimum, maximum = 3, 1000
elif key == "cbtmaxtimeouts":
minimum, maximum = 3, 10000
elif key == "cbtmincircs":
minimum, maximum = 1, 10000
elif key == "cbtquantile":
minimum, maximum = 10, 99
elif key == "cbtclosequantile":
minimum, maximum = self.params.get("cbtquantile", minimum), 99
elif key == "cbttestfreq":
minimum = 1
elif key == "cbtmintimeout":
minimum = 500
elif key == "cbtinitialtimeout":
minimum = self.params.get("cbtmintimeout", minimum)
if value < minimum or value > maximum:
raise ValueError("'%s' value on the params line must be in the range of %i - %i, was %i" % (key, minimum, maximum, value))
class _DocumentFooter(object):
def __init__(self, document_file, validate, header):
self.signatures = []
self.bandwidth_weights = {}
self._unrecognized_lines = []
content = document_file.read()
if validate and content and not header.meets_consensus_method(9):
raise ValueError("Network status document's footer should only appear in consensus-method 9 or later")
elif not content and not header.meets_consensus_method(9):
return # footer is optional and there's nothing to parse
entries = stem.descriptor._get_descriptor_components(content, validate)
self._parse(entries, validate, header)
if validate:
_check_for_missing_and_disallowed_fields(header, entries, FOOTER_STATUS_DOCUMENT_FIELDS)
_check_for_misordered_fields(entries, FOOTER_FIELDS)
def _parse(self, entries, validate, header):
for keyword, values in entries.items():
value, block_contents = values[0]
line = "%s %s" % (keyword, value)
# all known footer fields can only appear once except...
# * 'directory-signature' in a consensus
if validate and len(values) > 1 and keyword in FOOTER_FIELDS:
if not (keyword == 'directory-signature' and header.is_consensus):
raise ValueError("Network status documents can only have a single '%s' line, got %i" % (keyword, len(values)))
if keyword == "directory-footer":
# nothing to parse, simply checking that we don't have a value
if validate and value:
raise ValueError("A network status document's 'directory-footer' line shouldn't have any content, got '%s'" % line)
elif keyword == "bandwidth-weights":
self.bandwidth_weights = _parse_int_mappings(keyword, value, validate)
if validate:
weight_keys = tuple(sorted(self.bandwidth_weights.keys()))
if weight_keys != BANDWIDTH_WEIGHT_ENTRIES:
expected_label = ', '.join(BANDWIDTH_WEIGHT_ENTRIES)
actual_label = ', '.join(weight_keys)
raise ValueError("A network status document's 'bandwidth-weights' entries should be '%s', got '%s'" % (expected_label, actual_label))
elif keyword == "directory-signature":
for sig_value, block_contents in values:
if not header.is_microdescriptor:
expected_spaces = 1
format_label = 'directory-signature FINGERPRINT KEY_DIGEST'
else:
expected_spaces = 2
format_label = 'directory-signature METHOD FINGERPRINT KEY_DIGEST'
if sig_value.count(" ") != expected_spaces or not block_contents:
if not validate: continue
raise ValueError("Authority signatures in a network status document are expected to be of the form '%s\\nSIGNATURE', got:\n%s\n%s" % (format_label, sig_value, block_contents))
if not header.is_microdescriptor:
method = None
fingerprint, key_digest = sig_value.split(" ", 1)
else:
method, fingerprint, key_digest = sig_value.split(" ", 2)
self.signatures.append(DocumentSignature(method, fingerprint, key_digest, block_contents, validate))
def _check_for_missing_and_disallowed_fields(header, entries, fields):
"""
Checks that we have mandatory fields for our type, and that we don't have
any fields exclusive to the other (ie, no vote-only fields appear in a
consensus or vice versa).
:param _DocumentHeader header: document header
:param dict entries: ordered keyword/value mappings of the header or footer
:param list fields: expected field attributes (either
**HEADER_STATUS_DOCUMENT_FIELDS** or **FOOTER_STATUS_DOCUMENT_FIELDS**)
:raises: **ValueError** if we're missing mandatory fields or have fields we shouldn't
"""
missing_fields, disallowed_fields = [], []
for field, in_votes, in_consensus, mandatory in fields:
if mandatory and ((header.is_consensus and in_consensus) or (header.is_vote and in_votes)):
# mandatory field, check that we have it
if not field in entries.keys():
missing_fields.append(field)
elif (header.is_consensus and not in_consensus) or (header.is_vote and not in_votes):
# field we shouldn't have, check that we don't
if field in entries.keys():
disallowed_fields.append(field)
if missing_fields:
raise ValueError("Network status document is missing mandatory field: %s" % ', '.join(missing_fields))
if disallowed_fields:
raise ValueError("Network status document has fields that shouldn't appear in this document type or version: %s" % ', '.join(disallowed_fields))
def _check_for_misordered_fields(entries, expected):
"""
  To be valid a network status document's fields need to appear in a specific
order. Checks that known fields appear in that order (unrecognized fields
are ignored).
:param dict entries: ordered keyword/value mappings of the header or footer
:param list expected: ordered list of expected fields (either
**HEADER_FIELDS** or **FOOTER_FIELDS**)
:raises: **ValueError** if entries aren't properly ordered
"""
# Earlier validation has ensured that our fields either belong to our
# document type or are unknown. Remove the unknown fields since they
# reflect a spec change and can appear anywhere in the document.
actual = filter(lambda field: field in expected, entries.keys())
# Narrow the expected to just what we have. If the lists then match then the
# order's valid.
expected = filter(lambda field: field in actual, expected)
if actual != expected:
actual_label = ', '.join(actual)
expected_label = ', '.join(expected)
raise ValueError("The fields in a section of the document are misordered. It should be '%s' but was '%s'" % (actual_label, expected_label))
def _parse_int_mappings(keyword, value, validate):
# Parse a series of 'key=value' entries, checking the following:
# - values are integers
# - keys are sorted in lexical order
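  # For instance (illustrative values, not from a real consensus):
  #   _parse_int_mappings('params', 'circwindow=80 refuseunknownexits=1', True)
  #   => {'circwindow': 80, 'refuseunknownexits': 1}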
results, seen_keys = {}, []
for entry in value.split(" "):
try:
if not '=' in entry:
raise ValueError("must only have 'key=value' entries")
entry_key, entry_value = entry.split("=", 1)
try:
# the int() function accepts things like '+123', but we don't want to
if entry_value.startswith('+'):
raise ValueError()
entry_value = int(entry_value)
except ValueError:
raise ValueError("'%s' is a non-numeric value" % entry_value)
if validate:
# parameters should be in ascending order by their key
for prior_key in seen_keys:
if prior_key > entry_key:
raise ValueError("parameters must be sorted by their key")
results[entry_key] = entry_value
seen_keys.append(entry_key)
except ValueError, exc:
if not validate: continue
raise ValueError("Unable to parse network status document's '%s' line (%s): %s'" % (keyword, exc, value))
return results
class DirectoryAuthority(stem.descriptor.Descriptor):
"""
Directory authority information obtained from a v3 network status document.
:var str nickname: **\*** authority's nickname
:var str fingerprint: **\*** authority's fingerprint
:var str hostname: **\*** hostname of the authority
:var str address: **\*** authority's IP address
:var int dir_port: **\*** authority's DirPort
:var int or_port: **\*** authority's ORPort
:var str contact: **\*** contact information
**Consensus Attributes:**
:var str vote_digest: **\*** digest of the authority that contributed to the consensus
**Vote Attributes:**
  :var str legacy_dir_key: fingerprint of an obsolete identity key
:var stem.descriptor.networkstatus.KeyCertificate key_certificate: **\***
authority's key certificate
**\*** mandatory attribute
"""
def __init__(self, raw_content, validate = True, is_vote = False):
"""
Parse a directory authority entry in a v3 network status document.
:param str raw_content: raw directory authority entry information
:param bool validate: checks the validity of the content if True, skips
these checks otherwise
:param bool is_vote: True if this is for a vote, False if it's for a consensus
:raises: ValueError if the descriptor data is invalid
"""
super(DirectoryAuthority, self).__init__(raw_content)
self.nickname = None
self.fingerprint = None
self.hostname = None
self.address = None
self.dir_port = None
self.or_port = None
self.contact = None
self.vote_digest = None
self.legacy_dir_key = None
self.key_certificate = None
self._unrecognized_lines = []
self._parse(raw_content, validate, is_vote)
def _parse(self, content, validate, is_vote):
"""
Parses the given content and applies the attributes.
:param str content: descriptor content
:param bool validate: checks validity if True
:param bool is_vote: **True** if this is for a vote, **False** if it's for
a consensus
:raises: **ValueError** if a validity check fails
"""
# separate the directory authority entry from its key certificate
key_div = content.find('\ndir-key-certificate-version')
if key_div != -1:
key_cert_content = content[key_div + 1:]
content = content[:key_div + 1]
else:
key_cert_content = None
entries = stem.descriptor._get_descriptor_components(content, validate)
if validate and 'dir-source' != entries.keys()[0]:
raise ValueError("Authority entries are expected to start with a 'dir-source' line:\n%s" % (content))
# check that we have mandatory fields
if validate:
required_fields, excluded_fields = ["dir-source", "contact"], []
if is_vote:
if not key_cert_content:
raise ValueError("Authority votes must have a key certificate:\n%s" % content)
excluded_fields += ["vote-digest"]
elif not is_vote:
if key_cert_content:
raise ValueError("Authority consensus entries shouldn't have a key certificate:\n%s" % content)
required_fields += ["vote-digest"]
excluded_fields += ["legacy-dir-key"]
for keyword in required_fields:
if not keyword in entries:
raise ValueError("Authority entries must have a '%s' line:\n%s" % (keyword, content))
for keyword in entries:
if keyword in excluded_fields:
type_label = "votes" if is_vote else "consensus entries"
raise ValueError("Authority %s shouldn't have a '%s' line:\n%s" % (type_label, keyword, content))
for keyword, values in entries.items():
value, _ = values[0]
line = "%s %s" % (keyword, value)
# all known attributes can only appear at most once
if validate and len(values) > 1 and keyword in ('dir-source', 'contact', 'legacy-dir-key', 'vote-digest'):
raise ValueError("Authority entries can only have a single '%s' line, got %i:\n%s" % (keyword, len(values), content))
if keyword == 'dir-source':
# "dir-source" nickname identity address IP dirport orport
dir_source_comp = value.split(" ")
if len(dir_source_comp) < 6:
if not validate: continue
raise ValueError("Authority entry's 'dir-source' line must have six values: %s" % line)
if validate:
if not stem.util.tor_tools.is_valid_nickname(dir_source_comp[0]):
raise ValueError("Authority's nickname is invalid: %s" % dir_source_comp[0])
elif not stem.util.tor_tools.is_valid_fingerprint(dir_source_comp[1]):
raise ValueError("Authority's fingerprint is invalid: %s" % dir_source_comp[1])
elif not dir_source_comp[2]:
# https://trac.torproject.org/7055
raise ValueError("Authority's hostname can't be blank: %s" % line)
elif not stem.util.connection.is_valid_ip_address(dir_source_comp[3]):
raise ValueError("Authority's address isn't a valid IPv4 address: %s" % dir_source_comp[3])
elif not stem.util.connection.is_valid_port(dir_source_comp[4], allow_zero = True):
raise ValueError("Authority's DirPort is invalid: %s" % dir_source_comp[4])
elif not stem.util.connection.is_valid_port(dir_source_comp[5]):
raise ValueError("Authority's ORPort is invalid: %s" % dir_source_comp[5])
elif not (dir_source_comp[4].isdigit() and dir_source_comp[5].isdigit()):
continue
self.nickname = dir_source_comp[0]
self.fingerprint = dir_source_comp[1]
self.hostname = dir_source_comp[2]
self.address = dir_source_comp[3]
self.dir_port = None if dir_source_comp[4] == '0' else int(dir_source_comp[4])
self.or_port = int(dir_source_comp[5])
elif keyword == 'contact':
# "contact" string
self.contact = value
elif keyword == 'legacy-dir-key':
# "legacy-dir-key" FINGERPRINT
if validate and not stem.util.tor_tools.is_valid_fingerprint(value):
raise ValueError("Authority has a malformed legacy directory key: %s" % line)
self.legacy_dir_key = value
elif keyword == 'vote-digest':
# "vote-digest" digest
# technically not a fingerprint, but has the same characteristics
if validate and not stem.util.tor_tools.is_valid_fingerprint(value):
raise ValueError("Authority has a malformed vote digest: %s" % line)
self.vote_digest = value
else:
self._unrecognized_lines.append(line)
if key_cert_content:
self.key_certificate = KeyCertificate(key_cert_content, validate)
def get_unrecognized_lines(self):
"""
Returns any unrecognized lines.
:returns: a list of unrecognized lines
"""
return self._unrecognized_lines
def __cmp__(self, other):
if not isinstance(other, DirectoryAuthority):
return 1
return str(self) > str(other)
class KeyCertificate(stem.descriptor.Descriptor):
"""
Directory key certificate for a v3 network status document.
:var int version: **\*** version of the key certificate
:var str address: authority's IP address
:var int dir_port: authority's DirPort
:var str fingerprint: **\*** authority's fingerprint
:var str identity_key: **\*** long term authority identity key
:var datetime published: **\*** time when this key was generated
:var datetime expires: **\*** time after which this key becomes invalid
:var str signing_key: **\*** directory server's public signing key
:var str crosscert: signature made using certificate's signing key
:var str certification: **\*** signature of this key certificate signed with
the identity key
**\*** mandatory attribute
"""
def __init__(self, raw_content, validate = True):
super(KeyCertificate, self).__init__(raw_content)
self.version = None
self.address = None
self.dir_port = None
self.fingerprint = None
self.identity_key = None
self.published = None
self.expires = None
self.signing_key = None
self.crosscert = None
self.certification = None
self._unrecognized_lines = []
self._parse(raw_content, validate)
def _parse(self, content, validate):
"""
Parses the given content and applies the attributes.
:param str content: descriptor content
:param bool validate: checks validity if **True**
:raises: **ValueError** if a validity check fails
"""
entries = stem.descriptor._get_descriptor_components(content, validate)
if validate:
if 'dir-key-certificate-version' != entries.keys()[0]:
raise ValueError("Key certificates must start with a 'dir-key-certificate-version' line:\n%s" % (content))
elif 'dir-key-certification' != entries.keys()[-1]:
raise ValueError("Key certificates must end with a 'dir-key-certification' line:\n%s" % (content))
# check that we have mandatory fields and that our known fields only
# appear once
for keyword, is_mandatory in KEY_CERTIFICATE_PARAMS:
if is_mandatory and not keyword in entries:
raise ValueError("Key certificates must have a '%s' line:\n%s" % (keyword, content))
entry_count = len(entries.get(keyword, []))
if entry_count > 1:
raise ValueError("Key certificates can only have a single '%s' line, got %i:\n%s" % (keyword, entry_count, content))
for keyword, values in entries.items():
value, block_contents = values[0]
line = "%s %s" % (keyword, value)
if keyword == 'dir-key-certificate-version':
# "dir-key-certificate-version" version
if not value.isdigit():
if not validate: continue
raise ValueError("Key certificate has a non-integer version: %s" % line)
self.version = int(value)
if validate and self.version != 3:
raise ValueError("Expected a version 3 key certificate, got version '%i' instead" % self.version)
elif keyword == 'dir-address':
# "dir-address" IPPort
if not ':' in value:
if not validate: continue
raise ValueError("Key certificate's 'dir-address' is expected to be of the form ADDRESS:PORT: %s" % line)
address, dirport = value.split(':', 1)
if validate:
if not stem.util.connection.is_valid_ip_address(address):
raise ValueError("Key certificate's address isn't a valid IPv4 address: %s" % line)
elif not stem.util.connection.is_valid_port(dirport):
raise ValueError("Key certificate's dirport is invalid: %s" % line)
elif not dirport.isdigit():
continue
self.address = address
self.dir_port = int(dirport)
elif keyword == 'fingerprint':
# "fingerprint" fingerprint
if validate and not stem.util.tor_tools.is_valid_fingerprint(value):
raise ValueError("Key certificate's fingerprint is malformed: %s" % line)
self.fingerprint = value
elif keyword in ('dir-key-published', 'dir-key-expires'):
# "dir-key-published" YYYY-MM-DD HH:MM:SS
# "dir-key-expires" YYYY-MM-DD HH:MM:SS
try:
date_value = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
if keyword == 'dir-key-published':
self.published = date_value
elif keyword == 'dir-key-expires':
self.expires = date_value
except ValueError:
if validate:
raise ValueError("Key certificate's '%s' time wasn't parseable: %s" % (keyword, value))
elif keyword in ('dir-identity-key', 'dir-signing-key', 'dir-key-crosscert', 'dir-key-certification'):
# "dir-identity-key" NL a public key in PEM format
# "dir-signing-key" NL a key in PEM format
# "dir-key-crosscert" NL CrossSignature
# "dir-key-certification" NL Signature
if validate and not block_contents:
raise ValueError("Key certificate's '%s' line must be followed by a key block: %s" % (keyword, line))
if keyword == 'dir-identity-key':
self.identity_key = block_contents
elif keyword == 'dir-signing-key':
self.signing_key = block_contents
elif keyword == 'dir-key-crosscert':
self.crosscert = block_contents
elif keyword == 'dir-key-certification':
self.certification = block_contents
else:
self._unrecognized_lines.append(line)
def get_unrecognized_lines(self):
"""
Returns any unrecognized lines.
:returns: **list** of unrecognized lines
"""
return self._unrecognized_lines
def __cmp__(self, other):
if not isinstance(other, KeyCertificate):
return 1
return str(self) > str(other)
class DocumentSignature(object):
"""
Directory signature of a v3 network status document.
  :var str method: method used to make the signature; this only appears in
    microdescriptor consensuses
:var str identity: fingerprint of the authority that made the signature
:var str key_digest: digest of the signing key
:var str signature: document signature
:param bool validate: checks validity if **True**
:raises: **ValueError** if a validity check fails
"""
def __init__(self, method, identity, key_digest, signature, validate = True):
# Checking that these attributes are valid. Technically the key
# digest isn't a fingerprint, but it has the same characteristics.
if validate:
if not stem.util.tor_tools.is_valid_fingerprint(identity):
raise ValueError("Malformed fingerprint (%s) in the document signature" % (identity))
if not stem.util.tor_tools.is_valid_fingerprint(key_digest):
raise ValueError("Malformed key digest (%s) in the document signature" % (key_digest))
# TODO: The method field is undocumented so I'm just guessing how we should
# handle it. Ticket for clarification...
# https://trac.torproject.org/7072
self.method = method
self.identity = identity
self.key_digest = key_digest
self.signature = signature
def __cmp__(self, other):
if not isinstance(other, DocumentSignature):
return 1
for attr in ("identity", "key_digest", "signature"):
if getattr(self, attr) > getattr(other, attr): return 1
elif getattr(self, attr) < getattr(other, attr): return -1
return 0
|
reverendhomer/ANUS-Python-Menu
|
l.py
|
#!/usr/bin/env python3
import curses
from CursesMenu import CursesMenu
def foo():
print('baaaang!')
return 0
if __name__ == '__main__':
s = curses.initscr()
m = CursesMenu(s, 'menu')
m.add('one', foo)
m.add('two', foo)
m.add('three', foo)
try:
m.run()
except Exception:
curses.endwin()
raise
else:
curses.endwin()
|
piotr-worotnicki/raspberry-pi-rgb-led-controller
|
led/migrations/0003_auto_20190101_2024.py
|
# Generated by Django 2.1.1 on 2019-01-01 19:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('led', '0002_auto_20181229_2037'),
]
operations = [
migrations.RemoveField(
model_name='currentledstate',
name='blue',
),
migrations.RemoveField(
model_name='currentledstate',
name='green',
),
migrations.RemoveField(
model_name='currentledstate',
name='red',
),
]
|
zzzzzzzlmy/MyLeetCode
|
771. Jewels and Stones.py
|
'''
You're given strings J representing the types of stones that are
jewels, and S representing the stones you have. Each character
in S is a type of stone you have. You want to know how many of
the stones you have are also jewels.
The letters in J are guaranteed distinct, and all characters in
J and S are letters. Letters are case sensitive, so "a" is
considered a different type of stone from "A".
Example 1:
Input: J = "aA", S = "aAAbbbb"
Output: 3
Example 2:
Input: J = "z", S = "ZZ"
Output: 0
Note:
S and J will consist of letters and have length at most 50.
The characters in J are distinct.
'''
class Solution(object):
def numJewelsInStones(self, J, S):
"""
:type J: str
:type S: str
:rtype: int
"""
return sum(s in J for s in S)
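# A minimal sanity check (added for illustration, based on the examples in the
# problem statement above; not part of the original solution):
if __name__ == '__main__':
    solution = Solution()
    assert solution.numJewelsInStones("aA", "aAAbbbb") == 3
    assert solution.numJewelsInStones("z", "ZZ") == 0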
|
coinfee/coinfee-python
|
example/wsgi.py
|
"""
coinfee.net example.
"""
import warnings
from uuid import uuid4 as random_uuid
import jinja2
import coinfee
# You should change this unless you want to make a donation.
ADDRESS = '16jCrzcXo2PxadrQiQwUgwrmEwDGQYBwZq'
# Price in Satoshis. 10,000 or more.
SATOSHIS = 10000
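# For reference, the conversion used for page['bitcoins'] below:
# 10,000 satoshis * 0.00000001 BTC/satoshi = 0.0001 BTC.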
# See deprecation warnings in logs.
warnings.simplefilter('always')
def render(template, page={}):
template = jinja2.Environment(
loader=jinja2.FileSystemLoader('./')
).get_template(template)
return str(template.render(page=page))
def application(env, start_response):
"""
This is where uwsgi calls us.
"""
def reply(status, data, headers=[]):
"""
Need to use this as return reply().
"""
start_response(str(status), headers)
return data
path = env['REQUEST_URI']
if path == '/purchase':
# Generate a random ID for payment.
uuid = str(random_uuid())[:19]
url = '/purchase/{}'.format(uuid)
# Redirect to unique buy URL.
return reply(307, '', [('Location', url)])
if path.startswith('/purchase/'):
page = {}
page['unique'] = path[len('/purchase/'):]
coinfee_payment = coinfee.payment(ADDRESS,
SATOSHIS,
page['unique'])
page['paid'] = coinfee_payment.status
page['address'] = coinfee_payment.address
page['satoshis'] = coinfee_payment.satoshis
page['bitcoins'] = "{0:.8f}".format(page['satoshis'] *
0.00000001)
return reply(200, render('purchase.html', page))
if path == '/':
return reply(200, render('index.html'))
return reply(404, 'Not found.')
|
moniker-dns/contractor
|
contractor/cmd/contractor.py
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from contractor.openstack.common import log as logging
from contractor import runner
from oslo.config import cfg
import sys
CONF = cfg.CONF
CONF.set_default('debug', True)
CONF.set_default('verbose', True)
def main():
environment = str(sys.argv[1])
config = str(sys.argv[2]) if len(sys.argv) > 2 else 'contractor.json'
logging.setup('contractor')
r = runner.Runner(config=config, environment=environment)
r.execute()
|
box/genty
|
test/test_genty_repeat.py
|
# coding: utf-8
from __future__ import unicode_literals
from genty import genty_repeat
from test.test_case_base import TestCase
class GentyRepeatTest(TestCase):
"""Tests for :mod:`box.test.genty.genty_repeat`."""
def test_repeat_decorator_decorates_function_with_appropriate_repeat_count(self):
@genty_repeat(15)
def some_func():
pass
self.assertEqual(15, some_func.genty_repeat_count) # pylint:disable=no-member
def test_repeat_decorator_decorates_method_with_appropriate_repeat_count(self):
class SomeClass(object):
@genty_repeat(13)
def some_func(self):
pass
some_instance = SomeClass()
self.assertEqual(13, some_instance.some_func.genty_repeat_count) # pylint:disable=no-member
def test_repeat_rejects_negative_counts(self):
with self.assertRaises(ValueError) as context:
@genty_repeat(-1)
def _():
pass
self.assertIn('Please pick a value >= 0', str(context.exception))
def test_repeat_allows_zero_iterations(self):
@genty_repeat(0)
def some_func():
pass
self.assertEqual(0, some_func.genty_repeat_count) # pylint:disable=no-member
|
DedMemez/ODS-August-2017
|
toontowngui/FrameColorPicker.py
|
# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.toontowngui.FrameColorPicker
from direct.gui.DirectGui import *
from otp.otpgui.ColorPicker import ColorPicker
from toontown.toonbase import TTLocalizer, ToontownGlobals
class FrameColorPicker(ColorPicker):
def __init__(self, minSat, maxSat, minVal, maxVal, frameCallback, text = TTLocalizer.ChooseAColor):
self.frameCallback = frameCallback
self.pickedColor = None
gui = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
okImage = [ gui.find('**/ChtBx_OKBtn_' + name) for name in ('UP', 'DN', 'Rllvr') ]
self.frame = DirectFrame(relief=None, geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=1.075, text=text, text_scale=0.09, text_pos=(0, 0.4))
self.okButton = DirectButton(self.frame, relief=None, image=okImage, pos=(0, 0, -0.375), text=TTLocalizer.lOK, text_scale=0.06, text_pos=(0, -0.1), command=self.__colorChosen)
ColorPicker.__init__(self, self.frame, minSat, maxSat, minVal, maxVal, self.__changeColor, (0.15, 0, 0.035))
gui.removeNode()
return
def destroy(self):
ColorPicker.destroy(self)
self.frame.destroy()
self.okButton.destroy()
del self.frame
del self.okButton
def __changeColor(self, color):
self.frame['geom_color'] = color
self.pickedColor = color
def __colorChosen(self):
self.frameCallback(self.pickedColor)
self.destroy()
|
karllessard/tensorflow
|
tensorflow/python/data/experimental/kernel_tests/data_service_ops_ft_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.data service ops where servers are started late or preempted."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests import data_service_test_base
from tensorflow.python.data.experimental.ops import data_service_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
TMP_WORK_DIR = data_service_test_base.TMP_WORK_DIR
NO_WORK_DIR = data_service_test_base.NO_WORK_DIR
class DataServiceOpsTest(data_service_test_base.TestBase,
parameterized.TestCase):
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherStop(self):
cluster = self.create_cluster(num_workers=1)
num_elements = 100
ds = self.make_distributed_range_dataset(num_elements, cluster)
iterator = iter(ds)
results = []
results.append(next(iterator).numpy())
cluster.stop_dispatcher()
# After the dispatcher dies, the worker should continue providing the rest
# of the dataset's elements.
for _ in range(num_elements - 1):
results.append(next(iterator).numpy())
self.assertEqual(results, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherRestartBeforeReading(self):
cluster = self.create_cluster(num_workers=1)
num_elements = 100
ds = self.make_distributed_range_dataset(num_elements, cluster)
cluster.restart_dispatcher()
self.assertDatasetProduces(ds, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherRestartDuringReading(self):
cluster = self.create_cluster(num_workers=1)
num_elements = 100
ds = self.make_distributed_range_dataset(num_elements, cluster)
iterator = iter(ds)
results = []
for _ in range(num_elements // 2):
results.append(next(iterator).numpy())
cluster.restart_dispatcher()
for elem in iterator:
results.append(elem.numpy())
self.assertEqual(list(range(num_elements)), results)
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherRestartBetweenIterations(self):
cluster = self.create_cluster(num_workers=1)
num_elements = 100
ds = self.make_distributed_range_dataset(100, cluster)
self.assertDatasetProduces(ds, list(range(num_elements)))
cluster.restart_dispatcher()
self.assertDatasetProduces(ds, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherManyRestarts(self):
cluster = self.create_cluster(num_workers=1)
num_elements_start = 10
num_elements_end = 15
datasets = []
for num_elements in range(num_elements_start, num_elements_end):
datasets.append(
self.make_distributed_range_dataset(num_elements, cluster))
cluster.restart_dispatcher()
for ds, num_elements in zip(datasets,
range(num_elements_start, num_elements_end)):
self.assertDatasetProduces(ds, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherAndWorkerRestart(self):
cluster = self.create_cluster(num_workers=1)
num_elements = 100
ds = self.make_distributed_range_dataset(num_elements, cluster)
cluster.restart_dispatcher()
cluster.restart_worker()
self.assertDatasetProduces(ds, list(range(num_elements)))
cluster.restart_dispatcher()
cluster.restart_worker()
self.assertDatasetProduces(ds, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testDispatcherAndMultiWorkerRestart(self):
num_workers = 2
cluster = self.create_cluster(num_workers=num_workers)
num_elements = 100
ds = self.make_distributed_range_dataset(num_elements, cluster)
iterator = iter(ds)
results = []
cluster.restart_dispatcher()
for worker_index in range(num_workers):
cluster.restart_worker(worker_index=worker_index)
for elem in iterator:
results.append(elem.numpy())
self.assertCountEqual(num_workers * list(range(num_elements)), results)
cluster.restart_dispatcher()
for worker_index in range(num_workers):
cluster.restart_worker(worker_index=worker_index)
for elem in iterator:
results.append(elem.numpy())
self.assertCountEqual(num_workers * list(range(num_elements)), results)
@combinations.generate(test_base.eager_only_combinations())
def testStartServersLate(self):
# Test that the data service client performs retries instead of failing when
# the dataset is created before the master and worker are started.
try:
import portpicker # pylint: disable=g-import-not-at-top
dispatcher_port = portpicker.pick_unused_port()
except:
raise self.skipTest("Flakes in portpicker library do not represent "
"TensorFlow errors.")
cluster = self.create_cluster(
num_workers=1, dispatcher_port=dispatcher_port, start=False)
def start_servers():
time.sleep(0.5)
cluster.start_dispatcher()
cluster.start_workers()
start_servers_thread = threading.Thread(target=start_servers, daemon=True)
start_servers_thread.start()
num_elements = 10
ds = self.make_distributed_range_dataset(num_elements, cluster)
results = [elem.numpy() for elem in ds]
self.assertEqual(list(range(num_elements)), results)
start_servers_thread.join()
@combinations.generate(test_base.eager_only_combinations())
def testAddWorkerMidJob(self):
cluster = self.create_cluster(num_workers=1)
num_elements = 100
ds = self.make_distributed_range_dataset(num_elements, cluster)
iterator = iter(ds)
results = []
# Read halfway through the dataset.
for _ in range(num_elements // 2):
results.append(next(iterator).numpy())
cluster.add_worker()
# Wait for the new worker to register with the dispatcher.
while cluster.num_registered_workers() < 2:
time.sleep(10 / 1000) # 10ms
for elem in iterator:
results.append(elem.numpy())
self.assertCountEqual(2 * list(range(num_elements)), results)
@combinations.generate(
combinations.times(test_base.eager_only_combinations(),
combinations.combine(use_same_port=[True, False]),
data_service_test_base.all_cluster_configurations()))
def testRestartWorker(self, use_same_port, work_dir, fault_tolerant_mode):
cluster = self.create_cluster(
num_workers=1,
work_dir=work_dir,
fault_tolerant_mode=fault_tolerant_mode)
num_elements = 100
ds = self.make_distributed_range_dataset(num_elements, cluster)
iterator = iter(ds)
# Read halfway through the dataset.
midpoint = num_elements // 2
for i in range(midpoint):
self.assertEqual(i, next(iterator).numpy())
# Stop the original worker and start a new one.
cluster.restart_worker(use_same_port=use_same_port)
# There may have been some elements prefetched from the first worker
# before it was stopped.
while True:
val = next(iterator).numpy()
if val == 0:
break
# The dataset starts over now that we read from the new worker.
# TODO(b/157086991): Iterate until end of sequence when we support
# detecting lost workers.
for i in range(1, num_elements // 2):
val = next(iterator).numpy()
self.assertEqual(i, val)
@combinations.generate(test_base.eager_only_combinations())
def testChangeProcessingModeAfterRestart(self):
cluster = self.create_cluster(num_workers=1)
num_elements = 100
range_dataset = dataset_ops.Dataset.range(num_elements)
ds = range_dataset.apply(
data_service_ops.distribute(
processing_mode="parallel_epochs",
service=cluster.target,
job_name="test"))
iterator = iter(ds)
for i in range(num_elements // 2):
self.assertEqual(i, next(iterator).numpy())
cluster.restart_dispatcher()
ds = range_dataset.apply(
data_service_ops.distribute(
processing_mode="distributed_epoch",
service=cluster.target,
job_name="test"))
with self.assertRaisesOpError("already an existing job with that name "
"using processing mode <parallel_epochs>"):
next(iter(ds)).numpy()
@combinations.generate(
combinations.times(
test_base.eager_only_combinations(),
combinations.combine(work_dir=[TMP_WORK_DIR, NO_WORK_DIR])))
def testDistributeLargeGraphThenRegisterWorker(self, work_dir):
cluster = self.create_cluster(
num_workers=0, work_dir=work_dir, fault_tolerant_mode=False)
# Larger than default OSS grpc message size limit of 4MB.
tensor = array_ops.ones((2, 1000, 1000), dtype=dtypes.float32)
ds = dataset_ops.Dataset.from_tensors(tensor)
ds = self.make_distributed_dataset(ds, cluster)
it = iter(ds)
cluster.add_worker()
self.assertAllEqual(next(it), tensor)
if __name__ == "__main__":
test.main()
|
foxish/test-infra
|
boskos/janitor/janitor.py
|
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Clean up resources from gcp projects. """
import argparse
import collections
import datetime
import json
import os
import subprocess
import sys
# A resource type that needs to be cleaned up.
Resource = collections.namedtuple('Resource', 'group name subgroup condition managed tolerate')
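# Field notes (inferred from how the fields are used below):
#   group     - gcloud command group, e.g. 'compute' or 'logging'
#   name      - resource name passed to gcloud, e.g. 'instances'
#   subgroup  - optional sub-command, e.g. 'subnets' for networks
#   condition - scoping key ('zone' or 'region') used to group and delete
#               resources; None or '' for global resources
#   managed   - 'Yes'/'No' to split managed vs unmanaged instance-groups,
#               otherwise None
#   tolerate  - if True, deletion failures are not counted as errors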
DEMOLISH_ORDER = [
# [WARNING FROM KRZYZACY] : TOUCH THIS WITH CARE!
# ORDER REALLY MATTERS HERE!
# compute resources
Resource('compute', 'instances', None, 'zone', None, False),
Resource('compute', 'addresses', None, 'region', None, False),
Resource('compute', 'disks', None, 'zone', None, False),
Resource('compute', 'firewall-rules', None, None, None, False),
Resource('compute', 'routes', None, None, None, False),
Resource('compute', 'forwarding-rules', None, 'region', None, False),
Resource('compute', 'target-http-proxies', None, None, None, False),
Resource('compute', 'target-https-proxies', None, None, None, False),
Resource('compute', 'url-maps', None, None, None, False),
Resource('compute', 'backend-services', None, 'region', None, False),
Resource('compute', 'target-pools', None, 'region', None, False),
Resource('compute', 'health-checks', None, None, None, False),
Resource('compute', 'http-health-checks', None, None, None, False),
Resource('compute', 'instance-groups', None, 'zone', 'Yes', False),
Resource('compute', 'instance-groups', None, 'zone', 'No', False),
Resource('compute', 'instance-templates', None, None, None, False),
Resource('compute', 'networks', 'subnets', 'region', None, True),
Resource('compute', 'networks', None, '', None, False),
Resource('compute', 'routes', None, None, None, False),
# logging resources
# sinks does not have creationTimestamp yet
#Resource('logging', 'sinks', None, None, None, False),
]
def collect(project, age, resource, filt):
""" Collect a list of resources for each condition (zone or region).
Args:
project: The name of a gcp project.
age: Time cutoff from the creation of a resource.
resource: Definition of a type of gcloud resource.
filt: Filter clause for gcloud list command.
Returns:
A dict mapping condition (zone/region value) to a list of resource names.
Raises:
ValueError if json result from gcloud is invalid.
"""
col = collections.defaultdict(list)
cmd = ['gcloud', resource.group, '-q', resource.name]
if resource.subgroup:
cmd.append(resource.subgroup)
cmd.extend([
'list',
'--format=json(name,creationTimestamp.date(tz=UTC),zone,region,isManaged)',
'--filter=%s' % filt,
'--project=%s' % project])
print '%r' % cmd
for item in json.loads(subprocess.check_output(cmd)):
print '%r' % item
if 'name' not in item or 'creationTimestamp' not in item:
raise ValueError('%r' % item)
if resource.condition and resource.condition in item:
colname = item[resource.condition]
else:
colname = ''
if resource.managed:
if 'isManaged' not in item:
raise ValueError(resource.name, resource.managed)
else:
if resource.managed != item['isManaged']:
continue
# Unify datetime to use utc timezone.
created = datetime.datetime.strptime(item['creationTimestamp'], '%Y-%m-%dT%H:%M:%S')
print ('Found %r(%r), %r in %r, created time = %r' %
(resource.name, resource.subgroup, item['name'], colname, item['creationTimestamp']))
if created < age:
print ('Added to janitor list: %r(%r), %r' %
(resource.name, resource.subgroup, item['name']))
col[colname].append(item['name'])
return col
def clear_resources(project, cols, resource, rate_limit):
"""Clear a collection of resource, from collect func above.
Args:
project: The name of a gcp project.
cols: A dict of collections of resources, as returned by collect().
resource: Definition of a type of gcloud resource.
rate_limit: Maximum number of resources to delete per gcloud delete call.
Returns:
0 if no error
1 if deletion command fails
"""
err = 0
for col, items in cols.items():
if ARGS.dryrun:
print ('Resource type %r(%r) to be deleted: %r' %
(resource.name, resource.subgroup, list(items)))
continue
manage_key = {'Yes':'managed', 'No':'unmanaged'}
# construct the customized gcloud command
base = ['gcloud', resource.group, '-q', resource.name]
if resource.subgroup:
base.append(resource.subgroup)
if resource.managed:
base.append(manage_key[resource.managed])
base.append('delete')
base.append('--project=%s' % project)
if resource.condition:
if col:
base.append('--%s=%s' % (resource.condition, col))
else:
base.append('--global')
print 'going to delete %d %s' % (len(items), resource.name)
# try to delete at most $rate_limit items at a time
for idx in xrange(0, len(items), rate_limit):
clean = items[idx:idx+rate_limit]
print 'Call %r' % (base + list(clean))
try:
subprocess.check_call(base + list(clean))
except subprocess.CalledProcessError as exc:
if not resource.tolerate:
err = 1
print >>sys.stderr, 'Error trying to delete resources: %r' % exc
return err
def clean_gke_cluster(project, age, filt):
"""Clean up potential leaking gke cluster"""
# a cluster can be created in one of those three endpoints
endpoints = [
'https://test-container.sandbox.googleapis.com/', # test
'https://staging-container.sandbox.googleapis.com/', # staging
'https://container.googleapis.com/', # prod
]
err = 0
for endpoint in endpoints:
os.environ['CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER'] = endpoint
print "checking endpoint %s" % endpoint
cmd = [
'gcloud', 'container', '-q', 'clusters', 'list',
'--project=%s' % project,
'--filter=%s' % filt,
'--format=json(name,createTime,zone)'
]
print 'running %s' % cmd
output = ''
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as exc:
print >>sys.stderr, 'Cannot reach endpoint %s with %r, continuing' % (endpoint, exc)
continue
for item in json.loads(output):
print 'cluster info: %r' % item
if 'name' not in item or 'createTime' not in item or 'zone' not in item:
print >>sys.stderr, 'name, createTime and zone must be present'
raise ValueError('%r' % item)
# The raw createTime string looks like 2017-08-30T18:33:14+00:00
# which Python 2.7's strptime cannot parse (it has no timezone support).
# Since age is already in UTC we just strip the timezone part.
item['createTime'] = item['createTime'].split('+')[0]
created = datetime.datetime.strptime(
item['createTime'], '%Y-%m-%dT%H:%M:%S')
if created < age:
print ('Found stale gke cluster %r in %r, created time = %r' %
(item['name'], endpoint, item['createTime']))
delete = [
'gcloud', 'container', '-q', 'clusters', 'delete',
item['name'],
'--project=%s' % project,
'--zone=%s' % item['zone'],
]
try:
print 'running %s' % delete
subprocess.check_call(delete)
except subprocess.CalledProcessError as exc:
err = 1
print >>sys.stderr, 'Error trying to delete cluster %s: %r' % (item['name'], exc)
return err
def main(project, days, hours, filt, rate_limit):
""" Clean up resources from a gcp project based on it's creation time
Args:
project: The name of a gcp project.
days/hours: days/hours of maximum lifetime of a gcp resource.
filt: Resource instance filter applied when querying.
Returns:
0 if no error
1 if list or delete command fails
"""
print '[=== Start Janitor on project %r ===]' % project
err = 0
age = datetime.datetime.utcnow() - datetime.timedelta(days=days, hours=hours)
for res in DEMOLISH_ORDER:
print 'Try to search for %r with condition %r' % (res.name, res.condition)
try:
col = collect(project, age, res, filt)
if col:
err |= clear_resources(project, col, res, rate_limit)
except (subprocess.CalledProcessError, ValueError):
err |= 1  # keep cleaning the other resources
print >>sys.stderr, 'Failed to list resource %r from project %r' % (res.name, project)
# try to clean leaking gke cluster
try:
err |= clean_gke_cluster(project, age, filt)
except ValueError:
err |= 1  # keep cleaning the other resources
print >>sys.stderr, 'Failed to clean up cluster from project %r' % project
print '[=== Finish Janitor on project %r with status %r ===]' % (project, err)
sys.exit(err)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(
description='Clean up resources from an expired project')
PARSER.add_argument('--project', help='Project to clean', required=True)
PARSER.add_argument(
'--days', type=int,
help='Clean items more than --days old (added to --hours)')
PARSER.add_argument(
'--hours', type=float,
help='Clean items more than --hours old (added to --days)')
PARSER.add_argument(
'--filter',
default='name !~ ^default',
help='Filter down to these instances')
PARSER.add_argument(
'--dryrun',
default=False,
action='store_true',
help='list but not delete resources')
PARSER.add_argument(
'--ratelimit', type=int, default=50,
help='Max number of resources to bulk clear in one gcloud delete call')
ARGS = PARSER.parse_args()
# We want to allow --days=0 and --hours=0, so check against None instead.
if ARGS.days is None and ARGS.hours is None:
print >>sys.stderr, 'must specify --days and/or --hours'
sys.exit(1)
main(ARGS.project, ARGS.days or 0, ARGS.hours or 0, ARGS.filter, ARGS.ratelimit)
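# Example invocation (project name and values are placeholders, not canonical):
#   python janitor.py --project=my-gcp-project --days=1 --hours=12 \
#       --filter='name !~ ^default' --ratelimit=50 --dryrun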
|
yuanlisky/work
|
suffixDict/useHanLP_segPro.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@time: 2017/2/20 9:18
@author: yl
"""
import re
# Post-process HanLP segmentation output: (1) filter out single-character words;
# (2) keep only words whose POS tag is 'ns', 'nsf' or 'nz'.
class SegPro(object):
def __init__(self):
pass
def process(self, sourcefile, resultfile, tag, filterlength=1):
'''
:param sourcefile: file containing HanLP segmentation results
:param resultfile: txt file to which the processed HanLP results are written
:param tag: list of POS tags to keep; words whose tag is not in the list are filtered out
:param filterlength: words whose length equals filterlength are filtered out
:return:
'''
f = open(sourcefile, 'rb')
wr = open(resultfile, 'a+', encoding='utf8')
t = tag
for lineno, line in enumerate(f, 1):
line = line.strip().decode('utf-8')
s = line.split(' ')[0]
r = re.sub("([^\u4E00-\u9FD5])", '', s)
if len(r) == filterlength:
continue
# if ('/nt' in s or '/nz' in s or '/ns' in s or '/nsf' in s):
if self.tagging_filter(s, t):
wr.write(s + '\n')
print('Processing line: ', lineno)
f.close()
wr.close()
print('Done!')
def tagging_filter(self, s, tag):
for x in tag:
if x in s:
return 1
return 0
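# Note on the POS tags used in the example below: in HanLP's tag set these are
# generally 'ns' (place name), 'nsf' (transliterated place name),
# 'nt' (organization name) and 'nz' (other proper noun); treat this mapping as
# an informal gloss rather than authoritative documentation.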
if __name__ == '__main__':
tag = ['/nt', '/nz', '/ns', '/nsf']
segpro = SegPro()
segpro.process('./result/BJplacePro.txt', './BJ1.txt', tag, filterlength=1)
|
skuda/client-python
|
kubernetes/test/test_v1_persistent_volume_claim.py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_persistent_volume_claim import V1PersistentVolumeClaim
class TestV1PersistentVolumeClaim(unittest.TestCase):
""" V1PersistentVolumeClaim unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1PersistentVolumeClaim(self):
"""
Test V1PersistentVolumeClaim
"""
model = kubernetes.client.models.v1_persistent_volume_claim.V1PersistentVolumeClaim()
if __name__ == '__main__':
unittest.main()
|
tensorflow/datasets
|
tensorflow_datasets/question_answering/mctaco_test.py
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mctaco dataset module."""
from tensorflow_datasets import testing
from tensorflow_datasets.question_answering import mctaco
class MctacoTest(testing.DatasetBuilderTestCase):
DATASET_CLASS = mctaco.Mctaco
DL_EXTRACT_RESULT = {
"validation": "dev_3783.tsv",
"test": "test_9942.tsv",
}
SPLITS = {
"validation": 5,
"test": 3,
}
if __name__ == "__main__":
testing.test_main()
|
jamslevy/gsoc
|
thirdparty/google_appengine/google/appengine/api/users.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Python datastore class User to be used as a datastore data type.
Classes defined here:
User: object representing a user.
Error: base exception type
UserNotFoundError: UserService exception
RedirectTooLongError: UserService exception
NotAllowedError: UserService exception
"""
import os
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import user_service_pb
from google.appengine.runtime import apiproxy_errors
class Error(Exception):
"""Base User error type."""
class UserNotFoundError(Error):
"""Raised by User.__init__() when there's no email argument and no user is
logged in."""
class RedirectTooLongError(Error):
"""Raised by UserService calls if the generated redirect URL was too long.
"""
class NotAllowedError(Error):
"""Raised by UserService calls if the requested redirect URL is not allowed.
"""
class User(object):
"""A user.
We provide the email address, nickname, auth domain, and id for a user.
A nickname is a human-readable string which uniquely identifies a Google
user, akin to a username. It will be an email address for some users, but
not all.
"""
__user_id = None
def __init__(self, email=None, _auth_domain=None, _user_id=None):
"""Constructor.
Args:
email: An optional string of the user's email address. It defaults to
the current user's email address.
Raises:
UserNotFoundError: Raised if the user is not logged in and the email
argument is empty.
"""
if _auth_domain is None:
_auth_domain = os.environ.get('AUTH_DOMAIN')
else:
assert email is not None
assert _auth_domain
if email is None:
assert 'USER_EMAIL' in os.environ
email = os.environ['USER_EMAIL']
if _user_id is None and 'USER_ID' in os.environ:
_user_id = os.environ['USER_ID']
if not email:
raise UserNotFoundError
self.__email = email
self.__auth_domain = _auth_domain
self.__user_id = _user_id or None
def nickname(self):
"""Return this user's nickname.
The nickname will be a unique, human readable identifier for this user
with respect to this application. It will be an email address for some
users, but not all.
"""
if (self.__email and self.__auth_domain and
self.__email.endswith('@' + self.__auth_domain)):
suffix_len = len(self.__auth_domain) + 1
return self.__email[:-suffix_len]
else:
return self.__email
def email(self):
"""Return this user's email address."""
return self.__email
def user_id(self):
"""Return either a permanent unique identifying string or None.
If the email address was set explicitly, this will return None.
"""
return self.__user_id
def auth_domain(self):
"""Return this user's auth domain."""
return self.__auth_domain
def __unicode__(self):
return unicode(self.nickname())
def __str__(self):
return str(self.nickname())
def __repr__(self):
if self.__user_id:
return "users.User(email='%s',_user_id='%s')" % (self.email(),
self.user_id())
else:
return "users.User(email='%s')" % self.email()
def __hash__(self):
return hash((self.__email, self.__auth_domain))
def __cmp__(self, other):
if not isinstance(other, User):
return NotImplemented
return cmp((self.__email, self.__auth_domain),
(other.__email, other.__auth_domain))
def create_login_url(dest_url):
"""Computes the login URL for this request and specified destination URL.
Args:
dest_url: String that is the desired final destination URL for the user
once login is complete. If 'dest_url' does not have a host
specified, we will use the host from the current request.
Returns:
string
"""
req = user_service_pb.StringProto()
resp = user_service_pb.StringProto()
req.set_value(dest_url)
try:
apiproxy_stub_map.MakeSyncCall('user', 'CreateLoginURL', req, resp)
except apiproxy_errors.ApplicationError, e:
if (e.application_error ==
user_service_pb.UserServiceError.REDIRECT_URL_TOO_LONG):
raise RedirectTooLongError
elif (e.application_error ==
user_service_pb.UserServiceError.NOT_ALLOWED):
raise NotAllowedError
else:
raise e
return resp.value()
CreateLoginURL = create_login_url
def create_logout_url(dest_url):
"""Computes the logout URL for this request and specified destination URL.
Args:
dest_url: String that is the desired final destination URL for the user
once logout is complete. If 'dest_url' does not have a host
specified, we will use the host from the current request.
Returns:
string
"""
req = user_service_pb.StringProto()
resp = user_service_pb.StringProto()
req.set_value(dest_url)
try:
apiproxy_stub_map.MakeSyncCall('user', 'CreateLogoutURL', req, resp)
except apiproxy_errors.ApplicationError, e:
if (e.application_error ==
user_service_pb.UserServiceError.REDIRECT_URL_TOO_LONG):
raise RedirectTooLongError
else:
raise e
return resp.value()
CreateLogoutURL = create_logout_url
def get_current_user():
try:
return User()
except UserNotFoundError:
return None
GetCurrentUser = get_current_user
def is_current_user_admin():
"""Return true if the user making this request is an admin for this
application, false otherwise.
We specifically make this a separate function, and not a member function of
the User class, because admin status is not persisted in the datastore. It
only exists for the user making this request right now.
"""
return (os.environ.get('USER_IS_ADMIN', '0')) == '1'
IsCurrentUserAdmin = is_current_user_admin
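# Minimal usage sketch (assumes a running App Engine request environment that
# populates USER_EMAIL / USER_ID; not runnable standalone):
#   user = get_current_user()
#   if user:
#       greeting = 'Hello, %s' % user.nickname()
#   else:
#       greeting = '<a href="%s">Sign in</a>' % create_login_url('/')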
|
Cyber-Neuron/inception_v3
|
inception/inception/inception_eval.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library to evaluate Inception on a single GPU.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import os.path
import time
import numpy as np
import tensorflow as tf
from inception import image_processing
from inception import inception_model as inception
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('eval_dir', '/tmp/imagenet_eval',
"""Directory where to write event logs.""")
tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/imagenet_train',
"""Directory where to read model checkpoints.""")
# Flags governing the frequency of the eval.
tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5,
"""How often to run the eval.""")
tf.app.flags.DEFINE_boolean('run_once', False,
"""Whether to run eval only once.""")
# Flags governing the data used for the eval.
tf.app.flags.DEFINE_integer('num_examples', 50000,
"""Number of examples to run. Note that the eval """
"""ImageNet dataset contains 50000 examples.""")
tf.app.flags.DEFINE_string('subset', 'validation',
"""Either 'validation' or 'train'.""")
def _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op):
"""Runs Eval once.
Args:
saver: Saver.
summary_writer: Summary writer.
top_1_op: Top 1 op.
top_5_op: Top 5 op.
summary_op: Summary op.
"""
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
if os.path.isabs(ckpt.model_checkpoint_path):
# Restores from checkpoint with absolute path.
saver.restore(sess, ckpt.model_checkpoint_path)
else:
# Restores from checkpoint with relative path.
saver.restore(sess, os.path.join(FLAGS.checkpoint_dir,
ckpt.model_checkpoint_path))
# Assuming model_checkpoint_path looks something like:
# /my-favorite-path/imagenet_train/model.ckpt-0,
# extract global_step from it.
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
print('Successfully loaded model from %s at step=%s.' %
(ckpt.model_checkpoint_path, global_step))
else:
print('No checkpoint file found')
return
# Start the queue runners.
coord = tf.train.Coordinator()
try:
threads = []
for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
start=True))
num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
# Counts the number of correct predictions.
count_top_1 = 0.0
count_top_5 = 0.0
total_sample_count = num_iter * FLAGS.batch_size
step = 0
print('%s: starting evaluation on (%s).' % (datetime.now(), FLAGS.subset))
start_time = time.time()
while step < num_iter and not coord.should_stop():
top_1, top_5 = sess.run([top_1_op, top_5_op])
#print('TOP 1: %s \nTOP 5: %s' % (top_1,top_5))
count_top_1 += np.sum(top_1)
count_top_5 += np.sum(top_5)
step += 1
if step % 20 == 0:
duration = time.time() - start_time
sec_per_batch = duration / 20.0
examples_per_sec = FLAGS.batch_size / sec_per_batch
print('%s: [%d batches out of %d] (%.1f examples/sec; %.3f '
'sec/batch)' % (datetime.now(), step, num_iter,
examples_per_sec, sec_per_batch))
start_time = time.time()
# Compute precision @ 1.
precision_at_1 = count_top_1 / total_sample_count
recall_at_5 = count_top_5 / total_sample_count
print('%s: precision @ 1 = %.4f recall @ 5 = %.4f [%d examples]' %
(datetime.now(), precision_at_1, recall_at_5, total_sample_count))
summary = tf.Summary()
summary.ParseFromString(sess.run(summary_op))
summary.value.add(tag='Precision @ 1', simple_value=precision_at_1)
summary.value.add(tag='Recall @ 5', simple_value=recall_at_5)
summary_writer.add_summary(summary, global_step)
except Exception as e: # pylint: disable=broad-except
coord.request_stop(e)
coord.request_stop()
coord.join(threads, stop_grace_period_secs=10)
def evaluate(dataset):
"""Evaluate model on Dataset for a number of steps."""
with tf.Graph().as_default():
# Get images and labels from the dataset.
images, labels, _ = image_processing.inputs(dataset)
# Number of classes in the Dataset label set plus 1.
# Label 0 is reserved for an (unused) background class.
num_classes = dataset.num_classes() + 1
# Build a Graph that computes the logits predictions from the
# inference model.
logits, _ = inception.inference(images, num_classes)
# Calculate predictions.
top_1_op = tf.nn.in_top_k(logits, labels, 1)
top_5_op = tf.nn.in_top_k(logits, labels, 5)
# Restore the moving average version of the learned variables for eval.
variable_averages = tf.train.ExponentialMovingAverage(
inception.MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
graph_def = tf.get_default_graph().as_graph_def()
summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
graph_def=graph_def)
while True:
_eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op)
if FLAGS.run_once:
break
time.sleep(FLAGS.eval_interval_secs)
|
tensorflow/neural-structured-learning
|
research/gam/gam/models/gcn.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graph Convolution Networks implementation adapted from https://github.com/tkipf/gcn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .models_base import glorot
from .models_base import Model
import tensorflow as tf
# Global unique layer ID dictionary for layer name assignment.
_LAYER_UIDS = {}
def get_layer_uid(layer_name=''):
"""Helper function, assigns unique layer IDs."""
if layer_name not in _LAYER_UIDS:
_LAYER_UIDS[layer_name] = 1
return 1
else:
_LAYER_UIDS[layer_name] += 1
return _LAYER_UIDS[layer_name]
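# The helper below implements inverted dropout for tf.SparseTensor inputs: each
# nonzero entry is kept with probability keep_prob (floor(keep_prob + uniform)
# yields a Bernoulli mask) and the surviving values are rescaled by 1/keep_prob
# so the expected sum of the tensor is unchanged.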
def sparse_dropout(x, keep_prob, noise_shape):
"""Dropout for sparse tensors."""
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape)
dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
pre_out = tf.sparse_retain(x, dropout_mask)
return tf.SparseTensor(
indices=pre_out.indices,
values=pre_out.values / keep_prob,
dense_shape=pre_out.dense_shape)
def dot(x, y, sparse=False):
"""Wrapper for tf.matmul (sparse vs dense)."""
if sparse:
res = tf.sparse_tensor_dense_matmul(x, y)
else:
res = tf.matmul(x, y)
return res
class GCN(Model):
"""Graph Convolution Networks.
Attributes:
input_dim: Integer representing the number of input features.
output_dim: Integer representing the number of classes.
hidden: Integer representing the number of hidden units in the first layer
of the network.
dropout: Float representing the dropout probability during training.
aggregation: String representing an aggregation operation that is applied
to the two inputs of the agreement model after they are encoded through
the convolution layers. See superclass attributes for details.
activation: An activation function to be applied to the outputs of each
fully connected layer of the aggregation network.
is_binary_classification: Boolean specifying if this is a model for binary
classification. If so, it uses a different loss function and returns
predictions with a single dimension, the batch size.
name: String representing the model name.
"""
def __init__(self,
input_dim,
output_dim,
hidden,
dropout=0.5,
aggregation=None,
hidden_aggregation=(),
activation=tf.nn.leaky_relu,
is_binary_classification=False,
name='GCN'):
super(GCN, self).__init__(
aggregation=aggregation,
hidden_aggregation=hidden_aggregation,
activation=activation)
dropout = 0.5 if dropout is None else dropout
self.input_dim = input_dim
self.output_dim = output_dim
self.num_supports = 1
self.hidden = hidden
self.dropout = dropout
self.name = name
self.is_binary_classification = is_binary_classification
def get_encoding_and_params(self, inputs, is_train, support,
num_features_nonzero, **unused_kwargs):
"""Creates the model hidden representations and prediction ops.
For this model, the hidden representation is the last layer of the MLP,
before the logit computation. The predictions are unnormalized logits.
Args:
inputs: A tensor containing the model inputs. The first dimension is the
batch size.
is_train: A boolean placeholder specifying if this is a training or
testing setting.
support: TODO(dattias, kvis-google): add.
num_features_nonzero: Number of non-zero features.
**unused_kwargs: Other unused keyword arguments.
Returns:
encoding: A tensor containing an encoded batch of samples. The first
dimension corresponds to the batch size.
all_vars: A dictionary mapping from variable name to TensorFlow op
containing all variables used in this model.
reg_params: A dictionary mapping from a variable name to a Tensor of
parameters which will be used for regularization.
"""
# Build layers.
with tf.variable_scope(self.name + '/encoding'):
hidden, reg_params = self._construct_encoding(inputs, is_train, support,
num_features_nonzero)
# Store model variables for easy access.
variables = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES,
scope=tf.get_default_graph().get_name_scope())
all_vars = {var.name: var for var in variables}
return hidden, all_vars, reg_params
def _construct_encoding(self, inputs, is_train, support,
num_features_nonzero):
"""Create weight variables."""
dropout = (
tf.constant(self.dropout, tf.float32) * tf.cast(is_train, tf.float32))
layer_1 = GraphConvolution(
input_dim=self.input_dim,
output_dim=self.hidden,
activation=tf.nn.relu,
dropout=dropout,
sparse_inputs=True,
num_features_nonzero=num_features_nonzero,
support=support,
name='GraphConvolution1')
encoding = layer_1(inputs)
reg_params = layer_1.vars
return encoding, reg_params
def get_predictions_and_params(self, encoding, is_train, **kwargs):
"""Creates the model prediction op.
For this model, the hidden representation is the last layer of the MLP,
before the logit computation. The predictions are unnormalized logits.
Args:
encoding: A tensor containing the model inputs. The first dimension is the
batch size.
is_train: A placeholder representing a boolean value that specifies if
this model will be used for training or for test.
**kwargs: Other keyword arguments.
Returns:
predictions: A tensor of logits. For multiclass classification its
shape is (num_samples, num_classes), where the second dimension contains
a logit per class. For binary classification, its shape is
(num_samples,), where each element is the probability of class 1 for
that sample.
all_vars: A dictionary mapping from variable name to TensorFlow op
containing all variables used in this model.
reg_params: A dictionary mapping from a variable name to a Tensor of
parameters which will be used for regularization.
"""
reg_params = {}
support = kwargs['support']
num_features_nonzero = kwargs['num_features_nonzero']
# Build layers.
with tf.variable_scope(self.name + '/prediction'):
dropout = (
tf.constant(self.dropout, tf.float32) * tf.cast(is_train, tf.float32))
layer_2 = GraphConvolution(
input_dim=self.hidden,
output_dim=self.output_dim,
activation=lambda x: x,
dropout=dropout,
num_features_nonzero=num_features_nonzero,
support=support,
name='GraphConvolution2')
predictions = layer_2(encoding)
if self.is_binary_classification:
predictions = predictions[:, 0]
# Store model variables for easy access.
variables = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES,
scope=tf.get_default_graph().get_name_scope())
all_vars = {var.name: var for var in variables}
return predictions, all_vars, reg_params
def get_loss(self,
predictions,
targets,
name_scope='loss',
reg_params=None,
**kwargs):
"""Returns a loss between the provided targets and predictions.
For binary classification, this loss is sigmoid cross entropy. For
multi-class classification, it is softmax cross entropy.
A weight decay loss is also added to the parameters passed in reg_params.
Args:
predictions: A tensor of predictions. For multiclass classification its
shape is (num_samples, num_classes), where the second dimension contains
a logit per class. For binary classification, its shape is
(num_samples,), where each element is the probability of class 1 for
that sample.
targets: A tensor of targets of shape (num_samples,), where each row
contains the label index of the corresponding sample.
name_scope: A string containing the name scope used in TensorFlow.
reg_params: A dictionary of parameters, mapping from name to parameter, for
the variables to be included in the weight decay loss. If None, no
weight decay is applied.
**kwargs: Keyword arguments, potentially containing the weight of the
regularization term, passed under the name `weight_decay`. If this is
not provided, it defaults to 0.0.
Returns:
loss: The cumulative loss value.
"""
reg_params = reg_params if reg_params is not None else {}
weight_decay = kwargs['weight_decay'] if 'weight_decay' in kwargs else None
with tf.name_scope(name_scope):
# Cross entropy error.
if self.is_binary_classification:
loss = tf.reduce_sum(
tf.nn.sigmoid_cross_entropy_with_logits(
labels=targets, logits=predictions))
else:
loss = tf.losses.softmax_cross_entropy(targets, predictions)
# Weight decay loss.
if weight_decay is not None:
for var in reg_params.values():
loss = loss + weight_decay * tf.nn.l2_loss(var)
return loss
def normalize_predictions(self, predictions):
"""Converts predictions to probabilities.
Args:
predictions: A tensor of logits. For multiclass classification its shape
is (num_samples, num_classes), where the second dimension contains a
logit per class. For binary classification, its shape is (num_samples,),
where each element is the probability of class 1 for that sample.
Returns:
A tensor of the same shape as predictions, with values between [0, 1]
representing probabilities.
"""
if self.is_binary_classification:
return tf.nn.sigmoid(predictions)
return tf.nn.softmax(predictions, axis=-1)
class GraphConvolution(object):
"""Graph convolution layer."""
def __init__(self,
input_dim,
output_dim,
support,
num_features_nonzero,
dropout=0.,
sparse_inputs=False,
activation=tf.nn.relu,
bias=False,
featureless=False,
name=None):
if not name:
layer = self.__class__.__name__.lower()
name = layer + '_' + str(get_layer_uid(layer))
self.name = name
self.vars = {}
self.dropout = dropout
self.act = activation
self.support = support
self.sparse_inputs = sparse_inputs
self.featureless = featureless
self.bias = bias
# Helper variable for sparse dropout.
self.num_features_nonzero = num_features_nonzero
with tf.variable_scope(self.name + '_vars'):
self.vars['weights'] = tf.get_variable(
name='weights', initializer=glorot([input_dim, output_dim]))
if self.bias:
self.vars['bias'] = tf.get_variable(
name='bias', initializer=tf.zeros(shape=[output_dim]))
def __call__(self, inputs):
with tf.name_scope(self.name):
outputs = self._call(inputs)
return outputs
def _call(self, inputs):
"""Run over inputs."""
x = inputs
# Dropout.
if self.sparse_inputs:
x = sparse_dropout(x, 1 - self.dropout, self.num_features_nonzero)
else:
x = tf.nn.dropout(x, 1 - self.dropout)
# Convolve.
if not self.featureless:
pre_sup = dot(x, self.vars['weights'], sparse=self.sparse_inputs)
else:
pre_sup = self.vars['weights']
support = dot(self.support, pre_sup, sparse=True)
output = support
# Bias.
if self.bias:
output += self.vars['bias']
return self.act(output)
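# Construction sketch (hypothetical dimensions, e.g. a Cora-like citation graph):
#   model = GCN(input_dim=1433, output_dim=7, hidden=16, dropout=0.5)
# The encoding and prediction ops are then built via
# model.get_encoding_and_params(...) and model.get_predictions_and_params(...)
# with the graph `support` tensor (typically a normalized adjacency matrix) and
# a `num_features_nonzero` placeholder, as used by the layers above.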
|
vijayendrabvs/hap
|
neutron/tests/unit/openvswitch/test_ovs_lib.py
|
# Copyright 2012, VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import mock
from oslo.config import cfg
import testtools
from neutron.agent.linux import ovs_lib
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.openstack.common import jsonutils
from neutron.openstack.common import uuidutils
from neutron.plugins.openvswitch.common import constants
from neutron.tests import base
from neutron.tests import tools
OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0"
class TestBaseOVS(base.BaseTestCase):
def setUp(self):
super(TestBaseOVS, self).setUp()
self.root_helper = 'sudo'
self.ovs = ovs_lib.BaseOVS(self.root_helper)
self.br_name = 'bridge1'
def test_add_bridge(self):
with mock.patch.object(self.ovs, 'run_vsctl') as mock_vsctl:
self.ovs.add_bridge(self.br_name)
mock_vsctl.assert_called_with(["--", "--may-exist",
"add-br", self.br_name])
def test_delete_bridge(self):
with mock.patch.object(self.ovs, 'run_vsctl') as mock_vsctl:
self.ovs.delete_bridge(self.br_name)
mock_vsctl.assert_called_with(["--", "--if-exists", "del-br",
self.br_name])
def test_bridge_exists_returns_true(self):
with mock.patch.object(self.ovs, 'run_vsctl') as mock_vsctl:
self.assertTrue(self.ovs.bridge_exists(self.br_name))
mock_vsctl.assert_called_with(['br-exists', self.br_name],
check_error=True)
def test_bridge_exists_returns_false_for_exit_code_2(self):
with mock.patch.object(self.ovs, 'run_vsctl',
side_effect=RuntimeError('Exit code: 2\n')):
self.assertFalse(self.ovs.bridge_exists('bridge1'))
def test_bridge_exists_raises_unknown_exception(self):
with mock.patch.object(self.ovs, 'run_vsctl',
side_effect=RuntimeError()):
with testtools.ExpectedException(RuntimeError):
self.ovs.bridge_exists('bridge1')
def test_get_bridge_name_for_port_name_returns_bridge_for_valid_port(self):
port_name = 'bar'
with mock.patch.object(self.ovs, 'run_vsctl',
return_value=self.br_name) as mock_vsctl:
bridge = self.ovs.get_bridge_name_for_port_name(port_name)
self.assertEqual(bridge, self.br_name)
mock_vsctl.assert_called_with(['port-to-br', port_name],
check_error=True)
def test_get_bridge_name_for_port_name_returns_none_for_exit_code_1(self):
with mock.patch.object(self.ovs, 'run_vsctl',
side_effect=RuntimeError('Exit code: 1\n')):
self.assertFalse(self.ovs.get_bridge_name_for_port_name('bridge1'))
def test_get_bridge_name_for_port_name_raises_unknown_exception(self):
with mock.patch.object(self.ovs, 'run_vsctl',
side_effect=RuntimeError()):
with testtools.ExpectedException(RuntimeError):
self.ovs.get_bridge_name_for_port_name('bridge1')
def _test_port_exists(self, br_name, result):
with mock.patch.object(self.ovs,
'get_bridge_name_for_port_name',
return_value=br_name):
self.assertEqual(self.ovs.port_exists('bar'), result)
def test_port_exists_returns_true_for_bridge_name(self):
self._test_port_exists(self.br_name, True)
def test_port_exists_returns_false_for_none(self):
self._test_port_exists(None, False)
class OVS_Lib_Test(base.BaseTestCase):
"""A test suite to exercise the OVS libraries shared by Neutron agents.
Note: these tests do not actually execute ovs-* utilities, and thus
can run on any system. That does, however, limit their scope.
"""
def setUp(self):
super(OVS_Lib_Test, self).setUp()
self.BR_NAME = "br-int"
self.TO = "--timeout=10"
self.root_helper = 'sudo'
self.br = ovs_lib.OVSBridge(self.BR_NAME, self.root_helper)
self.execute = mock.patch.object(
utils, "execute", spec=utils.execute).start()
def test_vifport(self):
"""Create and stringify vif port, confirm no exceptions."""
pname = "vif1.0"
ofport = 5
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
# test __init__
port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br)
self.assertEqual(port.port_name, pname)
self.assertEqual(port.ofport, ofport)
self.assertEqual(port.vif_id, vif_id)
self.assertEqual(port.vif_mac, mac)
self.assertEqual(port.switch.br_name, self.BR_NAME)
# test __str__
str(port)
def test_set_controller(self):
controller_names = ['tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555']
self.br.set_controller(controller_names)
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'set-controller', self.BR_NAME,
'tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555'],
root_helper=self.root_helper)
def test_del_controller(self):
self.br.del_controller()
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'del-controller', self.BR_NAME],
root_helper=self.root_helper)
def test_get_controller(self):
self.execute.return_value = 'tcp:127.0.0.1:6633\ntcp:172.17.16.10:5555'
names = self.br.get_controller()
self.assertEqual(names,
['tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555'])
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'get-controller', self.BR_NAME],
root_helper=self.root_helper)
def test_set_protocols(self):
protocols = 'OpenFlow13'
self.br.set_protocols(protocols)
self.execute.assert_called_once_with(
['ovs-vsctl', self.TO, '--', 'set', 'bridge', self.BR_NAME,
"protocols=%s" % protocols],
root_helper=self.root_helper)
def test_create(self):
self.br.add_bridge(self.BR_NAME)
self.br.create()
def test_destroy(self):
self.br.delete_bridge(self.BR_NAME)
self.br.destroy()
def test_reset_bridge(self):
self.br.destroy()
self.br.create()
self.br.reset_bridge()
def _build_timeout_opt(self, exp_timeout):
return "--timeout=%d" % exp_timeout if exp_timeout else self.TO
def _test_delete_port(self, exp_timeout=None):
exp_timeout_str = self._build_timeout_opt(exp_timeout)
pname = "tap5"
self.br.delete_port(pname)
self.execute.assert_called_once_with(
["ovs-vsctl", exp_timeout_str, "--", "--if-exists",
"del-port", self.BR_NAME, pname],
root_helper=self.root_helper)
def test_delete_port(self):
self._test_delete_port()
def test_call_command_non_default_timeout(self):
# This test is only for verifying a non-default timeout
# is correctly applied. Does not need to be repeated for
# every ovs_lib method
new_timeout = 5
self.br.vsctl_timeout = new_timeout
self._test_delete_port(new_timeout)
def test_add_flow(self):
ofport = "99"
vid = 4000
lsw_id = 18
cidr = '192.168.1.0/24'
flow_dict_1 = OrderedDict([('priority', 2),
('dl_src', 'ca:fe:de:ad:be:ef'),
('actions', 'strip_vlan,output:0')])
flow_dict_2 = OrderedDict([('priority', 1),
('actions', 'normal')])
flow_dict_3 = OrderedDict([('priority', 2),
('actions', 'drop')])
flow_dict_4 = OrderedDict([('priority', 2),
('in_port', ofport),
('actions', 'drop')])
flow_dict_5 = OrderedDict([
('priority', 4),
('in_port', ofport),
('dl_vlan', vid),
('actions', "strip_vlan,set_tunnel:%s,normal" % (lsw_id))])
flow_dict_6 = OrderedDict([
('priority', 3),
('tun_id', lsw_id),
('actions', "mod_vlan_vid:%s,output:%s" % (vid, ofport))])
flow_dict_7 = OrderedDict([
('priority', 4),
('nw_src', cidr),
('proto', 'arp'),
('actions', 'drop')])
self.br.add_flow(**flow_dict_1)
self.br.add_flow(**flow_dict_2)
self.br.add_flow(**flow_dict_3)
self.br.add_flow(**flow_dict_4)
self.br.add_flow(**flow_dict_5)
self.br.add_flow(**flow_dict_6)
self.br.add_flow(**flow_dict_7)
expected_calls = [
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=2,dl_src=ca:fe:de:ad:be:ef"
",actions=strip_vlan,output:0"],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=1,actions=normal"],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=2,actions=drop"],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=2,in_port=%s,actions=drop" % ofport],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=4,dl_vlan=%s,in_port=%s,"
"actions=strip_vlan,set_tunnel:%s,normal"
% (vid, ofport, lsw_id)],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=3,tun_id=%s,actions="
"mod_vlan_vid:%s,output:%s"
% (lsw_id, vid, ofport)],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=4,nw_src=%s,arp,actions=drop" % cidr],
process_input=None, root_helper=self.root_helper),
]
self.execute.assert_has_calls(expected_calls)
def test_add_flow_timeout_set(self):
flow_dict = OrderedDict([('priority', 1),
('hard_timeout', 1000),
('idle_timeout', 2000),
('actions', 'normal')])
self.br.add_flow(**flow_dict)
self.execute.assert_called_once_with(
["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=1000,idle_timeout=2000,priority=1,actions=normal"],
process_input=None,
root_helper=self.root_helper)
def test_add_flow_default_priority(self):
flow_dict = OrderedDict([('actions', 'normal')])
self.br.add_flow(**flow_dict)
self.execute.assert_called_once_with(
["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,priority=1,actions=normal"],
process_input=None,
root_helper=self.root_helper)
def test_get_port_ofport(self):
pname = "tap99"
ofport = "6"
self.execute.return_value = ofport
self.assertEqual(self.br.get_port_ofport(pname), ofport)
self.execute.assert_called_once_with(
["ovs-vsctl", self.TO, "get", "Interface", pname, "ofport"],
root_helper=self.root_helper)
def test_get_datapath_id(self):
datapath_id = '"0000b67f4fbcc149"'
self.execute.return_value = datapath_id
self.assertEqual(self.br.get_datapath_id(), datapath_id.strip('"'))
self.execute.assert_called_once_with(
["ovs-vsctl", self.TO, "get",
"Bridge", self.BR_NAME, "datapath_id"],
root_helper=self.root_helper)
def test_count_flows(self):
self.execute.return_value = 'ignore\nflow-1\n'
# counts the number of flows as total lines of output - 2
self.assertEqual(self.br.count_flows(), 1)
self.execute.assert_called_once_with(
["ovs-ofctl", "dump-flows", self.BR_NAME],
root_helper=self.root_helper,
process_input=None)
def test_delete_flow(self):
ofport = "5"
lsw_id = 40
vid = 39
self.br.delete_flows(in_port=ofport)
self.br.delete_flows(tun_id=lsw_id)
self.br.delete_flows(dl_vlan=vid)
expected_calls = [
mock.call(["ovs-ofctl", "del-flows", self.BR_NAME,
"in_port=" + ofport],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "del-flows", self.BR_NAME,
"tun_id=%s" % lsw_id],
process_input=None, root_helper=self.root_helper),
mock.call(["ovs-ofctl", "del-flows", self.BR_NAME,
"dl_vlan=%s" % vid],
process_input=None, root_helper=self.root_helper),
]
self.execute.assert_has_calls(expected_calls)
def test_delete_flow_with_priority_set(self):
params = {'in_port': '1',
'priority': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.delete_flows,
**params)
def test_mod_flow_with_priority_set(self):
params = {'in_port': '1',
'priority': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.mod_flow,
**params)
def test_mod_flow_no_actions_set(self):
params = {'in_port': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.mod_flow,
**params)
def test_defer_apply_flows(self):
flow_expr = mock.patch.object(ovs_lib, '_build_flow_expr_str').start()
flow_expr.side_effect = ['added_flow_1', 'added_flow_2',
'deleted_flow_1']
run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
self.br.defer_apply_on()
self.br.add_flow(flow='add_flow_1')
self.br.defer_apply_on()
self.br.add_flow(flow='add_flow_2')
self.br.delete_flows(flow='delete_flow_1')
self.br.defer_apply_off()
flow_expr.assert_has_calls([
mock.call({'flow': 'add_flow_1'}, 'add'),
mock.call({'flow': 'add_flow_2'}, 'add'),
mock.call({'flow': 'delete_flow_1'}, 'del')
])
run_ofctl.assert_has_calls([
mock.call('add-flows', ['-'], 'added_flow_1\nadded_flow_2\n'),
mock.call('del-flows', ['-'], 'deleted_flow_1\n')
])
def test_add_tunnel_port(self):
pname = "tap99"
local_ip = "1.1.1.1"
remote_ip = "9.9.9.9"
ofport = "6"
command = ["ovs-vsctl", self.TO, '--', "--may-exist", "add-port",
self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=gre", "options:remote_ip=" + remote_ip,
"options:local_ip=" + local_ip,
"options:in_key=flow",
"options:out_key=flow"])
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(command, root_helper=self.root_helper), None),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper),
ofport),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(
self.br.add_tunnel_port(pname, remote_ip, local_ip),
ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_patch_port(self):
pname = "tap99"
peer = "bar10"
ofport = "6"
# Each element is a tuple of (expected mock call, return_value)
command = ["ovs-vsctl", self.TO, "add-port", self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=patch", "options:peer=" + peer])
expected_calls_and_values = [
(mock.call(command, root_helper=self.root_helper),
None),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper),
ofport)
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(self.br.add_patch_port(pname, peer), ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _test_get_vif_ports(self, is_xen=False):
pname = "tap99"
ofport = "6"
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
if is_xen:
external_ids = ('{xs-vif-uuid="%s", attached-mac="%s"}'
% (vif_id, mac))
else:
external_ids = ('{iface-id="%s", attached-mac="%s"}'
% (vif_id, mac))
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
"%s\n" % pname),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "external_ids"],
root_helper=self.root_helper),
external_ids),
(mock.call(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper),
ofport),
]
if is_xen:
expected_calls_and_values.append(
(mock.call(["xe", "vif-param-get", "param-name=other-config",
"param-key=nicira-iface-id", "uuid=" + vif_id],
root_helper=self.root_helper),
vif_id)
)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
ports = self.br.get_vif_ports()
self.assertEqual(1, len(ports))
self.assertEqual(ports[0].port_name, pname)
self.assertEqual(ports[0].ofport, ofport)
self.assertEqual(ports[0].vif_id, vif_id)
self.assertEqual(ports[0].vif_mac, mac)
self.assertEqual(ports[0].switch.br_name, self.BR_NAME)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _encode_ovs_json(self, headings, data):
# See man ovs-vsctl(8) for the encoding details.
r = {"data": [],
"headings": headings}
for row in data:
ovs_row = []
r["data"].append(ovs_row)
for cell in row:
if isinstance(cell, (str, int, list)):
ovs_row.append(cell)
elif isinstance(cell, dict):
ovs_row.append(["map", cell.items()])
elif isinstance(cell, set):
ovs_row.append(["set", cell])
else:
raise TypeError('%r not int, str, list, set or dict' %
type(cell))
return jsonutils.dumps(r)
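# For reference, a single row such as ['tap99', {'iface-id': 'x'}, 1] with
# headings ['name', 'external_ids', 'ofport'] is encoded roughly as
#   {"headings": ["name", "external_ids", "ofport"],
#    "data": [["tap99", ["map", [["iface-id", "x"]]], 1]]}
# (key order in the JSON output may vary); see ovs-vsctl(8) for the full format.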
def _test_get_vif_port_set(self, is_xen):
if is_xen:
id_key = 'xs-vif-uuid'
else:
id_key = 'iface-id'
headings = ['name', 'external_ids']
data = [
# A vif port on this bridge:
['tap99', {id_key: 'tap99id', 'attached-mac': 'tap99mac'}, 1],
# A vif port on this bridge not yet configured
['tap98', {id_key: 'tap98id', 'attached-mac': 'tap98mac'}, []],
# Another vif port on this bridge not yet configured
['tap97', {id_key: 'tap97id', 'attached-mac': 'tap97mac'},
['set', []]],
# A vif port on another bridge:
['tap88', {id_key: 'tap88id', 'attached-mac': 'tap88id'}, 1],
# Non-vif port on this bridge:
['tun22', {}, 2],
]
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
'tap99\ntun22'),
(mock.call(["ovs-vsctl", self.TO, "--format=json",
"--", "--columns=name,external_ids,ofport",
"list", "Interface"],
root_helper=self.root_helper),
self._encode_ovs_json(headings, data)),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
if is_xen:
get_xapi_iface_id = mock.patch.object(self.br,
'get_xapi_iface_id').start()
get_xapi_iface_id.return_value = 'tap99id'
port_set = self.br.get_vif_port_set()
self.assertEqual(set(['tap99id']), port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
if is_xen:
get_xapi_iface_id.assert_called_once_with('tap99id')
def test_get_vif_ports_nonxen(self):
self._test_get_vif_ports(is_xen=False)
def test_get_vif_ports_xen(self):
self._test_get_vif_ports(is_xen=True)
def test_get_vif_port_set_nonxen(self):
self._test_get_vif_port_set(False)
def test_get_vif_port_set_xen(self):
self._test_get_vif_port_set(True)
def test_get_vif_ports_list_ports_error(self):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_ports)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_vif_port_set_list_ports_error(self):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_vif_port_set_list_interface_error(self):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
'tap99\n'),
(mock.call(["ovs-vsctl", self.TO, "--format=json",
"--", "--columns=name,external_ids,ofport",
"list", "Interface"],
root_helper=self.root_helper),
RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_port_tag_dict(self):
headings = ['name', 'tag']
data = [
['int-br-eth2', set()],
['patch-tun', set()],
['qr-76d9e6b6-21', 1],
['tapce5318ff-78', 1],
['tape1400310-e6', 1],
]
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
'\n'.join((iface for iface, tag in data))),
(mock.call(["ovs-vsctl", self.TO, "--format=json",
"--", "--columns=name,tag",
"list", "Port"],
root_helper=self.root_helper),
self._encode_ovs_json(headings, data)),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
port_tags = self.br.get_port_tag_dict()
self.assertEqual(
port_tags,
{u'int-br-eth2': [],
u'patch-tun': [],
u'qr-76d9e6b6-21': 1,
u'tapce5318ff-78': 1,
u'tape1400310-e6': 1}
)
def test_clear_db_attribute(self):
pname = "tap77"
self.br.clear_db_attribute("Port", pname, "tag")
self.execute.assert_called_once_with(
["ovs-vsctl", self.TO, "clear", "Port", pname, "tag"],
root_helper=self.root_helper)
def _test_iface_to_br(self, exp_timeout=None):
iface = 'tap0'
br = 'br-int'
root_helper = 'sudo'
self.execute.return_value = 'br-int'
exp_timeout_str = self._build_timeout_opt(exp_timeout)
self.assertEqual(ovs_lib.get_bridge_for_iface(root_helper, iface), br)
self.execute.assert_called_once_with(
["ovs-vsctl", exp_timeout_str, "iface-to-br", iface],
root_helper=root_helper)
def test_iface_to_br(self):
self._test_iface_to_br()
def test_iface_to_br_non_default_timeout(self):
new_timeout = 5
cfg.CONF.set_override('ovs_vsctl_timeout', new_timeout)
self._test_iface_to_br(new_timeout)
def test_iface_to_br_handles_ovs_vsctl_exception(self):
iface = 'tap0'
root_helper = 'sudo'
self.execute.side_effect = Exception
self.assertIsNone(ovs_lib.get_bridge_for_iface(root_helper, iface))
self.execute.assert_called_once_with(
["ovs-vsctl", self.TO, "iface-to-br", iface],
root_helper=root_helper)
def test_delete_all_ports(self):
with mock.patch.object(self.br, 'get_port_name_list',
return_value=['port1']) as get_port:
with mock.patch.object(self.br, 'delete_port') as delete_port:
self.br.delete_ports(all_ports=True)
get_port.assert_called_once_with()
delete_port.assert_called_once_with('port1')
def test_delete_neutron_ports(self):
port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(),
'ca:fe:de:ad:be:ef', 'br')
port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(),
'ca:ee:de:ad:be:ef', 'br')
with mock.patch.object(self.br, 'get_vif_ports',
return_value=[port1, port2]) as get_ports:
with mock.patch.object(self.br, 'delete_port') as delete_port:
self.br.delete_ports(all_ports=False)
get_ports.assert_called_once_with()
delete_port.assert_has_calls([
mock.call('tap1234'),
mock.call('tap5678')
])
def test_delete_neutron_ports_list_error(self):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper),
RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.delete_ports, all_ports=False)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _test_get_bridges(self, exp_timeout=None):
bridges = ['br-int', 'br-ex']
root_helper = 'sudo'
self.execute.return_value = 'br-int\nbr-ex\n'
timeout_str = self._build_timeout_opt(exp_timeout)
self.assertEqual(ovs_lib.get_bridges(root_helper), bridges)
self.execute.assert_called_once_with(
["ovs-vsctl", timeout_str, "list-br"],
root_helper=root_helper)
def test_get_bridges(self):
self._test_get_bridges()
def test_get_bridges_not_default_timeout(self):
new_timeout = 5
cfg.CONF.set_override('ovs_vsctl_timeout', new_timeout)
self._test_get_bridges(new_timeout)
def test_get_local_port_mac_succeeds(self):
with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
return_value=mock.Mock(address='foo')):
self.assertEqual('foo', self.br.get_local_port_mac())
def test_get_local_port_mac_raises_exception_for_missing_mac(self):
with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
return_value=mock.Mock(address=None)):
with testtools.ExpectedException(Exception):
self.br.get_local_port_mac()
def _test_get_vif_port_by_id(self, iface_id, data, br_name=None):
headings = ['external_ids', 'name', 'ofport']
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, "--format=json",
"--", "--columns=external_ids,name,ofport",
"find", "Interface",
'external_ids:iface-id="%s"' % iface_id],
root_helper=self.root_helper),
self._encode_ovs_json(headings, data))]
if data:
if not br_name:
br_name = self.BR_NAME
expected_calls_and_values.append(
(mock.call(["ovs-vsctl", self.TO,
"iface-to-br", data[0][headings.index('name')]],
root_helper=self.root_helper),
br_name))
tools.setup_mock_calls(self.execute, expected_calls_and_values)
vif_port = self.br.get_vif_port_by_id(iface_id)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
return vif_port
def _test_get_vif_port_by_id_with_data(self, ofport=None, mac=None):
external_ids = [["iface-id", "tap99id"],
["iface-status", "active"]]
if mac:
external_ids.append(["attached-mac", mac])
data = [[["map", external_ids], "tap99",
ofport if ofport else '["set",[]]']]
vif_port = self._test_get_vif_port_by_id('tap99id', data)
if not ofport or ofport == -1 or not mac:
self.assertIsNone(vif_port)
return
self.assertEqual(vif_port.vif_id, 'tap99id')
self.assertEqual(vif_port.vif_mac, 'aa:bb:cc:dd:ee:ff')
self.assertEqual(vif_port.port_name, 'tap99')
self.assertEqual(vif_port.ofport, ofport)
def test_get_vif_by_port_id_with_ofport(self):
self._test_get_vif_port_by_id_with_data(
ofport=1, mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_without_ofport(self):
self._test_get_vif_port_by_id_with_data(mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_with_invalid_ofport(self):
self._test_get_vif_port_by_id_with_data(
ofport=-1, mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_without_mac(self):
self._test_get_vif_port_by_id_with_data(ofport=1)
def test_get_vif_by_port_id_with_no_data(self):
self.assertIsNone(self._test_get_vif_port_by_id('whatever', []))
def test_get_vif_by_port_id_different_bridge(self):
external_ids = [["iface-id", "tap99id"],
["iface-status", "active"]]
data = [[["map", external_ids], "tap99", 1]]
self.assertIsNone(self._test_get_vif_port_by_id('tap99id', data,
"br-ext"))
def _check_ovs_vxlan_version(self, installed_usr_version,
installed_klm_version,
installed_kernel_version,
expecting_ok):
with mock.patch(
'neutron.agent.linux.ovs_lib.get_installed_ovs_klm_version'
) as klm_cmd:
with mock.patch(
'neutron.agent.linux.ovs_lib.get_installed_ovs_usr_version'
) as usr_cmd:
with mock.patch(
'neutron.agent.linux.ovs_lib.get_installed_kernel_version'
) as kernel_cmd:
try:
klm_cmd.return_value = installed_klm_version
usr_cmd.return_value = installed_usr_version
kernel_cmd.return_value = installed_kernel_version
ovs_lib.check_ovs_vxlan_version(root_helper='sudo')
version_ok = True
except SystemError:
version_ok = False
self.assertEqual(version_ok, expecting_ok)
def test_check_minimum_version(self):
min_vxlan_ver = constants.MINIMUM_OVS_VXLAN_VERSION
min_kernel_ver = constants.MINIMUM_LINUX_KERNEL_OVS_VXLAN
self._check_ovs_vxlan_version(min_vxlan_ver, min_vxlan_ver,
min_kernel_ver, expecting_ok=True)
def test_check_future_version(self):
install_ver = str(float(constants.MINIMUM_OVS_VXLAN_VERSION) + 0.01)
min_kernel_ver = constants.MINIMUM_LINUX_KERNEL_OVS_VXLAN
self._check_ovs_vxlan_version(install_ver, install_ver,
min_kernel_ver, expecting_ok=True)
def test_check_fail_version(self):
install_ver = str(float(constants.MINIMUM_OVS_VXLAN_VERSION) - 0.01)
min_kernel_ver = constants.MINIMUM_LINUX_KERNEL_OVS_VXLAN
self._check_ovs_vxlan_version(install_ver, install_ver,
min_kernel_ver, expecting_ok=False)
def test_check_fail_no_version(self):
min_kernel_ver = constants.MINIMUM_LINUX_KERNEL_OVS_VXLAN
self._check_ovs_vxlan_version(None, None,
min_kernel_ver,
expecting_ok=False)
def test_check_fail_klm_version(self):
min_vxlan_ver = constants.MINIMUM_OVS_VXLAN_VERSION
min_kernel_ver = OVS_LINUX_KERN_VERS_WITHOUT_VXLAN
install_ver = str(float(min_vxlan_ver) - 0.01)
self._check_ovs_vxlan_version(min_vxlan_ver,
install_ver,
min_kernel_ver,
expecting_ok=False)
def test_check_pass_kernel_version(self):
min_vxlan_ver = constants.MINIMUM_OVS_VXLAN_VERSION
min_kernel_ver = constants.MINIMUM_LINUX_KERNEL_OVS_VXLAN
self._check_ovs_vxlan_version(min_vxlan_ver, min_vxlan_ver,
min_kernel_ver, expecting_ok=True)
|
our-city-app/oca-backend
|
src/rogerthat/bizz/communities/geo_fence.py
|
# -*- coding: utf-8 -*-
# Copyright 2021 Green Valley NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.5@@
from google.appengine.ext.ndb.model import GeoPt
from rogerthat.bizz.communities.communities import get_community
from rogerthat.bizz.communities.models import CommunityGeoFence, CommunityLocation, GeoFenceGeometry
from rogerthat.bizz.communities.to import CommunityGeoFenceTO
def get_geo_fence(community_id):
# type: (int) -> CommunityGeoFence
key = CommunityGeoFence.create_key(community_id)
fence = key.get() # type: CommunityGeoFence
if not fence:
fence = CommunityGeoFence(key=key)
fence.country = get_community(community_id).country
return fence
def update_geo_fence(community_id, data):
# type: (int, CommunityGeoFenceTO) -> CommunityGeoFence
fence = get_geo_fence(community_id)
fence.defaults = None
if data.defaults:
fence.defaults = CommunityLocation(locality=data.defaults.locality,
postal_code=data.defaults.postal_code)
fence.geometry = None
if data.geometry:
fence.geometry = GeoFenceGeometry(center=GeoPt(data.geometry.center.lat, data.geometry.center.lon),
max_distance=data.geometry.max_distance)
fence.put()
return fence
|
grycap/scar
|
examples/mask-detector-workflow/blurry-faces/src/auto_blur_image.py
|
# author: Asmaa Mirkhan ~ 2019
import os
import argparse
import cv2 as cv
from DetectorAPI import DetectorAPI
def blurBoxes(image, boxes):
"""
Argument:
image -- the image that will be edited as a matrix
boxes -- list of boxes that will be blurred, each box must be int the format (x_top_left, y_top_left, x_bottom_right, y_bottom_right)
Returns:
image -- the blurred image as a matrix
"""
for box in boxes:
# unpack each box
x1, y1, x2, y2 = [d for d in box]
# crop the image to the current box
sub = image[y1:y2, x1:x2]
# apply a box blur (cv.blur) to the cropped area
blur = cv.blur(sub, (10, 10))
# paste blurred image on the original image
image[y1:y2, x1:x2] = blur
return image
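# Illustrative sketch (not part of the original script): a direct call to
# blurBoxes() on a single image with one hand-picked face box. The file paths
# and box coordinates below are made-up placeholders.
def _example_blur_usage(input_path='example.jpg', output_path='example_blurred.jpg'):
    img = cv.imread(input_path)
    # one hypothetical box in (x_top_left, y_top_left, x_bottom_right, y_bottom_right) form
    blurred = blurBoxes(img, [(30, 40, 120, 160)])
    cv.imwrite(output_path, blurred)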
def main(args):
# assign model path and threshold
model_path = args.model_path
threshold = args.threshold
# create detection object
odapi = DetectorAPI(path_to_ckpt=model_path)
# open image
image = cv.imread(args.input_image)
# run face detection
boxes, scores, classes, num = odapi.processFrame(image)
# filter boxes by the confidence threshold
# boxes are in (x_top_left, y_top_left, x_bottom_right, y_bottom_right) format
boxes = [boxes[i] for i in range(0, num) if scores[i] > threshold]
# apply blurring
image = blurBoxes(image, boxes)
# # show image
# cv.imshow('blurred', image)
# if an output path was given, save the image there
if args.output_image:
cv.imwrite(args.output_image, image)
print('Image has been saved successfully at', args.output_image)
else:
cv.imshow('blurred', image)
# wait for any key press, then close the window and stop the program
cv.waitKey(0)
cv.destroyAllWindows()
if __name__ == "__main__":
# creating argument parser
parser = argparse.ArgumentParser(description='Image blurring parameters')
# adding arguments
parser.add_argument('-i',
'--input_image',
help='Path to your image',
type=str,
required=True)
parser.add_argument('-m',
'--model_path',
default='/opt/blurry-faces/face_model/face.pb',
help='Path to .pb model',
type=str)
parser.add_argument('-o',
'--output_image',
help='Output file path',
type=str)
parser.add_argument('-t',
'--threshold',
help='Face detection confidence',
default=0.7,
type=float)
args = parser.parse_args()
print(args)
# if input image path is invalid then stop
assert os.path.isfile(args.input_image), 'Invalid input file'
# if output directory is invalid then stop
if args.output_image:
assert os.path.isdir(os.path.dirname(
args.output_image)), 'No such directory'
main(args)
|
persandstrom/home-assistant
|
homeassistant/components/homekit/type_fans.py
|
"""Class to hold all light accessories."""
import logging
from pyhap.const import CATEGORY_FAN
from homeassistant.components.fan import (
ATTR_DIRECTION, ATTR_OSCILLATING, DIRECTION_FORWARD, DIRECTION_REVERSE,
DOMAIN, SERVICE_OSCILLATE, SERVICE_SET_DIRECTION, SUPPORT_DIRECTION,
SUPPORT_OSCILLATE)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_SUPPORTED_FEATURES, SERVICE_TURN_OFF,
SERVICE_TURN_ON, STATE_OFF, STATE_ON)
from . import TYPES
from .accessories import HomeAccessory
from .const import (
CHAR_ACTIVE, CHAR_ROTATION_DIRECTION, CHAR_SWING_MODE, SERV_FANV2)
_LOGGER = logging.getLogger(__name__)
@TYPES.register('Fan')
class Fan(HomeAccessory):
"""Generate a Fan accessory for a fan entity.
Currently supports: state, speed, oscillate, direction.
"""
def __init__(self, *args):
"""Initialize a new Light accessory object."""
super().__init__(*args, category=CATEGORY_FAN)
self._flag = {CHAR_ACTIVE: False,
CHAR_ROTATION_DIRECTION: False,
CHAR_SWING_MODE: False}
self._state = 0
self.chars = []
features = self.hass.states.get(self.entity_id) \
.attributes.get(ATTR_SUPPORTED_FEATURES)
if features & SUPPORT_DIRECTION:
self.chars.append(CHAR_ROTATION_DIRECTION)
if features & SUPPORT_OSCILLATE:
self.chars.append(CHAR_SWING_MODE)
serv_fan = self.add_preload_service(SERV_FANV2, self.chars)
self.char_active = serv_fan.configure_char(
CHAR_ACTIVE, value=0, setter_callback=self.set_state)
if CHAR_ROTATION_DIRECTION in self.chars:
self.char_direction = serv_fan.configure_char(
CHAR_ROTATION_DIRECTION, value=0,
setter_callback=self.set_direction)
if CHAR_SWING_MODE in self.chars:
self.char_swing = serv_fan.configure_char(
CHAR_SWING_MODE, value=0, setter_callback=self.set_oscillating)
def set_state(self, value):
"""Set state if call came from HomeKit."""
_LOGGER.debug('%s: Set state to %d', self.entity_id, value)
self._flag[CHAR_ACTIVE] = True
service = SERVICE_TURN_ON if value == 1 else SERVICE_TURN_OFF
params = {ATTR_ENTITY_ID: self.entity_id}
self.hass.services.call(DOMAIN, service, params)
def set_direction(self, value):
"""Set state if call came from HomeKit."""
_LOGGER.debug('%s: Set direction to %d', self.entity_id, value)
self._flag[CHAR_ROTATION_DIRECTION] = True
direction = DIRECTION_REVERSE if value == 1 else DIRECTION_FORWARD
params = {ATTR_ENTITY_ID: self.entity_id, ATTR_DIRECTION: direction}
self.hass.services.call(DOMAIN, SERVICE_SET_DIRECTION, params)
def set_oscillating(self, value):
"""Set state if call came from HomeKit."""
_LOGGER.debug('%s: Set oscillating to %d', self.entity_id, value)
self._flag[CHAR_SWING_MODE] = True
oscillating = value == 1
params = {ATTR_ENTITY_ID: self.entity_id,
ATTR_OSCILLATING: oscillating}
self.hass.services.call(DOMAIN, SERVICE_OSCILLATE, params)
def update_state(self, new_state):
"""Update fan after state change."""
# Handle State
state = new_state.state
if state in (STATE_ON, STATE_OFF):
self._state = 1 if state == STATE_ON else 0
if not self._flag[CHAR_ACTIVE] and \
self.char_active.value != self._state:
self.char_active.set_value(self._state)
self._flag[CHAR_ACTIVE] = False
# Handle Direction
if CHAR_ROTATION_DIRECTION in self.chars:
direction = new_state.attributes.get(ATTR_DIRECTION)
if not self._flag[CHAR_ROTATION_DIRECTION] and \
direction in (DIRECTION_FORWARD, DIRECTION_REVERSE):
hk_direction = 1 if direction == DIRECTION_REVERSE else 0
if self.char_direction.value != hk_direction:
self.char_direction.set_value(hk_direction)
self._flag[CHAR_ROTATION_DIRECTION] = False
# Handle Oscillating
if CHAR_SWING_MODE in self.chars:
oscillating = new_state.attributes.get(ATTR_OSCILLATING)
if not self._flag[CHAR_SWING_MODE] and \
oscillating in (True, False):
hk_oscillating = 1 if oscillating else 0
if self.char_swing.value != hk_oscillating:
self.char_swing.set_value(hk_oscillating)
self._flag[CHAR_SWING_MODE] = False
|
javaor/PythonSpider
|
scrapy_outlink.py
|
# -*- coding: utf-8 -*-
import re
import datetime
import random
import requests
from urllib.parse import urlparse
from bs4 import BeautifulSoup
pages = set()
all_exernal_links = set()
all_internal_links = set()
def get_internal_link(bs_obj, include_url):
"""
获取页面所有的内链接
:param bs_obj:
:param include_url:
:return:
"""
include_url = urlparse(include_url).scheme + "://" + urlparse(include_url).netloc
internal_links = []
for link in bs_obj.findAll("a",href = re.compile("^(/|.*"+include_url+")")):
if link.attrs["href"] is not None:
if link.attrs["href"] not in internal_links:
if link.attrs["href"].startswith("/"):
internal_links.append(include_url+link.attrs["href"])
else:
internal_links.append(link.attrs["href"])
return internal_links
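# Illustrative sketch (not part of the original script): fetching a page and
# collecting its internal links with get_internal_link(). The URL below is a
# made-up placeholder.
def _example_internal_links(url='http://www.example.com'):
    html = requests.get(url)
    bs_obj = BeautifulSoup(html.text, 'html.parser')
    return get_internal_link(bs_obj, url)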
def get_external_link(bs_obj, exclude_url):
external_links = []
for link in bs_obj.findAll("a",href=re.compile("^(http|www)((?!"+exclude_url+").)*$")):
if link.attrs['href'] is not None:
if link.attrs['href'] not in external_links:
external_links.append(link.attrs['href'])
return external_links
def get_random_external_link(starting_page):
html = requests.get(starting_page)
bs_obj = BeautifulSoup(html.text, 'html.parser')
external_links = get_external_link(bs_obj,urlparse(starting_page).netloc)
if not external_links:
print("No external links, looking around the site for one")
domain = urlparse(starting_page).scheme+"://"+urlparse(starting_page).netloc
internal_links = get_internal_link(bs_obj,domain)
if internal_links:
return get_random_external_link(internal_links[random.randint(0, len(internal_links) - 1)])
else:
print("----------stop--------------")
else:
return external_links[random.randint(0, len(external_links) - 1)]
def get_all_external_links(site_url):
html = requests.get(site_url)
bs_obj = BeautifulSoup(html.text, 'html.parser')
internal_links = get_internal_link(bs_obj,site_url)
external_links = get_external_link(bs_obj,site_url)
for link in external_links:
if link not in all_exernal_links:
all_exernal_links.add(link)
print(link)
for link in internal_links:
if link not in all_internal_links:
all_internal_links.add(link)
print(link)
get_all_external_links(link)
def follow_external_only(start_site):
external_link = get_random_external_link(start_site)
print("Random external link is :",external_link)
follow_external_only(external_link)
if __name__ == '__main__':
start_site = "http://www.hao123.com"
# follow_external_only(start_site)
get_all_external_links(start_site)
|
rbuffat/pyidf
|
tests/test_airflownetworkdistributioncomponentheatexchanger.py
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.natural_ventilation_and_duct_leakage import AirflowNetworkDistributionComponentHeatExchanger
log = logging.getLogger(__name__)
class TestAirflowNetworkDistributionComponentHeatExchanger(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_airflownetworkdistributioncomponentheatexchanger(self):
pyidf.validation_level = ValidationLevel.error
obj = AirflowNetworkDistributionComponentHeatExchanger()
# object-list
var_heatexchanger_name = "object-list|HeatExchanger Name"
obj.heatexchanger_name = var_heatexchanger_name
# alpha
var_heatexchanger_object_type = "HeatExchanger:AirToAir:FlatPlate"
obj.heatexchanger_object_type = var_heatexchanger_object_type
# real
var_air_path_length = 0.0001
obj.air_path_length = var_air_path_length
# real
var_air_path_hydraulic_diameter = 0.0001
obj.air_path_hydraulic_diameter = var_air_path_hydraulic_diameter
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.airflownetworkdistributioncomponentheatexchangers[0].heatexchanger_name, var_heatexchanger_name)
self.assertEqual(idf2.airflownetworkdistributioncomponentheatexchangers[0].heatexchanger_object_type, var_heatexchanger_object_type)
self.assertAlmostEqual(idf2.airflownetworkdistributioncomponentheatexchangers[0].air_path_length, var_air_path_length)
self.assertAlmostEqual(idf2.airflownetworkdistributioncomponentheatexchangers[0].air_path_hydraulic_diameter, var_air_path_hydraulic_diameter)
|
seunghwanl/APMAE4990
|
webapp/forms.py
|
from flask_wtf import Form
from wtforms.fields import DecimalField, SubmitField, SelectField
from wtforms.validators import Required, Length, NumberRange
class LatLongForm(Form):
months_pairs = [('4', "April"), ('5', "May"), ('6', "June"), ('7', "July"), ('8', "August"), ('9', "September")]
days_pairs = [('0', "Monday"), ('1', "Tuesday"), ('2', "Wednesday"), ('3', "Thursday"), ('4', "Friday"), ('5', "Saturday"), ('6', "Sunday")]
hours_pairs = [('0', '0'), ('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5'), ('6', '6'), ('7', '7'), ('8', '8'), ('9', '9'), ('10', '10'), \
('11', '11'), ('12', '12')]
minutes_pairs = [('0', '00'), ('10', '10'), ('20', '20'), ('30', '30'), ('40', '40'), ('50', '50')]
am_pm_pairs = [('0', 'AM'), ('1', 'PM')]
month1 = SelectField('Month 1: ', choices = months_pairs)
day1 = SelectField('Day 1: ', choices = days_pairs)
hour1 = SelectField('Hour 1: ', choices = hours_pairs)
minute1 = SelectField('Minute 1:', choices = minutes_pairs)
am_pm1 = SelectField('AM / PM 1:', choices = am_pm_pairs)
latitude1 = DecimalField('Latitude 1: ', validators=[NumberRange(min=40.6, max=40.87, message ='value greater than 40.6 and smaller than 40.87'), Required()])
longitude1 = DecimalField('Longitude 1: ', validators=[NumberRange(min=-74.05, max=-73.9, message ='value greater than -74.05 and smaller than -73.9'), Required()])
month2 = SelectField('Month 2: ', choices = months_pairs)
day2 = SelectField('Day 2: ', choices = days_pairs)
hour2 = SelectField('Hour 2: ', choices = hours_pairs)
minute2 = SelectField('Minute 2:', choices = minutes_pairs)
am_pm2 = SelectField('AM / PM 2:', choices = am_pm_pairs)
latitude2 = DecimalField('Latitude 2: ', validators=[NumberRange(min=40.6, max=40.87, message ='value greater than 40.6 and smaller than 40.87'), Required()])
longitude2 = DecimalField('Longitude 2: ', validators=[NumberRange(min=-74.05, max=-73.9, message ='value greater than -74.05 and smaller than -73.9'), Required()])
submit = SubmitField('Enter!')
|
vponomaryov/rally
|
tests/unit/cli/test_envutils.py
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from six import moves
from rally.cli import envutils
from rally import exceptions
from tests.unit import test
class EnvUtilsTestCase(test.TestCase):
def test_default_from_global(self):
@envutils.default_from_global("test_arg_name",
"test_env_name",
"test_missing_arg")
def test_function(test_arg_name=None):
pass
with mock.patch("sys.stdout",
new_callable=moves.StringIO) as mock_stdout:
test_function()
self.assertEqual(mock_stdout.getvalue(),
"Missing argument: --test_missing_arg\n")
@mock.patch.dict(os.environ,
values={envutils.ENV_DEPLOYMENT: "my_deployment_id"},
clear=True)
def test_get_deployment_id_in_env(self):
deployment_id = envutils.get_global(envutils.ENV_DEPLOYMENT)
self.assertEqual("my_deployment_id", deployment_id)
@mock.patch.dict(os.environ, values={}, clear=True)
@mock.patch("rally.cli.envutils.fileutils.load_env_file")
def test_get_deployment_id_with_exception(self, mock_file):
self.assertRaises(exceptions.InvalidArgumentsException,
envutils.get_global, envutils.ENV_DEPLOYMENT, True)
mock_file.assert_called_once_with(os.path.expanduser(
"~/.rally/globals"))
@mock.patch.dict(os.environ, values={}, clear=True)
@mock.patch("rally.cli.envutils.fileutils.load_env_file")
def test_get_deployment_id_with_none(self, mock_file):
self.assertIsNone(envutils.get_global(envutils.ENV_DEPLOYMENT))
mock_file.assert_called_once_with(os.path.expanduser(
"~/.rally/globals"))
@mock.patch.dict(os.environ, values={envutils.ENV_TASK: "my_task_id"},
clear=True)
def test_get_task_id_in_env(self):
self.assertEqual("my_task_id", envutils.get_global(envutils.ENV_TASK))
@mock.patch.dict(os.environ, values={}, clear=True)
@mock.patch("rally.cli.envutils.fileutils.load_env_file")
def test_get_task_id_with_exception(self, mock_file):
self.assertRaises(exceptions.InvalidArgumentsException,
envutils.get_global, envutils.ENV_TASK, True)
mock_file.assert_called_once_with(os.path.expanduser(
"~/.rally/globals"))
@mock.patch.dict(os.environ, values={}, clear=True)
@mock.patch("rally.cli.envutils.fileutils.load_env_file")
def test_get_task_id_with_none(self, mock_file):
self.assertIsNone(envutils.get_global("RALLY_TASK"))
mock_file.assert_called_once_with(os.path.expanduser(
"~/.rally/globals"))
@mock.patch.dict(os.environ,
values={envutils.ENV_DEPLOYMENT: "test_deployment_id"},
clear=True)
@mock.patch("os.path.exists")
@mock.patch("rally.cli.envutils.fileutils.update_env_file",
return_value=True)
def test_clear_global(self, mock_file, mock_file_status):
envutils.clear_global(envutils.ENV_DEPLOYMENT)
mock_file.assert_called_once_with(os.path.expanduser(
"~/.rally/globals"), envutils.ENV_DEPLOYMENT, "\n")
self.assertEqual(os.environ, {})
@mock.patch.dict(os.environ,
values={envutils.ENV_DEPLOYMENT: "test_deployment_id",
envutils.ENV_TASK: "test_task_id"},
clear=True)
@mock.patch("os.path.exists")
@mock.patch("rally.cli.envutils.fileutils.update_env_file",
return_value=True)
def test_clear_env(self, mock_file, mock_file_status):
envutils.clear_env()
self.assertEqual(os.environ, {})
|
zstackorg/zstack-woodpecker
|
integrationtest/vm/multihosts/migrate/test_migrate_vm_with_iso2.py
|
'''
New integration test for migrating a running VM between hosts while an ISO is attached.
@author: Chenyuan.xu
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.image_operations as img_ops
import zstackwoodpecker.zstack_test.zstack_test_image as test_image
import zstackwoodpecker.test_state as test_state
import apibinding.inventory as inventory
import zstacklib.utils.ssh as ssh
import time
import os
vm = None
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def exec_cmd_in_vm(vm, cmd, fail_msg):
ret, output, stderr = ssh.execute(cmd, vm.get_vm().vmNics[0].ip, "root", "password", False, 22)
if ret != 0:
test_util.test_fail(fail_msg)
def test():
global vm
vm = test_stub.create_vr_vm('migrate_vm', 'imageName_net', 'l3VlanNetwork2')
vm.check()
ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
if ps.type == inventory.LOCAL_STORAGE_TYPE:
test_util.test_skip('Skip test on localstorage PS')
vm_inv = vm.get_vm()
vm_uuid = vm_inv.uuid
test_util.test_dsc('Add ISO Image')
#cond = res_ops.gen_query_conditions('name', '=', 'sftp')
bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE)[0].uuid
img_option = test_util.ImageOption()
img_option.set_name('iso')
img_option.set_backup_storage_uuid_list([bs_uuid])
testIsoUrl = os.environ.get('testIsoUrl')
img_option.set_url(testIsoUrl)
image_inv = img_ops.add_iso_template(img_option)
image = test_image.ZstackTestImage()
image.set_image(image_inv)
image.set_creation_option(img_option)
test_obj_dict.add_image(image)
test_util.test_dsc('Attach ISO to VM')
cond = res_ops.gen_query_conditions('name', '=', 'iso')
iso_uuid = res_ops.query_resource(res_ops.IMAGE, cond)[0].uuid
img_ops.attach_iso(iso_uuid, vm_uuid)
time.sleep(10)
cmd = "mount /dev/sr0 /mnt"
exec_cmd_in_vm(vm, cmd, "Failed to mount /dev/sr0 /mnt.")
test_util.test_dsc('Migrate VM')
test_stub.migrate_vm_to_random_host(vm)
vm.check()
cmd = "umount /mnt"
exec_cmd_in_vm(vm, cmd, "Failed to umount /mnt.")
img_ops.detach_iso(vm_uuid)
img_ops.attach_iso(iso_uuid, vm_uuid)
time.sleep(10)
cmd = "mount /dev/sr0 /mnt"
exec_cmd_in_vm(vm, cmd, "Failed to mount /dev/sr0 /mnt.")
cmd = "cat /mnt/Licenses.txt"
exec_cmd_in_vm(vm, cmd, "Licenses.txt doesn't exist.")
img_ops.detach_iso(vm_uuid)
image.delete()
image.expunge()
test_obj_dict.rm_image(image)
vm.destroy()
test_util.test_pass('Migrate VM Test Success When Attach ISO')
#Will be called only if exception happens in test().
def error_cleanup():
test_lib.lib_error_cleanup(test_obj_dict)
|
ladderlife/loonie
|
tools/tests/buck_to_junit.py
|
#!/usr/bin/env python
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pkg_resources import resource_stream
from optparse import OptionParser
from os import path, chdir
from os.path import abspath
from shutil import rmtree, copyfileobj
from subprocess import check_call, CalledProcessError
from tempfile import mkdtemp
opts = OptionParser()
opts.add_option('-t', help='test report to convert')
opts.add_option('-o', help='output directory')
args, _ = opts.parse_args()
temp_dir = mkdtemp()
try:
try:
check_call(
['curl', '--proxy-anyauth', '-sfo', path.join(temp_dir, 'saxon.jar'),
'http://central.maven.org/maven2/net/sf/saxon/Saxon-HE/9.6.0-6/Saxon-HE-9.6.0-6.jar'])
except OSError as err:
print('could not invoke curl: %s\nis curl installed?' % err)
exit(1)
except CalledProcessError as err:
print('error using curl: %s' % err)
exit(1)
buck_report = abspath(args.t)
buck_to_junit_xsl = abspath(
path.join(temp_dir, "buckToJUnit.xsl"))
with open(buck_to_junit_xsl, 'w') as fp:
with resource_stream('buck_to_junit', 'buckToJUnit.xsl') as rfp:
copyfileobj(rfp, fp)
chdir(args.o)
try:
check_call(
['java', '-jar', path.join(temp_dir, 'saxon.jar'), '-s:' + buck_report,
'-xsl:' + buck_to_junit_xsl])
except CalledProcessError as err:
print('error converting to junit: %s' % err)
exit(1)
finally:
rmtree(temp_dir, ignore_errors=True)
|
gundramleifert/exp_tf
|
models/lp/bdlstm_lp_v18.py
|
'''
Author: Tobi and Gundram
'''
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops.rnn import bidirectional_rnn
from util.LoaderUtil import read_image_list, get_list_vals
from random import shuffle
from util.STR2CTC import get_charmap_lp, get_charmap_lp_inv
import os
import time
import numpy as np
import matplotlib.pyplot as plt
# Goes down to 10%
INPUT_PATH_TRAIN = './private/lists/lp_only_shifted_train.lst'
INPUT_PATH_VAL = './private/lists/lp_only_val.lst'
cm, nClasses = get_charmap_lp()
# Additional NaC Channel
nClasses += 1
nEpochs = 15
batchSize = 16
# learningRate = 0.001
# momentum = 0.9
# It is assumed that the TextLines are ALL saved with a consistent height of imgH
imgH = 48
# Depending on the size the image is cropped or zero padded
imgW = 256
channels = 1
nHiddenLSTM1 = 256
os.chdir("../..")
trainList = read_image_list(INPUT_PATH_TRAIN)
numT = 32998
stepsPerEpocheTrain = numT / batchSize
valList = read_image_list(INPUT_PATH_VAL)
stepsPerEpocheVal = len(valList) / batchSize
def inference(images, seqLen, keep_prob):
with tf.variable_scope('conv1') as scope:
kernel = tf.Variable(tf.truncated_normal([6, 5, channels, 32], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(images, kernel, [1, 4, 3, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[32]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
norm1 = tf.nn.local_response_normalization(conv1, name='norm1')
# _activation_summary(conv1)
# norm1 = tf.nn.local_response_normalization(conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm1')
seqFloat = tf.to_float(seqLen)
seqL2 = tf.ceil(seqFloat * 0.33)
with tf.variable_scope('conv2') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[64]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
norm2 = tf.nn.local_response_normalization(conv2, name='norm2')
# _activation_summary(conv2)
# norm2
# norm2 = tf.nn.local_response_normalization(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm2')
pool2 = tf.nn.max_pool(norm2, ksize=[1, 4, 2, 1], strides=[1, 4, 2, 1], padding='SAME', name='pool2')
seqL3 = tf.ceil(seqL2 * 0.5)
with tf.variable_scope('conv3') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 3, 64, 128], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[128]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv3 = tf.nn.relu(pre_activation, name=scope.name)
norm3 = tf.nn.local_response_normalization(conv3, name='norm3')
pool3 = tf.nn.max_pool(norm3, ksize=[1, 3, 1, 1], strides=[1, 3, 1, 1], padding='SAME', name='pool3')
# NO POOLING HERE -> CTC needs an appropriate length.
seqLenAfterConv = tf.to_int32(seqL3)
with tf.variable_scope('RNN_Prep') as scope:
# (#batch Y X Z) --> (X #batch Y Z)
rnnIn = tf.transpose(pool3, [2, 0, 1, 3])
# (X #batch Y Z) --> (X #batch Y*Z)
shape = rnnIn.get_shape()
steps = shape[0]
rnnIn = tf.reshape(rnnIn, tf.pack([shape[0], shape[1], -1]))
# (X #batch Y*Z) --> (X*#batch Y*Z)
shape = rnnIn.get_shape()
rnnIn = tf.reshape(rnnIn, tf.pack([-1, shape[2]]))
# (X*#batch Y*Z) --> list of X tensors of shape (#batch, Y*Z)
rnnIn = tf.split(0, steps, rnnIn)
with tf.variable_scope('BLSTM1') as scope:
forwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1, use_peepholes=True, state_is_tuple=True)
droppedFW = rnn_cell.DropoutWrapper(forwardH1, output_keep_prob=keep_prob)
backwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1, use_peepholes=True, state_is_tuple=True)
droppedBW = rnn_cell.DropoutWrapper(backwardH1, output_keep_prob=keep_prob)
outputs, _, _ = bidirectional_rnn(droppedFW, droppedBW, rnnIn, dtype=tf.float32)
fbH1rs = [tf.reshape(t, [batchSize, 2, nHiddenLSTM1]) for t in outputs]
# outH1 = [tf.reduce_sum(tf.mul(t, weightsOutH1), reduction_indices=1) + biasesOutH1 for t in fbH1rs]
outH1 = [tf.reduce_sum(t, reduction_indices=1) for t in fbH1rs]
with tf.variable_scope('LOGIT') as scope:
weightsClasses = tf.Variable(tf.truncated_normal([nHiddenLSTM1, nClasses],
stddev=np.sqrt(2.0 / nHiddenLSTM1)))
biasesClasses = tf.Variable(tf.zeros([nClasses]))
logitsFin = [tf.matmul(t, weightsClasses) + biasesClasses for t in outH1]
logits3d = tf.pack(logitsFin)
return logits3d, seqLenAfterConv
def loss(logits3d, tgt, seqLenAfterConv):
loss = tf.reduce_sum(ctc.ctc_loss(logits3d, tgt, seqLenAfterConv))
return loss
print('Defining graph')
graph = tf.Graph()
with graph.as_default():
####Graph input
inputX = tf.placeholder(tf.float32, shape=(batchSize, imgH, imgW, channels))
targetIxs = tf.placeholder(tf.int64)
targetVals = tf.placeholder(tf.int32)
targetShape = tf.placeholder(tf.int64)
targetY = tf.SparseTensor(targetIxs, targetVals, targetShape)
seqLengths = tf.placeholder(tf.int32, shape=(batchSize))
keep_prob = tf.placeholder(tf.float32)
logits3d, seqAfterConv = inference(inputX, seqLengths, keep_prob)
loss = loss(logits3d, targetY, seqAfterConv)
# optimizer = tf.train.MomentumOptimizer(learningRate, momentum).minimize(loss)
optimizer = tf.train.AdamOptimizer().minimize(loss)
# pred = tf.to_int32(ctc.ctc_beam_search_decoder(logits3d, seqAfterConv, merge_repeated=False)[0][0])
pred = tf.to_int32(ctc.ctc_greedy_decoder(logits3d, seqAfterConv)[0][0])
edist = tf.edit_distance(pred, targetY, normalize=False)
tgtLens = tf.to_float(tf.size(targetY.values))
err = tf.reduce_sum(edist) / tgtLens
saver = tf.train.Saver()
with tf.Session(graph=graph) as session:
# writer = tf.train.SummaryWriter('./log', session.graph)
print('Initializing')
tf.global_variables_initializer().run()
# ckpt = tf.train.get_checkpoint_state("./private/models/lp2/")
# if ckpt and ckpt.model_checkpoint_path:
# saver.restore(session, ckpt.model_checkpoint_path)
# print(ckpt)
# workList = valList[:]
# errV = 0
# lossV = 0
# timeVS = time.time()
# cmInv = get_charmap_lp_inv()
# for bStep in range(stepsPerEpocheVal):
# bList, workList = workList[:batchSize], workList[batchSize:]
# batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
# imgW,
# mvn=True)
# feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
# targetShape: batchTargetShape, seqLengths: batchSeqLengths}
# lossB, aErr, p = session.run([loss, err, pred], feed_dict=feedDict)
# print(aErr)
# res = []
# for idx in p.values:
# res.append(cmInv[idx])
# print(res)
# # print(p)
# plt.imshow(batchInputs[0,:,:,0], cmap=plt.cm.gray)
# plt.show()
#
# lossV += lossB
# errV += aErr
# print('Val: CTC-loss ', lossV)
# errVal = errV / stepsPerEpocheVal
# print('Val: CER ', errVal)
# print('Val time ', time.time() - timeVS)
for epoch in range(nEpochs):
workList = trainList[:]
shuffle(workList)
workList = workList[0:32998]
print('Epoch', epoch + 1, '...')
lossT = 0
errT = 0
timeTS = time.time()
for bStep in range(stepsPerEpocheTrain):
bList, workList = workList[:batchSize], workList[batchSize:]
batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
imgW,
mvn=True)
feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
targetShape: batchTargetShape, seqLengths: batchSeqLengths, keep_prob: 0.5}
_, lossB, aErr = session.run([optimizer, loss, err], feed_dict=feedDict)
# _, lossB, aErr, sET, sLT = session.run([optimizer, loss, err, err_train, loss_train], feed_dict=feedDict)
lossT += lossB
# writer.add_summary(sET, epoch * stepsPerEpocheTrain + bStep)
# writer.add_summary(sLT, epoch * stepsPerEpocheTrain + bStep)
errT += aErr
print('Train: CTC-loss ', lossT)
cerT = errT / stepsPerEpocheTrain
print('Train: CER ', cerT)
print('Train time ', time.time() - timeTS)
workList = valList[:]
errV = 0
lossV = 0
timeVS = time.time()
for bStep in range(stepsPerEpocheVal):
bList, workList = workList[:batchSize], workList[batchSize:]
batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
imgW,
mvn=True)
feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
targetShape: batchTargetShape, seqLengths: batchSeqLengths, keep_prob: 1.0}
lossB, aErr = session.run([loss, err], feed_dict=feedDict)
# lossB, aErr, sE, sL = session.run([loss, err, err_val, loss_val], feed_dict=feedDict)
# writer.add_summary(sE, epoch*stepsPerEpocheVal + bStep)
# writer.add_summary(sL, epoch * stepsPerEpocheVal + bStep)
lossV += lossB
errV += aErr
print('Val: CTC-loss ', lossV)
errVal = errV / stepsPerEpocheVal
print('Val: CER ', errVal)
print('Val time ', time.time() - timeVS)
# Write a checkpoint.
checkpoint_file = os.path.join('./private/models/lp18/', 'checkpoint')
saver.save(session, checkpoint_file, global_step=epoch)
# Defining graph
# Initializing
# Epoch 1 ...
# Train: CTC-loss 573618.233516
# Train: CER 0.732053663761
# Train time 2566.37360501
# Val: CTC-loss 7505.82809639
# Val: CER 0.0942081060599
# Val time 70.4839401245
# Epoch 2 ...
# Train: CTC-loss 73084.4478534
# Train: CER 0.0846337911042
# Train time 2332.564466
# Val: CTC-loss 4603.30670595
# Val: CER 0.0561269361496
# Val time 70.5889220238
# Epoch 3 ...
# Train: CTC-loss 56508.2676985
# Train: CER 0.0645991757256
# Train time 2307.94393301
# Val: CTC-loss 4183.01323938
# Val: CER 0.0514986638259
# Val time 69.8001348972
# Epoch 4 ...
# Train: CTC-loss 50729.6482956
# Train: CER 0.0577029808028
# Train time 2291.99180603
# Val: CTC-loss 4037.41225258
# Val: CER 0.0482444823993
# Val time 71.2115728855
# Epoch 5 ...
# Train: CTC-loss 46517.1993931
# Train: CER 0.0531190908993
# Train time 2281.25194097
# Val: CTC-loss 3822.13585426
# Val: CER 0.0456547654697
# Val time 71.494040966
# Epoch 6 ...
# Train: CTC-loss 44121.3987505
# Train: CER 0.0502368603453
# Train time 2285.71324015
# Val: CTC-loss 3754.89540133
# Val: CER 0.0438374517296
# Val time 70.6552250385
# Epoch 7 ...
# Train: CTC-loss 41823.5579544
# Train: CER 0.0477986275146
# Train time 2265.03064203
# Val: CTC-loss 3644.30589531
# Val: CER 0.0427835063939
# Val time 72.7770349979
# Epoch 8 ...
# Train: CTC-loss 40823.7625133
# Train: CER 0.0468467820267
# Train time 2255.28358293
# Val: CTC-loss 3711.35232484
# Val: CER 0.0433838154652
# Val time 72.92958498
# Epoch 9 ...
# Train: CTC-loss 41962.8032772
# Train: CER 0.0473091210596
# Train time 2240.59626412
# Val: CTC-loss 3429.378994
# Val: CER 0.0395136085105
# Val time 68.2135629654
# Epoch 10 ...
# Train: CTC-loss 39743.3587489
# Train: CER 0.0449621349412
# Train time 2247.17607689
# Val: CTC-loss 3538.12361477
# Val: CER 0.0405062843353
# Val time 73.0749549866
# Epoch 11 ...
# Train: CTC-loss 37841.1128339
# Train: CER 0.0436842029487
# Train time 1877.61327505
# Val: CTC-loss 3562.50696394
# Val: CER 0.0415591884922
# Val time 59.2560589314
# Epoch 12 ...
# Train: CTC-loss 38564.065591
# Train: CER 0.0441661707426
# Train time 1815.8160131
# Val: CTC-loss 3493.62069036
# Val: CER 0.0383308982865
# Val time 59.280351162
# Epoch 13 ...
# Train: CTC-loss 36559.2618546
# Train: CER 0.0421647087487
# Train time 1828.42957115
# Val: CTC-loss 3482.46136662
# Val: CER 0.0394279501017
# Val time 58.629338026
# Epoch 14 ...
# Train: CTC-loss 36965.8608795
# Train: CER 0.0431305091687
# Train time 1601.83509898
# Val: CTC-loss 3487.08890994
# Val: CER 0.0393206818617
# Val time 45.286646843
# Epoch 15 ...
# Train: CTC-loss 35303.027672
# Train: CER 0.0411365469195
# Train time 1386.08462787
# Val: CTC-loss 3466.22052066
# Val: CER 0.0385100504752
# Val time 44.8697309494
|
spandanb/horizon
|
openstack_dashboard/dashboards/project/networks/subnets/workflows.py
|
# Copyright 2013 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks import workflows \
as network_workflows
LOG = logging.getLogger(__name__)
class CreateSubnetInfoAction(network_workflows.CreateSubnetInfoAction):
with_subnet = forms.BooleanField(initial=True, required=False,
widget=forms.HiddenInput())
class Meta:
name = _("Subnet")
help_text = _('You can create a subnet associated with the '
'network. Advanced configuration is available '
'in the "Subnet Detail" tab.')
def clean(self):
cleaned_data = workflows.Action.clean(self)
self._check_subnet_data(cleaned_data)
return cleaned_data
class CreateSubnetInfo(network_workflows.CreateSubnetInfo):
action_class = CreateSubnetInfoAction
depends_on = ("network_id",)
class CreateSubnet(network_workflows.CreateNetwork):
slug = "create_subnet"
name = _("Create Subnet")
finalize_button_name = _("Create")
success_message = _('Created subnet "%s".')
failure_message = _('Unable to create subnet "%s".')
default_steps = (CreateSubnetInfo,
network_workflows.CreateSubnetDetail)
def format_status_message(self, message):
name = self.context.get('subnet_name') or self.context.get('subnet_id')
return message % name
def get_success_url(self):
return reverse("horizon:project:networks:detail",
args=(self.context.get('network_id'),))
def get_failure_url(self):
return reverse("horizon:project:networks:detail",
args=(self.context.get('network_id'),))
def handle(self, request, data):
subnet = self._create_subnet(request, data)
return bool(subnet)
class UpdateSubnetInfoAction(CreateSubnetInfoAction):
cidr = forms.IPField(label=_("Network Address"),
required=False,
initial="",
widget=forms.TextInput(
attrs={'readonly': 'readonly'}),
help_text=_("Network address in CIDR format "
"(e.g. 192.168.0.0/24)"),
version=forms.IPv4 | forms.IPv6,
mask=True)
# NOTE(amotoki): When the 'disabled' attribute is set for the ChoiceField
# and ValidationError is raised for a POST request, the initial value of
# the ip_version ChoiceField is not set in the re-displayed form.
# As a result, 'IPv4' is displayed even when IPv6 is used if
# ValidationError is detected. In addition, the 'required=True' check complains
# on re-POST since the value of the ChoiceField is not set.
# Thus, for now, I use HiddenInput for the ip_version ChoiceField as a
# workaround.
ip_version = forms.ChoiceField(choices=[(4, 'IPv4'), (6, 'IPv6')],
#widget=forms.Select(
# attrs={'disabled': 'disabled'}),
widget=forms.HiddenInput(),
label=_("IP Version"))
gateway_ip = forms.IPField(
label=_("Gateway IP (optional)"),
required=False,
initial="",
help_text=_("IP address of Gateway (e.g. 192.168.0.254). "
"You need to specify an explicit address "
"to set the gateway. "
"If you want to use no gateway, "
"check 'Disable Gateway' below."),
version=forms.IPv4 | forms.IPv6,
mask=False)
no_gateway = forms.BooleanField(label=_("Disable Gateway"),
initial=False, required=False)
class Meta:
name = _("Subnet")
help_text = _('You can update a subnet associated with the '
'network. Advanced configuration is available '
'in the "Subnet Detail" tab.')
def clean(self):
cleaned_data = workflows.Action.clean(self)
self._check_subnet_data(cleaned_data, is_create=False)
return cleaned_data
class UpdateSubnetInfo(CreateSubnetInfo):
action_class = UpdateSubnetInfoAction
depends_on = ("network_id", "subnet_id")
class UpdateSubnetDetailAction(network_workflows.CreateSubnetDetailAction):
allocation_pools = forms.CharField(widget=forms.HiddenInput(),
required=False)
class Meta:
name = _("Subnet Detail")
help_text = _('You can specify additional attributes for the subnet.')
class UpdateSubnetDetail(network_workflows.CreateSubnetDetail):
action_class = UpdateSubnetDetailAction
class UpdateSubnet(network_workflows.CreateNetwork):
slug = "update_subnet"
name = _("Edit Subnet")
finalize_button_name = _("Save")
success_message = _('Updated subnet "%s".')
failure_message = _('Unable to update subnet "%s".')
success_url = "horizon:project:networks:detail"
failure_url = "horizon:project:networks:detail"
default_steps = (UpdateSubnetInfo,
UpdateSubnetDetail)
def format_status_message(self, message):
name = self.context.get('subnet_name') or self.context.get('subnet_id')
return message % name
def get_success_url(self):
return reverse(self.success_url,
args=(self.context.get('network_id'),))
def _update_subnet(self, request, data):
network_id = self.context.get('network_id')
try:
subnet_id = self.context.get('subnet_id')
params = {}
params['name'] = data['subnet_name']
if data['no_gateway']:
params['gateway_ip'] = None
elif data['gateway_ip']:
params['gateway_ip'] = data['gateway_ip']
# We should send gateway_ip only when it has changed,
# because updating gateway_ip is prohibited
# when the IP is in use.
# See bug 1227268.
subnet = api.neutron.subnet_get(request, subnet_id)
if params['gateway_ip'] == subnet.gateway_ip:
del params['gateway_ip']
self._setup_subnet_parameters(params, data, is_create=False)
subnet = api.neutron.subnet_update(request, subnet_id, **params)
msg = _('Subnet "%s" was successfully updated.') % data['cidr']
LOG.debug(msg)
return subnet
except Exception as e:
msg = (_('Failed to update subnet "%(sub)s": '
' %(reason)s') %
{"sub": data['cidr'], "reason": e})
redirect = reverse(self.failure_url, args=(network_id,))
exceptions.handle(request, msg, redirect=redirect)
return False
def handle(self, request, data):
subnet = self._update_subnet(request, data)
return bool(subnet)
|
zwhinmedia/TextRank
|
textrank4zh/util.py
|
#-*- encoding:utf-8 -*-
"""
@author: Weihao Zhou
@github: https://github.com/zwhinmedia/
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import math
import networkx as nx
import numpy as np
import sys
try:
reload(sys)
sys.setdefaultencoding('utf-8')
except:
pass
#sentence_delimiters = ['?', '!', ';', '?', '!', '。', ';', '……', '…','】','【','\n']
sentence_delimiters = ['?', '!', ';', '?', '!', '。', ';', '……', '…','】','【','\n']
allow_speech_tags = ['an', 'i', 'j', 'l', 'n', 'nr', 'nrfg', 'ns', 'nt', 'nz', 't', 'v', 'vd', 'vn', 'eng']
PY2 = sys.version_info[0] == 2
if not PY2:
# Python 3.x and up
text_type = str
string_types = (str,)
xrange = range
def as_text(v): ## generate a unicode string
if v is None:
return None
elif isinstance(v, bytes):
return v.decode('utf-8', errors='ignore')
elif isinstance(v, str):
return v
else:
raise ValueError('Unknown type %r' % type(v))
def is_text(v):
return isinstance(v, text_type)
else:
# Python 2.x
text_type = unicode
string_types = (str, unicode)
xrange = xrange
def as_text(v):
if v is None:
return None
elif isinstance(v, unicode):
return v
elif isinstance(v, str):
return v.decode('utf-8', errors='ignore')
else:
raise ValueError('Invalid type %r' % type(v))
def is_text(v):
return isinstance(v, text_type)
__DEBUG = None
def debug(*args):
global __DEBUG
if __DEBUG is None:
try:
if os.environ['DEBUG'] == '1':
__DEBUG = True
else:
__DEBUG = False
except:
__DEBUG = False
if __DEBUG:
print( ' '.join([str(arg) for arg in args]) )
class AttrDict(dict):
"""Dict that can get attribute by dot"""
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def combine(word_list, window = 2):
"""构造在window下的单词组合,用来构造单词之间的边。
Keyword arguments:
word_list -- list of str, 由单词组成的列表。
windows -- int, 窗口大小。
"""
if window < 2: window = 2
for x in xrange(1, window):
if x >= len(word_list):
break
word_list2 = word_list[x:]
res = zip(word_list, word_list2)
for r in res:
yield r
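# Illustrative example (added for clarity, not in the original module):
#   list(combine(['a', 'b', 'c'], window=2)) yields [('a', 'b'), ('b', 'c')]
#   list(combine(['a', 'b', 'c'], window=3)) additionally yields ('a', 'c')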
def get_similarity(word_list1, word_list2):
"""默认的用于计算两个句子相似度的函数。
Keyword arguments:
word_list1, word_list2 -- 分别代表两个句子,都是由单词组成的列表
"""
words = list(set(word_list1 + word_list2))
vector1 = [float(word_list1.count(word)) for word in words]
vector2 = [float(word_list2.count(word)) for word in words]
vector3 = [vector1[x]*vector2[x] for x in xrange(len(vector1))]
vector4 = [1 for num in vector3 if num > 0.]
co_occur_num = sum(vector4)
if abs(co_occur_num) <= 1e-12:
return 0.
denominator = math.log(float(len(word_list1))) + math.log(float(len(word_list2))) # denominator
if abs(denominator) < 1e-12:
return 0.
return co_occur_num / denominator
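# Illustrative example (added for clarity, not in the original module): the two
# word lists below share two words ('the' and 'cat'), so co_occur_num is 2 and
# the score is 2 / (log(3) + log(3)), roughly 0.91:
#   get_similarity(['the', 'cat', 'sat'], ['the', 'cat', 'ran'])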
def sort_words(vertex_source, edge_source, window = 2, pagerank_config = {'alpha': 0.85,}):
"""将单词按关键程度从大到小排序
Keyword arguments:
vertex_source -- 二维列表,子列表代表句子,子列表的元素是单词,这些单词用来构造pagerank中的节点
edge_source -- 二维列表,子列表代表句子,子列表的元素是单词,根据单词位置关系构造pagerank中的边
window -- 一个句子中相邻的window个单词,两两之间认为有边
pagerank_config -- pagerank的设置
"""
sorted_words = []
word_index = {}
index_word = {}
_vertex_source = vertex_source
_edge_source = edge_source
words_number = 0
for word_list in _vertex_source:
for word in word_list:
if not word in word_index:
word_index[word] = words_number
index_word[words_number] = word
words_number += 1
graph = np.zeros((words_number, words_number))
for word_list in _edge_source:
for w1, w2 in combine(word_list, window):
if w1 in word_index and w2 in word_index:
index1 = word_index[w1]
index2 = word_index[w2]
graph[index1][index2] = 1.0
graph[index2][index1] = 1.0
debug('graph:\n', graph)
nx_graph = nx.from_numpy_matrix(graph)
scores = nx.pagerank(nx_graph, **pagerank_config) # this is a dict
sorted_scores = sorted(scores.items(), key = lambda item: item[1], reverse=True)
for index, score in sorted_scores:
item = AttrDict(word=index_word[index], weight=score)
sorted_words.append(item)
return sorted_words
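# Illustrative sketch (not part of the original module): sort_words() expects
# sentences already tokenized into word lists; the toy input below is made up,
# and the same lists are used for both the nodes and the edges.
def _example_sort_words():
    tokenized = [['pagerank', 'ranks', 'words'], ['words', 'build', 'graph']]
    return sort_words(tokenized, tokenized, window=2)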
def sort_sentences(sentences, words, sim_func = get_similarity, pagerank_config = {'alpha': 0.85,}):
"""将句子按照关键程度从大到小排序
Keyword arguments:
sentences -- 列表,元素是句子
words -- 二维列表,子列表和sentences中的句子对应,子列表由单词组成
sim_func -- 计算两个句子的相似性,参数是两个由单词组成的列表
pagerank_config -- pagerank的设置
"""
sorted_sentences = []
_source = words
sentences_num = len(_source)
graph = np.zeros((sentences_num, sentences_num))
for x in xrange(sentences_num):
for y in xrange(x, sentences_num):
similarity = sim_func( _source[x], _source[y] )
graph[x, y] = similarity
graph[y, x] = similarity
nx_graph = nx.from_numpy_matrix(graph)
scores = nx.pagerank(nx_graph, **pagerank_config) # this is a dict
sorted_scores = sorted(scores.items(), key = lambda item: item[1], reverse=True)
for index, score in sorted_scores:
item = AttrDict(index=index, sentence=sentences[index], weight=score)
sorted_sentences.append(item)
return sorted_sentences
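# Illustrative sketch (not part of the original module): sort_sentences() takes
# the raw sentences together with their tokenized word lists; the toy input
# below is made up.
def _example_sort_sentences():
    sentences = ['the cat sat', 'the cat ran']
    words = [['the', 'cat', 'sat'], ['the', 'cat', 'ran']]
    return sort_sentences(sentences, words)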
if __name__ == '__main__':
pass
|
google-research/pyreach
|
pyreach/gyms/pausable_env.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements a wrapper to PyReach env that allows pausing and resuming it.
This is used as a building block for EnvTimeSlicer().
"""
import enum
import threading
from typing import Any, Callable, List, Optional, Tuple
import gym # type: ignore
from pyreach.gyms import reach_env
class SessionEndReason(enum.Enum):
"""Enumerates different reasons for detecting that a session has ended.
Used by PausableEnv to notify why a session was detected to have ended.
"""
AGENT_CALLED_RESET = 1
STEP_RETURNED_DONE = 2
AGENT_FINISHED = 3
class PausableEnv(gym.Wrapper):
"""Makes a gym environment pausable.
This environment wrapper allows the environment to be paused and resumed at
will. When paused, any calls that might modify the environment (such as moving
the robot) will be blocked (i.e. the code path will be put on hold).
Example usage -
pausable_env = PausableEnv(env)
# Start an agent in the background.
threading.Thread(target=agent.run, args=[pausable_env]).start()
# The environment may be paused or resumed at any time.
# After pause(), any work in progress continues until the next command from the agent.
# The next method called by the agent will not return until the env resumes.
pausable_env.pause()
# On resume(), the env will be marked active. If any agent was running and
# blocked, it will immediately get control back.
# Context such as agent id will be monitored and restored on resume.
pausable_env.resume()
"""
def __init__(self,
env: reach_env.ReachEnv,
start_paused: bool = False) -> None:
super().__init__(env)
self._env = env
# Event to know if the environment is active or paused.
self._is_active = threading.Event()
# Since the agent id can be changed while this env is paused, we
# remember the agent id and set it again on resume.
self._last_agent_id: Optional[str] = None
# Contains a tuple with a custom context, and a callback.
# The callback will be called as -
# callback(session_end_reason, custom_context).
# The custom_context can be defined in add_session_callback().
self._session_end_callbacks: List[Tuple[Any,
Callable[[SessionEndReason, Any],
None]]] = []
if not start_paused:
self.resume()
def is_active(self) -> bool:
return self._is_active.is_set()
def wait_till_active(self) -> None:
self._is_active.wait()
def pause(self) -> None:
"""Pauses this environment.
All calls that may require the environment to do something will be paused
until resume() is called.
"""
self._is_active.clear()
def resume(self) -> None:
"""Resumes this particular environment."""
if self._last_agent_id is not None:
self._env.set_agent_id(self._last_agent_id)
self._is_active.set()
def _delegate(self, method: Callable[..., Any], *args: Any,
**kwargs: Any) -> Any:
self._is_active.wait()
return method(*args, **kwargs)
def _notify_new_session(self, session_end_reason: SessionEndReason) -> None:
"""Notifies any handlers that a session has ended."""
for custom_context, on_session_end in self._session_end_callbacks:
on_session_end(session_end_reason, custom_context)
def add_session_end_callback(self,
fn: Callable[[SessionEndReason, Any], None],
context: Any = None) -> None:
self._session_end_callbacks.append((context, fn))
def agent_ended(self) -> None:
"""Can be called by an agent to communicate that a session has ended."""
self._notify_new_session(SessionEndReason.AGENT_FINISHED)
# The environment may get paused as a result of the call above.
# If so, we block the agent which called this method until resumed.
self._is_active.wait()
# Override all methods that need to be paused when this env is not activated.
def step(self, *args: Any, **kwargs: Any) -> Any:
result = self._delegate(self._env.step, *args, **kwargs)
done = result[2] if len(result) >= 3 else False
if done:
self._notify_new_session(SessionEndReason.STEP_RETURNED_DONE)
return result
def reset(self, *args: Any, **kwargs: Any) -> Any:
result = self._delegate(self._env.reset, *args, **kwargs)
self._notify_new_session(SessionEndReason.AGENT_CALLED_RESET)
return result
def render(self, *args: Any, **kwargs: Any) -> Any:
return self._delegate(self._env.render, *args, **kwargs)
def close(self, *args: Any, **kwargs: Any) -> Any:
return self._delegate(self._env.close, *args, **kwargs)
def set_agent_id(self, agent_id: str) -> None:
self._last_agent_id = agent_id
return self._delegate(self._env.set_agent_id, agent_id)
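# Hypothetical sketch (added for illustration; not part of the original module):
# registering a session-end callback and time-slicing the environment. The agent
# object and its run() method are assumed to exist elsewhere.
#
#   pausable_env = PausableEnv(env, start_paused=True)
#   pausable_env.add_session_end_callback(
#       lambda reason, ctx: print(reason, ctx), context='demo')
#   threading.Thread(target=agent.run, args=[pausable_env]).start()
#   pausable_env.resume()  # let the agent run for a while...
#   pausable_env.pause()   # ...then block it again at its next env call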
|
uw-it-aca/canvas-sis-provisioner
|
sis_provisioner/models/enrollment.py
|
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.db import models, IntegrityError
from django.db.models import F
from django.conf import settings
from django.utils.timezone import utc, localtime
from sis_provisioner.models import Import, ImportResource
from sis_provisioner.models.course import Course
from sis_provisioner.models.user import User
from sis_provisioner.dao.term import is_active_term
from sis_provisioner.dao.canvas import (
get_instructor_sis_import_role, ENROLLMENT_ACTIVE)
from sis_provisioner.exceptions import EmptyQueueException
from restclients_core.exceptions import DataFailureException
from datetime import datetime, timedelta
from logging import getLogger
logger = getLogger(__name__)
enrollment_log_prefix = 'ENROLLMENT:'
class EnrollmentManager(models.Manager):
def queue_by_priority(self, priority=ImportResource.PRIORITY_DEFAULT):
filter_limit = settings.SIS_IMPORT_LIMIT['enrollment']['default']
pks = super(EnrollmentManager, self).get_queryset().filter(
priority=priority, queue_id__isnull=True
).order_by(
'last_modified'
).values_list('pk', flat=True)[:filter_limit]
if not len(pks):
raise EmptyQueueException()
imp = Import(priority=priority, csv_type='enrollment')
imp.save()
super(EnrollmentManager, self).get_queryset().filter(
pk__in=list(pks)).update(queue_id=imp.pk)
return imp
def queued(self, queue_id):
return super(EnrollmentManager, self).get_queryset().filter(
queue_id=queue_id)
def dequeue(self, sis_import):
Course.objects.dequeue(sis_import)
if sis_import.is_imported():
# Decrement the priority
super(EnrollmentManager, self).get_queryset().filter(
queue_id=sis_import.pk, priority__gt=Enrollment.PRIORITY_NONE
).update(
queue_id=None, priority=F('priority') - 1)
else:
self.queued(sis_import.pk).update(queue_id=None)
self.purge_expired()
def purge_expired(self):
retention_dt = datetime.utcnow().replace(tzinfo=utc) - timedelta(
days=getattr(settings, 'ENROLLMENT_EVENT_RETENTION_DAYS', 180))
return super(EnrollmentManager, self).get_queryset().filter(
priority=Enrollment.PRIORITY_NONE,
last_modified__lt=retention_dt).delete()
def add_enrollment(self, enrollment_data):
section = enrollment_data.get('Section')
reg_id = enrollment_data.get('UWRegID')
role = enrollment_data.get('Role')
status = enrollment_data.get('Status').lower()
last_modified = enrollment_data.get('LastModified').replace(tzinfo=utc)
request_date = enrollment_data.get('RequestDate')
instructor_reg_id = enrollment_data.get('InstructorUWRegID', None)
course_id = '-'.join([section.term.canvas_sis_id(),
section.curriculum_abbr.upper(),
section.course_number,
section.section_id.upper()])
primary_course_id = None
if section.is_primary_section:
primary_course_id = None
else:
primary_course_id = section.canvas_course_sis_id()
full_course_id = '-'.join([course_id, instructor_reg_id]) if (
instructor_reg_id is not None) else course_id
try:
course = Course.objects.get(course_id=full_course_id)
if course.provisioned_date:
enrollment = Enrollment.objects.get(course_id=course_id,
reg_id=reg_id,
role=role)
if (last_modified > enrollment.last_modified or (
last_modified == enrollment.last_modified and
status == ENROLLMENT_ACTIVE)):
enrollment.status = status
enrollment.last_modified = last_modified
enrollment.request_date = request_date
enrollment.primary_course_id = primary_course_id
enrollment.instructor_reg_id = instructor_reg_id
if enrollment.queue_id is None:
enrollment.priority = enrollment.PRIORITY_DEFAULT
else:
enrollment.priority = enrollment.PRIORITY_HIGH
logger.info('{} IN QUEUE {}, {}, {}, {}'.format(
enrollment_log_prefix, full_course_id, reg_id,
role, enrollment.queue_id))
enrollment.save()
logger.info('{} UPDATE {}, {}, {}, {}, {}'.format(
enrollment_log_prefix, full_course_id, reg_id, role,
status, last_modified))
else:
logger.info('{} IGNORE {}, {}, {} before {}'.format(
enrollment_log_prefix, full_course_id, reg_id,
last_modified, enrollment.last_modified))
else:
logger.info('{} IGNORE Unprovisioned course {}, {}, {}'.format(
enrollment_log_prefix, full_course_id, reg_id, role))
course.priority = course.PRIORITY_HIGH
course.save()
except Enrollment.DoesNotExist:
enrollment = Enrollment(course_id=course_id, reg_id=reg_id,
role=role, status=status,
last_modified=last_modified,
primary_course_id=primary_course_id,
instructor_reg_id=instructor_reg_id)
try:
enrollment.save()
logger.info('{} ADD {}, {}, {}, {}, {}'.format(
enrollment_log_prefix, full_course_id, reg_id, role,
status, last_modified))
except IntegrityError:
self.add_enrollment(enrollment_data) # Try again
except Course.DoesNotExist:
if is_active_term(section.term):
# Initial course provisioning effectively picks up event
course = Course(course_id=full_course_id,
course_type=Course.SDB_TYPE,
term_id=section.term.canvas_sis_id(),
primary_id=primary_course_id,
priority=Course.PRIORITY_HIGH)
try:
course.save()
logger.info(
'{} IGNORE Unprovisioned course {}, {}, {}'.format(
enrollment_log_prefix, full_course_id, reg_id,
role))
except IntegrityError:
self.add_enrollment(enrollment_data) # Try again
else:
logger.info('{} IGNORE Inactive section {}, {}, {}'.format(
enrollment_log_prefix, full_course_id, reg_id, role))
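# Hypothetical note (added for illustration; not part of the original module):
# add_enrollment() above expects an enrollment_data dict shaped roughly like the
# following, inferred from the keys it reads; the values shown here are made up.
#
#   enrollment_data = {
#       'Section': section,  # a section object (term, curriculum_abbr, etc.)
#       'UWRegID': 'ABCDEF1234567890ABCDEF1234567890',
#       'Role': 'Student',
#       'Status': 'Active',
#       'LastModified': datetime(2022, 1, 1),
#       'RequestDate': datetime(2022, 1, 1),
#       'InstructorUWRegID': None,
#   }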
class Enrollment(ImportResource):
""" Represents the provisioned state of an enrollment.
"""
reg_id = models.CharField(max_length=32)
status = models.CharField(max_length=16)
role = models.CharField(max_length=32)
course_id = models.CharField(max_length=80)
last_modified = models.DateTimeField()
request_date = models.DateTimeField(null=True)
primary_course_id = models.CharField(max_length=80, null=True)
instructor_reg_id = models.CharField(max_length=32, null=True)
priority = models.SmallIntegerField(
default=ImportResource.PRIORITY_DEFAULT,
choices=ImportResource.PRIORITY_CHOICES)
queue_id = models.CharField(max_length=30, null=True)
objects = EnrollmentManager()
def is_active(self):
return self.status.lower() == ENROLLMENT_ACTIVE.lower()
def is_instructor(self):
return self.role.lower() == get_instructor_sis_import_role()
def json_data(self):
return {
"reg_id": self.reg_id,
"status": self.status,
"course_id": self.course_id,
"last_modified": localtime(self.last_modified).isoformat() if (
self.last_modified is not None) else None,
"request_date": localtime(self.request_date).isoformat() if (
self.request_date is not None) else None,
"primary_course_id": self.primary_course_id,
"instructor_reg_id": self.instructor_reg_id,
"role": self.role,
"priority": self.PRIORITY_CHOICES[self.priority][1],
"queue_id": self.queue_id,
}
class Meta:
unique_together = ("course_id", "reg_id", "role")
class InvalidEnrollmentManager(models.Manager):
def queue_by_priority(self, priority=ImportResource.PRIORITY_DEFAULT):
filter_limit = settings.SIS_IMPORT_LIMIT['enrollment']['default']
pks = super(InvalidEnrollmentManager, self).get_queryset().filter(
priority=priority, queue_id__isnull=True
).order_by('pk').values_list('pk', flat=True)[:filter_limit]
if not len(pks):
raise EmptyQueueException()
imp = Import(priority=priority, csv_type='invalid_enrollment')
imp.save()
super(InvalidEnrollmentManager, self).get_queryset().filter(
pk__in=list(pks)).update(queue_id=imp.pk)
return imp
def queued(self, queue_id):
return super(InvalidEnrollmentManager, self).get_queryset().filter(
queue_id=queue_id)
def dequeue(self, sis_import):
if sis_import.is_imported():
self.queued(sis_import.pk).update(
queue_id=None, priority=InvalidEnrollment.PRIORITY_NONE)
def add_enrollments(self):
check_roles = getattr(settings, 'ENROLLMENT_TYPES_INVALID_CHECK')
for user in User.objects.get_invalid_enrollment_check_users():
# Verify that the check conditions still exist
if user.is_affiliate_user() or user.is_sponsored_user():
# User is OK to have any of the check_roles, restore if needed
for inv in InvalidEnrollment.objects.filter(
user=user, restored_date__isnull=True):
inv.priority = InvalidEnrollment.PRIORITY_DEFAULT
inv.save()
elif user.is_student_user():
# User is not OK to have any of the check_roles
try:
for enr in user.get_active_sis_enrollments(
roles=check_roles):
inv, _ = InvalidEnrollment.objects.get_or_create(
user=user, role=enr.role,
section_id=enr.sis_section_id)
if inv.priority == InvalidEnrollment.PRIORITY_NONE:
inv.priority = InvalidEnrollment.PRIORITY_DEFAULT
inv.save()
except DataFailureException as ex:
if ex.status != 404:
raise
# Clear check flag
user.invalid_enrollment_check_required = False
user.save()
class InvalidEnrollment(ImportResource):
user = models.ForeignKey(User, on_delete=models.CASCADE)
role = models.CharField(max_length=32)
section_id = models.CharField(max_length=80)
found_date = models.DateTimeField(auto_now_add=True)
deleted_date = models.DateTimeField(null=True)
restored_date = models.DateTimeField(null=True)
priority = models.SmallIntegerField(
default=ImportResource.PRIORITY_DEFAULT,
choices=ImportResource.PRIORITY_CHOICES)
queue_id = models.CharField(max_length=30, null=True)
class Meta:
constraints = [
models.UniqueConstraint(fields=['user', 'role', 'section_id'],
name='unique_enrollment')
]
objects = InvalidEnrollmentManager()
|
erikr/happinesspackets
|
happinesspackets/messaging/migrations/0004_auto_20160403_1742.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-03 15:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('messaging', '0003_blacklistedemail_stripped_email'),
]
operations = [
migrations.AddField(
model_name='message',
name='recipient_email_stripped',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='message',
name='sender_email_stripped',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
]
|
rajpushkar83/cloudmesh
|
cloudmesh/shell/cm_image.py
|
#!/usr/bin/env python
from __future__ import print_function
from docopt import docopt
from cloudmesh.cm_mongo import cm_mongo
from cloudmesh.config.cm_config import cm_config
from cloudmesh_base.logger import LOGGER
from tabulate import tabulate
log = LOGGER(__file__)
def shell_command_image(arguments):
"""
::
Usage:
image
image <cm_cloud>... [--refresh]
image -h | --help
image --version
Options:
-h help message
--refresh refresh images of IaaS
Arguments:
cm_cloud Name of the IaaS cloud e.g. india_openstack_grizzly.
Description:
image command provides list of available images. Image describes
pre-configured virtual machine image.
Result:
Examples:
$ image india_openstack_grizzly
"""
# log.info(arguments)
cloud_names = arguments['<cm_cloud>']
# None value means ALL clouds in c.images() function
if not cloud_names:
cloud_names = None
config = cm_config()
username = config.username()
c = cm_mongo()
c.activate(cm_user_id=username)
if arguments['--refresh']:
c.refresh(cm_user_id=username, names=cloud_names, types=['images'])
images_dict = c.images(cm_user_id=username, clouds=cloud_names)
your_keys = {"openstack":
[
# ["Metadata", "metadata"],
["status", "status"],
["name", "name"],
["id", "id"],
["type_id", "metadata", "instance_type_id"],
["iname", "metadata", "instance_type_name"],
["location", "metadata", "image_location"],
["state", "metadata", "image_state"],
["updated", "updated"],
["minDisk", "minDisk"],
["memory_mb", "metadata", 'instance_type_memory_mb'],
["fid", "metadata", "instance_type_flavorid"],
["vcpus", "metadata", "instance_type_vcpus"],
["user_id", "metadata", "user_id"],
["owner_id", "metadata", "owner_id"],
["gb", "metadata", "instance_type_root_gb"],
["arch", ""]
],
"ec2":
[
# ["Metadata", "metadata"],
["state", "extra", "state"],
["name", "name"],
["id", "id"],
["public", "extra", "is_public"],
["ownerid", "extra", "owner_id"],
["imagetype", "extra", "image_type"]
],
"azure":
[
["name", "label"],
["category", "category"],
["id", "id"],
["size", "logical_size_in_gb"],
["os", "os"]
],
"aws":
[
["state", "extra", "state"],
["name", "name"],
["id", "id"],
["public", "extra", "ispublic"],
["ownerid", "extra", "ownerid"],
["imagetype", "extra", "imagetype"]
]
}
images = _select_images(images_dict, your_keys)
_display(images)
def _select_images(data, selected_keys, env=[]):
"""
status ACTIVE
updated 2013-05-26T19:29:09Z
name menghan/custom-utuntu-01
links [{u'href': u'http://198.202.120.83:8774/v1.1/1ae6813a3a6d4cebbeb1912f6d139ad0/images/502a5967-18ff-448b-830f-d6150b650d6b', u'rel': u'self'}, {u'href': u'http://198.202.120.83:8774/1ae6813a3a6d4cebbeb1912f6d139ad0/images/502a5967-18ff-448b-830f-d6150b650d6b', u'rel': u'bookmark'}, {u'href': u'http://198.202.120.83:9292/1ae6813a3a6d4cebbeb1912f6d139ad0/images/502a5967-18ff-448b-830f-d6150b650d6b', u'type': u'application/vnd.openstack.image', u'rel': u'alternate'}]
created 2013-05-26T19:28:09Z
minDisk 0
metadata {u'instance_uuid': u'16a5f5ac-7f39-4b01-a2c3-b2003beffb9d',
u'image_location': u'snapshot',
u'image_state': u'available',
u'instance_type_memory_mb': u'2048',
u'instance_type_swap': u'0',
u'instance_type_vcpu_weight': u'None',
u'image_type': u'snapshot',
u'instance_type_id': u'5',
u'ramdisk_id': None,
u'instance_type_name': u'm1.small',
u'instance_type_ephemeral_gb': u'0',
u'instance_type_rxtx_factor': u'1',
u'kernel_id': None,
u'instance_type_flavorid': u'2',
u'instance_type_vcpus': u'1',
u'user_id': u'f603818711324203970ed1e3bb4b90ed',
u'instance_type_root_gb': u'20',
attributes = {"openstack":
[
['name','name'],
['status','status'],
['addresses','addresses'],
['flavor', 'flavor','id'],
['id','id'],
['image','image','id'],
['user_id', 'user_id'],
['metadata','metadata'],
['key_name','key_name'],
['created','created'],
],
"ec2":
[
["name", "id"],
["status", "extra", "status"],
["addresses", "public_ips"],
["flavor", "extra", "instance_type"],
['id','id'],
['image','extra', 'imageId'],
["user_id", 'user_id'],
["metadata", "metadata"],
["key_name", "extra", "key_name"],
["created", "extra", "launch_time"]
],
"aws":
[
["name", "name"],
["status", "extra", "status"],
["addresses", "public_ips"],
["flavor", "extra", "instance_type"],
['id','id'],
['image','extra', 'image_id'],
["user_id","user_id"],
["metadata", "metadata"],
["key_name", "extra", "key_name"],
["created", "extra", "launch_time"]
],
"azure":
[
['name','name'],
['status','status'],
['addresses','vip'],
['flavor', 'flavor','id'],
['id','id'],
['image','image','id'],
['user_id', 'user_id'],
['metadata','metadata'],
['key_name','key_name'],
u'base_image_ref': u'1a5fd55e-79b9-4dd5-ae9b-ea10ef3156e9',
u'owner_id': u'1ae6813a3a6d4cebbeb1912f6d139ad0'}
server {u'id': u'16a5f5ac-7f39-4b01-a2c3-b2003beffb9d', u'links': [{u'href': u'http://198.202.120.83:8774/v1.1/1ae6813a3a6d4cebbeb1912f6d139ad0/servers/16a5f5ac-7f39-4b01-a2c3-b2003beffb9d', u'rel': u'self'}, {u'href': u'http://198.202.120.83:8774/1ae6813a3a6d4cebbeb1912f6d139ad0/servers/16a5f5ac-7f39-4b01-a2c3-b2003beffb9d', u'rel': u'bookmark'}]}
cm_id sierra_openstack_grizzly-images-menghan/custom-utuntu-01
cm_refresh 2013-08-06T21-44-13Z
cm_cloud sierra_openstack_grizzly
minRam 0
progress 100
cm_kind images
_id 5201a66d7df38caf0fe160b5
cm_type openstack
id 502a5967-18ff-448b-830f-d6150b650d6b
OS-EXT-IMG-SIZE:size 876216320
b99fa4c8-6b92-49e6-b53f-37e56f9383b6
"""
images = []
keys = []
def _getFromDict(dataDict, mapList):
'''Get values of dataDict by mapList
mapList is a list of keys to find values in dict.
dataDict is a nested dict and will be searched by the list.
e.g. Access to the value 5 in dataDict
dataDict = { "abc": {
"def": 5
}
}
mapList = ["abc", "def"]
_getFromDict(dataDict, mapList) returns 5
ref: http://stackoverflow.com/questions/14692690/access-python-nested-dictionary-items-via-a-list-of-keys
'''
return reduce(lambda d, k: d[k], mapList, dataDict)
for cm_cloud, _id in data.iteritems():
for image_name, v in _id.iteritems():
values = [cm_cloud]
# cm_type selects the matching key mapping in selected_keys for this cloud type
cm_type = v['cm_type']
keys = []
for k in selected_keys[cm_type]:
keys.append(k[0])
try:
values.append(_getFromDict(v, k[1:]))
except:
# print sys.exc_info()
values.append(0)
images.append(values)
headers = [keys]
return headers + images
def _display(json_data, headers="firstrow", tablefmt="orgtbl"):
table = tabulate(json_data, headers, tablefmt)
try:
separator = table.split("\n")[1].replace("|", "+")
except:
separator = "-" * 50
print(separator)
print(table)
print(separator)
def main():
arguments = docopt(shell_command_image.__doc__)
shell_command_image(arguments)
if __name__ == "__main__":
# print sys.argv
main()
|
Keisuke69/libcloud
|
libcloud/security.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Security (SSL) Settings
Usage:
import libcloud.security
libcloud.security.VERIFY_SSL_CERT = True
# optional
libcloud.security.CA_CERTS_PATH.append("/path/to/cacert.txt")
"""
VERIFY_SSL_CERT = True
VERIFY_SSL_CERT_STRICT = True
# File containing one or more PEM-encoded CA certificates
# concatenated together
CA_CERTS_PATH = [
# centos/fedora: openssl
'/etc/pki/tls/certs/ca-bundle.crt',
# debian/ubuntu/arch/gentoo: ca-certificates
'/etc/ssl/certs/ca-certificates.crt',
# freebsd: ca_root_nss
'/usr/local/share/certs/ca-root-nss.crt',
# macports: curl-ca-bundle
'/opt/local/share/curl/curl-ca-bundle.crt',
]
CA_CERTS_UNAVAILABLE_WARNING_MSG = (
'Warning: No CA Certificates were found in CA_CERTS_PATH. '
'Toggling VERIFY_SSL_CERT to False.'
)
CA_CERTS_UNAVAILABLE_ERROR_MSG = (
'No CA Certificates were found in CA_CERTS_PATH. '
)
VERIFY_SSL_DISABLED_MSG = (
'SSL certificate verification is disabled, this can pose a '
'security risk. For more information how to enable the SSL '
'certificate verification, please visit the libcloud '
'documentation.'
)
|
ulikoehler/PCBCheck
|
pcbcheck.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
from collections import namedtuple, defaultdict, Counter
import subprocess
import re
from ansicolor import red, yellow, green, black
def readFileLines(filename):
with open(filename, encoding="iso-8859-1") as infile:
return infile.read().split("\n")
def extractToolStatistics(lines):
"""
From a list of excellon drill lines extract the number of holes for all drill sizes.
Returns a dict: {drill size: number of holes}
"""
#Get a tool --> diameter mapping
tools = extractExcellonTools(lines)
#Iterate over lines and count holes for each tool
currentTool = None
drillCountByDiameter = defaultdict(int)
toolRegex = re.compile(r"^(T\d+)$")
drillRegex = re.compile(r"^X[\+-]\d+Y[\+-]\d+$")
for line in lines:
if toolRegex.match(line):
#This line defines a new tool to use
currentTool = toolRegex.match(line).group(0)
if drillRegex.match(line):
drillCountByDiameter[tools[currentTool]] += 1
return drillCountByDiameter
def extractExcellonTools(lines):
"""
From a list of excellon lines, extract a dict of tools
Ignores non-tool-definition lines
Example: ["foobar", "T01C1.0", "T02C2.2"] -> {"T01": 1.0, "T02": 2.2}
"""
#Extract those lines that match a regex
toolDefRegex = re.compile(r"^(T\d+)C([0-9\.]+)$")
toolDefMatches = [toolDefRegex.match(l) for l in lines if toolDefRegex.match(l)]
return dict([(t.group(1), float(t.group(2))) for t in toolDefMatches])
def checkExcellonMetric(self, filepath):
"Check if a given file is a metric excellon file"
filename = os.path.basename(filepath)
lines = readFileLines(filepath)
#Check for excellon header
if lines[0] != "M48":
print(red("Can't find Excellon drill header (M48) in %s" % filename, bold="True"))
#Check for metric dimension: Line like METRIC,0000.00
if lines[1].partition(",")[0] != "METRIC":
print(red("Excellon drill program %s does not seem to be metric" % filename, bold="True"))
#
# Drill statistics
#
toolStats = extractToolStatistics(lines)
print(black(self.name + ":", bold=True))
for diameter, numDrills in toolStats.items():
print("\t%d through holes of diameter %.2fmm" % (numDrills, diameter))
#Print "None" if there are no holes in this file
if not toolStats:
print("\tNone")
#Multimap of allowed layer notes (ExpectedFile.name --> [%LN])
#Built for diptrace. Might need to be adjusted for other EDA tools.
allowedLayerNotes = defaultdict(list)
allowedLayerNotes.update({
"Top copper layer": ["Top", ['Copper', 'L1', 'Top']],
"Bottom copper layer": ["Bottom", ['Copper', 'L2', 'Bot']],
"Solder mask top": ["TopMask", ['Soldermask', 'Top']],
"Solder mask bottom": ["BotMask", ['Soldermask', 'Bot']],
"Board outline": ["BoardOutline", ['Profile']],
"Silk screen top": ["TopSilk", ['Legend', 'Top']],
})
#Gerber aperture
# id: The aperture identifier, e.g. D11
# type: "C"/"R"
# diameter: float, with implicit units
Aperture = namedtuple("Aperture", ["id", "type", "diameter"])
def parseGerberApertures(lines):
"From a list of gerber lines, parse all embedded apertures"
apertureRegex = re.compile(r"%AD(D\d+)([CR]),(\d+\.\d+)\*%")
apertures = []
#Find lines defining apertures
for line in lines:
if apertureRegex.match(line):
match = apertureRegex.match(line)
apertures.append(Aperture(match.group(1), match.group(2), float(match.group(3))))
return apertures
def findAperture(apertures, identifier):
"Find an aperture in a list of apertures (returns None if not found)"
for aperture in apertures:
if aperture.id == identifier: return aperture
return None
def parseGerberUnit(lines):
"""Returns the extended gerber unit ("mm"/"in") or None if not found"""
if "%MOIN*%" in lines:
return "in"
elif "%MOMM*%" in lines:
return "mm"
else: return None
def findCoordinateFormat(lines):
"""
Try to find a FSLAX line and return the decimal-point factor for coordinates.
"""
rgx = re.compile(r"\%FSLAX(\d{2})Y(\d{2})\*\%")
for line in lines:
m = rgx.match(line)
if m is not None:
return 10.**int(m.group(1)[-1]),10.**int(m.group(2)[-1])
print(red("Could not find coordinate format info %FSLAX. Using default %FSLAX33"))
return 100000.,100000.
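# Hypothetical example (added for illustration): a format statement such as
# "%FSLAX34Y34*%" declares 4 decimal digits per axis, so this function returns
# (10.0**4, 10.0**4) and a raw coordinate value of 123450 maps to 12.345 units.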
def checkBoardOutline(self, filepath):
filename = os.path.basename(filepath)
#Basic gerber checks
checkGerberFile(self, filepath)
#Compute board outline
millLines = readFileLines(filepath)
# Find factors to get absolute coordinates:
x_factor, y_factor = findCoordinateFormat(millLines)
# Initialize X & Y
x, y = 0, 0
#We can only interpret the file if coordinates are absolute
if not "G90*" in millLines:
print(yellow("Mill coordinates in %s don't seem to be absolute (G90 missing!)" % filename))
return
#Determine coordinate units
unit = parseGerberUnit(millLines)
if unit is None: #Neither inch nor mm found
print(yellow("Could not find coordinate units (mm/in) in %s" % filename))
return
#Parse the aperture list
apertures = parseGerberApertures(millLines)
selectApertureRegex = re.compile(r"(D\d+)\*")
move2DRegex = re.compile(r"X(-?\d+)Y(-?\d+)D(\d+)\*") #Move (D2) or draw (D1)
move1DRegex = re.compile(r"([XY])(-?\d+)D(\d+)\*") #With only one coordinate
#Try to interpret gerber file
minCoords = (sys.maxsize, sys.maxsize)
maxCoords = (0, 0)
lastCoords = (0, 0)
currentAperture = None
apertureUseCount = Counter()
for line in millLines:
if selectApertureRegex.match(line):
apertureCode = selectApertureRegex.match(line).group(1)
currentAperture = findAperture(apertures, apertureCode)
elif move2DRegex.match(line):
match = move2DRegex.match(line)
x = int(match.group(1)) / x_factor
y = int(match.group(2)) / y_factor
apertureUseCount[currentAperture] += 1
elif move1DRegex.match(line):
match = move1DRegex.match(line)
apertureUseCount[currentAperture] += 1
if match.group(1) == "X":
x = int(match.group(2)) / x_factor
y = lastCoords[1]
elif match.group(1) == "Y":
x = lastCoords[0]
y = int(match.group(2)) / y_factor
else: raise Exception("Internal error: Invalid coordinate type in 1D move: %s" % match.group(1))
else: continue
#Compute min/max coordinates
lastCoords = (x, y)
minCoords = (min(minCoords[0], lastCoords[0]), min(minCoords[1], lastCoords[1]))
maxCoords = (max(maxCoords[0], lastCoords[0]), max(maxCoords[1], lastCoords[1]))
#Compute board size (minimum enclosing rectangle)
boardSize = (maxCoords[0] - minCoords[0], maxCoords[1] - minCoords[1])
# Compute size of most common aperture
mostCommonAperture = apertureUseCount.most_common(1)[0][0]
# info
print(black("\tGerber offset: ({1:.2f} {0}, {2:.2f} {0})".format(unit, minCoords[0], minCoords[1])))
print(black("\tBoard size (minimum rectangle): %.1f %s x %.1f %s" % \
(boardSize[0], unit, boardSize[1], unit)))
#print(black("\tBoard outline aperture size: {0:.2f} µm".format(1e3 * mostCommonAperture.diameter), bold=True))
def checkCopperLayer(self, filepath):
#Basic gerber checks
checkGerberFile(self, filepath)
#Check if smallest aperture is < 6mil = 150um
#NOTE: We currently don't compute the clearance (way too complicated)
lines = readFileLines(filepath)
apertures = parseGerberApertures(lines)
unit = parseGerberUnit(lines)
limit = 0.125 #TODO use inches if unit == "in"
if unit == "in": limit = 0.006
for aperture in apertures:
if aperture.diameter < limit:
print(red("Aperture %s (size %.3f %s) is smaller than %.3f %s minimum width" % \
(aperture.id, aperture.diameter, unit, limit, unit)))
def checkGerberFile(self, filepath):
"""
Check if the given file is a RS-274X gerber file
- Checks for a G04 command at the beginning of the file
- Checks for a %LN command and verifies it against the filename
- Checks for a G04 #@! TF.FileFunction command
"""
filename = os.path.basename(filepath)
lines = readFileLines(filepath)
#Find G04 line (i.e. what software created the file)
if not any(map(lambda l: l.startswith("G04 "), lines)):
print(red("Couldn't find G04 command (software description) in %s. Probably not a Gerber file." % filename, bold=True))
#Find %LN line, i.e. what the creating
# software thinks the current layer is (e.g. "BottomMask")
layerNoteRegex = re.compile(r"^\%LN([^\*]+)\*%$")
fileFunctionRegex = re.compile(r"G04 #@! TF\.FileFunction,([^\*]+)\*")
layerDescription = None
for line in lines:
if layerNoteRegex.match(line):
layerDescription = layerNoteRegex.match(line).group(1)
break #Expecting only one layer note
elif fileFunctionRegex.match(line):
layerDescription = fileFunctionRegex.match(line).group(1)
layerDescription = layerDescription.split(",")
#Check if the layer note we found makes sense
if layerDescription is None: #No %LN line found
print(yellow("Couldn't find %%LN command or file function command in %s" % filename))
else: #We found a layer description. Check for sanity
if isinstance(layerDescription, list): # FileFunction command
if layerDescription not in allowedLayerNotes[self.name]:
print(red("Layer description '%s' in %s does not match any of the expected descriptions: %s" % (layerDescription, filename, allowedLayerNotes[self.name]), bold=True))
else: # %LN command
if layerDescription not in allowedLayerNotes[self.name]:
print(red("Layer description '%s' in %s does not match any of the expected descriptions: %s" % (layerDescription, filename, allowedLayerNotes[self.name]), bold=True))
def extractProjectPrefix(files):
"""
Extract a common project prefix from all files in a directory
Fails & exits if no such prefix is found
Example: [ABC.top, ABC.bot] => "ABC"
"""
commonprefix = os.path.commonprefix(files)
if not commonprefix or not commonprefix.endswith("."):
print(red("Can't extract project name from files: %s" % ", ".join(files), bold=True))
print(red("Please ensure that all files have a common filename and only differ in their extension!", bold=True))
print(red("Example: MyBoard.top, MyBoard.bot, ...", bold=True))
sys.exit(1)
return commonprefix[:-1] #Strip off the trailing dot
def checkFile(directory, expectedFile, projectName):
"Check if a given expected file exists inside a directory"
filename = projectName + expectedFile.extension
filepath = os.path.join(directory, filename)
if os.path.isfile(filepath):
print(green("Found %s data %s" % (expectedFile.format, filename)))
if expectedFile.checkFN is not None:
expectedFile.checkFN(expectedFile, filepath)
else:
print(red("File %s (%s) missing" % (filename, expectedFile.name), bold=True))
return None
return filename
ExpectedFile = namedtuple('ExpectedFile', ['extension', 'name', 'format', 'checkFN'])
expectedFiles = [
#http://www.multi-circuit-boards.eu/support/leiterplatten-daten/gerber-daten.html
ExpectedFile(".top", "Top copper layer", "RS-274X", checkCopperLayer),
ExpectedFile(".bot", "Bottom copper layer", "RS-274X", checkCopperLayer),
ExpectedFile(".smt", "Solder mask top", "RS-274X", checkGerberFile),
ExpectedFile(".smb", "Solder mask bottom", "RS-274X", checkGerberFile),
ExpectedFile(".plt", "Silk screen top", "RS-274X", checkGerberFile),
ExpectedFile(".mil", "Board outline", "RS-274X", checkBoardOutline),
#Drilling
ExpectedFile(".pth", "Plated through holes", "Excellon", checkExcellonMetric),
ExpectedFile(".npth", "Non-plated through holes", "Excellon", checkExcellonMetric),
]
if __name__ == "__main__":
#Parse commandline arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="The directory to scan for project Gerber file")
parser.add_argument("--gerbv", action="store_true", help="Run gerbv on the files")
args = parser.parse_args()
#Perform check
files = os.listdir(args.directory)
projectName = extractProjectPrefix(files)
print(black("Project name: %s" % projectName))
checkedFiles = [checkFile(args.directory, f, projectName) for f in expectedFiles]
unknownFiles = set(files) - set(checkedFiles)
if unknownFiles:
print(red("Found unknown files: %s" % ",".join(unknownFiles)))
#Open viewer if enabled
if args.gerbv:
filePaths = [os.path.join(args.directory, f) for f in files]
subprocess.call(["gerbv"] + filePaths)
|
scavarda/mysql-dbcompare
|
mysql-utilities-1.6.0/scripts/mysqldbcompare.py
|
#!/usr/bin/env python
#
# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This file contains the operations to perform database consistency checking
on two databases.
"""
from mysql.utilities.common.tools import check_python_version
# Check Python version compatibility
check_python_version()
import os
import re
import sys
from mysql.utilities.exception import UtilError, FormatError
from mysql.utilities.command.dbcompare import database_compare
from mysql.utilities.common.ip_parser import parse_connection
from mysql.utilities.common.dbcompare import DEFAULT_SPAN_KEY_SIZE
from mysql.utilities.common.pattern_matching import REGEXP_OBJ_NAME
from mysql.utilities.common.tools import check_connector_python
from mysql.utilities.common.messages import (PARSE_ERR_DB_PAIR,
PARSE_ERR_DB_PAIR_EXT,
PARSE_ERR_DB_MISSING_CMP,
PARSE_ERR_SPAN_KEY_SIZE_TOO_LOW)
from mysql.utilities.common.options import (add_difftype, add_verbosity,
check_verbosity,
add_changes_for, add_reverse,
add_format_option,
add_character_set_option,
add_ssl_options, get_ssl_dict,
setup_common_options,
check_password_security)
from mysql.utilities.common.sql_transform import (is_quoted_with_backticks,
remove_backtick_quoting,
quote_with_backticks)
# Constants
NAME = "MySQL Utilities - mysqldbcompare "
DESCRIPTION = "mysqldbcompare - compare databases for consistency"
USAGE = "%prog --server1=user:pass@host:port:socket " + \
"--server2=user:pass@host:port:socket db1:db2"
PRINT_WIDTH = 75
# Check for connector/python
if not check_connector_python():
sys.exit(1)
if __name__ == '__main__':
# Setup the command parser
parser = setup_common_options(os.path.basename(sys.argv[0]),
DESCRIPTION, USAGE, server=False)
# Connection information for the source server
parser.add_option("--server1", action="store", dest="server1",
type="string", default="root@localhost:3306",
help="connection information for first server in "
"the form: <user>[:<password>]@<host>[:<port>]"
"[:<socket>] or <login-path>[:<port>][:<socket>].")
# Connection information for the destination server
parser.add_option("--server2", action="store", dest="server2",
type="string", default=None,
help="connection information for second server in "
"the form: <user>[:<password>]@<host>[:<port>]"
"[:<socket>] or <login-path>[:<port>][:<socket>].")
# Add character set option
add_character_set_option(parser)
# Output format
add_format_option(parser, "display the output in either grid (default), "
"tab, csv, or vertical format", "grid")
# Add skips
parser.add_option("--skip-checksum-table", action="store_true",
dest="no_checksum_table",
help="skip CHECKSUM TABLE step in data consistency "
"check.")
parser.add_option("--skip-object-compare", action="store_true",
dest="no_object_check",
help="skip object comparison step.")
parser.add_option("--skip-row-count", action="store_true",
dest="no_row_count",
help="skip row count step.")
parser.add_option("--skip-diff", action="store_true",
dest="no_diff",
help="skip the object diff step.")
parser.add_option("--skip-data-check", action="store_true",
dest="no_data",
help="skip data consistency check.")
# Skip check of table options.
parser.add_option("--skip-table-options", action="store_true",
dest="skip_tbl_opts",
help="skip check of all table options (e.g., "
"AUTO_INCREMENT, ENGINE, CHARSET, etc.).")
# Add display width option
parser.add_option("--width", action="store", dest="width",
type="int", help="display width",
default=PRINT_WIDTH)
# run-all-tests mode
parser.add_option("-a", "--run-all-tests", action="store_true",
dest="run_all_tests",
help="do not abort when a diff test fails")
# Add compact option for resulting diff
parser.add_option("-c", "--compact", action="store_true",
dest="compact", help="compact output from a diff.")
# turn off binlog mode
parser.add_option("--disable-binary-logging", action="store_true",
default=False, dest="toggle_binlog",
help="turn binary logging off during operation if "
"enabled (SQL_LOG_BIN=1). Note: may require SUPER "
"privilege. Prevents compare operations from being "
"written to the binary log.")
# add the span key option
parser.add_option(
"--span-key-size", action="store", default=DEFAULT_SPAN_KEY_SIZE,
type="int", dest="span_key_size",
help="changes the size of the key used for compare table contents. A "
"higher value can help to get more accurate results comparing "
"large databases, but may slow the algorithm. Default value is "
"{0}.".format(DEFAULT_SPAN_KEY_SIZE)
)
# add the use indexes option
parser.add_option(
"--use-indexes", action="store", type="string", default='',
dest="use_indexes",
help="for each table, indicate which index to use as if were a "
"primary key (each of his columns must not allow null values)."
)
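# Hypothetical example (added for illustration) of the --use-indexes syntax that
# the regex below parses: semicolon-separated table.index pairs, e.g.
#   --use-indexes="t1.idx_a;t2.idx_b"
# (the table and index names here are made up).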
# Add verbosity and quiet (silent) mode
add_verbosity(parser, True)
# Add difftype option
add_difftype(parser, True)
# Add the direction (changes-for)
add_changes_for(parser)
# Add show reverse option
add_reverse(parser)
# Add ssl options
add_ssl_options(parser)
# Now we process the rest of the arguments.
opt, args = parser.parse_args()
# Check security settings
check_password_security(opt, args, "# ")
# Warn if quiet and verbosity are both specified
check_verbosity(opt)
# check unique keys
ukey_regexp = re.compile(r'(?:(?:;){{0,1}}{0}\.{0})'
''.format(REGEXP_OBJ_NAME))
db_idxes_l = None
# Split the table names considering backtick quotes
if opt.use_indexes:
grp = ukey_regexp.findall(opt.use_indexes)
if not grp:
parser.error("Can't parse the specified --use-indexes argument {0}"
"".format(opt.use_indexes))
db_idxes_l = []
for table, index in grp:
table_uc = (table if is_quoted_with_backticks(table)
else quote_with_backticks(table))
index_uc = (index if is_quoted_with_backticks(index)
else quote_with_backticks(index))
db_idxes_l.append((table_uc, index_uc))
# Set options for database operations.
options = {
"quiet": opt.quiet,
"verbosity": opt.verbosity,
"difftype": opt.difftype,
"run_all_tests": opt.run_all_tests,
"width": opt.width,
"no_checksum_table": opt.no_checksum_table,
"no_object_check": opt.no_object_check,
"no_diff": opt.no_diff,
"no_row_count": opt.no_row_count,
"no_data": opt.no_data,
"format": opt.format,
"toggle_binlog": opt.toggle_binlog,
"changes-for": opt.changes_for,
"reverse": opt.reverse,
"span_key_size": opt.span_key_size,
"skip_table_opts": opt.skip_tbl_opts,
"charset": opt.charset,
"use_indexes": db_idxes_l,
"compact": opt.compact
}
# Add ssl options to options instead of connection.
options.update(get_ssl_dict(opt))
# Parse server connection values
server2_values = None
try:
server1_values = parse_connection(opt.server1, None, options)
except FormatError:
_, err, _ = sys.exc_info()
parser.error("Server1 connection values invalid: %s." % err)
except UtilError:
_, err, _ = sys.exc_info()
parser.error("Server1 connection values invalid: %s." % err.errmsg)
if opt.server2:
try:
server2_values = parse_connection(opt.server2, None, options)
except FormatError:
_, err, _ = sys.exc_info()
parser.error("Server2 connection values invalid: %s." % err)
except UtilError:
_, err, _ = sys.exc_info()
parser.error("Server2 connection values invalid: %s." % err.errmsg)
# Check for arguments
if len(args) == 0:
parser.error(PARSE_ERR_DB_MISSING_CMP)
if opt.span_key_size and opt.span_key_size < DEFAULT_SPAN_KEY_SIZE:
parser.error(
PARSE_ERR_SPAN_KEY_SIZE_TOO_LOW.format(
s_value=opt.span_key_size, default=DEFAULT_SPAN_KEY_SIZE))
# Operations to perform:
# 1) databases exist
# 2) check object counts
# 3) check object differences
# 4) check row counts among the tables
# 5) check table data consistency
res = True
check_failed = False
arg_regexp = re.compile(r'{0}(?:(?:\:){0})?'.format(REGEXP_OBJ_NAME))
for db in args:
# Split the database names considering backtick quotes
grp = arg_regexp.match(db)
if not grp:
parser.error(PARSE_ERR_DB_PAIR.format(db_pair=db,
db1_label='db1',
db2_label='db2'))
parts = grp.groups()
matched_size = len(parts[0])
if not parts[1]:
parts = (parts[0], parts[0])
else:
# add 1 for the separator ':'
matched_size = matched_size + 1
matched_size = matched_size + len(parts[1])
# Verify if the size of the databases matched by the REGEX is equal
# to the initial specified string. In general, this identifies the
# missing use of backticks.
if matched_size != len(db):
parser.error(PARSE_ERR_DB_PAIR_EXT.format(db_pair=db,
db1_label='db1',
db2_label='db2',
db1_value=parts[0],
db2_value=parts[1]))
# Remove backtick quotes (handled later)
db1 = remove_backtick_quoting(parts[0]) \
if is_quoted_with_backticks(parts[0]) else parts[0]
db2 = remove_backtick_quoting(parts[1]) \
if is_quoted_with_backticks(parts[1]) else parts[1]
try:
res = database_compare(server1_values, server2_values,
db1, db2, options)
print
except UtilError:
_, e, _ = sys.exc_info()
print("ERROR: %s" % e.errmsg)
sys.exit(1)
if not res:
check_failed = True
if check_failed and not opt.run_all_tests:
break
if not opt.quiet:
print
if check_failed:
print("# Database consistency check failed.")
else:
sys.stdout.write("# Databases are consistent")
if (opt.no_object_check or opt.no_diff or
opt.no_row_count or opt.no_data or opt.skip_tbl_opts):
sys.stdout.write(" given skip options specified")
print(".")
print("#\n# ...done")
if check_failed:
sys.exit(1)
sys.exit()
|
stdlib-js/stdlib
|
lib/node_modules/@stdlib/stats/base/dists/geometric/stdev/test/fixtures/python/runner.py
|
#!/usr/bin/env python
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate fixtures."""
import os
import json
from numpy.random import rand
from scipy.stats import geom
# Get the file path:
FILE = os.path.realpath(__file__)
# Extract the directory in which this file resides:
DIR = os.path.dirname(FILE)
def gen(p, name):
"""Generate fixture data and write to file.
# Arguments
* `p`: success probability
* `name::str`: output filename
# Examples
``` python
python> p = rand(1000)
python> gen(p, './data.json')
```
"""
y = list()
for i in p:
y.append(geom.std(i))
# Store data to be written to file as a dictionary:
data = {
"p": p.tolist(),
"expected": y
}
# Based on the script directory, create an output filepath:
filepath = os.path.join(DIR, name)
# Write the data to the output filepath as JSON:
with open(filepath, "w") as outfile:
json.dump(data, outfile)
def main():
"""Generate fixture data."""
p = rand(1000)
gen(p, "data.json")
if __name__ == "__main__":
main()
|
lupyuen/RaspberryPiImage
|
home/pi/TP-IoT/send_simple_sensor_data.py
|
#!/usr/bin/env python3
# Send DHT22 sensor data periodically to AWS IoT.
import time
import datetime
import ssl
import json
import paho.mqtt.client as mqtt
import dht22
import pigpio
# TODO: Change this to the name of our Raspberry Pi, also known as our "Thing Name"
deviceName = "g88pi"
# Public certificate of our Raspberry Pi, as provided by AWS IoT.
deviceCertificate = "tp-iot-certificate.pem.crt"
# Private key of our Raspberry Pi, as provided by AWS IoT.
devicePrivateKey = "tp-iot-private.pem.key"
# Root certificate to authenticate AWS IoT when we connect to their server.
awsCert = "aws-iot-rootCA.crt"
isConnected = False
# Assume we connected the DHT22 Sensor, YwRobot Light Sensor, L-934ID-5V LED as follows:
# DHT22/AM2302 --> Raspberry Pi:
# + --> GPIO 8
# Out --> GPIO 22
# - --> Ground (Pin 14)
power = 8
temp_sensor = 22
# YwRobot Light Sensor --> Raspberry Pi:
# Ground --> Ground (Pin 9)
# VCC --> 3.3V Power (Pin 1)
# DOUT --> GPIO 4
light_sensor = 4
# L-934ID-5V LED --> Raspberry Pi
# + --> GPIO 25
# Ground --> Ground (Pin 20)
led = 25
# This is the main logic of the program. We connect to AWS IoT via MQTT, send sensor data periodically to AWS IoT,
# and handle any actuation commands received from AWS IoT.
def main():
global isConnected
# Create an MQTT client for connecting to AWS IoT via MQTT.
client = mqtt.Client(deviceName + "_sr") # Client ID must be unique because AWS will disconnect any duplicates.
client.on_connect = on_connect # When connected, call on_connect.
client.on_message = on_message # When message received, call on_message.
client.on_log = on_log # When logging debug messages, call on_log.
# Set the certificates and private key for connecting to AWS IoT. TLS 1.2 is mandatory for AWS IoT and is supported
# only in Python 3.4 and later, compiled with OpenSSL 1.0.1 and later.
client.tls_set(awsCert, deviceCertificate, devicePrivateKey, ssl.CERT_REQUIRED, ssl.PROTOCOL_TLSv1_2)
# Connect to AWS IoT server. Use AWS command line "aws iot describe-endpoint" to get the address.
print("Connecting to AWS IoT...")
client.connect("A1P01IYM2DOZA0.iot.us-west-2.amazonaws.com", 8883, 60)
# Start a background thread to process the MQTT network commands concurrently, including auto-reconnection.
client.loop_start()
# Prepare the DHT22 sensor. Don't read from the DHT22 more than once every 2 seconds, or it will eventually hang.
dht22_sensor = dht22.Sensor(pigpio.pi(), temp_sensor, power=power)
# Loop forever.
while True:
try:
# If we are not connected yet to AWS IoT, wait 1 second and try again.
if not isConnected:
time.sleep(1)
continue
# Read DHT22 sensor values. Skip if we detect an error.
dht22_sensor.trigger()
if dht22_sensor.bad_checksum() + dht22_sensor.short_message() + dht22_sensor.missing_message() + \
dht22_sensor.sensor_resets() != 0 or dht22_sensor.temperature() < 0 or dht22_sensor.humidity() < 0:
print(("DHT22 may be connected incorrectly: temperature={:3.1f}, humidity={:3.1f}, bad_checksum={}, " +
"short_message={}, missing_message={}, sensor_resets={}")
.format(dht22_sensor.temperature(), dht22_sensor.humidity(), dht22_sensor.bad_checksum(),
dht22_sensor.short_message(), dht22_sensor.missing_message(),
dht22_sensor.sensor_resets()))
continue
# Prepare our sensor data in JSON format.
payload = {
"state": {
"reported": {
"temperature": round(dht22_sensor.temperature(), 1),
"humidity": round(dht22_sensor.humidity(), 1),
"timestamp": datetime.datetime.now().isoformat()
}
}
}
print("Sending sensor data to AWS IoT...\n" +
json.dumps(payload, indent=4, separators=(',', ': ')))
# Publish our sensor data to AWS IoT via the MQTT topic, also known as updating our "Thing Shadow".
client.publish("$aws/things/" + deviceName + "/shadow/update", json.dumps(payload))
print("Sent to AWS IoT")
# Wait 30 seconds before sending the next set of sensor data.
time.sleep(30)
except KeyboardInterrupt:
# Stop the program when we press Ctrl-C.
break
except Exception as e:
# For all other errors, we wait a while and resume.
print("Exception: " + str(e))
time.sleep(10)
continue
# This is called when we are connected to AWS IoT via MQTT.
# We subscribe for notifications of desired state updates.
def on_connect(client, userdata, flags, rc):
global isConnected
isConnected = True
print("Connected to AWS IoT")
# Subscribe to our MQTT topic so that we will receive notifications of updates.
topic = "$aws/things/" + deviceName + "/shadow/update/accepted"
print("Subscribing to MQTT topic " + topic)
client.subscribe(topic)
# This is called when we receive a subscription notification from AWS IoT.
# If this were an actuation command, this is where we would execute it (this simple example only logs the message).
def on_message(client, userdata, msg):
# Convert the JSON payload to a Python dictionary.
# The payload is in binary format so we need to decode as UTF-8.
payload2 = json.loads(msg.payload.decode("utf-8"))
print("Received message, topic: " + msg.topic + ", payload:\n" +
json.dumps(payload2, indent=4, separators=(',', ': ')))
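# Hypothetical sketch (added for illustration; not part of the original script):
# how on_message() might act on a desired LED state using pigpio and the `led`
# pin defined above. The "desired" payload shape shown here is an assumption.
#
#   desired = payload2.get("state", {}).get("desired", {})
#   if "led" in desired:
#       pigpio.pi().write(led, 1 if desired["led"] == "on" else 0)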
# Print out log messages for tracing.
def on_log(client, userdata, level, buf):
print("Log: " + buf)
# Start the main program.
main()
|
scottdangelo/RemoveVolumeMangerLocks
|
cinder/tests/unit/test_netapp_nfs.py
|
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NetApp-specific NFS driver module."""
import itertools
import os
import shutil
import unittest
from lxml import etree
import mock
from mox3 import mox as mox_lib
import six
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
fake_api as netapp_api)
from cinder import utils as cinder_utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp import common
from cinder.volume.drivers.netapp.dataontap import (nfs_7mode
as netapp_nfs_7mode)
from cinder.volume.drivers.netapp.dataontap import (nfs_cmode
as netapp_nfs_cmode)
from cinder.volume.drivers.netapp.dataontap.client import client_7mode
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
from cinder.volume.drivers.netapp import utils
from oslo_config import cfg
CONF = cfg.CONF
CONNECTION_INFO = {
'hostname': 'fake_host',
'transport_type': 'https',
'port': 443,
'username': 'admin',
'password': 'passw0rd',
}
FAKE_CONNECTION_INFO_HTTP = {
'hostname': '127.0.0.1',
'transport_type': 'http',
'port': None,
'username': 'admin',
'password': 'pass',
'vserver': 'openstack',
}
FAKE_CONNECTION_INFO_HTTPS = dict(FAKE_CONNECTION_INFO_HTTP,
transport_type='https')
FAKE_7MODE_CONNECTION_INFO_HTTP = dict(FAKE_CONNECTION_INFO_HTTP)
FAKE_7MODE_CONNECTION_INFO_HTTP.pop('vserver')
FAKE_7MODE_CONNECTION_INFO_HTTP['vfiler'] = 'test_vfiler'
FAKE_7MODE_CONNECTION_INFO_HTTPS = dict(FAKE_7MODE_CONNECTION_INFO_HTTP,
transport_type='https')
SEVEN_MODE_CONNECTION_INFO = dict(
itertools.chain(CONNECTION_INFO.items(),
{'vfiler': 'test_vfiler'}.items()))
FAKE_VSERVER = 'fake_vserver'
def create_configuration():
configuration = mox_lib.MockObject(conf.Configuration)
configuration.append_config_values(mox_lib.IgnoreArg())
configuration.max_over_subscription_ratio = 20.0
configuration.reserved_percentage = 0
configuration.nfs_mount_point_base = '/mnt/test'
configuration.nfs_mount_options = None
configuration.nas_mount_options = None
configuration.nfs_used_ratio = .95
configuration.nfs_oversub_ratio = 1.0
configuration.netapp_server_hostname = CONNECTION_INFO['hostname']
configuration.netapp_transport_type = CONNECTION_INFO['transport_type']
configuration.netapp_server_port = CONNECTION_INFO['port']
configuration.netapp_login = CONNECTION_INFO['username']
configuration.netapp_password = CONNECTION_INFO['password']
configuration.netapp_vfiler = SEVEN_MODE_CONNECTION_INFO['vfiler']
return configuration
class FakeVolume(object):
def __init__(self, host='', size=0):
self.size = size
self.id = hash(self)
self.name = None
self.host = host
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, val):
self.__dict__[key] = val
class FakeSnapshot(object):
def __init__(self, volume_size=0):
self.volume_name = None
self.name = None
self.volume_id = None
self.volume_size = volume_size
self.user_id = None
self.status = None
def __getitem__(self, key):
return self.__dict__[key]
class FakeResponse(object):
def __init__(self, status):
"""Initialize FakeResponse.
:param status: Either 'failed' or 'passed'
"""
self.Status = status
if status == 'failed':
self.Reason = 'Sample error'
class NetAppCmodeNfsDriverTestCase(test.TestCase):
"""Test direct NetApp C Mode driver."""
TEST_NFS_HOST = 'nfs-host1'
TEST_NFS_SHARE_PATH = '/export'
TEST_NFS_EXPORT1 = '%s:%s' % (TEST_NFS_HOST, TEST_NFS_SHARE_PATH)
TEST_NFS_EXPORT2 = 'nfs-host2:/export'
TEST_MNT_POINT = '/mnt/nfs'
def setUp(self):
super(NetAppCmodeNfsDriverTestCase, self).setUp()
self._custom_setup()
def _custom_setup(self):
self.mock_object(utils, 'OpenStackInfo')
kwargs = {}
kwargs['netapp_mode'] = 'proxy'
kwargs['configuration'] = create_configuration()
# Inject fake netapp_lib module classes.
netapp_api.mock_netapp_lib([client_cmode, client_base])
self.mock_object(common.na_utils, 'check_netapp_lib')
self.mock_object(nfs_base, 'LOG')
self._driver = netapp_nfs_cmode.NetAppCmodeNfsDriver(**kwargs)
self._driver.zapi_client = mock.Mock()
config = self._driver.configuration
config.netapp_vserver = FAKE_VSERVER
def test_create_snapshot(self):
"""Test snapshot can be created and deleted."""
mox = self.mox
drv = self._driver
mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
drv._clone_backing_file_for_volume(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg(),
mox_lib.IgnoreArg())
mox.ReplayAll()
drv.create_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_create_volume_from_snapshot(self):
"""Tests volume creation from snapshot."""
drv = self._driver
mox = self.mox
location = '127.0.0.1:/nfs'
host = 'hostname@backend#' + location
volume = FakeVolume(host, 1)
snapshot = FakeSnapshot(1)
expected_result = {'provider_location': location}
mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
mox.StubOutWithMock(drv, '_get_volume_location')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions')
drv._clone_backing_file_for_volume(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg(),
mox_lib.IgnoreArg())
drv._get_volume_location(mox_lib.IgnoreArg()).AndReturn(location)
drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
drv._set_rw_permissions(mox_lib.IgnoreArg())
mox.ReplayAll()
self.mock_object(drv, '_do_qos_for_volume')
self.mock_object(utils, 'get_volume_extra_specs')
loc = drv.create_volume_from_snapshot(volume, snapshot)
self.assertEqual(expected_result, loc)
mox.VerifyAll()
def _prepare_delete_snapshot_mock(self, snapshot_exists):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_get_provider_location')
mox.StubOutWithMock(drv, '_volume_not_present')
mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(mox_lib.IgnoreArg())
drv._get_provider_location(mox_lib.IgnoreArg())
drv._volume_not_present(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv._execute('rm', None, run_as_root=True)
drv._post_prov_deprov_in_ssc(mox_lib.IgnoreArg())
mox.ReplayAll()
return mox
def test_delete_existing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(True)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_delete_missing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(False)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup')
@mock.patch.object(client_cmode.Client, '__init__', return_value=None)
def test_do_setup(self, mock_client_init, mock_super_do_setup):
context = mock.Mock()
self._driver.do_setup(context)
mock_client_init.assert_called_once_with(vserver=FAKE_VSERVER,
**CONNECTION_INFO)
mock_super_do_setup.assert_called_once_with(context)
@mock.patch.object(nfs_base.NetAppNfsDriver, 'check_for_setup_error')
@mock.patch.object(ssc_cmode, 'check_ssc_api_permissions')
def test_check_for_setup_error(self, mock_ssc_api_permission_check,
mock_super_check_for_setup_error):
self._driver.zapi_client = mock.Mock()
self._driver.check_for_setup_error()
mock_ssc_api_permission_check.assert_called_once_with(
self._driver.zapi_client)
mock_super_check_for_setup_error.assert_called_once_with()
def _prepare_clone_mock(self, status):
drv = self._driver
mox = self.mox
volume = FakeVolume()
setattr(volume, 'provider_location', '127.0.0.1:/nfs')
drv.zapi_client = mox.CreateMockAnything()
mox.StubOutWithMock(drv, '_get_host_ip')
mox.StubOutWithMock(drv, '_get_export_path')
mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
drv.zapi_client.get_if_info_by_ip('127.0.0.1').AndReturn(
self._prepare_info_by_ip_response())
drv.zapi_client.get_vol_by_junc_vserver('openstack', '/nfs').AndReturn(
'nfsvol')
drv.zapi_client.clone_file('nfsvol', 'volume_name', 'clone_name',
'openstack')
drv._get_host_ip(mox_lib.IgnoreArg()).AndReturn('127.0.0.1')
drv._get_export_path(mox_lib.IgnoreArg()).AndReturn('/nfs')
drv._post_prov_deprov_in_ssc(mox_lib.IgnoreArg())
return mox
def _prepare_info_by_ip_response(self):
res = """<attributes-list>
<net-interface-info>
<address>127.0.0.1</address>
<administrative-status>up</administrative-status>
<current-node>fas3170rre-cmode-01</current-node>
<current-port>e1b-1165</current-port>
<data-protocols>
<data-protocol>nfs</data-protocol>
</data-protocols>
<dns-domain-name>none</dns-domain-name>
<failover-group/>
<failover-policy>disabled</failover-policy>
<firewall-policy>data</firewall-policy>
<home-node>fas3170rre-cmode-01</home-node>
<home-port>e1b-1165</home-port>
<interface-name>nfs_data1</interface-name>
<is-auto-revert>false</is-auto-revert>
<is-home>true</is-home>
<netmask>255.255.255.0</netmask>
<netmask-length>24</netmask-length>
<operational-status>up</operational-status>
<role>data</role>
<routing-group-name>c10.63.165.0/24</routing-group-name>
<use-failover-group>disabled</use-failover-group>
<vserver>openstack</vserver>
</net-interface-info></attributes-list>"""
response_el = etree.XML(res)
return netapp_api.NaElement(response_el).get_children()
def test_clone_backing_file_for_volume(self):
drv = self._driver
mox = self._prepare_clone_mock('pass')
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + six.text_type(hash(volume_name))
share = 'ip:/share'
drv._clone_backing_file_for_volume(volume_name, clone_name, volume_id,
share)
mox.VerifyAll()
def test_register_img_in_cache_noshare(self):
volume = {'id': '1', 'name': 'testvol'}
volume['provider_location'] = '10.61.170.1:/share/path'
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
'10.61.170.1:/share/path',
'img-cache-12345')
mox.ReplayAll()
drv._register_image_in_cache(volume, '12345')
mox.VerifyAll()
def test_register_img_in_cache_with_share(self):
volume = {'id': '1', 'name': 'testvol'}
volume['provider_location'] = '10.61.170.1:/share/path'
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
'10.61.170.1:/share/path',
'img-cache-12345')
mox.ReplayAll()
drv._register_image_in_cache(volume, '12345')
mox.VerifyAll()
def test_find_image_in_cache_no_shares(self):
drv = self._driver
drv._mounted_shares = []
result = drv._find_image_in_cache('image_id')
self.assertFalse(result, 'Return result is unexpected')
def test_find_image_in_cache_shares(self):
drv = self._driver
mox = self.mox
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(os.path, 'exists')
drv._get_mount_point_for_share('testshare').AndReturn('/mnt')
os.path.exists('/mnt/img-cache-id').AndReturn(True)
mox.ReplayAll()
result = drv._find_image_in_cache('id')
(share, file_name) = result[0]
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
self.assertEqual('testshare', share)
self.assertEqual('img-cache-id', file_name)
def test_find_old_cache_files_notexists(self):
drv = self._driver
mox = self.mox
cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+720']
setattr(drv.configuration, 'expiry_thres_minutes', 720)
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_execute')
drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((None, ''))
mox.ReplayAll()
res = drv._find_old_cache_files('share')
mox.VerifyAll()
self.assertEqual(0, len(res), 'No files expected but got return values.')
def test_find_old_cache_files_exists(self):
drv = self._driver
mox = self.mox
cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+720']
setattr(drv.configuration, 'expiry_thres_minutes', '720')
files = '/mnt/img-id1\n/mnt/img-id2\n'
r_files = ['img-id1', 'img-id2']
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_shortlist_del_eligible_files')
drv._get_mount_point_for_share('share').AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((files, None))
drv._shortlist_del_eligible_files(
mox_lib.IgnoreArg(), r_files).AndReturn(r_files)
mox.ReplayAll()
res = drv._find_old_cache_files('share')
mox.VerifyAll()
if len(res) == len(r_files):
for f in res:
r_files.remove(f)
else:
self.fail('Returned files not same as expected.')
def test_delete_files_till_bytes_free_success(self):
drv = self._driver
mox = self.mox
files = [('img-cache-1', 230), ('img-cache-2', 380)]
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_delete_file_at_path')
drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
drv._delete_file_at_path('/mnt/img-cache-2').AndReturn(True)
drv._delete_file_at_path('/mnt/img-cache-1').AndReturn(True)
mox.ReplayAll()
drv._delete_files_till_bytes_free(files, 'share', bytes_to_free=1024)
mox.VerifyAll()
def test_clean_image_cache_exec(self):
drv = self._driver
mox = self.mox
drv.configuration.thres_avl_size_perc_start = 20
drv.configuration.thres_avl_size_perc_stop = 50
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_find_old_cache_files')
mox.StubOutWithMock(drv, '_delete_files_till_bytes_free')
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info('testshare').AndReturn((100, 19))
drv._find_old_cache_files('testshare').AndReturn(['f1', 'f2'])
drv._delete_files_till_bytes_free(
['f1', 'f2'], 'testshare', bytes_to_free=31)
mox.ReplayAll()
drv._clean_image_cache()
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
self.assertFalse(drv.cleaning, 'Clean image cache failed.')
def test_clean_image_cache_noexec(self):
drv = self._driver
mox = self.mox
drv.configuration.thres_avl_size_perc_start = 20
drv.configuration.thres_avl_size_perc_stop = 50
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info('testshare').AndReturn((100, 30, 70))
mox.ReplayAll()
drv._clean_image_cache()
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
self.assertFalse(drv.cleaning, 'Clean image cache failed.')
def test_clone_image_fromcache(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
mox.StubOutWithMock(drv, '_post_clone_image')
mox.StubOutWithMock(drv, '_is_share_clone_compatible')
utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn(
[('share', 'file_name')])
drv._is_share_clone_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(True)
drv._do_clone_rel_img_cache('file_name', 'vol', 'share', 'file_name')
drv._post_clone_image(volume)
mox.ReplayAll()
drv.clone_image('',
volume,
('image_location', None),
{'id': 'image_id'}, '')
mox.VerifyAll()
def get_img_info(self, format):
class img_info(object):
def __init__(self, fmt):
self.file_format = fmt
return img_info(format)
def test_clone_image_cloneableshare_nospace(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_is_share_clone_compatible')
utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share(
mox_lib.IgnoreArg()).AndReturn('127.0.0.1:/share')
drv._is_share_clone_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(False)
mox.ReplayAll()
(prop, cloned) = drv.clone_image(
'',
volume,
('nfs://127.0.0.1:/share/img-id', None),
{'id': 'image_id'},
'')
mox.VerifyAll()
self.assertFalse(cloned, 'Expected not cloned, got cloned.')
self.assertFalse(prop['provider_location'])
def test_clone_image_cloneableshare_raw(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(drv, '_is_share_clone_compatible')
utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share(
mox_lib.IgnoreArg()).AndReturn('127.0.0.1:/share')
drv._is_share_clone_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
AndReturn(self.get_img_info('raw'))
drv._clone_backing_file_for_volume(
'img-id', 'vol', share='127.0.0.1:/share', volume_id=None)
drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
drv._set_rw_permissions('/mnt/vol')
drv._resize_image_file({'name': 'vol'}, mox_lib.IgnoreArg())
mox.ReplayAll()
drv.clone_image(
'',
volume,
('nfs://127.0.0.1:/share/img-id', None),
{'id': 'image_id'},
'')
mox.VerifyAll()
def test_clone_image_cloneableshare_notraw(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_clone_compatible')
utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_clone_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
AndReturn(self.get_img_info('notraw'))
image_utils.convert_image(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg(),
'raw', run_as_root=True)
image_utils.qemu_img_info('/mnt/vol', run_as_root=True).\
AndReturn(self.get_img_info('raw'))
drv._register_image_in_cache(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
drv._set_rw_permissions('/mnt/vol')
drv._resize_image_file({'name': 'vol'}, mox_lib.IgnoreArg())
mox.ReplayAll()
drv.clone_image(
'',
volume,
('nfs://127.0.0.1/share/img-id', None),
{'id': 'image_id'},
'')
mox.VerifyAll()
def test_clone_image_file_not_discovered(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_clone_compatible')
mox.StubOutWithMock(drv, '_do_qos_for_volume')
mox.StubOutWithMock(drv, 'local_path')
utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_clone_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
AndReturn(self.get_img_info('notraw'))
image_utils.convert_image(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg(),
'raw', run_as_root=True)
image_utils.qemu_img_info('/mnt/vol', run_as_root=True).\
AndReturn(self.get_img_info('raw'))
drv._register_image_in_cache(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg())
drv._do_qos_for_volume(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(False)
mox.ReplayAll()
vol_dict, result = drv.clone_image(
'',
volume,
('nfs://127.0.0.1/share/img-id', None),
{'id': 'image_id'},
'')
mox.VerifyAll()
self.assertFalse(result)
self.assertFalse(vol_dict['bootable'])
self.assertIsNone(vol_dict['provider_location'])
def test_clone_image_resizefails(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_do_qos_for_volume')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_clone_compatible')
mox.StubOutWithMock(drv, 'local_path')
utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_clone_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
AndReturn(self.get_img_info('notraw'))
image_utils.convert_image(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg(), 'raw',
run_as_root=True)
image_utils.qemu_img_info('/mnt/vol', run_as_root=True).\
AndReturn(self.get_img_info('raw'))
drv._register_image_in_cache(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg())
drv._do_qos_for_volume(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
drv._set_rw_permissions('/mnt/vol')
drv._resize_image_file(
mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndRaise(exception.InvalidResults())
mox.ReplayAll()
vol_dict, result = drv.clone_image(
'',
volume,
('nfs://127.0.0.1/share/img-id', None),
{'id': 'image_id'},
'')
mox.VerifyAll()
self.assertFalse(result)
self.assertFalse(vol_dict['bootable'])
self.assertIsNone(vol_dict['provider_location'])
def test_is_cloneable_share_badformats(self):
drv = self._driver
strgs = ['10.61.666.22:/share/img',
'nfs://10.61.666.22:/share/img',
'nfs://10.61.666.22//share/img',
'nfs://com.netapp.com:/share/img',
'nfs://com.netapp.com//share/img',
'com.netapp.com://share/im\\g',
'http://com.netapp.com://share/img',
'nfs://com.netapp.com:/share/img',
'nfs://com.netapp.com:8080//share/img',
'nfs://com.netapp.com//img',
'nfs://[ae::sr::ty::po]/img']
for strg in strgs:
res = drv._is_cloneable_share(strg)
if res:
msg = 'Invalid format matched for url %s.' % strg
self.fail(msg)
def test_is_cloneable_share_goodformat1(self):
drv = self._driver
mox = self.mox
strg = 'nfs://10.61.222.333/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat2(self):
drv = self._driver
mox = self.mox
strg = 'nfs://10.61.222.333:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat3(self):
drv = self._driver
mox = self.mox
strg = 'nfs://com.netapp:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat4(self):
drv = self._driver
mox = self.mox
strg = 'nfs://netapp.com/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat5(self):
drv = self._driver
mox = self.mox
strg = 'nfs://netapp.com/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_check_share_in_use_no_conn(self):
drv = self._driver
share = drv._check_share_in_use(None, '/dir')
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_invalid_conn(self):
drv = self._driver
share = drv._check_share_in_use(':8989', '/dir')
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_incorrect_host(self):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(utils, 'resolve_hostname')
utils.resolve_hostname(mox_lib.IgnoreArg()).AndRaise(Exception())
mox.ReplayAll()
share = drv._check_share_in_use('incorrect:8989', '/dir')
mox.VerifyAll()
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_success(self):
drv = self._driver
mox = self.mox
drv._mounted_shares = ['127.0.0.1:/dir/share']
mox.StubOutWithMock(utils, 'resolve_hostname')
mox.StubOutWithMock(drv, '_share_match_for_ip')
utils.resolve_hostname(mox_lib.IgnoreArg()).AndReturn('10.22.33.44')
drv._share_match_for_ip(
'10.22.33.44', ['127.0.0.1:/dir/share']).AndReturn('share')
mox.ReplayAll()
share = drv._check_share_in_use('127.0.0.1:8989', '/dir/share')
mox.VerifyAll()
if not share:
self.fail('Expected share not detected')
def test_construct_image_url_loc(self):
drv = self._driver
img_loc = (None,
# Valid metadata
[{'metadata':
{'share_location': 'nfs://host/path',
'mountpoint': '/opt/stack/data/glance',
'id': 'abc-123',
'type': 'nfs'},
'url': 'file:///opt/stack/data/glance/image-id-0'},
# missing metadata
{'metadata': {},
'url': 'file:///opt/stack/data/glance/image-id-1'},
# missing location_type
{'metadata': {'location_type': None},
'url': 'file:///opt/stack/data/glance/image-id-2'},
# non-nfs location_type
{'metadata': {'location_type': 'not-NFS'},
'url': 'file:///opt/stack/data/glance/image-id-3'},
# missing share_location
{'metadata': {'location_type': 'nfs',
'share_location': None},
'url': 'file:///opt/stack/data/glance/image-id-4'},
# missing mountpoint
{'metadata': {'location_type': 'nfs',
'share_location': 'nfs://host/path',
# Pre-kilo we documented "mount_point"
'mount_point': '/opt/stack/data/glance'},
'url': 'file:///opt/stack/data/glance/image-id-5'},
# Valid metadata
{'metadata':
{'share_location': 'nfs://host/path',
'mountpoint': '/opt/stack/data/glance',
'id': 'abc-123',
'type': 'nfs'},
'url': 'file:///opt/stack/data/glance/image-id-6'}])
locations = drv._construct_image_nfs_url(img_loc)
self.assertIn("nfs://host/path/image-id-0", locations)
self.assertIn("nfs://host/path/image-id-6", locations)
self.assertEqual(2, len(locations))
def test_construct_image_url_direct(self):
drv = self._driver
img_loc = ("nfs://host/path/image-id", None)
locations = drv._construct_image_nfs_url(img_loc)
self.assertIn("nfs://host/path/image-id", locations)
def test_get_pool(self):
pool = self._driver.get_pool({'provider_location': 'fake-share'})
self.assertEqual('fake-share', pool)
def _set_config(self, configuration):
configuration.netapp_storage_family = 'ontap_cluster'
configuration.netapp_storage_protocol = 'nfs'
configuration.netapp_login = 'admin'
configuration.netapp_password = 'pass'
configuration.netapp_server_hostname = '127.0.0.1'
configuration.netapp_transport_type = 'http'
configuration.netapp_server_port = None
configuration.netapp_vserver = 'openstack'
configuration.nfs_shares_config = '/nfs'
return configuration
@mock.patch.object(utils, 'get_volume_extra_specs')
def test_check_volume_type_mismatch(self, get_specs):
if not hasattr(self._driver, 'vserver'):
self.skipTest("Test only applies to cmode driver")
get_specs.return_value = {'thin_volume': 'true'}
self._driver._is_share_vol_type_match = mock.Mock(return_value=False)
self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
self._driver._check_volume_type, 'vol',
'share', 'file')
get_specs.assert_called_once_with('vol')
self._driver._is_share_vol_type_match.assert_called_once_with(
'vol', 'share', 'file')
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_all_default(self):
configuration = self._set_config(create_configuration())
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_cmode, 'Client')
driver.do_setup(context='')
mock_invoke.assert_called_with(**FAKE_CONNECTION_INFO_HTTP)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_http_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'http'
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_cmode, 'Client')
driver.do_setup(context='')
mock_invoke.assert_called_with(**FAKE_CONNECTION_INFO_HTTP)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_https_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'https'
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_cmode, 'Client')
driver.do_setup(context='')
mock_invoke.assert_called_with(**FAKE_CONNECTION_INFO_HTTPS)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_http_non_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_server_port = 81
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_cmode, 'Client')
driver.do_setup(context='')
FAKE_CONN_INFO_PORT_HTTP = dict(FAKE_CONNECTION_INFO_HTTP, port=81)
mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTP)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_https_non_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'https'
configuration.netapp_server_port = 446
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_cmode, 'Client')
driver.do_setup(context='')
FAKE_CONN_INFO_PORT_HTTPS = dict(FAKE_CONNECTION_INFO_HTTPS, port=446)
mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTPS)
@mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11')
def test_convert_vol_ref_share_name_to_share_ip(self, mock_hostname):
drv = self._driver
share = "%s/%s" % (self.TEST_NFS_EXPORT1, 'test_file_name')
modified_share = '10.12.142.11:/export/test_file_name'
modified_vol_ref = drv._convert_vol_ref_share_name_to_share_ip(share)
self.assertEqual(modified_share, modified_vol_ref)
@mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11')
@mock.patch.object(os.path, 'isfile', return_value=True)
def test_get_share_mount_and_vol_from_vol_ref(self, mock_isfile,
mock_hostname):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, 'test_file_name')
vol_ref = {'source-name': vol_path}
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
(share, mount, file_path) = \
drv._get_share_mount_and_vol_from_vol_ref(vol_ref)
self.assertEqual(self.TEST_NFS_EXPORT1, share)
self.assertEqual(self.TEST_MNT_POINT, mount)
self.assertEqual('test_file_name', file_path)
@mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11')
def test_get_share_mount_and_vol_from_vol_ref_with_bad_ref(self,
mock_hostname):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
vol_ref = {'source-id': '1234546'}
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
self.assertRaises(exception.ManageExistingInvalidReference,
drv._get_share_mount_and_vol_from_vol_ref, vol_ref)
@mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11')
def test_get_share_mount_and_vol_from_vol_ref_where_not_found(self,
mock_host):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT2, 'test_file_name')
vol_ref = {'source-name': vol_path}
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
self.assertRaises(exception.ManageExistingInvalidReference,
drv._get_share_mount_and_vol_from_vol_ref, vol_ref)
@mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11')
def test_get_share_mount_and_vol_from_vol_ref_where_is_dir(self,
mock_host):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
vol_ref = {'source-name': self.TEST_NFS_EXPORT2}
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
self.assertRaises(exception.ManageExistingInvalidReference,
drv._get_share_mount_and_vol_from_vol_ref, vol_ref)
@mock.patch.object(cinder_utils, 'get_file_size', return_value=1073741824)
def test_manage_existing_get_size(self, get_file_size):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
test_file = 'test_file_name'
volume = FakeVolume()
volume['name'] = 'file-new-managed-123'
volume['id'] = 'volume-new-managed-123'
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file)
vol_ref = {'source-name': vol_path}
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
drv._get_share_mount_and_vol_from_vol_ref = mock.Mock(
return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT,
test_file))
vol_size = drv.manage_existing_get_size(volume, vol_ref)
self.assertEqual(1, vol_size)
@mock.patch.object(cinder_utils, 'get_file_size', return_value=1074253824)
def test_manage_existing_get_size_round_up(self, get_file_size):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
test_file = 'test_file_name'
volume = FakeVolume()
volume['name'] = 'file-new-managed-123'
volume['id'] = 'volume-new-managed-123'
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file)
vol_ref = {'source-name': vol_path}
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
drv._get_share_mount_and_vol_from_vol_ref = mock.Mock(
return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT,
test_file))
vol_size = drv.manage_existing_get_size(volume, vol_ref)
self.assertEqual(2, vol_size)
@mock.patch.object(cinder_utils, 'get_file_size', return_value='badfloat')
def test_manage_existing_get_size_error(self, get_size):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
test_file = 'test_file_name'
volume = FakeVolume()
volume['name'] = 'file-new-managed-123'
volume['id'] = 'volume-new-managed-123'
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file)
vol_ref = {'source-name': vol_path}
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
drv._get_share_mount_and_vol_from_vol_ref = mock.Mock(
return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT,
test_file))
self.assertRaises(exception.VolumeBackendAPIException,
drv.manage_existing_get_size, volume, vol_ref)
@mock.patch.object(cinder_utils, 'get_file_size', return_value=1074253824)
def test_manage_existing(self, get_file_size):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
test_file = 'test_file_name'
volume = FakeVolume()
volume['name'] = 'file-new-managed-123'
volume['id'] = 'volume-new-managed-123'
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file)
vol_ref = {'source-name': vol_path}
drv._check_volume_type = mock.Mock()
self.stubs.Set(drv, '_execute', mock.Mock())
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
drv._get_share_mount_and_vol_from_vol_ref = mock.Mock(
return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT,
test_file))
shutil.move = mock.Mock()
mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs')
mock_get_specs.return_value = {}
self.mock_object(drv, '_do_qos_for_volume')
location = drv.manage_existing(volume, vol_ref)
self.assertEqual(self.TEST_NFS_EXPORT1, location['provider_location'])
drv._check_volume_type.assert_called_once_with(
volume, self.TEST_NFS_EXPORT1, test_file, {})
@mock.patch.object(cinder_utils, 'get_file_size', return_value=1074253824)
def test_manage_existing_move_fails(self, get_file_size):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
test_file = 'test_file_name'
volume = FakeVolume()
volume['name'] = 'volume-new-managed-123'
volume['id'] = 'volume-new-managed-123'
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file)
vol_ref = {'source-name': vol_path}
mock_check_volume_type = drv._check_volume_type = mock.Mock()
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
drv._get_share_mount_and_vol_from_vol_ref = mock.Mock(
return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT,
test_file))
drv._execute = mock.Mock(side_effect=OSError)
mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs')
mock_get_specs.return_value = {}
self.mock_object(drv, '_do_qos_for_volume')
self.assertRaises(exception.VolumeBackendAPIException,
drv.manage_existing, volume, vol_ref)
mock_check_volume_type.assert_called_once_with(
volume, self.TEST_NFS_EXPORT1, test_file, {})
@mock.patch.object(nfs_base, 'LOG')
def test_unmanage(self, mock_log):
drv = self._driver
self.mock_object(utils, 'get_valid_qos_policy_group_info')
volume = FakeVolume()
volume['id'] = '123'
volume['provider_location'] = '/share'
drv.unmanage(volume)
self.assertEqual(1, mock_log.info.call_count)
class NetAppCmodeNfsDriverOnlyTestCase(test.TestCase):
"""Test direct NetApp C Mode driver only and not inherit."""
def setUp(self):
super(NetAppCmodeNfsDriverOnlyTestCase, self).setUp()
self._custom_setup()
def _custom_setup(self):
self.mock_object(utils, 'OpenStackInfo')
kwargs = {}
kwargs['netapp_mode'] = 'proxy'
kwargs['configuration'] = create_configuration()
self._driver = netapp_nfs_cmode.NetAppCmodeNfsDriver(**kwargs)
self._driver.ssc_enabled = True
self._driver.configuration.netapp_copyoffload_tool_path = 'cof_path'
self._driver.zapi_client = mock.Mock()
self.mock_object(netapp_nfs_cmode, 'LOG')
self._fake_empty_qos_policy_group_info = {
'legacy': None,
'spec': None,
}
self._fake_legacy_qos_policy_group_info = {
'legacy': {
'policy_name': 'qos_policy_1'
},
'spec': None,
}
@mock.patch.object(utils, 'LOG', mock.Mock())
def test_create_volume(self):
drv = self._driver
drv.ssc_enabled = False
fake_extra_specs = {}
fake_share = 'localhost:myshare'
host = 'hostname@backend#' + fake_share
mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs')
mock_get_specs.return_value = fake_extra_specs
self.mock_object(drv, '_ensure_shares_mounted')
self.mock_object(drv, '_do_create_volume')
mock_get_qos_info =\
self.mock_object(utils, 'get_valid_qos_policy_group_info')
mock_get_qos_info.return_value = self._fake_empty_qos_policy_group_info
volume_info = self._driver.create_volume(FakeVolume(host, 1))
self.assertEqual(fake_share, volume_info.get('provider_location'))
self.assertEqual(0, utils.LOG.warning.call_count)
def test_create_volume_no_pool_specified(self):
drv = self._driver
drv.ssc_enabled = False
host = 'hostname@backend' # missing pool
with mock.patch.object(drv, '_ensure_shares_mounted'):
self.assertRaises(exception.InvalidHost,
self._driver.create_volume, FakeVolume(host, 1))
def test_create_volume_with_legacy_qos_policy(self):
drv = self._driver
drv.ssc_enabled = False
fake_extra_specs = {'netapp:qos_policy_group': 'qos_policy_1'}
fake_share = 'localhost:myshare'
host = 'hostname@backend#' + fake_share
fake_volume = FakeVolume(host, 1)
mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs')
mock_get_specs.return_value = fake_extra_specs
mock_get_qos_info =\
self.mock_object(utils, 'get_valid_qos_policy_group_info')
mock_get_qos_info.return_value =\
self._fake_legacy_qos_policy_group_info
self.mock_object(drv, '_ensure_shares_mounted')
self.mock_object(drv, '_do_create_volume')
mock_set_qos = self.mock_object(drv, '_set_qos_policy_group_on_volume')
volume_info = self._driver.create_volume(fake_volume)
self.assertEqual('localhost:myshare',
volume_info.get('provider_location'))
mock_set_qos.assert_called_once_with(
fake_volume, self._fake_legacy_qos_policy_group_info)
def test_copy_img_to_vol_copyoffload_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(context, volume, image_service, image_id)
drv._try_copyoffload.assert_called_once_with(context, volume,
image_service,
image_id)
drv._update_stale_vols.assert_called_once_with('vol')
def test_copy_img_to_vol_copyoffload_failure(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock(side_effect=Exception())
nfs_base.NetAppNfsDriver.copy_image_to_volume = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(context, volume, image_service, image_id)
drv._try_copyoffload.assert_called_once_with(context, volume,
image_service,
image_id)
nfs_base.NetAppNfsDriver.copy_image_to_volume.\
assert_called_once_with(context, volume, image_service, image_id)
drv._update_stale_vols.assert_called_once_with('vol')
def test_copy_img_to_vol_copyoffload_nonexistent_binary_path(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = mock.Mock()
image_service.get_location.return_value = (mock.Mock(), mock.Mock())
image_service.show.return_value = {'size': 0}
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._find_image_in_cache = mock.Mock(return_value=[])
drv._construct_image_nfs_url = mock.Mock(return_value=["nfs://1"])
drv._check_get_nfs_path_segs = mock.Mock(return_value=("test:test",
"dr"))
drv._get_ip_verify_on_cluster = mock.Mock(return_value="192.1268.1.1")
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._get_host_ip = mock.Mock()
drv._get_provider_location = mock.Mock()
drv._get_export_path = mock.Mock(return_value="dr")
drv._check_share_can_hold_size = mock.Mock()
# Raise error as if the copyoffload file can not be found
drv._clone_file_dst_exists = mock.Mock(side_effect=OSError())
# Verify the original error is propagated
self.assertRaises(OSError, drv._try_copyoffload,
context, volume, image_service, image_id)
def test_copyoffload_frm_cache_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._find_image_in_cache = mock.Mock(return_value=[('share', 'img')])
drv._copy_from_cache = mock.Mock(return_value=True)
drv._try_copyoffload(context, volume, image_service, image_id)
drv._copy_from_cache.assert_called_once_with(volume,
image_id,
[('share', 'img')])
def test_copyoffload_frm_img_service_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._find_image_in_cache = mock.Mock(return_value=[])
drv._copy_from_img_service = mock.Mock()
drv._try_copyoffload(context, volume, image_service, image_id)
drv._copy_from_img_service.assert_called_once_with(context,
volume,
image_service,
image_id)
def test_cache_copyoffload_workflow_success(self):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
cache_result = [('ip1:/openstack', 'img-cache-imgid')]
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._execute = mock.Mock()
drv._register_image_in_cache = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='/share')
drv._post_clone_image = mock.Mock()
copied = drv._copy_from_cache(volume, image_id, cache_result)
self.assertTrue(copied)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._execute.assert_called_once_with('cof_path', 'ip1', 'ip1',
'/openstack/img-cache-imgid',
'/exp_path/name',
run_as_root=False,
check_exit_code=0)
drv._post_clone_image.assert_called_with(volume)
drv._get_provider_location.assert_called_with('vol_id')
@mock.patch.object(image_utils, 'qemu_img_info')
def test_img_service_raw_copyoffload_workflow_success(self,
mock_qemu_img_info):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
context = object()
image_service = mock.Mock()
image_service.get_location.return_value = ('nfs://ip1/openstack/img',
None)
image_service.show.return_value = {'size': 1,
'disk_format': 'raw'}
drv._check_get_nfs_path_segs =\
mock.Mock(return_value=('ip1', '/openstack'))
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._discover_file_till_timeout = mock.Mock(return_value=True)
img_inf = mock.Mock()
img_inf.file_format = 'raw'
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file_at_path = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._copy_from_img_service(context, volume, image_service, image_id)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert drv._execute.call_count == 1
drv._post_clone_image.assert_called_with(volume)
@mock.patch.object(image_utils, 'convert_image')
@mock.patch.object(image_utils, 'qemu_img_info')
@mock.patch('os.path.exists')
def test_img_service_qcow2_copyoffload_workflow_success(self, mock_exists,
mock_qemu_img_info,
mock_cvrt_image):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
context = object()
image_service = mock.Mock()
image_service.get_location.return_value = ('nfs://ip1/openstack/img',
None)
image_service.show.return_value = {'size': 1,
'disk_format': 'qcow2'}
drv._check_get_nfs_path_segs =\
mock.Mock(return_value=('ip1', '/openstack'))
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
img_inf = mock.Mock()
img_inf.file_format = 'raw'
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file_at_path = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._copy_from_img_service(context, volume, image_service, image_id)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert mock_cvrt_image.call_count == 1
assert drv._execute.call_count == 1
assert drv._delete_file_at_path.call_count == 2
assert drv._clone_file_dst_exists.call_count == 1
drv._post_clone_image.assert_called_with(volume)
class NetApp7modeNfsDriverTestCase(NetAppCmodeNfsDriverTestCase):
"""Test direct NetApp 7 Mode driver."""
def _custom_setup(self):
self.mock_object(utils, 'OpenStackInfo')
# Inject fake netapp_lib module classes.
netapp_api.mock_netapp_lib([client_cmode, client_base])
self.mock_object(common.na_utils, 'check_netapp_lib')
self.mock_object(common.na_utils, 'LOG')
self.mock_object(nfs_base, 'LOG')
self._driver = netapp_nfs_7mode.NetApp7modeNfsDriver(
configuration=create_configuration())
self._driver.zapi_client = mock.Mock()
def _prepare_delete_snapshot_mock(self, snapshot_exists):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_get_provider_location')
mox.StubOutWithMock(drv, '_volume_not_present')
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(mox_lib.IgnoreArg())
drv._volume_not_present(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv._execute('rm', None, run_as_root=True)
mox.ReplayAll()
return mox
def test_create_volume_no_pool_specified(self):
drv = self._driver
drv.ssc_enabled = False
host = 'hostname@backend' # missing pool
with mock.patch.object(drv, '_ensure_shares_mounted'):
self.assertRaises(exception.InvalidHost,
self._driver.create_volume, FakeVolume(host, 1))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup')
@mock.patch.object(client_7mode.Client, '__init__', return_value=None)
def test_do_setup(self, mock_client_init, mock_super_do_setup):
context = mock.Mock()
self._driver.do_setup(context)
mock_client_init.assert_called_once_with(**SEVEN_MODE_CONNECTION_INFO)
mock_super_do_setup.assert_called_once_with(context)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_all_default(self):
configuration = self._set_config(create_configuration())
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_7mode, 'Client')
driver.do_setup(context='')
mock_invoke.assert_called_with(**FAKE_7MODE_CONNECTION_INFO_HTTP)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_http_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'http'
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_7mode, 'Client')
driver.do_setup(context='')
mock_invoke.assert_called_with(**FAKE_7MODE_CONNECTION_INFO_HTTP)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_https_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'https'
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_7mode, 'Client')
driver.do_setup(context='')
mock_invoke.assert_called_with(**FAKE_7MODE_CONNECTION_INFO_HTTPS)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_http_non_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_server_port = 81
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_7mode, 'Client')
driver.do_setup(context='')
FAKE_CONN_INFO_PORT_HTTP = dict(FAKE_7MODE_CONNECTION_INFO_HTTP,
port=81)
mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTP)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_https_non_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'https'
configuration.netapp_server_port = 446
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_7mode, 'Client')
driver.do_setup(context='')
FAKE_CONN_INFO_PORT_HTTPS = dict(FAKE_7MODE_CONNECTION_INFO_HTTPS,
port=446)
mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTPS)
@mock.patch.object(nfs_base.NetAppNfsDriver, 'check_for_setup_error')
def test_check_for_setup_error(self, mock_super_check_for_setup_error):
self._driver.zapi_client.get_ontapi_version.return_value = (1, 20)
self.assertIsNone(self._driver.check_for_setup_error())
mock_super_check_for_setup_error.assert_called_once_with()
def test_check_for_setup_error_old_version(self):
self._driver.zapi_client.get_ontapi_version.return_value = (1, 8)
self.assertRaises(exception.VolumeBackendAPIException,
self._driver.check_for_setup_error)
def test_check_for_setup_error_no_version(self):
self._driver.zapi_client.get_ontapi_version.return_value = None
self.assertRaises(exception.VolumeBackendAPIException,
self._driver.check_for_setup_error)
def _prepare_clone_mock(self, status):
drv = self._driver
mox = self.mox
volume = FakeVolume()
setattr(volume, 'provider_location', '127.0.0.1:/nfs')
mox.StubOutWithMock(drv, '_get_export_ip_path')
drv._get_export_ip_path(
mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(('127.0.0.1', '/nfs'))
return mox
def test_clone_backing_file_for_volume_clear(self):
drv = self._driver
mox = self._prepare_clone_mock('fail')
drv.zapi_client = mox.CreateMockAnything()
drv.zapi_client.get_actual_path_for_export('/nfs').AndReturn(
'/vol/vol1/nfs')
drv.zapi_client.clone_file(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + six.text_type(hash(volume_name))
try:
drv._clone_backing_file_for_volume(volume_name, clone_name,
volume_id)
except Exception as e:
if isinstance(e, netapp_api.NaApiError):
pass
else:
raise
mox.VerifyAll()
def test_get_pool(self):
pool = self._driver.get_pool({'provider_location': 'fake-share'})
self.assertEqual('fake-share', pool)
def _set_config(self, configuration):
super(NetApp7modeNfsDriverTestCase, self)._set_config(
configuration)
configuration.netapp_storage_family = 'ontap_7mode'
return configuration
def test_clone_backing_file_for_volume(self):
drv = self._driver
mox = self._prepare_clone_mock('pass')
drv.zapi_client = mox.CreateMockAnything()
drv.zapi_client.get_actual_path_for_export('/nfs').AndReturn(
'/vol/vol1/nfs')
drv.zapi_client.clone_file(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + six.text_type(hash(volume_name))
share = 'ip:/share'
drv._clone_backing_file_for_volume(volume_name, clone_name, volume_id,
share)
mox.VerifyAll()
|
CAMI-challenge/AMBER
|
features/steps/cli.py
|
import os
from behave import *
@when(u'I run the command')
def step_impl(context):
context.output = context.env.run(
"bash -c '{}'".format(os.path.expandvars(context.text)),
expect_error=True,
expect_stderr=True)
def download_file(link, out):
import wget
return wget.download(link, out)
def get_stream(context, stream):
assert stream in ['stderr', 'stdout'], "Unknown output stream {}".format(stream)
return getattr(context.output, stream)
def assert_file_exists(file_):
assert os.path.isfile(file_), "The file \"{}\" does not exist.".format(file_)
def get_env_path(context, file_):
return os.path.join(context.env.cwd, file_)
def get_data_file_path(file_):
dir_ = os.path.dirname(os.path.abspath(__file__))
return os.path.join(dir_, '..', '..', 'test', file_)
@then(u'the file "{}" should exist')
def step_impl(context, file_):
assert_file_exists(get_env_path(context, file_))
@then(u'the exit code should be {code}')
def step_impl(context, code):
returned = context.output.returncode
assert returned == int(code), \
"Process should return exit code {} but was {}".format(code, returned)
@given(u'I download the file "{link}" to "{dest}"')
def step_impl(context, link, dest):
import sys
normalized_dest = get_env_path(context, dest)
sys.stdout = sys.__stdout__
download_file(link, normalized_dest)
@given(u'I copy the example data files')
def step_impl(context):
import shutil
for row in context.table.rows:
shutil.copy(get_data_file_path(row['source']),
get_env_path(context, row['dest']))
@then(u'the {stream} should contain')
def step_impl(context, stream):
output = get_stream(context, stream)
assert context.text in output
@given(u'I downloaded the scripts')
def create_tmp_dir(context):
dir_ = os.path.dirname(os.path.abspath(__file__))
tmp_dir = os.path.join(dir_, '..', '..', "tmp")
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
@given(u'I create the directory "{directory}"')
def step_impl(context, directory):
os.makedirs(get_env_path(context, directory))
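# Example (sketch): a feature scenario the steps above would match. The
# command and file name below are hypothetical and only illustrate the step
# wording; they are not part of the actual AMBER feature files.
#
#   Scenario: run a simple command
#     When I run the command
#       """
#       echo hello > out.txt
#       """
#     Then the file "out.txt" should exist
#     And the exit code should be 0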
|
arcpy/sample-gp-tools
|
SharePackage2/ago.py
|
#! -*- coding: utf-8; mode: python -*-
"""
ago.py: interact with an ArcGIS Portal instance
"""
import arcpy
import json
import time
import datetime
import mimetypes
import gzip
import random
import string
import getpass
import sys
import os
from io import BytesIO
import codecs
import uuid
import shutil
try:
import http.client as client
import urllib.parse as parse
from urllib.request import urlopen as urlopen
from urllib.request import Request as request
from urllib.request import HTTPError, URLError
from urllib.parse import urlencode as encode
# py2
except ImportError:
import httplib as client
from urllib2 import urlparse as parse
from urllib2 import urlopen as urlopen
from urllib2 import Request as request
from urllib2 import HTTPError, URLError
from urllib import urlencode as encode
unicode = str
# Valid package types on portal
ITEM_TYPES = {
".LPK": "Layer Package",
".LPKX": "Layer Package",
".MPK": "Map Package",
".MPKX": "Map Package",
".GPK": "Geoprocessing Package",
".GPKX": "Geoprocessing Package",
".RPK": "Rule Package",
".GCPK": "Locator Package",
".PPKX": "Project Package",
".APTX": "Project Template",
".TPK": "Tile Package",
".MMPK": "Mobile Map Package",
".VTPK": "Vector Tile Package"
}
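# Example (sketch): resolving a package's portal item type from its file
# extension via ITEM_TYPES. The file name here is hypothetical.
#
#   ext = os.path.splitext('roads.mpkx')[1].upper()        # '.MPKX'
#   item_type = ITEM_TYPES.get(ext, 'Unknown item type')   # 'Map Package'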
class MultipartFormdataEncoder(object):
"""
Usage: request_headers, request_data =
MultipartFormdataEncoder().encodeForm(params, files)
Inputs:
params = {"f": "json", "token": token, "type": item_type,
"title": title, "tags": tags, "description": description}
files = {"file": {"filename": "some_file.sd", "content": content}}
Note: content = open(file_path, "rb").read()
"""
def __init__(self):
self.boundary = uuid.uuid4().hex
self.content_type = {
"Content-Type": "multipart/form-data; boundary={}".format(self.boundary)
}
@classmethod
def u(cls, s):
if sys.hexversion < 0x03000000 and isinstance(s, str):
s = s.decode('utf-8')
if sys.hexversion >= 0x03000000 and isinstance(s, bytes):
s = s.decode('utf-8')
return s
def iter(self, fields, files):
"""
Yield bytes for body. See class description for usage.
"""
encoder = codecs.getencoder('utf-8')
for key, value in fields.items():
yield encoder('--{}\r\n'.format(self.boundary))
yield encoder(
self.u('Content-Disposition: form-data; name="{}"\r\n').format(key))
yield encoder('\r\n')
if isinstance(value, int) or isinstance(value, float):
value = str(value)
yield encoder(self.u(value))
yield encoder('\r\n')
for key, value in files.items():
if "filename" in value:
filename = value.get("filename")
content_disp = 'Content-Disposition: form-data;name=' + \
'"{}"; filename="{}"\r\n'.format(key, filename)
content_type = 'Content-Type: {}\r\n'.format(
mimetypes.guess_type(filename)[0] or 'application/octet-stream')
yield encoder('--{}\r\n'.format(self.boundary))
yield encoder(content_disp)
yield encoder(content_type)
yield encoder('\r\n')
if "content" in value:
buff = value.get("content")
yield (buff, len(buff))
yield encoder('\r\n')
yield encoder('--{}--\r\n'.format(self.boundary))
def encodeForm(self, fields, files):
body = BytesIO()
for chunk, chunk_len in self.iter(fields, files):
body.write(chunk)
self.content_type["Content-Length"] = str(len(body.getvalue()))
return self.content_type, body.getvalue()
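# Example (sketch): building a multipart request body for an item upload.
# The field values and file path are hypothetical; they mirror the usage
# notes in the class docstring above.
#
#   params = {'f': 'json', 'token': 'TOKEN', 'type': 'Map Package',
#             'title': 'Roads', 'tags': 'demo'}
#   files = {'file': {'filename': 'roads.mpk',
#                     'content': open('roads.mpk', 'rb').read()}}
#   headers, body = MultipartFormdataEncoder().encodeForm(params, files)
#   # 'headers' carries the boundary Content-Type plus Content-Length,
#   # and 'body' is the encoded bytes to send in the POST.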
class AGOLHelper(object):
"""
Interact with an ArcGIS Portal instance, such as ArcGIS Online. Must be
initialized with either the login() method, or by reusing an existing
OAuth token via token_login(). Covers approximately 1/3 of the complete
API, primarily focused on the common operations around uploading and
managing services and web maps.
"""
def __init__(self, portal_url=None, token=None, debug=False):
if portal_url is None:
self.portal_url = arcpy.GetActivePortalURL()
else:
self.portal_url = portal_url
# in the absence of information, default to HTTPS
self.protocol = 'https'
self.is_arcgis_online = False
url_parts = self._parse_url(self.portal_url)
if url_parts:
if url_parts.scheme:
self.protocol = url_parts.scheme
self.host = self._normalize_host_url(url_parts)
if url_parts.netloc == 'www.arcgis.com':
self.is_arcgis_online = True
self.protocol = 'https'
else:
arcpy.AddError(NO_PORTAL_URL_MSG)
sys.exit()
self.base_url = '{}://{}/sharing/rest'.format(self.protocol, self.host)
self.secure_url = 'https://{}/sharing/rest'.format(self.host)
self.token = token
self.debug = debug
self.headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'User-Agent': ('ago.py -- ArcGIS portal module 0.1')
}
self.portal_name = None
self.portal_info = {}
self.username = None
self.login_method = None
self.expiration = None
self._password = None
def login(self, username=None, password=None, repeat=None):
"""
Get a sign-in token from provided credentials.
Arguments:
username -- user to sign in with
password -- password for user (default: use getpass)
Returns:
None
"""
if username:
self.username = username
else:
arcpy.AddError("Expected user name. None given.")
return
if password is None:
self._password = getpass.getpass()
else:
self._password = password
token_url = '{}/generateToken?'.format(self.secure_url)
token_parameters = {
'username': username,
'password': self._password,
'referer': "http://maps.esri.com",
'expiration': 600,
}
token_response = self.url_request(
token_url, token_parameters, 'POST', repeat=repeat)
if token_response and 'token' in token_response:
self.token = token_response['token']
self.expiration = datetime.datetime.fromtimestamp(
token_response['expires'] / 1000) - datetime.timedelta(seconds=1)
if 'ssl' in token_response:
if token_response['ssl']:
self.protocol = 'https'
else:
self.protocol = 'http'
# update base information with token
self.information()
self.login_method = 'password'
else:
arcpy.AddError("Unable to get signin token.")
return
def token_login(self):
"""
Get a sign-in token generated from ArcPy.
Arguments:
None
Returns:
None
"""
# NOTE side-effects
token_response = arcpy.GetSigninToken()
if token_response and 'token' in token_response:
self.token = token_response['token']
self.expiration = datetime.datetime.fromtimestamp(
token_response['expires']) - datetime.timedelta(seconds=1)
if self.debug:
msg = 'Received token starting with ' + \
'"{}", valid for {} minutes.'.format(
self.token[0:10], self.valid_for)
arcpy.AddMessage(msg)
# update base information with token
self.information()
self.login_method = 'token'
else:
arcpy.AddError("Unable to get signin token.")
return
@property
def valid_for(self):
"""
Length the current token is valid for, in minutes.
Returns:
An integer of minutes token remains valid
"""
valid = False
if self.expiration and isinstance(self.expiration, datetime.datetime):
            valid = max(0, int((self.expiration - datetime.datetime.now()).total_seconds() // 60))
return valid
def information(self):
"""
Get portal 'self' information.
Arguments:
None
Returns:
A dictionary returned from portals/self.
"""
# NOTE side-effects; do separately
url = '{}/portals/self'.format(self.base_url)
portal_info = self.url_request(url)
self.portal_info = portal_info
self.portal_name = portal_info['portalName']
url = '{}/community/self'.format(self.base_url)
user_info = self.url_request(url)
self.username = user_info['username']
return self.portal_info
def random_string(self, length):
"""
Generate a random string of ASCII letters.
Arguments:
length = number of characters
Returns:
random string
"""
alpha = string.ascii_letters
        return ''.join(random.choice(alpha) for ii in range(length))
def encode_multipart_data(self, data, files):
"""
Create multipart boundaries between file streams.
Arguments:
data -- input data
files -- input files
Returns:
A tuple containing response -- (body, headers)
"""
boundary = self.random_string(30)
def get_content_type(filename):
""" Try to determine content type based on file extension."""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
def encode_field(field_name):
""" Encode fields using multipart specification."""
return('--' + boundary,
'Content-Disposition: form-data; name="%s"' % field_name,
'', str(data[field_name]))
def encode_file(field_name):
""" Encode file data using multipart specification."""
filename = str(files[field_name])
return('--' + boundary,
'Content-Disposition: form-data;'
'name="{}"; filename="{}"'.format(field_name, filename),
'Content-Type: %s' % get_content_type(filename),
'', open(filename, 'rb').read())
lines = []
for name in data:
lines.extend(encode_field(name))
for name in files:
lines.extend(encode_file(name))
lines.extend(('--%s--' % boundary, ''))
body = '\r\n'.join(lines)
headers = {
'content-type': 'multipart/form-data; boundary=' + boundary,
'content-length': str(len(body))
}
return body, headers
def list_folders(self):
"""
List available user folders.
Returns:
A dictionary of folder titles to ids.
"""
folders = {}
folder_request = self.user_content()['folders']
for folder in folder_request:
folders[folder['title']] = folder['id']
return folders
def create_folder(self, name):
"""
        Create a folder item.
Arguments:
name -- folder name to create
Returns:
folder item id.
"""
folder = None
url = '{}/content/users/{}/createFolder'.format(
self.base_url, self.username)
parameters = {'title': name}
response = self.url_request(url, parameters, 'POST')
if response is not None and 'folder' in response:
folder = response['folder']['id']
return folder
def item(self, item_id=None, repeat=None):
"""
Get back information about a particular item. Must have read
access to the item requested.
Arguments:
item_id: the portal id of the desired item.
Returns:
Dictionary from item response.
"""
results = {}
if item_id:
url = '{}/content/items/{}'.format(self.base_url, item_id)
results = self.url_request(url, repeat=repeat)
return results
def move_items(self, target_folder_id, items):
"""
Move items to a target folder.
Arguments:
target_folder_id: folder id to move items to
items: list of one or more item ids to move
Returns:
None
"""
# Test if we have a None object somewhere
# This could potentially be the case if one of the previous
# portal responses was not successful.
if None in items:
arcpy.AddError(EMPTY_ITEM_MSG)
return
url = '{}/content/users/{}/moveItems'.format(
self.base_url, self.username)
parameters = {
'folder': target_folder_id,
'items': ','.join(map(str, items))
}
move_response = self.url_request(url, parameters, request_type='POST')
if self.debug:
msg = "Moving items, using {} with parameters {}, got {}".format(
url, parameters, move_response)
arcpy.AddMessage(msg)
return move_response
def share_items(self, groups=None, everyone=False, org=False, items=None):
"""
Shares one or more items with the specified groups. Can only share
items with groups the user belongs to. Can also share with
the users' current organization, and the public.
Arguments:
groups -- a list of group IDs to share items with
everyone -- publicly share the item (default: False)
org -- share with the users' organization (default: False)
items -- a list of item IDs to update sharing properties on
Returns:
A dictionary of JSON objects, one per item containing the item,
whether sharing was successful, any groups sharing failed with,
and any errors.
"""
if (groups is None and not everyone and not org) or not items:
if self.debug:
arcpy.AddWarning("Invalid sharing options set.")
return
# If shared with everyone, have to share with Org as well
if everyone:
org = True
url = '{}/content/users/{}/shareItems'.format(
self.base_url, self.username)
parameters = {
'everyone': everyone,
'org': org,
'items': ','.join(map(str, items))
}
# sharing with specific groups is optional
if groups:
parameters['groups'] = ','.join(map(str, groups))
sharing_response = self.url_request(url, parameters, 'POST')
if self.debug:
msg = "Sharing items, using {} with parameters {}, got {}".format(
url, parameters, sharing_response)
arcpy.AddMessage(msg)
return sharing_response
def search(self, title=None, item_type=None, group=None,
owner=None, item_id=None, repeat=None, num=10, id_only=True, name=None):
"""
Search for items, a partial implementation of the
search operation of the ArcGIS REST API. Requires one of:
title, item_type, group, owner.
Arguments:
title -- item title
item_type -- item type
group -- item group
owner -- username of item owner
item_id -- item id
repeat -- retry the search, up to this number of times (default: None)
num -- number of results (default: 10)
id_only -- return only IDs of results. If False, will return
                full JSON results. (default: True)
            name -- item name
Returns:
A list of search results item ids.
"""
query_types = {
'title': title,
'type': item_type,
'group': group,
            'owner': self.username,  # NOTE: the 'owner' argument is currently ignored; the signed-in user is always used
'id': item_id,
'name': name
}
query_parts = []
for (label, value) in list(query_types.items()):
if value:
query_parts.append('{}: "{}"'.format(label, value))
if len(query_parts) == 0:
return
elif len(query_parts) == 1:
query = query_parts[0]
else:
query = " AND ".join(query_parts)
if self.debug:
arcpy.AddMessage("Searching for '{}'".format(query))
url = '{}/search'.format(self.base_url)
parameters = {
'num': num,
'q': query
}
response_info = self.url_request(url, parameters)
results = []
if response_info and 'results' in response_info:
if response_info['total'] > 0:
for item in response_info['results']:
if 'id' in item:
if id_only:
results.append(item['id'])
else:
results.append(item)
if self.debug:
if results:
arcpy.AddMessage("Got results! Found items: {}".format(results))
else:
arcpy.AddMessage("No results found.")
# occasional timing conflicts are happening; repeat search until we
# can continue -- the result should be empty since we just deleted it.
if repeat and not results:
repeat -= 1
if repeat <= 0:
return
time.sleep(1)
results = self.search(
title=title, item_type=item_type, group=group, owner=owner,
item_id=item_id, repeat=repeat, num=num, id_only=id_only)
return results
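    # A sketch of the query string search() builds (the values are hypothetical):
    # calling search(title='Parcels', item_type='Feature Service') produces
    #   title: "Parcels" AND type: "Feature Service"
    # which is sent as the 'q' parameter of the portal /search endpoint.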
def user(self, username=None):
"""
A user resource representing a registered user of the portal.
Arguments:
username -- user of interest
Returns:
A dictionary of the JSON response.
"""
if username is None:
username = self.username
url = '{}/community/users/{}'.format(self.base_url, username)
return self.url_request(url)
def user_content(self, username=None):
"""
User items and folders.
Arguments:
username -- user of interest
Returns:
A dictionary of user items and folders.
"""
if username is None:
username = self.username
url = '{}/content/users/{}'.format(self.base_url, username)
return self.url_request(url)
def list_groups(self, username=None):
"""
List users' groups.
Returns:
A dictionary of group titles to ids.
"""
groups = {}
if username is None:
username = self.username
groups_request = self.user(username)['groups']
for group in groups_request:
groups[group['title']] = group['id']
return groups
def add_item(self, file_to_upload, username=None, folder_id=None, itemtype=None, params=None):
"""
Adds an item to the portal.
All items are added as multipart. Once the item is added,
Add Part will be called.
Returns:
The response/item_id of the item added.
"""
if username is None:
username = self.username
url = '{}/content/users/{}/{}/addItem'.format(self.base_url, username, folder_id)
parameters = {
'multipart': 'true',
'filename': file_to_upload,
}
if params:
parameters.update(params)
if itemtype:
parameters['type'] = itemtype
else:
try:
file_name, file_ext = os.path.splitext(os.path.basename(file_to_upload))
itemtype = ITEM_TYPES[file_ext.upper()]
except KeyError:
msg = "Unable to upload file: {}, unknown type".format(
file_to_upload)
arcpy.AddError(msg)
return
details = {'filename': file_to_upload}
add_item_res = self.url_request(
url, parameters, request_type="POST", files=details)
return self._add_part(file_to_upload, add_item_res['id'], itemtype)
def _add_part(self, file_to_upload, item_id, upload_type=None):
""" Add item part to an item being uploaded."""
def read_in_chunks(file_object, chunk_size=10000000):
"""Generate file chunks (default: 10MB)"""
while True:
data = file_object.read(chunk_size)
if not data:
break
yield data
url = '{}/content/users/{}/items/{}/addPart'.format(
self.base_url, self.username, item_id)
with open(file_to_upload, 'rb') as f:
for part_num, piece in enumerate(read_in_chunks(f), start=1):
title = os.path.splitext(os.path.basename(file_to_upload))[0]
files = {"file": {"filename": file_to_upload, "content": piece}}
params = {
'f': "json",
'token': self.token,
'partNum': part_num,
'title': title,
'itemType': 'file',
'type': upload_type
}
headers, data = MultipartFormdataEncoder().encodeForm(params, files)
resp = self.url_request(url, data, "MULTIPART", headers, repeat=1)
return resp
def item_status(self, item_id, username=None):
"""
Gets the status of an item.
Returns:
The item's status. (partial | processing | failed | completed)
"""
if username is None:
username = self.username
url = '{}/content/users/{}/items/{}/status'.format(
self.base_url, username, item_id)
return self.url_request(url)
def commit(self, item_id, username=None):
"""
Commits an item that was uploaded as multipart
Returns:
Result of calling commit. (success: true| false)
"""
if username is None:
username = self.username
url = '{}/content/users/{}/items/{}/commit'.format(
self.base_url, username, item_id)
return self.url_request(url)
def update_item(self, item_id, metadata, username=None, folder_id=None, title=None):
"""
Updates metadata parts of an item.
Metadata expected as a tuple
Returns:
Result of calling update. (success: true | false)
"""
if username is None:
username = self.username
url = "{}/content/users/{}/{}/items/{}/update".format(
self.base_url, username, folder_id, item_id)
parameters = {
'snippet': metadata[0],
'description': metadata[1],
'tags': metadata[2],
'accessInformation': metadata[3],
'licenseInfo': metadata[4],
'token': self.token,
'f': 'json'
}
if title:
parameters['title'] = title
if len(metadata) > 5:
parameters['thumbnail'] = metadata[5]
with open(metadata[5], 'rb') as f:
d = f.read()
files = {"thumbnail": {"filename": metadata[5], "content": d }}
headers, data = MultipartFormdataEncoder().encodeForm(parameters, files)
resp = self.url_request(url, data, "MULTIPART", headers, repeat=1)
return resp
else:
return self.url_request(url, parameters, 'POST')
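    # A sketch of the metadata tuple update_item() expects (values are
    # hypothetical); positions map to
    # (snippet, description, tags, accessInformation, licenseInfo[, thumbnail path]):
    #
    #   metadata = ('Short summary', 'Longer description', 'tag1, tag2',
    #               'Example Org', 'CC-BY-4.0', 'thumbnail.png')
    #   portal.update_item(item_id, metadata, folder_id=folder_id)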
def url_request(self, in_url, request_parameters=None, request_type='GET',
additional_headers=None, files=None, repeat=0):
"""
Make a request to the portal, provided a portal URL
and request parameters, returns portal response. By default,
returns a JSON response, and reuses the current token.
Arguments:
in_url -- portal url
request_parameters -- dictionary of request parameters.
request_type -- HTTP verb (default: GET)
additional_headers -- any headers to pass along with the request.
files -- any files to send.
repeat -- repeat the request up to this number of times.
Returns:
dictionary of response from portal instance.
"""
# multipart requests pre-encode the parameters
if request_type == 'MULTIPART':
parameters = request_parameters
else:
parameters = {'f': 'json'}
# if we haven't logged in yet, won't have a valid token
if self.token:
parameters['token'] = self.token
if request_parameters:
parameters.update(request_parameters)
if request_type == 'GET':
req = request('?'.join((in_url, encode(parameters))))
elif request_type == 'MULTIPART':
req = request(in_url, parameters)
elif request_type == 'WEBMAP':
if files:
req = request(in_url, *self.encode_multipart_data(parameters, files))
else:
arcpy.AddWarning("Multipart request made, but no files provided.")
return
else:
req = request(
in_url, encode(parameters).encode('UTF-8'), self.headers)
if additional_headers:
for key, value in list(additional_headers.items()):
req.add_header(key, value)
req.add_header('Accept-encoding', 'gzip')
try:
response = urlopen(req)
except HTTPError as e:
arcpy.AddWarning("{} {} -- {}".format(
HTTP_ERROR_MSG, in_url, e.code))
return
except URLError as e:
arcpy.AddWarning("{} {} -- {}".format(
URL_ERROR_MSG, in_url, e.reason))
return
if response.info().get('Content-Encoding') == 'gzip':
buf = BytesIO(response.read())
with gzip.GzipFile(fileobj=buf) as gzip_file:
response_bytes = gzip_file.read()
else:
response_bytes = response.read()
response_text = response_bytes.decode('UTF-8')
# occasional timing conflicts; repeat until we get back a valid response.
response_json = json.loads(response_text)
# Check that data returned is not an error object
        if not response_json or "error" in response_json:
            rerun = False
            if repeat > 0:
                repeat -= 1
                rerun = True
            # token has expired. Revalidate, then rerun the request
            if response_json and response_json['error'].get('code') == 498:
if self.debug:
arcpy.AddWarning("token invalid, retrying.")
                if self.login_method == 'token':
# regenerate the token if we're logged in via the application
self.token_login()
else:
self.login(self.username, self._password, repeat=0)
# after regenerating token, we should have something long-lived
if not self.token or self.valid_for < 5:
arcpy.AddError("Unable to get signin token.")
return
rerun = True
if rerun:
time.sleep(2)
response_json = self.url_request(
in_url, request_parameters, request_type,
additional_headers, files, repeat)
return response_json
def save_file(self, url, saveFile):
"""Saves a file to a given location"""
if self.token:
url += "?token={}".format(self.token)
data = urlopen(url).read()
with open(saveFile, "wb") as out_file:
out_file.write(data)
return saveFile
def assert_json_success(self, data):
"""A function that checks that the input JSON object
is not an error object."""
success = False
obj = json.loads(data)
if 'status' in obj and obj['status'] == "error":
arcpy.AddWarning("{} {}".format("JSON object returned an error.", str(obj)))
elif 'error' in obj:
err = obj['error']
# format the error message
if 'messageCode' in err:
code = err['messageCode']
elif 'code' in err:
code = err['code']
else:
code = "No code provided."
msg = "Portal error: {}: {}".format(err['message'], code)
if 'details' in err and err['details']:
details = []
for detail in err['details']:
# only use unique detail messages
                    if detail != err['message']:
details.append(detail)
if details:
msg += ". Details: {}".format("\n".join(details))
arcpy.AddWarning(msg)
else:
success = True
return success
def _parse_url(self, url=None):
""" Parse a url into components."""
results = None
if url:
results = parse.urlparse(url)
return results
def _normalize_host_url(self, parse_result):
""" Normalize a hostname to include just the validated
location and path."""
host_url = parse_result.netloc
if parse_result.path:
path = parse_result.path
if path[-1] == '/':
path = path[:-1]
host_url += path
return host_url
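# A minimal end-to-end sketch of how this helper is typically driven (the URL,
# credentials and file name below are hypothetical placeholders, and error
# handling is omitted):
#
#   portal = AGOLHelper(portal_url='https://www.arcgis.com')
#   portal.login('a_user', 'a_password')
#   folder_id = portal.list_folders().get('Uploads') or portal.create_folder('Uploads')
#   portal.add_item('example.tpk', folder_id=folder_id)
#   item_id = portal.search(title='example', item_type='Tile Package', repeat=3)[0]
#   portal.commit(item_id)                      # finalize the multipart upload
#   portal.item_status(item_id)                 # partial | processing | failed | completed
#   portal.share_items(org=True, items=[item_id])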
|
eWUDAPT/eWUDAPT-analysis
|
eWUDAPT_analysis/analysis_single.py
|
from eWUDAPT_analysis.utils import *
from pylab import *
from netCDF4 import Dataset, num2date
import datetime
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
class analysis_single:
def __init__(self, args, json):
self.filename = args.filename
self.outputdir = args.outputdir
self.ncdf_definition = load_netcdf_definition(json)
self.institute, self.model, self.version = define_inst_mod_ver(args.filename)
create_directory(self.outputdir)
self.create_plots()
def create_plots(self):
'''
create plots
'''
# loop through all variables
for variable in self.ncdf_definition['variables']:
if 'time' in variable['dimensions']:
# create time series plot of surface/first level
self.plot_time_series(variable)
if (len(set(['levf', 'levh', 'levs']) & set(variable['dimensions']))==1):
dimname = sorted(set(['levf', 'levh', 'levs']) & set(variable['dimensions']))[0]
# found a vertical dimension -> create vertical profile
if variable['name'] not in ['zf', 'zh', 'zs']:
# don't plot height in meters
self.plot_vertical_profile(variable, dimname)
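    # Note on the dimension handling above: a variable on 'levf', 'levh' or 'levs'
    # is plotted against the matching height variable 'zf', 'zh' or 'zs' (see
    # plot_vertical_profile below); the height variables themselves are skipped so
    # that height is never plotted against itself.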
def plot_time_series(self, variable):
'''
        create time series plots
'''
try:
outputfig = ('time_series_' + self.institute + '_' + self.model + '_' +
self.version + '_' + variable["name"] + '.png')
outputfile = os.path.join(self.outputdir, outputfig)
ncfile = Dataset(self.filename, 'r')
time = ncfile.variables['time']
try:
dt = [num2date(step, units=time.units, calendar=time.calendar)
for step in time[:]]
except AttributeError:
# fallback
dt = [num2date(step, units='seconds since 2006-07-01 12:00:00',
calendar='gregorian') for step in time[:]]
if (len(variable['dimensions'])==1):
# plot time series, 1D variable
val = ncfile.variables[variable['name']][:]
elif (len(variable['dimensions'])==2):
# plot first level, 2D variable
# TODO: add info to title on level
val = ncfile.variables[variable['name']][:]
if (len(np.shape(val)) == len(variable['dimensions'])):
if np.shape(val)[0] == np.shape(time)[0]:
val = val[:,0]
elif np.shape(val)[1] == np.shape(time)[0]:
val = val[0,:]
else:
pass
else:
return
else:
                raise Exception('Variable ' + variable['name'] +
                                ' contains more than two dimensions')
# create the plot
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d/%Y %H:%M'))
plt.gca().xaxis.set_major_locator(mdates.HourLocator(byhour=range(0,24,3)))
plt.plot(dt, val)
plt.gcf().autofmt_xdate()
plt.xlabel('time')
plt.ylabel(variable["long_name"] + ' [' + variable["unit"] + ']')
plt.savefig(outputfile)
plt.close()
# close netCDF file
ncfile.close()
return # TODO: savefig
except KeyError:
pass
def plot_vertical_profile(self, variable, dimname):
'''
create vertical profile plots at 0, 6, 12, 18h
'''
try:
ncfile = Dataset(self.filename, 'r')
time = ncfile.variables['time']
try:
dt = [num2date(step, units=time.units, calendar=time.calendar)
for step in time[:]]
except AttributeError:
# fallback
dt = [num2date(step, units='seconds since 2006-07-01 12:00:00',
calendar='gregorian') for step in time[:]]
# define timesteps at which vertical profiles are plotted
dt_profiles = np.arange(dt[0], dt[-1],np.timedelta64(6,'h'),
dtype='datetime64').astype(datetime.datetime)
for dt_profile in dt_profiles:
try:
idx = dt.index(dt_profile)
except ValueError:
continue
if (len(variable['dimensions'])==1):
# plot static vertical profile, 1D variable
val = ncfile.variables[variable['name']][:]
elif (len(variable['dimensions'])==2):
# plot vertical profile every 6 hours, 2D variable
# TODO: add info to title on level
val = ncfile.variables[variable['name']][:]
if (len(np.shape(val)) == len(variable['dimensions'])):
if np.shape(val)[0] == np.shape(time)[0]:
val = val[idx, :]
elif np.shape(val)[1] == np.shape(time)[0]:
                            val = val[:, idx]
else:
pass
else:
return
else:
                    raise Exception('Variable ' + variable['name'] +
                                    ' contains more than two dimensions')
if (dimname=='levf'):
dimvar = 'zf'
elif (dimname=='levh'):
dimvar = 'zh'
elif (dimname=='levs'):
dimvar = 'zs'
levels = ncfile.variables[dimvar]
# create the plot
if dimvar != 'zs':
plt.plot(val, levels[idx, :])
else:
plt.plot(val, levels[:])
diminfo = get_dimension_information(self.ncdf_definition, dimname)
timestr = dt_profile.strftime('%Y-%m-%d %H:%M')
timestrplot = dt_profile.strftime('%Y-%m-%d_%H:%M')
outputfig = ('vert_profile_' + self.institute + '_' + self.model + '_' +
self.version + '_' + variable["name"] + timestrplot + '.png')
outputfile = os.path.join(self.outputdir, outputfig)
plt.title(variable['long_name'] + ' at ' + timestr)
plt.xlabel(variable["long_name"] + ' [' + variable["unit"] + ']')
plt.ylabel(levels.long_name + ' [' + levels.units + ']')
plt.savefig(outputfile)
plt.close()
# close netCDF file
ncfile.close()
return # TODO: savefig
except KeyError:
pass
|
google-research/google-research
|
optimizing_interpretability/metrics_utils.py
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics utils files to compute certain similarity metrics."""
from absl import flags
import numpy as np
import tensorflow as tf
FLAGS = flags.FLAGS
def VerifyCompatibleImageShapes(img1, img2):
"""Checks if two image tensors are compatible for metric computation.
This function checks if two sets of images have ranks at least 3, and if the
last three dimensions match.
Args:
img1: The first images tensor.
img2: The second images tensor.
Returns:
A tuple of the first tensor shape, the second tensor shape, and a list of
tf.Assert() implementing the checks.
Raises:
ValueError: when static shape check fails.
"""
shape1 = img1.shape.with_rank_at_least(3)
shape2 = img2.shape.with_rank_at_least(3)
if shape1.ndims is not None and shape2.ndims is not None:
for dim1, dim2 in zip(reversed(shape1[:-3]), reversed(shape2[:-3])):
# For TF V1 compatibility.
try:
dim1 = dim1.value
dim2 = dim2.value
except AttributeError:
pass
if not (dim1 in (None, 1) or dim2 in (None, 1) or dim1 == dim2):
raise ValueError('Two images are not compatible: %s and %s' %
(shape1, shape2))
else:
raise ValueError('The two images do not have a defined shape.')
# Now assign shape tensors.
shape1, shape2 = tf.shape_n([img1, img2])
checks = []
checks.append(
tf.Assert(
tf.greater_equal(tf.size(shape1), 3), [shape1, shape2], summarize=10))
checks.append(
tf.Assert(
tf.reduce_all(tf.equal(shape1[-3:], shape2[-3:])), [shape1, shape2],
summarize=10))
return shape1, shape2, checks
def _SSIMHelper(x, y, reducer, max_val, compensation=1.0):
r"""Helper function to SSIM.
Arguments:
x: first set of images.
    y: second set of images.
reducer: Function that computes 'local' averages from set of images. For
      non-convolutional version, this is usually tf.reduce_mean(x, [1, 2]), and
for convolutional version, this is usually tf.nn.avg_pool or tf.nn.conv2d
with weighted-sum kernel.
max_val: The dynamic range (i.e., the difference between the maximum
possible allowed value and the minimum allowed value).
compensation: Compensation factor. See above.
Returns:
A pair containing the luminance measure and the contrast-structure measure.
"""
c1 = (0.01 * max_val)**2
c2 = (0.03 * max_val)**2
# SSIM luminance measure is
# (2 * mu_x * mu_y + c1) / (mu_x ** 2 + mu_y ** 2 + c1).
mean0 = reducer(x)
mean1 = reducer(y)
num0 = mean0 * mean1 * 2.0
den0 = tf.square(mean0) + tf.square(mean1)
luminance = (num0 + c1) / (den0 + c1)
# SSIM contrast-structure measure is
# (2 * cov_xy + c2) / (cov_xx + cov_yy + c2).
# Note that `reducer` is a weighted sum with weight w_k, \sum_i w_i = 1, then
# cov_xy = \sum_i w_i (x_i - mu_x) (y_i - mu_y)
# = \sum_i w_i x_i y_i - (\sum_i w_i x_i) (\sum_j w_j y_j).
num1 = reducer(x * y) * 2.0
den1 = reducer(tf.square(x) + tf.square(y))
c2 *= compensation
cs = (num1 - num0 + c2) / (den1 - den0 + c2)
# SSIM score is the product of the luminance and contrast-structure measures.
return luminance, cs
def SSIMWithoutFilter(a,
b,
max_val=255.0,
filter_size=(8, 8),
strides=None,
spatial_average=True,
channel_average=True):
"""Computes unfiltered SSIM index between a and b per channel.
Arguments:
a: First set of patches.
b: Second set of patches.
max_val: The dynamic range (i.e., the difference between the maximum
possible allowed value and the minimum allowed value).
filter_size: Determines the moving average filter size to aggregate the SSIM
over. Must be a sequence of length two: [filter_height, filter_width].
strides: The strides of the moving average filter. Must be None or a
sequence of length two: [row_stride, col_stride]. If None, defaults to
`filter_size`.
spatial_average: If True, return the mean value across space. Otherwise,
return the full 2D spatial map.
channel_average: If True, return the mean value across channels. Otherwise,
return SSIM per channel.
Returns:
The SSIM index for each individual element in the batch.
    For color images, SSIM is averaged after being computed in each channel
    separately.
Raises:
ValueError: if a and b don't have the broadcastable shapes, or the ranks of
a and b are not at least 3.
"""
# Enforce rank and shape checks.
shape1, _, checks = VerifyCompatibleImageShapes(a, b)
with tf.control_dependencies(checks):
a = tf.identity(a)
if strides is None:
strides = filter_size
n = float(np.prod(filter_size))
kernel = tf.fill(
dims=list(filter_size) + [shape1[-1], 1],
value=tf.constant(1 / n, dtype=a.dtype))
strides = [1] + list(strides) + [1]
def reducer(x): # pylint: disable=invalid-name
shape = tf.shape(x)
# DepthwiseConv2D takes rank 4 tensors. Flatten leading dimensions.
x = tf.reshape(x, shape=tf.concat([[-1], shape[-3:]], 0))
y = tf.nn.depthwise_conv2d(x, kernel, strides=strides, padding='VALID')
return tf.reshape(y, tf.concat([shape[:-3], tf.shape(y)[1:]], 0))
compensation = (n - 1) / n
luminance, cs = _SSIMHelper(a, b, reducer, max_val, compensation)
ssim = luminance * cs
reduce_axis = [-3, -2] if spatial_average else []
if channel_average:
reduce_axis.append(-1)
if reduce_axis:
ssim = tf.reduce_mean(ssim, axis=reduce_axis)
return ssim
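# A minimal usage sketch for SSIMWithoutFilter (illustrative only; the shapes and
# pixel values below are arbitrary and not part of the original module):
def _ExampleSSIMUsage():
  """Returns a per-image SSIM tensor of shape [batch_size] for two toy batches."""
  img_a = tf.ones([4, 64, 64, 3]) * 128.0
  img_b = tf.ones([4, 64, 64, 3]) * 120.0
  # With spatial_average and channel_average enabled (the defaults), the 8x8
  # moving-average windows are reduced to a single SSIM score per image.
  return SSIMWithoutFilter(img_a, img_b, max_val=255.0)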
def GradientDifferenceLoss(img1,
img2,
dist_func=tf.square,
reduce_func=tf.reduce_sum,
name=None):
"""Returns an op that calculates loss between image gradients.
This function assumes that `img1` and `img2` are image batches,
i.e. [batch_size, row, col, channels].
Arguments:
img1: First image batch.
img2: Second image batch.
dist_func: A TensorFlow op to apply to edge map differences (e.g. tf.square
for L2 or tf.abs for L1).
    reduce_func: A TensorFlow op to reduce edge map distances into a single loss
      per image pair (e.g. tf.reduce_sum for a summed loss or tf.reduce_mean for
      a per-pixel average score).
name: Namespace in which to embed the computation.
Returns:
A tensor with size [batch_size] containing the finite difference edge loss
for each image pair in the batch.
"""
with tf.name_scope(name, 'GDL', [img1, img2]):
_, _, checks = VerifyCompatibleImageShapes(img1, img2)
dy1, dx1 = tf.image.image_gradients(img1)
dy2, dx2 = tf.image.image_gradients(img2)
diff = dist_func(dy1 - dy2) + dist_func(dx1 - dx2)
loss = reduce_func(diff, list(range(-3, 0)))
with tf.control_dependencies(checks):
return tf.identity(loss)
def PSNR(a, b, max_val=255.0, name=None):
"""Returns the Peak Signal-to-Noise Ratio between a and b.
Arguments:
a: first set of images.
b: second set of images.
max_val: the dynamic range of the images (i.e., the difference between the
      maximum and the minimum allowed values).
name: namespace to embed the computation in.
Returns:
The scalar PSNR between a and b. The shape of the returned tensor is
[batch_size, 1].
"""
with tf.name_scope(name, 'PSNR', [a, b]):
psnr = tf.image.psnr(a, b, max_val=max_val, name=name)
_, _, checks = VerifyCompatibleImageShapes(a, b)
with tf.control_dependencies(checks):
return tf.identity(psnr)
def ClippedPSNR(img1,
img2,
min_val=0.0,
max_val=255.0,
clip=True,
quantize=True,
max_psnr=100.0,
name=None):
"""Return average Clipped PSNR between `a` and `b`.
Arguments:
img1: first set of images.
img2: second set of images.
min_val: smallest valid value for a pixel.
max_val: largest valid value for a pixel.
clip: If True, pixel values will be clipped to [`min_value`, `max_value`].
quantize: If True, pixel values will be rounded before calculating PSNR.
    max_psnr: If not None, PSNR will be clipped by this value before averaging.
name: namespace to embed the computation in.
Returns:
PSNR between img1 and img2 or average PSNR if input is a batch.
"""
with tf.name_scope(name, 'clipped_psnr', [img1, img2]):
if quantize:
img1 = tf.round(img1)
img2 = tf.round(img2)
if clip:
img1 = tf.clip_by_value(img1, min_val, max_val)
img2 = tf.clip_by_value(img2, min_val, max_val)
value_range = max_val - min_val
psnr = PSNR(img1, img2, max_val=value_range)
if max_psnr is not None:
psnr = tf.minimum(psnr, max_psnr)
return tf.reduce_mean(psnr)
def SobelEdgeLoss(img1, img2, dist_func=tf.square, reduce_func=tf.reduce_sum):
"""Returns an op that calculates Sobel edge loss between two images.
Arguments:
img1: First image batch.
img2: Second image batch.
dist_func: A TensorFlow op to apply to edge map differences (e.g. tf.square
for L2 or tf.abs for L1).
    reduce_func: A TensorFlow op to reduce edge map distances into a single loss
      per image pair (e.g. tf.reduce_sum for a summed loss or tf.reduce_mean for
      a per-pixel average score).
Returns:
A tensor with size [batch_size] containing the Sobel edge loss for each
image pair in the batch.
"""
_, _, checks = VerifyCompatibleImageShapes(img1, img2)
# Sobel tensor has shape [batch_size, h, w, d, num_kernels].
sobel1 = tf.image.sobel_edges(img1)
sobel2 = tf.image.sobel_edges(img2)
diff = dist_func(sobel1 - sobel2)
# To match GDL, sum across dy and dx regardless of reduce_func.
edge_maps = tf.reduce_sum(diff, axis=-1)
# Reduce over all dimensions except batch_size.
loss = reduce_func(edge_maps, list(range(-3, 0)))
with tf.control_dependencies(checks):
return tf.identity(loss)
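# A short sketch contrasting the losses above (illustrative only; the tensors and
# pixel values are arbitrary and not part of the original module):
def _ExampleImageMetricsUsage():
  """Builds clipped PSNR, gradient-difference and Sobel-edge losses for toy batches."""
  img1 = tf.zeros([2, 32, 32, 3])
  img2 = tf.ones([2, 32, 32, 3]) * 4.0
  psnr = ClippedPSNR(img1, img2)             # scalar, averaged over the batch
  gdl = GradientDifferenceLoss(img1, img2)   # shape [2], finite-difference edges
  sobel = SobelEdgeLoss(img1, img2)          # shape [2], Sobel edges
  return psnr, gdl, sobel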
|
openstack/os-win
|
os_win/tests/functional/test_vhdutils.py
|
# Copyright 2019 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import tempfile
from os_win import constants
from os_win.tests.functional import test_base
from os_win import utilsfactory
class VhdUtilsTestCase(test_base.OsWinBaseFunctionalTestCase):
def setUp(self):
super(VhdUtilsTestCase, self).setUp()
self._vhdutils = utilsfactory.get_vhdutils()
self._diskutils = utilsfactory.get_diskutils()
self._pathutils = utilsfactory.get_pathutils()
def _create_temp_vhd(self, size_mb=32,
vhd_type=constants.VHD_TYPE_DYNAMIC):
f = tempfile.TemporaryFile(suffix='.vhdx', prefix='oswin_vhdtest_')
f.close()
self._vhdutils.create_vhd(f.name, vhd_type,
max_internal_size=size_mb << 20)
self.addCleanup(os.unlink, f.name)
return f.name
def _create_temp_symlink(self, target, target_is_dir):
f = tempfile.TemporaryFile(prefix='oswin_vhdtest_link_')
f.close()
self._pathutils.create_sym_link(f.name, target, target_is_dir)
if target_is_dir:
self.addCleanup(os.rmdir, f.name)
else:
self.addCleanup(os.unlink, f.name)
return f.name
def test_attach_detach(self):
vhd_path = self._create_temp_vhd()
# We'll make sure that we can detect attached vhds, even when the
# paths contain symlinks.
vhd_link = self._create_temp_symlink(vhd_path, target_is_dir=False)
vhd_dir_link = self._create_temp_symlink(os.path.dirname(vhd_path),
target_is_dir=True)
# A second, indirect link.
vhd_link2 = os.path.join(vhd_dir_link,
os.path.basename(vhd_path))
def _check_attached(expect_attached):
# Let's try both approaches and all paths pointing to our image.
paths = [vhd_path, vhd_link, vhd_link2]
for path in paths:
self.assertEqual(
expect_attached,
self._vhdutils.is_virtual_disk_file_attached(path))
self.assertEqual(
expect_attached,
self._diskutils.is_virtual_disk_file_attached(path))
_check_attached(False)
try:
self._vhdutils.attach_virtual_disk(vhd_path)
_check_attached(True)
finally:
self._vhdutils.detach_virtual_disk(vhd_path)
_check_attached(False)
|
mayfield/snowflake-connector-python
|
test/test_converter_more_timestamp.py
|
from datetime import timedelta, datetime
import pytz
from dateutil.parser import parse
from snowflake.connector.converter import (SnowflakeConverter)
def test_fetch_various_timestamps(conn_cnx):
"""
    More coverage of timestamp handling.
    Currently TIMESTAMP_LTZ is not tested.
"""
PST_TZ = "America/Los_Angeles"
epoch_times = [
'1325568896',
'-2208943503',
'0',
'-1'
]
timezones = [
'+07:00',
'+00:00',
'-01:00',
'-09:00'
]
fractions = '123456789'
data_types = ['TIMESTAMP_TZ', 'TIMESTAMP_NTZ']
data = []
for dt in data_types:
for et in epoch_times:
if dt == 'TIMESTAMP_TZ':
for tz in timezones:
tzdiff = (int(tz[1:3]) * 60 + int(tz[4:6])) * (
-1 if tz[0] == '-' else 1)
tzinfo = SnowflakeConverter._generate_tzinfo_from_tzoffset(
tzdiff)
ts = datetime.fromtimestamp(float(et), tz=tzinfo)
data.append({
'scale': 0,
'dt': dt,
'inp': ts.strftime(
'%Y-%m-%d %H:%M:%S{tz}'.format(tz=tz)),
'out': ts
})
for idx in range(len(fractions)):
scale = idx + 1
if idx + 1 != 6: # SNOW-28597
ts0 = datetime.fromtimestamp(float(et), tz=tzinfo)
ts0_str = ts0.strftime(
'%Y-%m-%d %H:%M:%S.{ff}{tz}'.format(
ff=fractions[:idx + 1], tz=tz))
ts1 = parse(ts0_str)
data.append({
'scale': scale,
'dt': dt,
'inp': ts0_str,
'out': ts1
})
elif dt == 'TIMESTAMP_LTZ':
                # WIP: this branch is not currently exercised ('TIMESTAMP_LTZ' is excluded from data_types above)
tzinfo = pytz.timezone(PST_TZ)
ts0 = datetime.fromtimestamp(float(et))
ts0 = pytz.utc.localize(ts0, is_dst=False).astimezone(tzinfo)
ts0_str = ts0.strftime('%Y-%m-%d %H:%M:%S')
ts1 = ts0
data.append({
'scale': 0,
'dt': dt,
'inp': ts0_str,
'out': ts1
})
for idx in range(len(fractions)):
ts0 = datetime.fromtimestamp(float(et))
ts0 = pytz.utc.localize(ts0, is_dst=False).astimezone(
tzinfo)
ts0_str = ts0.strftime(
'%Y-%m-%d %H:%M:%S.{ff}'.format(
ff=fractions[:idx + 1]
))
ts1 = ts0 + timedelta(seconds=float(
'0.{0}'.format(fractions[:idx + 1])))
data.append({
'scale': idx + 1,
'dt': dt,
'inp': ts0_str,
'out': ts1
})
else:
ts0_str = datetime.fromtimestamp(
float(et)).strftime('%Y-%m-%d %H:%M:%S')
ts1 = parse(ts0_str)
data.append({
'scale': 0,
'dt': dt,
'inp': ts0_str,
'out': ts1
})
for idx in range(len(fractions)):
ts0_str = datetime.fromtimestamp(float(et)).strftime(
'%Y-%m-%d %H:%M:%S.{ff}'.format(
ff=fractions[:idx + 1]))
ts1 = parse(ts0_str)
data.append({
'scale': idx + 1,
'dt': dt,
'inp': ts0_str,
'out': ts1
})
sql = "SELECT "
for d in data:
sql += "'{inp}'::{dt}({scale}), ".format(
inp=d['inp'],
dt=d['dt'],
scale=d['scale']
)
sql += "1"
with conn_cnx() as cnx:
cur = cnx.cursor()
cur.execute("""
ALTER SESSION SET TIMEZONE='{tz}';
""".format(tz=PST_TZ))
rec = cur.execute(sql).fetchone()
for idx, d in enumerate(data):
comp, lower, higher = _in_range(d['out'], rec[idx])
assert comp, 'data: {d}: target={target}, lower={lower}, higher={' \
'higher}'.format(
d=d, target=rec[idx], lower=lower, higher=higher)
def _in_range(reference, target):
lower = reference - timedelta(microseconds=1)
higher = reference + timedelta(microseconds=1)
return lower <= target <= higher, lower, higher
|
cloudbase/nova-vix-driver
|
vix/utils.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import multiprocessing
import psutil
import re
import socket
from nova import exception
class VixException(exception.NovaException):
pass
def get_host_memory_info():
mem_info = psutil.phymem_usage()
return (mem_info.total, mem_info.free)
def get_disk_info(path):
disk_info = psutil.disk_usage(path)
return (disk_info.total, disk_info.free)
def get_cpu_count():
return multiprocessing.cpu_count()
def get_free_port():
sock = socket.socket()
try:
sock.bind(('', 0))
return sock.getsockname()[1]
finally:
sock.close()
def remove_lines(file_name, pattern):
lines = []
found = False
with open(file_name, 'r') as f:
for s in f.readlines():
if re.match(pattern, s):
found = True
else:
lines.append(s)
if found:
with open(file_name, 'w') as f:
f.writelines(lines)
return found
def get_text(file_name, pattern):
with open(file_name, 'r') as f:
for s in f.readlines():
m = re.match(pattern, s)
if m:
return m.groups()
def replace_text(file_name, pattern, replacement):
lines = []
found = False
with open(file_name, 'r') as f:
for s in f.readlines():
if re.match(pattern, s):
found = True
new_s = re.sub(pattern, replacement, s)
else:
new_s = s
lines.append(new_s)
if found:
with open(file_name, 'w') as f:
f.writelines(lines)
return found
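# A minimal usage sketch for the text helpers above (the file name, pattern and
# replacement are hypothetical placeholders):
#
#   # Rewrite a 'key = "value"' style line in place:
#   replace_text('example.vmx', r'memsize\s*=.*', 'memsize = "2048"')
#   # Pull the quoted value back out; get_text returns the tuple of regex groups:
#   get_text('example.vmx', r'memsize\s*=\s*"(.*)"')
#   # Drop the line entirely:
#   remove_lines('example.vmx', r'memsize\s*=.*')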
|
Imperat/SSU-Courses
|
ssu-modeling/chapter2/subchapter1/game.py
|
import random
class Game(object):
firstCount = 0
secondCount = 0
playerNumber = 0
def play(self):
currentCount = random.choice(range(1,7))
if (self.playerNumber == 0):
self.firstCount += currentCount
self.playerNumber = 1
if self.firstCount == self.secondCount:
return 1
if self.firstCount >= 30:
return 2
else:
self.secondCount += currentCount
self.playerNumber = 0
if self.secondCount == self.firstCount:
return 3
if self.secondCount >= 30:
return 4
return 0
def playGame():
game = Game()
while(True):
res = game.play()
if res == 2 or res == 4:
return 1
elif res == 1 or res == 3:
return 0
s = 0
for i in range(1000):
s += playGame()
print s/1000.0
|
clach04/pyopenroad
|
pyvosa.py
|
#!/usr/bin/env python
# -*- coding: us-ascii -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
"""OpenROAD app server read-only vosa (nee vasa) experiment
either under Windows or cross platform with Java.
Based on code in OpenROAD 4.1 vasa_apps.asp
Windows/DCOM requires CPython Win32 Extensions to access DCOM, available
from http://sourceforge.net/projects/pywin32/
For Windows later than Windows XP also see
http://technet.microsoft.com/en-us/library/cc738214%28v=ws.10%29.aspx
short summary add remote windows users to "Distributed COM Users"
Java requires Jython.
Running
=======
Either have an app server configured locally that can be used
or set up an OpenROAD client and set an Operating System environment
variable to point to server, e.g.:
Windows
set TEST_ORSERVER=app_server_hostname
Unix
TEST_ORSERVER=app_server_hostname
export TEST_ORSERVER
If TEST_ORSERVER is not set and no command line argument is given, localhost is assumed.
"""
import os
import sys
from pprint import pprint
import orserver
APPSERVER_HOSTNAME = os.environ.get('TEST_ORSERVER') or 'localhost'
def doit(appserver_hostname=APPSERVER_HOSTNAME):
w4gl_image = 'ASA_ns'
connection_mode = None
connection_mode = ''
#connection_mode = 'unauthenticated'
#connection_mode = 'compressed'
#connection_mode = 'unauthenticated-compressed'
# declare OpenROAD RemoteServer and Helper objects
rso = orserver.or_connect(w4gl_image, appserver_hostname, connection_mode=connection_mode)
aso = orserver.get_aso_and_attach_rso(rso)
func_sig = 'b_arr_UCAkaDetail=UCARRAY; b_arr_UCAkaDetail.i_aka_detail_id=INTEGER; b_arr_UCAkaDetail.i_asolib=INTEGER; b_arr_UCAkaDetail.i_servertype=INTEGER; b_arr_UCAkaDetail.v_aka_name=STRING; b_arr_UCAkaDetail.v_cmdflags=STRING; b_arr_UCAkaDetail.v_imagefile=STRING; b_arr_UCAkaDetail.v_serverlocation=STRING; b_UCSPOConfig=USERCLASS; b_UCSPOConfig.i_MaxDispatchers=INTEGER; b_UCSPOConfig.i_MaxTotalSlaves=INTEGER; b_UCSPOConfig.i_PrfMonInterval=INTEGER; b_UCSPOConfig.i_PrfMonLevel=INTEGER; b_UCSPOConfig.i_PurgeInterval=INTEGER; b_UCSPOConfig.i_TraceFileAppend=INTEGER; b_UCSPOConfig.i_TraceInterval=INTEGER; b_UCSPOConfig.i_TraceLevel=INTEGER; b_UCSPOConfig.v_TraceFileName=STRING'
result = orserver.callproc(aso, 'GetAllNameServerData', func_sig=func_sig)
print result
print ''
pprint(result)
rso.disconnect()
def main(argv=None):
if argv is None:
argv = sys.argv
try:
hostname = argv[1]
doit(hostname)
except IndexError:
doit()
return 0
if __name__ == "__main__":
sys.exit(main())
|
jason-weirather/Au-public
|
iron/pythonlib/Bio/Format/GPD.py
|
import uuid, sys, time, re
import Bio.Structure
from Bio.Range import GenomicRange
from subprocess import Popen, PIPE
# This whole format is a subclass of the Transcript subclass
class GPD(Bio.Structure.Transcript):
def __init__(self,gpd_line):
# Only store the line and ID at first.
self._line = gpd_line.rstrip()
self._id = str(uuid.uuid4())
m = re.match('[^\t]+\t[^\t]+\t([^\t]+)\t[^\t]+\t([^\t]+)\t([^\t]+)',gpd_line)
self._range = GenomicRange(m.group(1),int(m.group(2))+1,int(m.group(3)))
self._initialized = False
# Most of GPD has not been set yet. Each method accessing GPD
# will need to check to see if initialize has been run
def _initialize(self): # Wait to initialize to speed up streaming
if self._initialized: return # nothing to do if its done
self._initialized = True
self._entry = _line_to_entry(self._line)
self._exons = []
self._junctions = []
self._payload = []
self._direction = self.value('strand')
self._gene_name = self.value('gene_name')
self._transcript_name = self.value('name')
self._name = None
for i in range(0,self.value('exonCount')):
ex = Bio.Structure.Exon(GenomicRange(self.value('chrom'),self.value('exonStarts')[i]+1,self.value('exonEnds')[i]))
self._exons.append(ex)
if self.value('exonCount') > 1:
for i in range(0,self.value('exonCount')-1):
l = GenomicRange(self.value('chrom'),self.value('exonEnds')[i],self.value('exonEnds')[i])
r = GenomicRange(self.value('chrom'),self.value('exonStarts')[i+1]+1,self.value('exonStarts')[i+1]+1)
junc = Bio.Structure.Junction(l,r)
junc.set_exon_left(self._exons[i])
junc.set_exon_right(self._exons[i+1])
self._junctions.append(junc)
self._sequence = None
@property
def junctions(self):
self._initialize()
return self._junctions
@property
def exons(self):
self._initialize()
return self._exons
  # override; we are guaranteed to have the range since we initialize it on reading a line
def get_range(self):
return self._range
def __str__(self):
return self.get_gpd_line()
#output the original gpd line
# Overrides Structure.Transcript
def get_gpd_line(self):
return self._line
def get_line(self):
return self._line
def value(self,key):
self._initialize()
return self._entry[key]
def _line_to_entry(line):
f = line.rstrip().split("\t")
d = {}
d['gene_name'] = f[0]
d['name'] = f[1]
d['chrom'] = f[2]
d['strand'] = f[3]
d['txStart'] = int(f[4])
d['txEnd'] = int(f[5])
d['cdsStart'] = int(f[6])
d['cdsEnd'] = int(f[7])
d['exonCount'] = int(f[8])
exonstarts = [int(x) for x in f[9].rstrip(",").split(",")]
d['exonStarts'] = exonstarts
exonends = [int(x) for x in f[10].rstrip(",").split(",")]
d['exonEnds'] = exonends
return d
class GPDStream:
def __init__(self,fh):
self.fh = fh
def read_entry(self):
ln = self.fh.readline()
if not ln: return False
gpd = GPD(ln)
return gpd
def __iter__(self):
return self
def next(self):
r = self.read_entry()
if not r:
raise StopIteration
else:
return r
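# A minimal usage sketch for the streaming reader above (the file name is a
# hypothetical placeholder):
#
#   with open('transcripts.gpd') as inf:
#     for tx in GPDStream(inf):
#       # fields are parsed lazily on first access via value()/exons/junctions
#       sys.stderr.write(tx.value('gene_name')+"\t"+str(len(tx.exons))+"\n")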
class SortedOutputFile:
def __init__(self,filename,type='location',tempdir=None):
if type not in ['location','name']:
sys.stderr.write("ERROR: must be type location or name\n")
sys.exit()
self._gz = False
self._fh = open(filename,'w')
self._sh = None
if filename[-3:] == '.gz':
self._gz = True
self._pipes = []
scmd = "sort -k1,1 -k2,2"
if type == 'location':
scmd = "sort -k3,3 -k5,5n -k6,6n -k4,4"
if tempdir: scmd += " -T "+tempdir.rstrip('/')+'/'
if self._gz:
cmd1 = "gzip"
p1 = Popen(cmd1.split(),stdout=self._fh,stdin=PIPE,close_fds=True)
p2 = Popen(scmd.split(),stdout=p1.stdin,stdin=PIPE,close_fds=True)
self._sh = p2.stdin
self._pipes = [p2,p1]
else:
p = Popen(scmd.split(),stdout=self._fh,stdin=PIPE)
self._sh = p.stdin
self._pipes = [p]
def write(self,value):
self._sh.write(value)
def close(self):
#self._sh.flush()
#self._sh.close()
for p in self._pipes:
#p.stdin.flush()
#p.stdin.close()
p.communicate()
#self._pipes[0].stdin.flush()
#self._pipes[0].stdin.close()
#self._pipes[1].stdin.flush()
#self._pipes[1].stdin.close()
self._fh.close()
|
tmills/uda
|
scripts/learn_pivots_gradient_reversal.py
|
#!/usr/bin/python
import sys
from os.path import join,exists,dirname
import random
import numpy as np
from numpy.random import randint, choice
from sklearn.datasets import load_svmlight_file
from torch.autograd import Function, Variable
import torch.nn as nn
import torch.optim as optim
import torch
from torch import FloatTensor
from uda_common import read_feature_groups, read_feature_lookup
# the concepts here come from: https://github.com/fungtion/DANN/blob/master/models/model.py
class ReverseLayerF(Function):
@staticmethod
def forward(ctx, x, alpha):
ctx.alpha = alpha
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
# Totally random:
# output = Variable(torch.randn(grad_output.shape).cuda()) + grad_output * 0 # grad_output.neg() * ctx.alpha
# zero (ignores domain)
# output = 0 * grad_output
# reversed (default)
output = grad_output.neg() * ctx.alpha
# print("Input grad is %s, output grad is %s" % (grad_output.data.cpu().numpy()[:10], output.data.cpu().numpy()[:10]))
return output, None
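# A short sketch of what the reversal layer does (illustrative only): the forward
# pass is the identity, while the backward pass flips the sign of the incoming
# gradient and scales it by alpha, which is what pushes the feature extractor to
# confuse the domain classifier.
#
#   feats = torch.randn(4, 10, requires_grad=True)
#   out = ReverseLayerF.apply(feats, 1.0)
#   out.sum().backward()
#   # feats.grad is now all -1.0 (it would be +1.0 without the reversal)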
# Instead of this, may be able to just regularize by forcing off-diagonal to zero
# didn't work bc/ of memory issues
class StraightThroughLayer(nn.Module):
def __init__(self, input_features):
super(StraightThroughLayer, self).__init__()
self.vector = nn.Parameter( torch.randn(1, input_features) )
#self.add_module('pass-through vector', self.vector)
def forward(self, input_data):
# output = input_data * self.vector
output = torch.mul(input_data, self.vector)
return output
class PivotLearnerModel(nn.Module):
def __init__(self, input_features):
super(PivotLearnerModel, self).__init__()
# Feature takes you from input to the "representation"
# self.feature = nn.Sequential()
# straight through layer just does an element-wise product with a weight vector
num_features = input_features
# num_features = 200
# self.vector = nn.Parameter( torch.randn(1, input_features) )
self.feature = nn.Sequential()
self.feature.add_module('input_layer', StraightThroughLayer(input_features))
# self.feature.add_module('feature_layer', nn.Linear(input_features, num_features))
self.feature.add_module('relu', nn.ReLU(True))
# Standard feed forward layer:
# num_features = 200
# self.feature.add_module('input_layer', nn.Linear(input_features, num_features))
# self.feature.add_module('relu', nn.ReLU(True))
# task_classifier maps from a feature representation to a task prediction
self.task_classifier = nn.Sequential()
self.task_classifier.add_module('task_binary', nn.Linear(num_features, 1))
self.task_classifier.add_module('task_sigmoid', nn.Sigmoid())
# domain classifier maps from a feature representation to a domain prediction
self.domain_classifier = nn.Sequential()
# hidden_nodes = 100
# self.domain_classifier.add_module('domain_hidden', nn.Linear(num_features, hidden_nodes, bias=False))
# self.domain_classifier.add_module('relu', nn.ReLU(True))
self.domain_classifier.add_module('domain_classifier', nn.Linear(num_features, 1, bias=False))
# # self.domain_classifier.add_module('domain_predict', nn.Linear(100, 1))
self.domain_classifier.add_module('domain_sigmoid', nn.Sigmoid())
# self.domain_classifier2 = nn.Sequential()
# self.domain_classifier2.add_module('domain_linear', nn.Linear(num_features, 1, bias=False))
# # # self.domain_classifier.add_module('domain_predict', nn.Linear(100, 1))
# self.domain_classifier2.add_module('domain_sigmoid', nn.Sigmoid())
def forward(self, input_data, alpha):
feature = self.feature(input_data)
# feature = input_data * self.vector
task_prediction = self.task_classifier(feature)
# Get domain prediction
reverse_feature = ReverseLayerF.apply(feature, alpha)
domain_prediction = self.domain_classifier(reverse_feature)
# Only domain predictor 1 is reversed
# domain_prediction2 = self.domain_classifier2(feature)
return task_prediction, domain_prediction #(domain_prediction, domain_prediction2)
def main(args):
if len(args) < 1:
sys.stderr.write("Required arguments: <data file> [backward True|False]\n")
sys.exit(-1)
device = 'cpu'
if torch.cuda.is_available():
device = 'cuda'
if len(args) > 1:
        backward = args[1].lower() in ('true', '1', 'yes')
        print("Direction is backward=%s based on args=%s" % (backward, args[1]))
else:
backward = False
print("Direction is forward by default")
# Read the data:
goal_ind = 2
domain_weight = 1.0
reg_weight = 0.1
lr = 0.01
epochs = 1000
batch_size = 50
sys.stderr.write("Reading source data from %s\n" % (args[0]))
all_X, all_y = load_svmlight_file(args[0])
# y is 1,2 by default, map to 0,1 for sigmoid training
all_y -= 1 # 0/1
# continue to -1/1 for softmargin training:
# all_y *= 2 # 0/2
# all_y -= 1 # -1/1
num_instances, num_feats = all_X.shape
domain_map = read_feature_groups(join(dirname(args[0]), 'reduced-feature-groups.txt'))
domain_inds = domain_map['Domain']
feature_map = read_feature_lookup(join(dirname(args[0]), 'reduced-features-lookup.txt'))
direction = 1 if backward else 0
sys.stderr.write("using domain %s as source, %s as target\n" %
(feature_map[domain_inds[direction]],feature_map[domain_inds[1-direction]]))
source_instance_inds = np.where(all_X[:,domain_inds[direction]].toarray() > 0)[0]
X_source = all_X[source_instance_inds,:]
X_source[:, domain_inds[direction]] = 0
X_source[:, domain_inds[1-direction]] = 0
y_source = all_y[source_instance_inds]
num_source_instances = X_source.shape[0]
num_train_instances = int(X_source.shape[0] * 0.8)
X_task_train = X_source[:num_train_instances,:]
y_task_train = y_source[:num_train_instances]
X_task_valid = X_source[num_train_instances:, :]
y_task_valid = y_source[num_train_instances:]
target_instance_inds = np.where(all_X[:,domain_inds[1-direction]].toarray() > 0)[0]
X_target = all_X[target_instance_inds,:]
X_target[:, domain_inds[direction]] = 0
X_target[:, domain_inds[1-direction]] = 0
num_target_train = int(X_target.shape[0] * 0.8)
X_target_train = X_target[:num_target_train,:]
# y_target_train = y_target[:num_target_train]
X_target_valid = X_target[num_target_train:, :]
# y_target_dev = y_target[num_target_train:]
# y_test = all_y[target_instance_inds]
num_target_instances = X_target_train.shape[0]
model = PivotLearnerModel(num_feats).to(device)
task_loss_fn = nn.BCELoss()
domain_loss_fn = nn.BCELoss()
l1_loss = nn.L1Loss()
#task_loss_fn.cuda()
# domain_loss_fn.cuda()
# l1_loss.cuda()
optimizer = optim.Adam(model.parameters())
# optimizer = optim.SGD(model.parameters(), lr=lr)
# weights = model.vector
try:
weights = model.feature.input_layer.vector
print("Before training:")
print("Min (abs) weight: %f" % (torch.abs(weights).min()))
print("Max (abs) weight: %f" % (torch.abs(weights).max()))
print("Ave weight: %f" % (torch.abs(weights).mean()))
num_zeros = (weights.data==0).sum()
near_zeros = (torch.abs(weights.data)<0.000001).sum()
print("Zeros=%d, near-zeros=%d" % (num_zeros, near_zeros))
except:
pass
# Main training loop
inds = np.arange(num_train_instances)
for epoch in range(epochs):
epoch_loss = 0
model.train()
# Do a training epoch:
for batch in range( 1+ ( num_train_instances // batch_size ) ):
model.zero_grad()
start_ind = batch * batch_size
if start_ind >= num_train_instances:
                # This happens when the number of instances is perfectly divisible by the batch size (with batch_size=1 this is always the case on the final iteration).
break
end_ind = num_train_instances if start_ind + batch_size >= num_train_instances else start_ind+batch_size
this_batch_size = end_ind - start_ind
            ## Gradually increase the strength of the domain-adversarial (gradient reversal) term: the standard DANN schedule ramps alpha from 0 toward 1 over training.
ave_ind = start_ind + this_batch_size // 2
p = float(ave_ind + epoch * num_train_instances*2) / (epochs * num_train_instances*2)
alpha = 2. / (1. + np.exp(-10 * p)) - 1
source_batch = FloatTensor(X_task_train[start_ind:end_ind,:].toarray()).to(device) # read input
source_task_labels = torch.unsqueeze(FloatTensor([y_task_train[start_ind:end_ind],]).to(device), 1)# read task labels
source_domain_labels = torch.zeros(this_batch_size,1, device=device) # set to 0
# Get the task loss and domain loss for the source instance:
task_out, task_domain_out = model.forward(source_batch, alpha)
task_loss = task_loss_fn(task_out, source_task_labels)
domain_loss = domain_loss_fn(task_domain_out, source_domain_labels)
# domain2_loss = domain_loss_fn(task_domain_out[1], source_domain_labels)
try:
weights = model.feature.input_layer.vector
reg_term = l1_loss(weights, torch.zeros_like(weights, device=device))
except:
reg_term = 0
# Randomly select a matching number of target instances:
target_inds = choice(num_target_instances, this_batch_size, replace=False)
target_batch = FloatTensor(X_target_train[target_inds,:].toarray()).to(device) # read input
target_domain_labels = torch.ones(this_batch_size, 1, device=device)
# Get the domain loss for the target instances:
_, target_domain_out = model.forward(target_batch, alpha)
target_domain_loss = domain_loss_fn(target_domain_out, target_domain_labels)
# target_domain2_loss = domain_loss_fn(target_domain_out[1], target_domain_labels)
# Get sum loss update weights:
# domain adaptation:
# total_loss = task_loss + domain_weight * (domain_loss + target_domain_loss)
# Task only:
# total_loss = task_loss
# Domain only:
# total_loss = domain_loss + target_domain_loss
# Debugging with 2 domain classifiers:
# total_loss = domain_loss + domain2_loss + target_domain_loss + target_domain2_loss
# With regularization and DA term:
total_loss = (task_loss +
domain_weight * (domain_loss + target_domain_loss) +
reg_weight * reg_term)
# With regularization only:
# total_loss = task_loss + reg_term
epoch_loss += total_loss
total_loss.backward()
# for param in model.named_parameters():
# print(param[0])
# print(param[1])
optimizer.step()
# At the end of every epoch, examine domain accuracy and how many non-zero parameters we have
# unique_source_inds = np.unique(selected_source_inds)
# all_source_inds = np.arange(num_train_instances)
# eval_source_inds = np.setdiff1d(all_source_inds, unique_source_inds)
# source_eval_X = X_train[eval_source_inds]
# source_eval_y = y_train[eval_source_inds]
source_eval_X = X_task_valid
source_eval_y = y_task_valid
source_task_out, source_domain_out = model.forward( FloatTensor(source_eval_X.toarray()).to(device), alpha=0.)
# If using BCEWithLogitsLoss which would automatically do a sigmoid post-process
# source_task_out = nn.functional.sigmoid(source_task_out)
# source_domain_out = nn.functional.sigmoid(source_domain_out)
# source domain is 0, count up predictions where 1 - prediction = 1
# If using sigmoid outputs (0/1) with BCELoss
source_domain_preds = np.round(source_domain_out.cpu().data.numpy())
# if using Softmargin() loss (-1/1) with -1 as source domain
# source_domain_preds = np.round(((source_domain_out.cpu().data.numpy() * -1) + 1) / 2)
source_predicted_count = np.sum(1 - source_domain_preds)
source_domain_acc = source_predicted_count / len(source_eval_y)
target_eval_X = X_target_valid
_, target_domain_out = model.forward( FloatTensor(target_eval_X.toarray()).to(device), alpha=0.)
# If using BCEWithLogitsLoss (see above)
# target_domain_out = nn.functional.sigmoid(target_domain_out)
# if using sigmoid output (0/1) with BCELoss
target_domain_preds = np.round(target_domain_out.cpu().data.numpy())
# if using Softmargin loss (-1/1) with 1 as target domain:
# target_domain_preds = np.round(((source_domain_out.cpu().data.numpy()) + 1) / 2)
target_predicted_count = np.sum(target_domain_preds)
domain_acc = (source_predicted_count + target_predicted_count) / (source_eval_X.shape[0] + target_eval_X.shape[0])
# if using 0/1 predictions:
source_y_pred = np.round(source_task_out.cpu().data.numpy()[:,0])
# if using -1/1 predictions? (-1 = not negated, 1 = negated)
# source_y_pred = np.round((source_task_out.cpu().data.numpy()[:,0] + 1) / 2)
# source_eval_y += 1
# source_eval_y /= 2
# predictions of 1 are the positive class: tps are where prediction and gold are 1
tps = np.sum(source_y_pred * source_eval_y)
true_preds = source_y_pred.sum()
true_labels = source_eval_y.sum()
recall = tps / true_labels
prec = 1 if tps == 0 else tps / true_preds
f1 = 2 * recall * prec / (recall+prec)
try:
weights = model.feature.input_layer.vector
num_zeros = (weights.data==0).sum()
near_zeros = (torch.abs(weights.data)<0.000001).sum()
print("Min (abs) weight: %f" % (torch.abs(weights).min()))
print("Max (abs) weight: %f" % (torch.abs(weights).max()))
print("Ave weight: %f" % (torch.abs(weights).mean()))
except AttributeError:
num_zeros = near_zeros = -1
print("[Source] Epoch %d: loss=%f\tzeros=%d\tnear_zeros=%d\tnum_insts=%d\tdom_acc=%f\tP=%f\tR=%f\tF=%f" % (epoch, epoch_loss, num_zeros, near_zeros, len(source_eval_y), domain_acc, prec, recall, f1))
weights = model.feature.input_layer.vector
ranked_inds = torch.sort(torch.abs(weights))[1]
pivots = ranked_inds[0,-1000:]
pivot_list = pivots.cpu().data.numpy().tolist()
# pivot_list.sort()
for pivot in pivot_list:
print('%d : %s' % (pivot, feature_map[pivot]))
if __name__ == '__main__':
main(sys.argv[1:])
|
fujii-team/GPinv
|
GPinv/kernels.py
|
import tensorflow as tf
import numpy as np
import GPflow
from GPflow import kernels
from GPflow.tf_wraps import eye
from GPflow._settings import settings
from GPflow.param import ParamList
float_type = settings.dtypes.float_type
np_float_type = np.float32 if float_type is tf.float32 else np.float64
class Kern(object):
"""
An object that adds multi-dimensional functionality to
GPflow.kernels.Kern.
This object is meant to be inherited along with GPflow.kernels.Kern in a
child class.
The main difference of this kernel from GPflow.kernels.Stationary is that
this returns the multidimensional kernel values,
sized [X.shape[0],X2.shape[0],R].
The numpy equivalence is
np.vstack([v_0*core(X,X2), v_1*core(X,X2), ..., v_R*core(X,X2)])
This object provides efficient Cholesky Factorization method, self.Cholesky,
where the cholesky tensor is
np.vstack([sqrt(v_0)*chol, sqrt(v_1)*chol, ..., sqrt(v_R)*chol])
with
chol = Cholesky(K(X) + jitter)
"""
def __init__(self, output_dim):
"""
- input_dim is the dimension of the input to the kernel
- output_dim is the dimension of the output of this kernel
<-- This is an additional feature from GPflow.kernels.Stationary
- active_dims is a list of length input_dim which controls which
columns of X are used.
"""
# variance should be 1d-np.array sized [output_dim]
self.output_dim = output_dim
def K(self, X, X2=None):
core = tf.tile(tf.expand_dims(self._Kcore(X, X2),-1),
[1,1,tf.shape(self.variance)[0]]) # [N,N,R]
var = tf.tile(
tf.expand_dims(tf.expand_dims(self.variance, 0),0), # [1,1,R]
[tf.shape(core)[0],tf.shape(core)[1],1]) # [N,N,R]
return var * core
def Kdiag(self,X):
"""
Return: tf.tensor sized [N,R]
"""
return tf.tile(tf.expand_dims(self.variance,0), [tf.shape(X)[0],1])
def Cholesky(self, X):
core = self._Kcore(X, X2=None) + \
eye(tf.shape(X)[0]) * settings.numerics.jitter_level
chol = tf.cholesky(core)
var = tf.tile(tf.expand_dims(tf.expand_dims(
tf.sqrt(self.variance), 0),0),
[tf.shape(core)[0],tf.shape(core)[1],1])
return var * tf.tile(tf.expand_dims(chol, -1),[1,1,tf.shape(var)[2]])
def _Kcore(self, X, X2=None):
"""
Returns the unit kernel which is common for all the output dimensions.
"""
raise NotImplementedError
class Stationary(Kern, kernels.Stationary):
"""
Multidimensional version of Stationary kernel.
"""
def __init__(self, input_dim,
output_dim,
variance=None, lengthscales=None,
active_dims=None, ARD=False):
"""
- input_dim is the dimension of the input to the kernel
- output_dim is the dimension of the output of this kernel
<-- This is an additional feature from GPflow.kernels.Stationary
- variance : [1d-np.array] is the (initial) value for the variance parameter
with size output_dim.
- lengthscales is the initial value for the lengthscales parameter
defaults to 1.0 (ARD=False) or np.ones(input_dim) (ARD=True).
- active_dims is a list of length input_dim which controls which
columns of X are used.
- ARD specifies whether the kernel has one lengthscale per dimension
(ARD=True) or a single lengthscale (ARD=False).
"""
Kern.__init__(self, output_dim)
# variance should be 1d-np.array sized [output_dim]
if variance is None:
variance = np.ones(output_dim)
assert(variance.shape[0] == self.output_dim)
kernels.Stationary.__init__(self, input_dim, variance, lengthscales,
active_dims, ARD)
class RBF(Stationary):
"""
The radial basis function (RBF) or squared exponential kernel
"""
def _Kcore(self, X, X2=None):
X, X2 = self._slice(X, X2)
return tf.exp(-self.square_dist(X, X2)/2)
class RBF_csym(RBF):
"""
RBF kernel with a cylindrically symmetric assumption.
The kernel value is
K(x,x') = a exp(-(x+x')^2/(2l^2)) + a exp(-(x-x')^2/(2l^2))
"""
def _Kcore(self, X, X2=None):
if X2 is None:
X2 = X
X = tf.abs(X)
X2= tf.abs(X2)
return RBF._Kcore(self, X, X2) + RBF._Kcore(self, X, -X2)
def Kdiag(self, X):
# returns [N] tensor
X, _ = self._slice(X, None)
X = tf.abs(X)
square_dist = tf.reduce_sum(tf.square((X+X)/self.lengthscales), 1)
# shape [N,R]
var = tf.tile(tf.expand_dims(self.variance,0), [tf.shape(X)[0],1])
diag = tf.exp(-0.5*square_dist)
diag = tf.tile(tf.expand_dims(tf.ones_like(diag)+diag, -1),
[1,tf.shape(var)[1]])
return var * diag
class RBF_casym(RBF):
"""
RBF kernel with a cylindrically anti-symmetric assumption.
The kernel value is
K(x,x') = a exp(-(x-x')^2/(2l^2)) - a exp(-(x+x')^2/(2l^2))
"""
def _Kcore(self, X, X2=None):
if X2 is None:
X2 = X
X = tf.abs(X)
X2= tf.abs(X2)
return RBF._Kcore(self, X, X2) - RBF._Kcore(self, X, -X2)
def Kdiag(self, X):
# returns [N] tensor
X, _ = self._slice(X, None)
X = tf.abs(X)
square_dist = tf.reduce_sum(tf.square((X+X)/self.lengthscales), 1)
# shape [N,R]
var = tf.tile(tf.expand_dims(self.variance,0), [tf.shape(X)[0],1])
diag = tf.exp(-0.5*square_dist)
diag = tf.tile(tf.expand_dims(tf.ones_like(diag)-diag, -1),
[1,tf.shape(var)[1]])
return var * diag
class Stack(Kern, kernels.Kern):
"""
Kernel object that returns multiple kinds of kernel values, stacked
vertically.
Input for the initializer is a list of Kernel object, [k_1,k_2,...,k_M].
The function call returns [k_1(X,X2),k_2(X,X2),...,k_M(X,X2)].
The size of the return is n x n2 x (sum_i k_i.output_dim).
"""
def __init__(self, list_of_kerns):
"""
:param list list_of_kerns: A list of Kernel object.
"""
output_dim = 0
for k in list_of_kerns:
# assert k is Kernel object
assert(isinstance(k, Kern))
output_dim += k.output_dim
Kern.__init__(self, output_dim)
kernels.Kern.__init__(self, input_dim=None)
# kernels are stored as ParamList
self.kern_list = ParamList(list_of_kerns)
def K(self, X, X2=None):
return tf.concat(2, [k.K(X,X2) for k in self.kern_list])
def Kdiag(self,X):
return tf.concat(1, [k.Kdiag(X) for k in self.kern_list])
def Cholesky(self, X):
return tf.concat(2, [k.Cholesky(X) for k in self.kern_list])
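# A minimal usage sketch (not part of the original module), assuming the
# GPflow-0.x style API that the classes above are written against: stack two
# multi-output RBF kernels and check the combined output dimension.
if __name__ == '__main__':
    stacked = Stack([RBF(input_dim=2, output_dim=3),
                     RBF_csym(input_dim=2, output_dim=2)])
    print(stacked.output_dim)  # expected: 5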
|
cmusatyalab/gammaray
|
src/gray-inferencer/ext4/journal-parser.py
|
#!/usr/bin/env python
# vim:set nospell:
from LogRecord import LogRecord
from struct import unpack
from sys import argv
from uuid import UUID
SECTOR_SIZE = 512
# 0x0 __be32 h_magic jbd2 magic number, 0xC03B3998.
# 0x4 __be32 h_blocktype Description of what this block contains. One of:
# 1 Descriptor. This block precedes a series of data blocks that were
# written through the journal during a transaction.
# 2 Block commit record. This block signifies the completion of a
# transaction.
# 3 Journal superblock, v1.
# 4 Journal superblock, v2.
# 5 Block revocation records. This speeds up recovery by enabling the
# journal to skip writing blocks that were subsequently rewritten.
# 0x8 __be32 h_sequence The transaction ID that goes with this block.
class JBD2BlockHeader(object):
MAGIC = 0xC03B3998
BLOCKTYPE = { 0x1: 'Descriptor',
0x2: 'Commit',
0x3: 'Superblockv1',
0x4: 'Superblockv2',
0x5: 'Revocation'
}
def __init__(self, data):
self.h_magic, \
self.h_blocktype, \
self.h_sequence = unpack('>III', data)
def __str__(self):
retstr = '{ .h_magic = 0x%x, \n'
retstr += ' .h_blocktype = %s\n'
retstr += ' .h_sequence = 0x%x }'
return retstr % (self.h_magic,
JBD2BlockHeader.BLOCKTYPE[self.h_blocktype],
self.h_sequence)
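# Illustrative doctest-style example (hypothetical bytes, not from the original
# file): a descriptor-block header for transaction 7 round-trips as
#   >>> from struct import pack
#   >>> hdr = JBD2BlockHeader(pack('>III', 0xC03B3998, 0x1, 7))
#   >>> hdr.h_sequence
#   7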
# 0x0 journal_header_t (12 bytes) s_header Common header identifying this as a superblock.
# Static information describing the journal.
# 0xC __be32 s_blocksize Journal device block size.
# 0x10 __be32 s_maxlen Total number of blocks in this journal.
# 0x14 __be32 s_first First block of log information.
# Dynamic information describing the current state of the log.
# 0x18 __be32 s_sequence First commit ID expected in log.
# 0x1C __be32 s_start Block number of the start of log. Contrary to the comments, this field being zero does not imply that the journal is clean!
# 0x20 __be32 s_errno Error value, as set by jbd2_journal_abort().
# The remaining fields are only valid in a version 2 superblock.
# 0x24 __be32 s_feature_compat; Compatible feature set. Any of:
# 0x1 Journal maintains checksums on the data blocks. (JBD2_FEATURE_COMPAT_CHECKSUM)
# 0x28 __be32 s_feature_incompat Incompatible feature set. Any of:
# 0x1 Journal has block revocation records. (JBD2_FEATURE_INCOMPAT_REVOKE)
# 0x2 Journal can deal with 64-bit block numbers. (JBD2_FEATURE_INCOMPAT_64BIT)
# 0x4 Journal commits asynchronously. (JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)
# 0x8 This journal uses v2 of the checksum on-disk format. Each journal metadata block gets its own checksum, and the block tags in the descriptor table contain checksums for each of the data blocks in the journal. (JBD2_FEATURE_INCOMPAT_CSUM_V2)
# 0x10 This journal uses v3 of the checksum on-disk format. This is the same as v2, but the journal block tag size is fixed regardless of the size of block numbers. (JBD2_FEATURE_INCOMPAT_CSUM_V3)
# 0x2C __be32 s_feature_ro_compat Read-only compatible feature set. There aren't any of these currently.
# 0x30 __u8 s_uuid[16] 128-bit uuid for journal. This is compared against the copy in the ext4 super block at mount time.
# 0x40 __be32 s_nr_users Number of file systems sharing this journal.
# 0x44 __be32 s_dynsuper Location of dynamic super block copy. (Not used?)
# 0x48 __be32 s_max_transaction Limit of journal blocks per transaction. (Not used?)
# 0x4C __be32 s_max_trans_data Limit of data blocks per transaction. (Not used?)
# 0x50 __u8 s_checksum_type Checksum algorithm used for the journal. 1 = crc32, 2 = md5, 3 = sha1, 4 = crc32c. 1 or 4 are the most likely choices.
# 0x51 __u8[3] s_padding2
# 0x54 __u32 s_padding[42]
# 0xFC __be32 s_checksum Checksum of the entire superblock, with this field set to zero.
# 0x100 __u8 s_users[16*48] ids of all file systems sharing the log. e2fsprogs/Linux don't allow shared external journals, but I imagine Lustre (or ocfs2?), which use the jbd2 code, might.
class JBD2SuperBlock(object):
JBD2_FEATURE_COMPAT_CHECKSUM = 0x1
JBD2_FEATURE_INCOMPAT_REVOKE = 0x1
JBD2_FEATURE_INCOMPAT_64BIT = 0x2
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT = 0x4
JBD2_FEATURE_INCOMPAT_CSUM_V2 = 0x8
JBD2_FEATURE_INCOMPAT_CSUM_V3 = 0x10
CHECKSUM = { 1: 'crc32',
2: 'md5',
3: 'sha1',
4: 'crc32c'
}
def __init__(self, data):
self.s_blocksize, \
self.s_maxlen, \
self.s_first, \
self.s_sequence, \
self.s_start, \
self.s_errno, \
self.s_feature_compat, \
self.s_feature_incompat, \
self.s_feature_ro_compat, \
self.s_uuid, \
self.s_nr_users, \
self.s_dynsuper, \
self.s_max_transaction, \
self.s_max_trans_data, \
self.s_checksum_type, \
self.s_padding2, \
self.s_padding, \
self.s_checksum, \
self.s_users = \
unpack('>9I16s4IB3s168sI768s', data[:1012])
def __str__(self):
retstr = '-- JBD2 Superblock --\n'
retstr += '\ts_blocksize\t\t=\t%d\n' % (self.s_blocksize)
retstr += '\ts_maxlen\t\t=\t%d (%d MiB)\n' % (self.s_maxlen,
self.s_blocksize *
self.s_maxlen /
1024 ** 2)
retstr += '\ts_feature_compat\t=\t0x%0.8x\n' % (self.s_feature_compat)
if self.s_feature_compat & \
JBD2SuperBlock.JBD2_FEATURE_COMPAT_CHECKSUM:
retstr += '\tJBD2_FEATURE_COMPAT_CHECKSUM is set.\n'
retstr += '\ts_feature_incompat\t=\t0x%0.8x\n' % \
(self.s_feature_incompat)
if self.s_feature_incompat & \
JBD2SuperBlock.JBD2_FEATURE_INCOMPAT_REVOKE:
retstr += '\tJBD2_FEATURE_INCOMPAT_REVOKE is set.\n'
if self.s_feature_incompat & \
JBD2SuperBlock.JBD2_FEATURE_INCOMPAT_64BIT:
retstr += '\tJBD2_FEATURE_INCOMPAT_64BIT is set.\n'
if self.s_feature_incompat & \
JBD2SuperBlock.JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT:
retstr += '\tJBD2_FEATURE_INCOMPAT_ASYNC_COMMIT is set.\n'
if self.s_feature_incompat & \
JBD2SuperBlock.JBD2_FEATURE_INCOMPAT_CSUM_V2:
retstr += '\tJBD2_FEATURE_INCOMPAT_CSUM_V2 is set.\n'
if self.s_feature_incompat & \
JBD2SuperBlock.JBD2_FEATURE_INCOMPAT_CSUM_V3:
retstr += '\tJBD2_FEATURE_INCOMPAT_CSUM_V3 is set.\n'
retstr += '\tself.s_uuid\t\t=\t%s\n' % UUID(bytes=self.s_uuid)
retstr += '\tself.s_nr_users\t\t=\t%d\n' % (self.s_nr_users)
retstr += '\tself.s_max_transaction\t=\t%d\n' % \
(self.s_max_transaction)
retstr += '\tself.s_max_trans_data\t=\t%d\n' % \
(self.s_max_trans_data)
if self.s_checksum_type != 0x0:
retstr += '\tself.s_checksum_type\t=\t%s\n' % \
(JBD2SuperBlock.CHECKSUM[self.s_checksum_type])
for i in xrange(self.s_nr_users):
retstr += '\tself.users[%d]\t\t=\t%s\n' % \
(i, UUID(bytes=self.s_users[16*i:16+16*i]))
retstr += '-- End JBD2 Superblock --\n'
return retstr
class JBD2RevocationBlock(object):
pass
# 0x0 journal_header_s (open coded) Common block header.
# 0xC unsigned char h_chksum_type The type of checksum to use to verify the integrity of the data blocks in the transaction. One of:
# 1 CRC32
# 2 MD5
# 3 SHA1
# 4 CRC32C
# 0xD unsigned char h_chksum_size The number of bytes used by the checksum. Most likely 4.
# 0xE unsigned char h_padding[2]
# 0x10 __be32 h_chksum[JBD2_CHECKSUM_BYTES] 32 bytes of space to store checksums. If JBD2_FEATURE_INCOMPAT_CSUM_V2 or JBD2_FEATURE_INCOMPAT_CSUM_V3 are set, the first __be32 is the checksum of the journal UUID and the entire commit block, with this field zeroed. If JBD2_FEATURE_COMPAT_CHECKSUM is set, the first __be32 is the crc32 of all the blocks already written to the transaction.
# 0x30 __be64 h_commit_sec The time that the transaction was committed, in seconds since the epoch.
# 0x38 __be32 h_commit_nsec Nanoseconds component of the above timestamp.
class JBD2CommitBlock(object):
def __init__(self, data):
self.h_chksum_type, \
self.h_chksum_size, \
self.h_padding, \
self.h_chksum, \
self.h_commit_sec, \
self.h_commit_nsec = \
unpack('>BB2s32sQI', data[:48])
def __str__(self):
retstr = '-- JBD2CommitBlock --\n'
retstr += '\th_chksum_type\t=\t%d\n' % self.h_chksum_type
retstr += '\th_chksum_size\t=\t%d\n' % self.h_chksum_size
retstr += '\th_chksum\t=\t%r\n' % self.h_chksum
retstr += '\th_commit_sec\t=\t%d\n' % self.h_commit_sec
retstr += '\th_commit_nsec\t=\t%d\n' % self.h_commit_nsec
return retstr
# 0x0 journal_header_t (open coded) Common block header.
# 0xC struct journal_block_tag_s open coded array[] Enough tags either to fill up the block or to describe all the data blocks that follow this descriptor block.
# Journal block tags have any of the following formats, depending on which journal feature and block tag flags are set.
# If JBD2_FEATURE_INCOMPAT_CSUM_V3 is set, the journal block tag is defined as struct journal_block_tag3_s, which looks like the following. The size is 16 or 32 bytes.
# Offset Type Name Descriptor
# 0x0 __be32 t_blocknr Lower 32-bits of the location of where the corresponding data block should end up on disk.
# 0x4 __be32 t_flags Flags that go with the descriptor. Any of:
# 0x1 On-disk block is escaped. The first four bytes of the data block just happened to match the jbd2 magic number.
# 0x2 This block has the same UUID as previous, therefore the UUID field is omitted.
# 0x4 The data block was deleted by the transaction. (Not used?)
# 0x8 This is the last tag in this descriptor block.
# 0x8 __be32 t_blocknr_high Upper 32-bits of the location of where the corresponding data block should end up on disk. This is zero if JBD2_FEATURE_INCOMPAT_64BIT is not enabled.
# 0xC __be32 t_checksum Checksum of the journal UUID, the sequence number, and the data block.
# This field appears to be open coded. It always comes at the end of the tag, after t_checksum. This field is not present if the "same UUID" flag is set.
# 0x8 or 0xC char uuid[16] A UUID to go with this tag. This field appears to be copied from the j_uuid field in struct journal_s, but only tune2fs touches that field.
# If JBD2_FEATURE_INCOMPAT_CSUM_V3 is NOT set, the journal block tag is defined as struct journal_block_tag_s, which looks like the following. The size is 8, 12, 24, or 28 bytes:
# Offset Type Name Descriptor
# 0x0 __be32 t_blocknr Lower 32-bits of the location of where the corresponding data block should end up on disk.
# 0x4 __be16 t_checksum Checksum of the journal UUID, the sequence number, and the data block. Note that only the lower 16 bits are stored.
# 0x6 __be16 t_flags Flags that go with the descriptor. Any of:
# 0x1 On-disk block is escaped. The first four bytes of the data block just happened to match the jbd2 magic number.
# 0x2 This block has the same UUID as previous, therefore the UUID field is omitted.
# 0x4 The data block was deleted by the transaction. (Not used?)
# 0x8 This is the last tag in this descriptor block.
# This next field is only present if the super block indicates support for 64-bit block numbers.
# 0x8 __be32 t_blocknr_high Upper 32-bits of the location of where the corresponding data block should end up on disk.
# This field appears to be open coded. It always comes at the end of the tag, after t_flags or t_blocknr_high. This field is not present if the "same UUID" flag is set.
# 0x8 or 0xC char uuid[16] A UUID to go with this tag. This field appears to be copied from the j_uuid field in struct journal_s, but only tune2fs touches that field.
# If JBD2_FEATURE_INCOMPAT_CSUM_V2 or JBD2_FEATURE_INCOMPAT_CSUM_V3 are set, the end of the block is a struct jbd2_journal_block_tail, which looks like this:
# Offset Type Name Descriptor
# 0x0 __be32 t_checksum Checksum of the journal UUID + the descriptor block, with this field set to zero.
class JBD2DescriptorBlock(object):
def __init__(self, data):
self.journal_block_tag_s = \
[tag for tag in JBD2DescriptorBlock.ReadBlockTags(data)]
@staticmethod
def ReadBlockTags(data):
pos = 0
tag = None
while pos < len(data) and (tag is None or not tag.t_flags & 0x8):
tag = JBD2BlockTag(data[pos:])
pos += tag.size
yield tag
def tagGenerator(self):
for tag in self.journal_block_tag_s:
yield tag
def __str__(self):
retstr = '-- JBD2 Descriptor Block --\n'
for tag in self.journal_block_tag_s:
retstr += str(tag)
return retstr
class JBD2BlockTag(object):
def __init__(self, data):
self.t_blocknr, \
self.t_checksum, \
self.t_flags = unpack('>IHH', data[0:8])
self.t_uuid = None
self.size = 8
if not self.t_flags & 0x2:
self.t_uuid = UUID(bytes=unpack('>16s', data[8:24])[0])
self.size = 24
def __str__(self):
retstr = '\t-- JBD2 Tag --\n'
retstr += '\t\tt_blocknr\t=\t%d\n' % self.t_blocknr
retstr += '\t\tt_checksum\t=\t%d\n' % self.t_checksum
retstr += '\t\tt_flags\t\t=\t0x%0.8x\n' % self.t_flags
if self.t_uuid is not None:
retstr += '\t\tt_uuid\t=\t%s\n' % self.t_uuid
return retstr
if __name__ == '__main__':
fname = argv[1]
with open(fname, 'rb') as f:
prevts = 0
current_tags = None
superblock = None
for log in LogRecord.LogRecordGenerator(f.read()):
if log.type == 'data':
print log
hdr = JBD2BlockHeader(log.write[:12])
if hdr.h_magic == JBD2BlockHeader.MAGIC:
print hdr
data = log.write[12:]
if hdr.h_blocktype == 0x1:
print '-- Descriptor Block --'
descriptor = JBD2DescriptorBlock(data)
current_tags = descriptor.tagGenerator()
print descriptor
elif hdr.h_blocktype == 0x2:
print '-- Commit Block --'
commit = JBD2CommitBlock(data)
try:
current_tags.next()
raise Exception('Did not process all tags!')
except StopIteration:
print '\tFinished Processing all tags.'
print commit
elif hdr.h_blocktype == 0x3:
print '-- Superblock v1 --'
elif hdr.h_blocktype == 0x4:
print '-- Superblock v2 --'
superblock = JBD2SuperBlock(data)
print superblock
elif hdr.h_blocktype == 0x5:
print '-- Revocation Block --'
exit()
else:
raise Exception('Unknown JBD2 Block Type.')
else:
tag = current_tags.next()
if tag.t_flags & 0x1: data[0:4] = (0xc0, 0x3b, 0x39, 0x98)
sector = tag.t_blocknr
sector *= superblock.s_blocksize
sector /= SECTOR_SIZE
print 'Data Write to Sector: %d\n' % (sector)
if prevts == 0: prevts = int(log.timestamp)
print int(log.timestamp) - prevts
prevts = int(log.timestamp)
else:
print log # metadata
|
nesterione/core-of-my-services
|
scripts/run_ads_ranking.py
|
# coding=utf-8
import pymongo
import re
from math import exp
from datetime import date, datetime
connection = pymongo.MongoClient("")
db = connection.dataservices
ads = db.ads
# 1 - Whether an address is given
# 2 - Whether contact details are given
# 3 - Presence of photos; more than one is also a plus
# 4 - How long ago the ad was posted
# 5 - Whether a price is given
# The importance of the criteria is chosen intuitively:
# 1 - 0.1
# 2 - 0.3
# 3 - 0.05 per photo - maximum 0.1
# 4 - 0.4 1+1/-(1+exp(-0.2(x-30)))
# 5 - 0.1
# address_weight = (lambda isHasAddress: 0.1 if isHasAddress else 0)
contacts_weight = (lambda isHasContacts: 0.3 if isHasContacts else 0)
def photos_weight(count):
if count > 1:
return 0.1
elif count == 1:
return 0.05
else:
return 0
ads_ago_weight = (lambda days_ago: 0.5 * (1 + 1 / -(1 + exp(-0.2 * (days_ago - 30)))))
cost_weight = (lambda isHasCost: 0.1 if isHasCost else 0)
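# Worked example (illustrative, not part of the original script): an ad with
# two photos, non-empty contacts, a numeric cost, and posted 30 days ago scores
#   photos_weight(2) + contacts_weight(True) + cost_weight(True) + ads_ago_weight(30)
#   = 0.1 + 0.3 + 0.1 + 0.5 * (1 + 1/-(1 + exp(0)))
#   = 0.5 + 0.25 = 0.75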
try:
query = {}
cursor = ads.find(query)
for ad in cursor:
# images rank
imgcnt = 0
if 'images' in ad:
imgcnt = len(ad['images'])
C1_photos = photos_weight(imgcnt)
# address rank
# addressHas = False
# if 'address' in ad:
# if bool(ad['address'].strip()):
# addressHas = True
# C2_address = address_weight(addressHas)
# contacts rank
contactsHas = False
if 'contacts' in ad:
if bool(ad['contacts'].strip()):
contactsHas = True
C3_contacts = contacts_weight(contactsHas)
# cost rank
prog = re.compile("^[\D]+$")
costHas = False
if 'cost' in ad:
cost = ad['cost']
if prog.match(cost) is None:
costHas = True
C4_cost = cost_weight(costHas)
# date rank
C5_date = 0
if 'date' in ad:
d0 = datetime.today()
d1 = ad['date']
delta = d0 - d1
C5_date = ads_ago_weight(delta.days)
C = C1_photos + C3_contacts + C4_cost + C5_date
print("rank: ",C)
# update costValue
#if 'costValue' not in ad:
try:
ads.update({"_id": ad["_id"]}, {"$set": {"rank": C}})
except Exception as e:
print("No update", type(e), e)
except Exception as e:
print ("Unexpected error:", type(e), e)
|
googleapis/python-resource-manager
|
samples/generated_samples/cloudresourcemanager_v3_generated_tag_bindings_create_tag_binding_sync.py
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateTagBinding
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-resourcemanager
# [START cloudresourcemanager_v3_generated_TagBindings_CreateTagBinding_sync]
from google.cloud import resourcemanager_v3
def sample_create_tag_binding():
# Create a client
client = resourcemanager_v3.TagBindingsClient()
# Initialize request argument(s)
request = resourcemanager_v3.CreateTagBindingRequest(
)
# Make the request
operation = client.create_tag_binding(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END cloudresourcemanager_v3_generated_TagBindings_CreateTagBinding_sync]
|
rackspace-titan/stacktester
|
stacktester/tests/test_images.py
|
import json
import os
import re
import unittest2 as unittest
from stacktester import openstack
class ImagesTest(unittest.TestCase):
def setUp(self):
self.os = openstack.Manager()
host = self.os.config.nova.host
port = self.os.config.nova.port
def tearDown(self):
pass
def _assert_image_links(self, image):
image_id = str(image['id'])
mgmt_url = self.os.nova.management_url
bmk_url = re.sub(r'v1.1\/', r'', mgmt_url)
self_link = os.path.join(mgmt_url, 'images', image_id)
bookmark_link = os.path.join(bmk_url, 'images', image_id)
expected_links = [
{
'rel': 'self',
'href': self_link,
},
{
'rel': 'bookmark',
'href': bookmark_link,
},
]
self.assertEqual(image['links'], expected_links)
def _assert_image_entity_basic(self, image):
actual_keys = set(image.keys())
expected_keys = set((
'id',
'name',
'links',
))
self.assertEqual(actual_keys, expected_keys)
self._assert_image_links(image)
def _assert_image_entity_detailed(self, image):
keys = image.keys()
if 'server' in keys:
keys.remove('server')
actual_keys = set(keys)
expected_keys = set((
'id',
'name',
'progress',
'created',
'updated',
'status',
'metadata',
'links',
))
self.assertEqual(actual_keys, expected_keys)
self._assert_image_links(image)
def test_index(self):
"""List all images"""
response, body = self.os.nova.request('GET', '/images')
self.assertEqual(response['status'], '200')
resp_body = json.loads(body)
self.assertEqual(resp_body.keys(), ['images'])
for image in resp_body['images']:
self._assert_image_entity_basic(image)
def test_detail(self):
"""List all images in detail"""
response, body = self.os.nova.request('GET', '/images/detail')
self.assertEqual(response['status'], '200')
resp_body = json.loads(body)
self.assertEqual(resp_body.keys(), ['images'])
for image in resp_body['images']:
self._assert_image_entity_detailed(image)
|
tensorflow/federated
|
tensorflow_federated/python/core/impl/compiler/tensorflow_computation_factory.py
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# This modules disables the Pytype analyzer, see
# https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more
# information.
"""A library of contruction functions for tensorflow computation structures."""
import functools
import types
from typing import Any, Callable, Optional
import tensorflow as tf
from tensorflow_federated.proto.v0 import computation_pb2 as pb
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import serialization_utils
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.core.impl.compiler import local_computation_factory_base
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import type_analysis
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.core.impl.types import type_serialization
from tensorflow_federated.python.core.impl.types import type_transformations
from tensorflow_federated.python.core.impl.utils import tensorflow_utils
# TODO(b/181028772): Move this and similar code to `backends/tensorflow`.
# TODO(b/181131807): Remove independent invocations of the helper methods, and
# replace them with calls to the factory, then inline the bodies of the methods
# within the factory.
ComputationProtoAndType = local_computation_factory_base.ComputationProtoAndType
class TensorFlowComputationFactory(
local_computation_factory_base.LocalComputationFactory):
"""An implementation of local computation factory for TF computations."""
def __init__(self):
pass
def create_constant_from_scalar(
self, value,
type_spec: computation_types.Type) -> ComputationProtoAndType:
return create_constant(value, type_spec)
def create_plus_operator(
self, type_spec: computation_types.Type) -> ComputationProtoAndType:
def plus(a, b):
return structure.map_structure(tf.add, a, b)
return create_binary_operator(plus, type_spec)
def create_multiply_operator(
self, type_spec: computation_types.Type) -> ComputationProtoAndType:
def multiply(a, b):
return structure.map_structure(tf.multiply, a, b)
return create_binary_operator(multiply, type_spec)
def create_scalar_multiply_operator(
self, operand_type: computation_types.Type,
scalar_type: computation_types.TensorType) -> ComputationProtoAndType:
return create_binary_operator_with_upcast(
computation_types.StructType([(None, operand_type),
(None, scalar_type)]), tf.multiply)
def create_indexing_operator(
self,
operand_type: computation_types.TensorType,
index_type: computation_types.TensorType,
) -> ComputationProtoAndType:
return create_indexing_operator(operand_type, index_type)
def _tensorflow_comp(
tensorflow_proto: pb.TensorFlow,
type_signature: computation_types.Type,
) -> ComputationProtoAndType:
serialized_type = type_serialization.serialize_type(type_signature)
comp = pb.Computation(type=serialized_type, tensorflow=tensorflow_proto)
return (comp, type_signature)
def create_constant(
value, type_spec: computation_types.Type) -> ComputationProtoAndType:
"""Returns a tensorflow computation returning a constant `value`.
The returned computation has the type signature `( -> T)`, where `T` is
`type_spec`.
`value` must be a value convertible to a tensor or a structure of values, such
that the dtype and shapes match `type_spec`. `type_spec` must contain only
named tuples and tensor types, but these can be arbitrarily nested.
Args:
value: A value to embed as a constant in the tensorflow graph.
type_spec: A `computation_types.Type` to use as the argument to the
constructed binary operator; must contain only named tuples and tensor
types.
Raises:
TypeError: If the constraints of `type_spec` are violated.
"""
if not type_analysis.is_generic_op_compatible_type(type_spec):
raise TypeError(
'Type spec {} cannot be constructed as a TensorFlow constant in TFF; '
' only nested tuples and tensors are permitted.'.format(type_spec))
inferred_value_type = type_conversions.infer_type(value)
if (inferred_value_type.is_struct() and
not type_spec.is_assignable_from(inferred_value_type)):
raise TypeError(
'Must pass only a tensor or structure of tensor values to '
'`create_tensorflow_constant`; encountered a value {v} with inferred '
'type {t!r}, but needed {s!r}'.format(
v=value, t=inferred_value_type, s=type_spec))
if inferred_value_type.is_struct():
value = structure.from_container(value, recursive=True)
tensor_dtypes_in_type_spec = []
def _pack_dtypes(type_signature):
"""Appends dtype of `type_signature` to nonlocal variable."""
if type_signature.is_tensor():
tensor_dtypes_in_type_spec.append(type_signature.dtype)
return type_signature, False
type_transformations.transform_type_postorder(type_spec, _pack_dtypes)
if (any(x.is_integer for x in tensor_dtypes_in_type_spec) and
(inferred_value_type.is_tensor() and
not inferred_value_type.dtype.is_integer)):
raise TypeError(
'Only integers can be used as scalar values if our desired constant '
'type spec contains any integer tensors; passed scalar {} of dtype {} '
'for type spec {}.'.format(value, inferred_value_type.dtype, type_spec))
result_type = type_spec
def _create_result_tensor(type_spec, value):
"""Packs `value` into `type_spec` recursively."""
if type_spec.is_tensor():
type_spec.shape.assert_is_fully_defined()
result = tf.constant(value, dtype=type_spec.dtype, shape=type_spec.shape)
else:
elements = []
if inferred_value_type.is_struct():
# Copy the leaf values according to the type_spec structure.
for (name, elem_type), value in zip(
structure.iter_elements(type_spec), value):
elements.append((name, _create_result_tensor(elem_type, value)))
else:
# "Broadcast" the value to each level of the type_spec structure.
for _, elem_type in structure.iter_elements(type_spec):
elements.append((None, _create_result_tensor(elem_type, value)))
result = structure.Struct(elements)
return result
with tf.Graph().as_default() as graph:
result = _create_result_tensor(result_type, value)
_, result_binding = tensorflow_utils.capture_result_from_graph(
result, graph)
type_signature = computation_types.FunctionType(None, result_type)
tensorflow = pb.TensorFlow(
graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
parameter=None,
result=result_binding)
return _tensorflow_comp(tensorflow, type_signature)
def create_unary_operator(
operator, operand_type: computation_types.Type) -> ComputationProtoAndType:
"""Returns a tensorflow computation computing a unary operation.
The returned computation has the type signature `(T -> U)`, where `T` is
`operand_type` and `U` is the result of applying the `operator` to a value of
type `T`
Args:
operator: A callable taking one argument representing the operation to
encode. For example: `tf.math.abs`.
operand_type: A `computation_types.Type` to use as the argument to the
constructed unary operator; must contain only named tuples and tensor
types.
Raises:
TypeError: If the constraints of `operand_type` are violated or `operator`
is not callable.
"""
if (operand_type is None or
not type_analysis.is_generic_op_compatible_type(operand_type)):
raise TypeError(
'`operand_type` contains a type other than '
'`computation_types.TensorType` and `computation_types.StructType`; '
f'this is disallowed in the generic operators. Got: {operand_type} ')
py_typecheck.check_callable(operator)
with tf.Graph().as_default() as graph:
operand_value, operand_binding = tensorflow_utils.stamp_parameter_in_graph(
'x', operand_type, graph)
result_value = operator(operand_value)
result_type, result_binding = tensorflow_utils.capture_result_from_graph(
result_value, graph)
type_signature = computation_types.FunctionType(operand_type, result_type)
parameter_binding = operand_binding
tensorflow = pb.TensorFlow(
graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
parameter=parameter_binding,
result=result_binding)
return _tensorflow_comp(tensorflow, type_signature)
def create_binary_operator(
operator,
operand_type: computation_types.Type,
second_operand_type: Optional[computation_types.Type] = None
) -> ComputationProtoAndType:
"""Returns a tensorflow computation computing a binary operation.
The returned computation has the type signature `(<T,T> -> U)`, where `T` is
`operand_type` and `U` is the result of applying the `operator` to a tuple of
type `<T,T>`
Note: If `operand_type` is a `computation_types.StructType`, then
`operator` will be applied pointwise. This places the burden on callers of
this function to construct the correct values to pass into the returned
function. For example, to divide `[2, 2]` by `2`, first `2` must be packed
into the data structure `[x, x]`, before the division operator of the
appropriate type is called.
Args:
operator: A callable taking two arguments representing the operation to
encode. For example: `tf.math.add`, `tf.math.multiply`, and
`tf.math.divide`.
operand_type: A `computation_types.Type` to use as the argument to the
constructed binary operator; must contain only named tuples and tensor
types.
second_operand_type: An optional `computation_types.Type` to use as the
second argument to the constructed binary operator. If `None`, operator
uses `operand_type` for both arguments. Must contain only named tuples and
tensor types.
Raises:
TypeError: If the constraints of `operand_type` are violated or `operator`
is not callable.
"""
if not type_analysis.is_generic_op_compatible_type(operand_type):
raise TypeError(
'`operand_type` contains a type other than '
'`computation_types.TensorType` and `computation_types.StructType`; '
f'this is disallowed in the generic operators. Got: {operand_type} ')
if second_operand_type is not None:
if not type_analysis.is_generic_op_compatible_type(second_operand_type):
raise TypeError(
'`second_operand_type` contains a type other than '
'`computation_types.TensorType` and `computation_types.StructType`; '
'this is disallowed in the generic operators. '
f'Got: {second_operand_type} ')
elif second_operand_type is None:
second_operand_type = operand_type
py_typecheck.check_callable(operator)
with tf.Graph().as_default() as graph:
operand_1_value, operand_1_binding = tensorflow_utils.stamp_parameter_in_graph(
'x', operand_type, graph)
operand_2_value, operand_2_binding = tensorflow_utils.stamp_parameter_in_graph(
'y', second_operand_type, graph)
result_value = operator(operand_1_value, operand_2_value)
result_type, result_binding = tensorflow_utils.capture_result_from_graph(
result_value, graph)
type_signature = computation_types.FunctionType(
computation_types.StructType((operand_type, second_operand_type)),
result_type)
parameter_binding = pb.TensorFlow.Binding(
struct=pb.TensorFlow.StructBinding(
element=[operand_1_binding, operand_2_binding]))
tensorflow = pb.TensorFlow(
graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
parameter=parameter_binding,
result=result_binding)
return _tensorflow_comp(tensorflow, type_signature)
def create_binary_operator_with_upcast(
type_signature: computation_types.StructType,
operator: Callable[[Any, Any], Any]) -> ComputationProtoAndType:
"""Creates TF computation upcasting its argument and applying `operator`.
Args:
type_signature: A `computation_types.StructType` with two elements, both
only containing structs or tensors in their type tree. The first and
second element must match in structure, or the second element may be a
single tensor type that is broadcasted (upcast) to the leaves of the
structure of the first type. This single tensor may be assignable to the
tensor types at the leaves, or in the case that the leaves have fully
defined shapes, this tensor may be `tf.broadcast`-ed to each of those
shapes. In the case of non-assignability and non-fully defined shapes
at the leaves of the structure, this function will raise.
operator: Callable defining the operator.
Returns:
Same as `create_binary_operator()`.
"""
py_typecheck.check_type(type_signature, computation_types.StructType)
py_typecheck.check_callable(operator)
type_analysis.check_tensorflow_compatible_type(type_signature)
if not type_signature.is_struct() or len(type_signature) != 2:
raise TypeError('To apply a binary operator, we must by definition have an '
'argument which is a `StructType` with 2 elements; '
'asked to create a binary operator for type: {t}'.format(
t=type_signature))
if type_analysis.contains(type_signature, lambda t: t.is_sequence()):
raise TypeError(
'Applying binary operators in TensorFlow is only '
'supported on Tensors and StructTypes; you '
'passed {t} which contains a SequenceType.'.format(t=type_signature))
def _pack_into_type(to_pack: tf.Tensor, type_spec: computation_types.Type):
"""Pack Tensor value `to_pack` into the nested structure `type_spec`."""
if type_spec.is_struct():
elem_iter = structure.iter_elements(type_spec)
return structure.Struct([(elem_name, _pack_into_type(to_pack, elem_type))
for elem_name, elem_type in elem_iter])
elif type_spec.is_tensor():
value_tensor_type = type_conversions.type_from_tensors(to_pack)
if type_spec.is_assignable_from(value_tensor_type):
return to_pack
elif not type_spec.shape.is_fully_defined():
raise TypeError('Cannot generate TensorFlow creating binary operator '
'with first type not assignable from second, and '
'first type without fully defined shapes. First '
f'type contains an element of type: {type_spec}.\n'
f'Packing value {to_pack} into this type is '
'undefined.')
return tf.cast(tf.broadcast_to(to_pack, type_spec.shape), type_spec.dtype)
with tf.Graph().as_default() as graph:
first_arg, operand_1_binding = tensorflow_utils.stamp_parameter_in_graph(
'x', type_signature[0], graph)
operand_2_value, operand_2_binding = tensorflow_utils.stamp_parameter_in_graph(
'y', type_signature[1], graph)
if type_signature[0].is_struct() and type_signature[1].is_struct():
# If both the first and second arguments are structs with the same
# structure, simply re-use operand_2_value as-is. `structure.map_structure`
# below will map the binary operator pointwise to the leaves of the
# structure.
if structure.is_same_structure(type_signature[0], type_signature[1]):
second_arg = operand_2_value
else:
raise TypeError('Cannot upcast one structure to a different structure. '
'{x} -> {y}'.format(
x=type_signature[1], y=type_signature[0]))
elif type_signature[0].is_assignable_from(type_signature[1]):
second_arg = operand_2_value
else:
second_arg = _pack_into_type(operand_2_value, type_signature[0])
if type_signature[0].is_tensor():
result_value = operator(first_arg, second_arg)
elif type_signature[0].is_struct():
result_value = structure.map_structure(operator, first_arg, second_arg)
else:
raise TypeError('Encountered unexpected type {t}; can only handle Tensor '
'and StructTypes.'.format(t=type_signature[0]))
result_type, result_binding = tensorflow_utils.capture_result_from_graph(
result_value, graph)
type_signature = computation_types.FunctionType(type_signature, result_type)
parameter_binding = pb.TensorFlow.Binding(
struct=pb.TensorFlow.StructBinding(
element=[operand_1_binding, operand_2_binding]))
tensorflow = pb.TensorFlow(
graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
parameter=parameter_binding,
result=result_binding)
return _tensorflow_comp(tensorflow, type_signature)
def create_indexing_operator(
operand_type: computation_types.TensorType,
index_type: computation_types.TensorType,
) -> ComputationProtoAndType:
"""Returns a tensorflow computation computing an indexing operation."""
operand_type.check_tensor()
index_type.check_tensor()
if index_type.shape.rank != 0:
raise TypeError(f'Expected index type to be a scalar, found {index_type}.')
with tf.Graph().as_default() as graph:
operand_value, operand_binding = tensorflow_utils.stamp_parameter_in_graph(
'indexing_operand', operand_type, graph)
index_value, index_binding = tensorflow_utils.stamp_parameter_in_graph(
'index', index_type, graph)
result_value = tf.gather(operand_value, index_value)
result_type, result_binding = tensorflow_utils.capture_result_from_graph(
result_value, graph)
type_signature = computation_types.FunctionType(
computation_types.StructType((operand_type, index_type)), result_type)
parameter_binding = pb.TensorFlow.Binding(
struct=pb.TensorFlow.StructBinding(
element=[operand_binding, index_binding]))
tensorflow = pb.TensorFlow(
graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
parameter=parameter_binding,
result=result_binding)
return _tensorflow_comp(tensorflow, type_signature)
def create_empty_tuple() -> ComputationProtoAndType:
"""Returns a tensorflow computation returning an empty tuple.
The returned computation has the type signature `( -> <>)`.
"""
return create_computation_for_py_fn(lambda: structure.Struct([]), None)
def create_identity(
type_signature: computation_types.Type) -> ComputationProtoAndType:
"""Returns a tensorflow computation representing an identity function.
The returned computation has the type signature `(T -> T)`, where `T` is
`type_signature`. NOTE: if `T` contains `computation_types.StructType`s
without an associated container type, they will be given the container type
`tuple` by this function.
Args:
type_signature: A `computation_types.Type` to use as the parameter type and
result type of the identity function.
Raises:
TypeError: If `type_signature` contains any types which cannot appear in
TensorFlow bindings.
"""
type_analysis.check_tensorflow_compatible_type(type_signature)
parameter_type = type_signature
if parameter_type is None:
raise TypeError('TensorFlow identity cannot be created for NoneType.')
# TF relies on feeds not-identical to fetches in certain circumstances.
if type_signature.is_tensor() or type_signature.is_sequence():
identity_fn = tf.identity
elif type_signature.is_struct():
identity_fn = functools.partial(structure.map_structure, tf.identity)
else:
raise NotImplementedError(
f'TensorFlow identity cannot be created for type {type_signature}')
return create_computation_for_py_fn(identity_fn, parameter_type)
def create_replicate_input(type_signature: computation_types.Type,
count: int) -> ComputationProtoAndType:
"""Returns a tensorflow computation returning `count` copies of its argument.
The returned computation has the type signature `(T -> <T, T, T, ...>)`, where
`T` is `type_signature` and the length of the result is `count`.
Args:
type_signature: A `computation_types.Type` to replicate.
count: An integer, the number of times the input is replicated.
Raises:
TypeError: If `type_signature` contains any types which cannot appear in
TensorFlow bindings or if `count` is not an integer.
"""
type_analysis.check_tensorflow_compatible_type(type_signature)
py_typecheck.check_type(count, int)
parameter_type = type_signature
identity_comp, _ = create_identity(parameter_type)
# This manual proto manipulation is significantly faster than using TFF's
# GraphDef serialization for large `count` arguments.
tensorflow_comp = identity_comp.tensorflow
single_result_binding = tensorflow_comp.result
if tensorflow_comp.parameter:
new_tf_pb = pb.TensorFlow(
graph_def=tensorflow_comp.graph_def,
parameter=tensorflow_comp.parameter,
result=pb.TensorFlow.Binding(
struct=pb.TensorFlow.StructBinding(
element=(single_result_binding for _ in range(count)))))
else:
new_tf_pb = pb.TensorFlow(
graph_def=tensorflow_comp.graph_def,
result=pb.TensorFlow.Binding(
struct=pb.TensorFlow.StructBinding(
element=(single_result_binding for _ in range(count)))))
fn_type = computation_types.FunctionType(
parameter_type,
computation_types.StructType([(None, parameter_type) for _ in range(count)
]))
return _tensorflow_comp(new_tf_pb, fn_type)
def create_computation_for_py_fn(
fn: types.FunctionType, parameter_type: Optional[computation_types.Type]
) -> ComputationProtoAndType:
"""Returns a tensorflow computation returning the result of `fn`.
The returned computation has the type signature `(T -> U)`, where `T` is
`parameter_type` and `U` is the type returned by `fn`.
Args:
fn: A Python function.
parameter_type: A `computation_types.Type` or `None`.
"""
if parameter_type is not None:
py_typecheck.check_type(parameter_type, computation_types.Type)
with tf.Graph().as_default() as graph:
if parameter_type is not None:
parameter_value, parameter_binding = tensorflow_utils.stamp_parameter_in_graph(
'x', parameter_type, graph)
result = fn(parameter_value)
else:
parameter_binding = None
result = fn()
result_type, result_binding = tensorflow_utils.capture_result_from_graph(
result, graph)
type_signature = computation_types.FunctionType(parameter_type, result_type)
tensorflow = pb.TensorFlow(
graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
parameter=parameter_binding,
result=result_binding)
return _tensorflow_comp(tensorflow, type_signature)
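# A minimal usage sketch (not part of the library module above), assuming
# `computation_types.TensorType(tf.int32)` constructs a scalar int32 type:
# build a constant computation and a pointwise-add computation and print
# their inferred type signatures.
if __name__ == '__main__':
  int32_type = computation_types.TensorType(tf.int32)
  _, constant_type = create_constant(10, int32_type)
  print(constant_type)  # roughly: ( -> int32)
  _, add_type = create_binary_operator(tf.add, int32_type)
  print(add_type)  # roughly: (<int32,int32> -> int32)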
|
jdwittenauer/ionyx
|
tests/experiment_test.py
|
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
from xgboost import XGBRegressor
from keras.wrappers.scikit_learn import KerasRegressor
from ionyx import Experiment
from ionyx.contrib.keras_builder import KerasBuilder
from ionyx.datasets import DataSetLoader
print('Beginning experiment test...')
data, _, _ = DataSetLoader.load_forest_cover()
X_cols = data.columns[1:].tolist()
y_col = data.columns[0]
logistic = LogisticRegression()
cv = KFold()
experiment = Experiment(package='sklearn', model=logistic, scoring_metric='accuracy',
verbose=True, data=data, X_columns=X_cols, y_column=y_col, cv=cv)
experiment.train_model()
experiment.cross_validate()
experiment.learning_curve()
param_grid = [
{
'alpha': [0.01, 0.1, 1.0]
}
]
experiment.param_search(param_grid, save_results_path='/home/john/temp/search.csv')
print(experiment.best_model_)
experiment.save_model('/home/john/temp/model.pkl')
experiment.load_model('/home/john/temp/model.pkl')
print(experiment.model)
_, X, y = DataSetLoader.load_property_inspection()
xgb = XGBRegressor()
cv = KFold()
experiment = Experiment(package='xgboost', model=xgb, scoring_metric='neg_mean_squared_error',
eval_metric='rmse', verbose=True)
experiment.train_model(X, y, validate=True, early_stopping=True, early_stopping_rounds=5,
plot_eval_history=True)
experiment.cross_validate(X, y, cv)
experiment.save_model('/home/john/temp/model.pkl')
experiment.load_model('/home/john/temp/model.pkl')
print(experiment.model)
_, X, y = DataSetLoader.load_property_inspection()
nn = KerasRegressor(build_fn=KerasBuilder.build_dense_model, input_size=X.shape[1], output_size=1,
loss='mean_squared_error', metrics=['mse'], batch_size=32, epochs=5)
cv = KFold()
experiment = Experiment(package='keras', model=nn, scoring_metric='neg_mean_squared_error',
verbose=True)
experiment.train_model(X, y, validate=True, early_stopping=True, early_stopping_rounds=2,
plot_eval_history=True)
experiment.cross_validate(X, y, cv)
experiment.save_model('/home/john/temp/model.h5')
experiment.load_model('/home/john/temp/model.h5')
print(experiment.model)
print('Done.')
|
luzheqi1987/nova-annotation
|
nova/tests/unit/virt/xenapi/test_driver.py
|
# Copyright (c) 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import mock
from oslo.utils import units
from nova.compute import arch
from nova.tests.unit.virt.xenapi import stubs
from nova.virt import driver
from nova.virt import fake
from nova.virt import xenapi
from nova.virt.xenapi import driver as xenapi_driver
class XenAPIDriverTestCase(stubs.XenAPITestBaseNoDB):
"""Unit tests for Driver operations."""
def _get_driver(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.flags(connection_url='test_url',
connection_password='test_pass', group='xenserver')
return xenapi.XenAPIDriver(fake.FakeVirtAPI(), False)
def host_stats(self, refresh=True):
return {'host_memory_total': 3 * units.Mi,
'host_memory_free_computed': 2 * units.Mi,
'disk_total': 5 * units.Gi,
'disk_used': 2 * units.Gi,
'disk_allocated': 4 * units.Gi,
'host_hostname': 'somename',
'supported_instances': arch.X86_64,
'host_cpu_info': {'cpu_count': 50},
'vcpus_used': 10,
'pci_passthrough_devices': ''}
def test_available_resource(self):
driver = self._get_driver()
driver._session.product_version = (6, 8, 2)
self.stubs.Set(driver.host_state, 'get_host_stats', self.host_stats)
resources = driver.get_available_resource(None)
self.assertEqual(6008002, resources['hypervisor_version'])
self.assertEqual(50, resources['vcpus'])
self.assertEqual(3, resources['memory_mb'])
self.assertEqual(5, resources['local_gb'])
self.assertEqual(10, resources['vcpus_used'])
self.assertEqual(3 - 2, resources['memory_mb_used'])
self.assertEqual(2, resources['local_gb_used'])
self.assertEqual('xen', resources['hypervisor_type'])
self.assertEqual('somename', resources['hypervisor_hostname'])
self.assertEqual(1, resources['disk_available_least'])
def test_overhead(self):
driver = self._get_driver()
instance = {'memory_mb': 30720, 'vcpus': 4}
# expected memory overhead per:
# https://wiki.openstack.org/wiki/XenServer/Overhead
expected = ((instance['memory_mb'] * xenapi_driver.OVERHEAD_PER_MB) +
(instance['vcpus'] * xenapi_driver.OVERHEAD_PER_VCPU) +
xenapi_driver.OVERHEAD_BASE)
expected = math.ceil(expected)
overhead = driver.estimate_instance_overhead(instance)
self.assertEqual(expected, overhead['memory_mb'])
def test_set_bootable(self):
driver = self._get_driver()
self.mox.StubOutWithMock(driver._vmops, 'set_bootable')
driver._vmops.set_bootable('inst', True)
self.mox.ReplayAll()
driver.set_bootable('inst', True)
def test_post_interrupted_snapshot_cleanup(self):
driver = self._get_driver()
fake_vmops_cleanup = mock.Mock()
driver._vmops.post_interrupted_snapshot_cleanup = fake_vmops_cleanup
driver.post_interrupted_snapshot_cleanup("context", "instance")
fake_vmops_cleanup.assert_called_once_with("context", "instance")
def test_public_api_signatures(self):
inst = self._get_driver()
self.assertPublicAPISignatures(driver.ComputeDriver(None), inst)
|
google/myelin-acorn-electron-hardware
|
cpu_socket_minispartan_daughterboard/fpga-without-cpu/rom_from_vhdl.py
|
from __future__ import print_function
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import time
def rom_from_vhdl(data):
# extract binary data from a bunch of VHDL lines
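# Each relevant line is expected to look like (hypothetical example):
#     when x"3FF0" => Di <= x"EA";
# i.e. a 16-bit hex address mapped to an 8-bit hex data value, matching
# the regular expression below.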
bytes = ['\xff' for _ in range(16384)]
for line in data.split("\n"):
m = re.search('when x"(....)" => Di <= x"(..)"', line)
if not m: continue
addr, value = m.groups()
bytes[int(addr, 16)] = chr(int(value, 16))
return ''.join(bytes)
if __name__ == '__main__':
infile, outfile = sys.argv[1:]
print("extracting bytes from %s and saving to %s" % (infile, outfile))
bytes = rom_from_vhdl(open(infile).read())
print("writing %d bytes" % len(bytes))
open(outfile, 'w').write(bytes)
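# Illustrative self-check (not part of the original script; safe to remove).
# The regex in rom_from_vhdl() matches VHDL case arms of the form
#     when x"1F00" => Di <= x"EA"
# placing 0xEA at address 0x1F00, while every unmatched address keeps the 0xFF
# erased-flash default, so the result is always a full 16384-byte image.
def _self_check():
    sample = 'when x"0000" => Di <= x"EA"\nwhen x"0001" => Di <= x"00"\n'
    rom = rom_from_vhdl(sample)
    assert len(rom) == 16384
    assert rom[0] == '\xea' and rom[1] == '\x00' and rom[2] == '\xff'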
|
ratio/google-app-engine-django
|
appengine_django/db/base.py
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module looks after initialising the appengine api stubs."""
import logging
import os
from appengine_django import appid
from appengine_django import have_appserver
from appengine_django.db.creation import DatabaseCreation
from django.db.backends import BaseDatabaseWrapper
from django.db.backends import BaseDatabaseFeatures
from django.db.backends import BaseDatabaseOperations
def get_datastore_paths():
"""Returns a tuple with the path to the datastore and history file.
The datastore is stored in the same location as dev_appserver uses by
default, but the name is altered to be unique to this project so multiple
Django projects can be developed on the same machine in parallel.
Returns:
(datastore_path, history_path)
"""
from google.appengine.tools import dev_appserver_main
datastore_path = dev_appserver_main.DEFAULT_ARGS['datastore_path']
history_path = dev_appserver_main.DEFAULT_ARGS['history_path']
datastore_path = datastore_path.replace("dev_appserver", "django_%s" % appid)
history_path = history_path.replace("dev_appserver", "django_%s" % appid)
return datastore_path, history_path
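# Illustrative example (the concrete path is hypothetical; the real default
# comes from dev_appserver_main.DEFAULT_ARGS): for an app id of "guestbook", a
# default datastore path such as /tmp/dev_appserver.datastore would be rewritten
# to /tmp/django_guestbook.datastore by the substitution above.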
def get_test_datastore_paths(inmemory=True):
"""Returns a tuple with the path to the test datastore and history file.
If inmemory is true, (None, None) is returned to request an in-memory
  datastore. If inmemory is false the paths returned will be similar to those
  returned by get_datastore_paths but with different names.
Returns:
(datastore_path, history_path)
"""
if inmemory:
return None, None
datastore_path, history_path = get_datastore_paths()
datastore_path = datastore_path.replace("datastore", "testdatastore")
history_path = history_path.replace("datastore", "testdatastore")
return datastore_path, history_path
def destroy_datastore(datastore_path, history_path):
"""Destroys the appengine datastore at the specified paths."""
for path in [datastore_path, history_path]:
if not path: continue
try:
os.remove(path)
except OSError, e:
if e.errno != 2:
logging.error("Failed to clear datastore: %s" % e)
class DatabaseError(Exception):
"""Stub class for database errors. Required by Django"""
pass
class IntegrityError(Exception):
"""Stub class for database integrity errors. Required by Django"""
pass
class DatabaseFeatures(BaseDatabaseFeatures):
"""Stub class to provide the feaures member expected by Django"""
pass
class DatabaseOperations(BaseDatabaseOperations):
"""Stub class to provide the options member expected by Django"""
pass
class DatabaseWrapper(BaseDatabaseWrapper):
"""App Engine database definition for Django.
This "database" backend does not support any of the standard backend
  operations. The only task that it performs is to set up the api stubs required
by the appengine libraries if they have not already been initialised by an
appserver.
"""
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations()
self.creation = DatabaseCreation(self)
self.use_test_datastore = kwargs.get("use_test_datastore", False)
self.test_datastore_inmemory = kwargs.get("test_datastore_inmemory", True)
if have_appserver:
return
self._setup_stubs()
def _get_paths(self):
if self.use_test_datastore:
return get_test_datastore_paths(self.test_datastore_inmemory)
else:
return get_datastore_paths()
def _setup_stubs(self):
# If this code is being run without an appserver (eg. via a django
# commandline flag) then setup a default stub environment.
from google.appengine.tools import dev_appserver_main
args = dev_appserver_main.DEFAULT_ARGS.copy()
args['datastore_path'], args['history_path'] = self._get_paths()
from google.appengine.tools import dev_appserver
dev_appserver.SetupStubs(appid, **args)
if self.use_test_datastore:
logging.debug("Configured API stubs for the test datastore")
else:
logging.debug("Configured API stubs for the development datastore")
def flush(self):
"""Helper function to remove the current datastore and re-open the stubs"""
destroy_datastore(*self._get_paths())
self._setup_stubs()
def close(self):
pass
def _commit(self):
pass
def cursor(self, *args):
pass
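# Minimal sketch (illustrative only, not part of the original module) of what
# flush() boils down to: destroy_datastore() simply unlinks the given files and
# ignores "file not found" (errno 2), so it is safe to call repeatedly.
def _destroy_datastore_example():
  import tempfile
  fd, fake_datastore = tempfile.mkstemp()
  os.close(fd)
  fd, fake_history = tempfile.mkstemp()
  os.close(fd)
  destroy_datastore(fake_datastore, fake_history)  # removes both files
  destroy_datastore(fake_datastore, fake_history)  # no-op the second time around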
|
danclegg/python-CAS-RESTful-client
|
ST.py
|
#!/usr/bin/env python3
#####
#
# Title: ST.py
# Author: Dan Clegg
# Copyright: 2016, Dan Clegg
# LICENSE: Apache 2.0
#
#####
import requests
import string
import urllib3.contrib.pyopenssl # Necessary to get around Python 3 ssl errors when calling an https endpoint
from parse import *
from lxml import etree
def POST(url,body):
response = requests.post('%s' % url,data=body)
data = response.text
return data
class ST:
service = None
tgt = None
value = None
casServiceUrl=None
def __init__(self,service,tgt,casServiceUrl):
self.casServiceUrl = casServiceUrl
self.service = service
self.tgt = tgt
self.value = POST('%s/tickets/%s' % (self.casServiceUrl,self.tgt),'service=%s' % self.service)
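# Example usage (illustrative; the CAS URL, service URL and TGT value below are
# hypothetical). The constructor POSTs "service=<service>" to
# <casServiceUrl>/tickets/<tgt> and stores the CAS response (the service ticket)
# in .value:
#
#   cas_url = 'https://cas.example.edu/cas/v1'
#   tgt = 'TGT-1-abc123'           # ticket-granting ticket obtained earlier
#   st = ST('https://app.example.edu/', tgt, cas_url)
#   print(st.value)                # e.g. 'ST-1-xyz789'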
|
matrix-org/synapse
|
synapse/handlers/room_member.py
|
# Copyright 2016-2020 The Matrix.org Foundation C.I.C.
# Copyright 2020 Sorunome
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import logging
import random
from http import HTTPStatus
from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple
from synapse import types
from synapse.api.constants import (
AccountDataTypes,
EventContentFields,
EventTypes,
GuestAccess,
Membership,
)
from synapse.api.errors import (
AuthError,
Codes,
LimitExceededError,
ShadowBanError,
SynapseError,
)
from synapse.api.ratelimiting import Ratelimiter
from synapse.event_auth import get_named_level, get_power_level_event
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN
from synapse.types import (
JsonDict,
Requester,
RoomAlias,
RoomID,
StateMap,
UserID,
create_requester,
get_domain_from_id,
)
from synapse.util.async_helpers import Linearizer
from synapse.util.distributor import user_left_room
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class RoomMemberHandler(metaclass=abc.ABCMeta):
# TODO(paul): This handler currently contains a messy conflation of
# low-level API that works on UserID objects and so on, and REST-level
# API that takes ID strings and returns pagination chunks. These concerns
# ought to be separated out a lot better.
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.store = hs.get_datastores().main
self.auth = hs.get_auth()
self.state_handler = hs.get_state_handler()
self.config = hs.config
self._server_name = hs.hostname
self.federation_handler = hs.get_federation_handler()
self.directory_handler = hs.get_directory_handler()
self.identity_handler = hs.get_identity_handler()
self.registration_handler = hs.get_registration_handler()
self.profile_handler = hs.get_profile_handler()
self.event_creation_handler = hs.get_event_creation_handler()
self.account_data_handler = hs.get_account_data_handler()
self.event_auth_handler = hs.get_event_auth_handler()
self.member_linearizer: Linearizer = Linearizer(name="member")
self.member_as_limiter = Linearizer(max_count=10, name="member_as_limiter")
self.clock = hs.get_clock()
self.spam_checker = hs.get_spam_checker()
self.third_party_event_rules = hs.get_third_party_event_rules()
self._server_notices_mxid = self.config.servernotices.server_notices_mxid
self._enable_lookup = hs.config.registration.enable_3pid_lookup
self.allow_per_room_profiles = self.config.server.allow_per_room_profiles
self._join_rate_limiter_local = Ratelimiter(
store=self.store,
clock=self.clock,
rate_hz=hs.config.ratelimiting.rc_joins_local.per_second,
burst_count=hs.config.ratelimiting.rc_joins_local.burst_count,
)
self._join_rate_limiter_remote = Ratelimiter(
store=self.store,
clock=self.clock,
rate_hz=hs.config.ratelimiting.rc_joins_remote.per_second,
burst_count=hs.config.ratelimiting.rc_joins_remote.burst_count,
)
self._invites_per_room_limiter = Ratelimiter(
store=self.store,
clock=self.clock,
rate_hz=hs.config.ratelimiting.rc_invites_per_room.per_second,
burst_count=hs.config.ratelimiting.rc_invites_per_room.burst_count,
)
self._invites_per_user_limiter = Ratelimiter(
store=self.store,
clock=self.clock,
rate_hz=hs.config.ratelimiting.rc_invites_per_user.per_second,
burst_count=hs.config.ratelimiting.rc_invites_per_user.burst_count,
)
self._third_party_invite_limiter = Ratelimiter(
store=self.store,
clock=self.clock,
rate_hz=hs.config.ratelimiting.rc_third_party_invite.per_second,
burst_count=hs.config.ratelimiting.rc_third_party_invite.burst_count,
)
self.request_ratelimiter = hs.get_request_ratelimiter()
@abc.abstractmethod
async def _remote_join(
self,
requester: Requester,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
content: dict,
) -> Tuple[str, int]:
"""Try and join a room that this server is not in
Args:
requester
            remote_room_hosts: List of servers through which we can attempt the join.
room_id: Room that we are trying to join
user: User who is trying to join
content: A dict that should be used as the content of the join event.
"""
raise NotImplementedError()
@abc.abstractmethod
async def remote_knock(
self,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
content: dict,
) -> Tuple[str, int]:
"""Try and knock on a room that this server is not in
Args:
            remote_room_hosts: List of servers through which we can attempt the knock.
room_id: Room that we are trying to knock on.
user: User who is trying to knock.
content: A dict that should be used as the content of the knock event.
"""
raise NotImplementedError()
@abc.abstractmethod
async def remote_reject_invite(
self,
invite_event_id: str,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""
Rejects an out-of-band invite we have received from a remote server
Args:
invite_event_id: ID of the invite to be rejected
txn_id: optional transaction ID supplied by the client
requester: user making the rejection request, according to the access token
content: additional content to include in the rejection event.
Normally an empty dict.
Returns:
event id, stream_id of the leave event
"""
raise NotImplementedError()
@abc.abstractmethod
async def remote_rescind_knock(
self,
knock_event_id: str,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""Rescind a local knock made on a remote room.
Args:
knock_event_id: The ID of the knock event to rescind.
txn_id: An optional transaction ID supplied by the client.
requester: The user making the request, according to the access token.
content: The content of the generated leave event.
Returns:
A tuple containing (event_id, stream_id of the leave event).
"""
raise NotImplementedError()
@abc.abstractmethod
async def _user_left_room(self, target: UserID, room_id: str) -> None:
"""Notifies distributor on master process that the user has left the
room.
Args:
target
room_id
"""
raise NotImplementedError()
@abc.abstractmethod
async def forget(self, user: UserID, room_id: str) -> None:
raise NotImplementedError()
async def ratelimit_multiple_invites(
self,
requester: Optional[Requester],
room_id: Optional[str],
n_invites: int,
update: bool = True,
) -> None:
"""Ratelimit more than one invite sent by the given requester in the given room.
Args:
requester: The requester sending the invites.
room_id: The room the invites are being sent in.
n_invites: The amount of invites to ratelimit for.
update: Whether to update the ratelimiter's cache.
Raises:
LimitExceededError: The requester can't send that many invites in the room.
"""
await self._invites_per_room_limiter.ratelimit(
requester,
room_id,
update=update,
n_actions=n_invites,
)
async def ratelimit_invite(
self,
requester: Optional[Requester],
room_id: Optional[str],
invitee_user_id: str,
) -> None:
"""Ratelimit invites by room and by target user.
If room ID is missing then we just rate limit by target user.
"""
if room_id:
await self._invites_per_room_limiter.ratelimit(requester, room_id)
await self._invites_per_user_limiter.ratelimit(requester, invitee_user_id)
async def _local_membership_update(
self,
requester: Requester,
target: UserID,
room_id: str,
membership: str,
allow_no_prev_events: bool = False,
prev_event_ids: Optional[List[str]] = None,
auth_event_ids: Optional[List[str]] = None,
txn_id: Optional[str] = None,
ratelimit: bool = True,
content: Optional[dict] = None,
require_consent: bool = True,
outlier: bool = False,
historical: bool = False,
) -> Tuple[str, int]:
"""
Internal membership update function to get an existing event or create
and persist a new event for the new membership change.
Args:
requester:
target:
room_id:
membership:
            allow_no_prev_events: Whether to allow this event to be created with an empty
list of prev_events. Normally this is prohibited just because most
events should have a prev_event and we should only use this in special
cases like MSC2716.
prev_event_ids: The event IDs to use as the prev events
auth_event_ids:
The event ids to use as the auth_events for the new event.
Should normally be left as None, which will cause them to be calculated
based on the room state at the prev_events.
txn_id:
ratelimit:
content:
require_consent:
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
historical: Indicates whether the message is being inserted
back in time around some existing events. This is used to skip
a few checks and mark the event as backfilled.
Returns:
Tuple of event ID and stream ordering position
"""
user_id = target.to_string()
if content is None:
content = {}
content["membership"] = membership
if requester.is_guest:
content["kind"] = "guest"
# Check if we already have an event with a matching transaction ID. (We
# do this check just before we persist an event as well, but may as well
# do it up front for efficiency.)
if txn_id and requester.access_token_id:
existing_event_id = await self.store.get_event_id_from_transaction_id(
room_id,
requester.user.to_string(),
requester.access_token_id,
txn_id,
)
if existing_event_id:
event_pos = await self.store.get_position_for_event(existing_event_id)
return existing_event_id, event_pos.stream
event, context = await self.event_creation_handler.create_event(
requester,
{
"type": EventTypes.Member,
"content": content,
"room_id": room_id,
"sender": requester.user.to_string(),
"state_key": user_id,
# For backwards compatibility:
"membership": membership,
},
txn_id=txn_id,
allow_no_prev_events=allow_no_prev_events,
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
require_consent=require_consent,
outlier=outlier,
historical=historical,
)
prev_state_ids = await context.get_prev_state_ids()
prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
if event.membership == Membership.JOIN:
newly_joined = True
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
newly_joined = prev_member_event.membership != Membership.JOIN
# Only rate-limit if the user actually joined the room, otherwise we'll end
# up blocking profile updates.
if newly_joined and ratelimit:
time_now_s = self.clock.time()
(
allowed,
time_allowed,
) = await self._join_rate_limiter_local.can_do_action(requester)
if not allowed:
raise LimitExceededError(
retry_after_ms=int(1000 * (time_allowed - time_now_s))
)
result_event = await self.event_creation_handler.handle_new_client_event(
requester,
event,
context,
extra_users=[target],
ratelimit=ratelimit,
)
if event.membership == Membership.LEAVE:
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
if prev_member_event.membership == Membership.JOIN:
await self._user_left_room(target, room_id)
# we know it was persisted, so should have a stream ordering
assert result_event.internal_metadata.stream_ordering
return result_event.event_id, result_event.internal_metadata.stream_ordering
async def copy_room_tags_and_direct_to_room(
self, old_room_id: str, new_room_id: str, user_id: str
) -> None:
"""Copies the tags and direct room state from one room to another.
Args:
old_room_id: The room ID of the old room.
new_room_id: The room ID of the new room.
user_id: The user's ID.
"""
# Retrieve user account data for predecessor room
user_account_data, _ = await self.store.get_account_data_for_user(user_id)
# Copy direct message state if applicable
direct_rooms = user_account_data.get(AccountDataTypes.DIRECT, {})
# Check which key this room is under
if isinstance(direct_rooms, dict):
for key, room_id_list in direct_rooms.items():
if old_room_id in room_id_list and new_room_id not in room_id_list:
# Add new room_id to this key
direct_rooms[key].append(new_room_id)
# Save back to user's m.direct account data
await self.account_data_handler.add_account_data_for_user(
user_id, AccountDataTypes.DIRECT, direct_rooms
)
break
# Copy room tags if applicable
room_tags = await self.store.get_tags_for_room(user_id, old_room_id)
# Copy each room tag to the new room
for tag, tag_content in room_tags.items():
await self.account_data_handler.add_tag_to_room(
user_id, new_room_id, tag, tag_content
)
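    # For reference (illustrative shape, not taken from this module): the
    # m.direct account data walked above maps each user ID to the list of room
    # IDs marked as direct chats with them, e.g.
    #
    #     {"@friend:example.org": ["!old:example.org"]}
    #
    # so the copy appends the new room ID next to the old one under the same key.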
async def update_membership(
self,
requester: Requester,
target: UserID,
room_id: str,
action: str,
txn_id: Optional[str] = None,
remote_room_hosts: Optional[List[str]] = None,
third_party_signed: Optional[dict] = None,
ratelimit: bool = True,
content: Optional[dict] = None,
new_room: bool = False,
require_consent: bool = True,
outlier: bool = False,
historical: bool = False,
allow_no_prev_events: bool = False,
prev_event_ids: Optional[List[str]] = None,
auth_event_ids: Optional[List[str]] = None,
) -> Tuple[str, int]:
"""Update a user's membership in a room.
        Args:
requester: The user who is performing the update.
target: The user whose membership is being updated.
room_id: The room ID whose membership is being updated.
action: The membership change, see synapse.api.constants.Membership.
txn_id: The transaction ID, if given.
remote_room_hosts: Remote servers to send the update to.
third_party_signed: Information from a 3PID invite.
ratelimit: Whether to rate limit the request.
content: The content of the created event.
new_room: Whether the membership update is happening in the context of a room
creation.
require_consent: Whether consent is required.
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
historical: Indicates whether the message is being inserted
back in time around some existing events. This is used to skip
a few checks and mark the event as backfilled.
            allow_no_prev_events: Whether to allow this event to be created with an empty
list of prev_events. Normally this is prohibited just because most
events should have a prev_event and we should only use this in special
cases like MSC2716.
prev_event_ids: The event IDs to use as the prev events
auth_event_ids:
The event ids to use as the auth_events for the new event.
Should normally be left as None, which will cause them to be calculated
based on the room state at the prev_events.
Returns:
A tuple of the new event ID and stream ID.
Raises:
ShadowBanError if a shadow-banned requester attempts to send an invite.
"""
if action == Membership.INVITE and requester.shadow_banned:
# We randomly sleep a bit just to annoy the requester.
await self.clock.sleep(random.randint(1, 10))
raise ShadowBanError()
key = (room_id,)
as_id = object()
if requester.app_service:
as_id = requester.app_service.id
# We first linearise by the application service (to try to limit concurrent joins
# by application services), and then by room ID.
with (await self.member_as_limiter.queue(as_id)):
with (await self.member_linearizer.queue(key)):
result = await self.update_membership_locked(
requester,
target,
room_id,
action,
txn_id=txn_id,
remote_room_hosts=remote_room_hosts,
third_party_signed=third_party_signed,
ratelimit=ratelimit,
content=content,
new_room=new_room,
require_consent=require_consent,
outlier=outlier,
historical=historical,
allow_no_prev_events=allow_no_prev_events,
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
)
return result
async def update_membership_locked(
self,
requester: Requester,
target: UserID,
room_id: str,
action: str,
txn_id: Optional[str] = None,
remote_room_hosts: Optional[List[str]] = None,
third_party_signed: Optional[dict] = None,
ratelimit: bool = True,
content: Optional[dict] = None,
new_room: bool = False,
require_consent: bool = True,
outlier: bool = False,
historical: bool = False,
allow_no_prev_events: bool = False,
prev_event_ids: Optional[List[str]] = None,
auth_event_ids: Optional[List[str]] = None,
) -> Tuple[str, int]:
"""Helper for update_membership.
Assumes that the membership linearizer is already held for the room.
Args:
requester:
target:
room_id:
action:
txn_id:
remote_room_hosts:
third_party_signed:
ratelimit:
content:
new_room: Whether the membership update is happening in the context of a room
creation.
require_consent:
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
historical: Indicates whether the message is being inserted
back in time around some existing events. This is used to skip
a few checks and mark the event as backfilled.
            allow_no_prev_events: Whether to allow this event to be created with an empty
list of prev_events. Normally this is prohibited just because most
events should have a prev_event and we should only use this in special
cases like MSC2716.
prev_event_ids: The event IDs to use as the prev events
auth_event_ids:
The event ids to use as the auth_events for the new event.
Should normally be left as None, which will cause them to be calculated
based on the room state at the prev_events.
Returns:
A tuple of the new event ID and stream ID.
"""
content_specified = bool(content)
if content is None:
content = {}
else:
# We do a copy here as we potentially change some keys
# later on.
content = dict(content)
# allow the server notices mxid to set room-level profile
is_requester_server_notices_user = (
self._server_notices_mxid is not None
and requester.user.to_string() == self._server_notices_mxid
)
if (
not self.allow_per_room_profiles and not is_requester_server_notices_user
) or requester.shadow_banned:
# Strip profile data, knowing that new profile data will be added to the
# event's content in event_creation_handler.create_event() using the target's
# global profile.
content.pop("displayname", None)
content.pop("avatar_url", None)
if len(content.get("displayname") or "") > MAX_DISPLAYNAME_LEN:
raise SynapseError(
400,
f"Displayname is too long (max {MAX_DISPLAYNAME_LEN})",
errcode=Codes.BAD_JSON,
)
if len(content.get("avatar_url") or "") > MAX_AVATAR_URL_LEN:
raise SynapseError(
400,
f"Avatar URL is too long (max {MAX_AVATAR_URL_LEN})",
errcode=Codes.BAD_JSON,
)
if "avatar_url" in content:
if not await self.profile_handler.check_avatar_size_and_mime_type(
content["avatar_url"],
):
raise SynapseError(403, "This avatar is not allowed", Codes.FORBIDDEN)
# The event content should *not* include the authorising user as
# it won't be properly signed. Strip it out since it might come
# back from a client updating a display name / avatar.
#
# This only applies to restricted rooms, but there should be no reason
# for a client to include it. Unconditionally remove it.
content.pop(EventContentFields.AUTHORISING_USER, None)
effective_membership_state = action
if action in ["kick", "unban"]:
effective_membership_state = "leave"
# if this is a join with a 3pid signature, we may need to turn a 3pid
# invite into a normal invite before we can handle the join.
if third_party_signed is not None:
await self.federation_handler.exchange_third_party_invite(
third_party_signed["sender"],
target.to_string(),
room_id,
third_party_signed,
)
if not remote_room_hosts:
remote_room_hosts = []
if effective_membership_state not in ("leave", "ban"):
is_blocked = await self.store.is_room_blocked(room_id)
if is_blocked:
raise SynapseError(403, "This room has been blocked on this server")
if effective_membership_state == Membership.INVITE:
target_id = target.to_string()
if ratelimit:
await self.ratelimit_invite(requester, room_id, target_id)
# block any attempts to invite the server notices mxid
if target_id == self._server_notices_mxid:
raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user")
block_invite = False
if (
self._server_notices_mxid is not None
and requester.user.to_string() == self._server_notices_mxid
):
# allow the server notices mxid to send invites
is_requester_admin = True
else:
is_requester_admin = await self.auth.is_server_admin(requester.user)
if not is_requester_admin:
if self.config.server.block_non_admin_invites:
logger.info(
"Blocking invite: user is not admin and non-admin "
"invites disabled"
)
block_invite = True
if not await self.spam_checker.user_may_invite(
requester.user.to_string(), target_id, room_id
):
logger.info("Blocking invite due to spam checker")
block_invite = True
if block_invite:
raise SynapseError(403, "Invites have been disabled on this server")
# An empty prev_events list is allowed as long as the auth_event_ids are present
if prev_event_ids is not None:
return await self._local_membership_update(
requester=requester,
target=target,
room_id=room_id,
membership=effective_membership_state,
txn_id=txn_id,
ratelimit=ratelimit,
allow_no_prev_events=allow_no_prev_events,
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
content=content,
require_consent=require_consent,
outlier=outlier,
historical=historical,
)
latest_event_ids = await self.store.get_prev_events_for_room(room_id)
current_state_ids = await self.state_handler.get_current_state_ids(
room_id, latest_event_ids=latest_event_ids
)
# TODO: Refactor into dictionary of explicitly allowed transitions
# between old and new state, with specific error messages for some
# transitions and generic otherwise
old_state_id = current_state_ids.get((EventTypes.Member, target.to_string()))
if old_state_id:
old_state = await self.store.get_event(old_state_id, allow_none=True)
old_membership = old_state.content.get("membership") if old_state else None
if action == "unban" and old_membership != "ban":
raise SynapseError(
403,
"Cannot unban user who was not banned"
" (membership=%s)" % old_membership,
errcode=Codes.BAD_STATE,
)
if old_membership == "ban" and action not in ["ban", "unban", "leave"]:
raise SynapseError(
403,
"Cannot %s user who was banned" % (action,),
errcode=Codes.BAD_STATE,
)
if old_state:
same_content = content == old_state.content
same_membership = old_membership == effective_membership_state
same_sender = requester.user.to_string() == old_state.sender
if same_sender and same_membership and same_content:
# duplicate event.
# we know it was persisted, so must have a stream ordering.
assert old_state.internal_metadata.stream_ordering
return (
old_state.event_id,
old_state.internal_metadata.stream_ordering,
)
if old_membership in ["ban", "leave"] and action == "kick":
raise AuthError(403, "The target user is not in the room")
# we don't allow people to reject invites to the server notice
# room, but they can leave it once they are joined.
if (
old_membership == Membership.INVITE
and effective_membership_state == Membership.LEAVE
):
is_blocked = await self._is_server_notice_room(room_id)
if is_blocked:
raise SynapseError(
HTTPStatus.FORBIDDEN,
"You cannot reject this invite",
errcode=Codes.CANNOT_LEAVE_SERVER_NOTICE_ROOM,
)
else:
if action == "kick":
raise AuthError(403, "The target user is not in the room")
is_host_in_room = await self._is_host_in_room(current_state_ids)
if effective_membership_state == Membership.JOIN:
if requester.is_guest:
guest_can_join = await self._can_guest_join(current_state_ids)
if not guest_can_join:
# This should be an auth check, but guests are a local concept,
# so don't really fit into the general auth process.
raise AuthError(403, "Guest access not allowed")
# Figure out whether the user is a server admin to determine whether they
# should be able to bypass the spam checker.
if (
self._server_notices_mxid is not None
and requester.user.to_string() == self._server_notices_mxid
):
# allow the server notices mxid to join rooms
bypass_spam_checker = True
else:
bypass_spam_checker = await self.auth.is_server_admin(requester.user)
inviter = await self._get_inviter(target.to_string(), room_id)
if (
not bypass_spam_checker
# We assume that if the spam checker allowed the user to create
# a room then they're allowed to join it.
and not new_room
and not await self.spam_checker.user_may_join_room(
target.to_string(), room_id, is_invited=inviter is not None
)
):
raise SynapseError(403, "Not allowed to join this room")
# Check if a remote join should be performed.
remote_join, remote_room_hosts = await self._should_perform_remote_join(
target.to_string(), room_id, remote_room_hosts, content, is_host_in_room
)
if remote_join:
if ratelimit:
time_now_s = self.clock.time()
(
allowed,
time_allowed,
) = await self._join_rate_limiter_remote.can_do_action(
requester,
)
if not allowed:
raise LimitExceededError(
retry_after_ms=int(1000 * (time_allowed - time_now_s))
)
inviter = await self._get_inviter(target.to_string(), room_id)
if inviter and not self.hs.is_mine(inviter):
remote_room_hosts.append(inviter.domain)
content["membership"] = Membership.JOIN
profile = self.profile_handler
if not content_specified:
content["displayname"] = await profile.get_displayname(target)
content["avatar_url"] = await profile.get_avatar_url(target)
if requester.is_guest:
content["kind"] = "guest"
remote_join_response = await self._remote_join(
requester, remote_room_hosts, room_id, target, content
)
return remote_join_response
elif effective_membership_state == Membership.LEAVE:
if not is_host_in_room:
# Figure out the user's current membership state for the room
(
current_membership_type,
current_membership_event_id,
) = await self.store.get_local_current_membership_for_user_in_room(
target.to_string(), room_id
)
if not current_membership_type or not current_membership_event_id:
logger.info(
"%s sent a leave request to %s, but that is not an active room "
"on this server, or there is no pending invite or knock",
target,
room_id,
)
raise SynapseError(404, "Not a known room")
# perhaps we've been invited
if current_membership_type == Membership.INVITE:
invite = await self.store.get_event(current_membership_event_id)
logger.info(
"%s rejects invite to %s from %s",
target,
room_id,
invite.sender,
)
if not self.hs.is_mine_id(invite.sender):
# send the rejection to the inviter's HS (with fallback to
# local event)
return await self.remote_reject_invite(
invite.event_id,
txn_id,
requester,
content,
)
# the inviter was on our server, but has now left. Carry on
# with the normal rejection codepath, which will also send the
# rejection out to any other servers we believe are still in the room.
# thanks to overzealous cleaning up of event_forward_extremities in
# `delete_old_current_state_events`, it's possible to end up with no
# forward extremities here. If that happens, let's just hang the
# rejection off the invite event.
#
# see: https://github.com/matrix-org/synapse/issues/7139
if len(latest_event_ids) == 0:
latest_event_ids = [invite.event_id]
# or perhaps this is a remote room that a local user has knocked on
elif current_membership_type == Membership.KNOCK:
knock = await self.store.get_event(current_membership_event_id)
return await self.remote_rescind_knock(
knock.event_id, txn_id, requester, content
)
elif effective_membership_state == Membership.KNOCK:
if not is_host_in_room:
# The knock needs to be sent over federation instead
remote_room_hosts.append(get_domain_from_id(room_id))
content["membership"] = Membership.KNOCK
profile = self.profile_handler
if "displayname" not in content:
content["displayname"] = await profile.get_displayname(target)
if "avatar_url" not in content:
content["avatar_url"] = await profile.get_avatar_url(target)
return await self.remote_knock(
remote_room_hosts, room_id, target, content
)
return await self._local_membership_update(
requester=requester,
target=target,
room_id=room_id,
membership=effective_membership_state,
txn_id=txn_id,
ratelimit=ratelimit,
prev_event_ids=latest_event_ids,
auth_event_ids=auth_event_ids,
content=content,
require_consent=require_consent,
outlier=outlier,
)
async def _should_perform_remote_join(
self,
user_id: str,
room_id: str,
remote_room_hosts: List[str],
content: JsonDict,
is_host_in_room: bool,
) -> Tuple[bool, List[str]]:
"""
Check whether the server should do a remote join (as opposed to a local
join) for a user.
Generally a remote join is used if:
* The server is not yet in the room.
* The server is in the room, the room has restricted join rules, the user
is not joined or invited to the room, and the server does not have
another user who is capable of issuing invites.
Args:
user_id: The user joining the room.
room_id: The room being joined.
remote_room_hosts: A list of remote room hosts.
content: The content to use as the event body of the join. This may
be modified.
is_host_in_room: True if the host is in the room.
Returns:
A tuple of:
True if a remote join should be performed. False if the join can be
done locally.
A list of remote room hosts to use. This is an empty list if a
local join is to be done.
"""
# If the host isn't in the room, pass through the prospective hosts.
if not is_host_in_room:
return True, remote_room_hosts
# If the host is in the room, but not one of the authorised hosts
# for restricted join rules, a remote join must be used.
room_version = await self.store.get_room_version(room_id)
current_state_ids = await self.store.get_current_state_ids(room_id)
# If restricted join rules are not being used, a local join can always
# be used.
if not await self.event_auth_handler.has_restricted_join_rules(
current_state_ids, room_version
):
return False, []
# If the user is invited to the room or already joined, the join
# event can always be issued locally.
prev_member_event_id = current_state_ids.get((EventTypes.Member, user_id), None)
prev_member_event = None
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
if prev_member_event.membership in (
Membership.JOIN,
Membership.INVITE,
):
return False, []
# If the local host has a user who can issue invites, then a local
# join can be done.
#
# If not, generate a new list of remote hosts based on which
# can issue invites.
event_map = await self.store.get_events(current_state_ids.values())
current_state = {
state_key: event_map[event_id]
for state_key, event_id in current_state_ids.items()
}
allowed_servers = get_servers_from_users(
get_users_which_can_issue_invite(current_state)
)
        # If the local server is not one of the allowed servers, then a remote
# join must be done. Return the list of prospective servers based on
# which can issue invites.
if self.hs.hostname not in allowed_servers:
return True, list(allowed_servers)
# Ensure the member should be allowed access via membership in a room.
await self.event_auth_handler.check_restricted_join_rules(
current_state_ids, room_version, user_id, prev_member_event
)
# If this is going to be a local join, additional information must
# be included in the event content in order to efficiently validate
# the event.
content[
EventContentFields.AUTHORISING_USER
] = await self.event_auth_handler.get_user_which_could_invite(
room_id,
current_state_ids,
)
return False, []
async def transfer_room_state_on_room_upgrade(
self, old_room_id: str, room_id: str
) -> None:
"""Upon our server becoming aware of an upgraded room, either by upgrading a room
ourselves or joining one, we can transfer over information from the previous room.
Copies user state (tags/push rules) for every local user that was in the old room, as
well as migrating the room directory state.
Args:
old_room_id: The ID of the old room
room_id: The ID of the new room
"""
logger.info("Transferring room state from %s to %s", old_room_id, room_id)
# Find all local users that were in the old room and copy over each user's state
users = await self.store.get_users_in_room(old_room_id)
await self.copy_user_state_on_room_upgrade(old_room_id, room_id, users)
# Add new room to the room directory if the old room was there
# Remove old room from the room directory
old_room = await self.store.get_room(old_room_id)
if old_room is not None and old_room["is_public"]:
await self.store.set_room_is_public(old_room_id, False)
await self.store.set_room_is_public(room_id, True)
# Transfer alias mappings in the room directory
await self.store.update_aliases_for_room(old_room_id, room_id)
# Check if any groups we own contain the predecessor room
local_group_ids = await self.store.get_local_groups_for_room(old_room_id)
for group_id in local_group_ids:
            # Add the new room to those groups
await self.store.add_room_to_group(
group_id, room_id, old_room is not None and old_room["is_public"]
)
# Remove the old room from those groups
await self.store.remove_room_from_group(group_id, old_room_id)
async def copy_user_state_on_room_upgrade(
self, old_room_id: str, new_room_id: str, user_ids: Iterable[str]
) -> None:
"""Copy user-specific information when they join a new room when that new room is the
result of a room upgrade
Args:
            old_room_id: The ID of the upgraded room
new_room_id: The ID of the new room
user_ids: User IDs to copy state for
"""
logger.debug(
"Copying over room tags and push rules from %s to %s for users %s",
old_room_id,
new_room_id,
user_ids,
)
for user_id in user_ids:
try:
# It is an upgraded room. Copy over old tags
await self.copy_room_tags_and_direct_to_room(
old_room_id, new_room_id, user_id
)
# Copy over push rules
await self.store.copy_push_rules_from_room_to_room_for_user(
old_room_id, new_room_id, user_id
)
except Exception:
logger.exception(
"Error copying tags and/or push rules from rooms %s to %s for user %s. "
"Skipping...",
old_room_id,
new_room_id,
user_id,
)
continue
async def send_membership_event(
self,
requester: Optional[Requester],
event: EventBase,
context: EventContext,
ratelimit: bool = True,
) -> None:
"""
Change the membership status of a user in a room.
Args:
requester: The local user who requested the membership
event. If None, certain checks, like whether this homeserver can
act as the sender, will be skipped.
event: The membership event.
context: The context of the event.
ratelimit: Whether to rate limit this request.
Raises:
SynapseError if there was a problem changing the membership.
"""
target_user = UserID.from_string(event.state_key)
room_id = event.room_id
if requester is not None:
sender = UserID.from_string(event.sender)
assert (
sender == requester.user
), "Sender (%s) must be same as requester (%s)" % (sender, requester.user)
assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,)
else:
requester = types.create_requester(target_user)
prev_state_ids = await context.get_prev_state_ids()
if event.membership == Membership.JOIN:
if requester.is_guest:
guest_can_join = await self._can_guest_join(prev_state_ids)
if not guest_can_join:
# This should be an auth check, but guests are a local concept,
# so don't really fit into the general auth process.
raise AuthError(403, "Guest access not allowed")
if event.membership not in (Membership.LEAVE, Membership.BAN):
is_blocked = await self.store.is_room_blocked(room_id)
if is_blocked:
raise SynapseError(403, "This room has been blocked on this server")
event = await self.event_creation_handler.handle_new_client_event(
requester, event, context, extra_users=[target_user], ratelimit=ratelimit
)
prev_member_event_id = prev_state_ids.get(
(EventTypes.Member, event.state_key), None
)
if event.membership == Membership.LEAVE:
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
if prev_member_event.membership == Membership.JOIN:
await self._user_left_room(target_user, room_id)
async def _can_guest_join(self, current_state_ids: StateMap[str]) -> bool:
"""
Returns whether a guest can join a room based on its current state.
"""
guest_access_id = current_state_ids.get((EventTypes.GuestAccess, ""), None)
if not guest_access_id:
return False
guest_access = await self.store.get_event(guest_access_id)
return bool(
guest_access
and guest_access.content
and guest_access.content.get(EventContentFields.GUEST_ACCESS)
== GuestAccess.CAN_JOIN
)
async def kick_guest_users(self, current_state: Iterable[EventBase]) -> None:
"""Kick any local guest users from the room.
This is called when the room state changes from guests allowed to not-allowed.
        Args:
current_state: the current state of the room. We will iterate this to look
for guest users to kick.
"""
for member_event in current_state:
try:
if member_event.type != EventTypes.Member:
continue
if not self.hs.is_mine_id(member_event.state_key):
continue
if member_event.content["membership"] not in {
Membership.JOIN,
Membership.INVITE,
}:
continue
if (
"kind" not in member_event.content
or member_event.content["kind"] != "guest"
):
continue
# We make the user choose to leave, rather than have the
# event-sender kick them. This is partially because we don't
# need to worry about power levels, and partially because guest
# users are a concept which doesn't hugely work over federation,
# and having homeservers have their own users leave keeps more
# of that decision-making and control local to the guest-having
# homeserver.
target_user = UserID.from_string(member_event.state_key)
requester = create_requester(
target_user, is_guest=True, authenticated_entity=self._server_name
)
handler = self.hs.get_room_member_handler()
await handler.update_membership(
requester,
target_user,
member_event.room_id,
"leave",
ratelimit=False,
require_consent=False,
)
except Exception as e:
logger.exception("Error kicking guest user: %s" % (e,))
async def lookup_room_alias(
self, room_alias: RoomAlias
) -> Tuple[RoomID, List[str]]:
"""
Get the room ID associated with a room alias.
Args:
room_alias: The alias to look up.
Returns:
A tuple of:
The room ID as a RoomID object.
Hosts likely to be participating in the room ([str]).
Raises:
SynapseError if room alias could not be found.
"""
directory_handler = self.directory_handler
mapping = await directory_handler.get_association(room_alias)
if not mapping:
raise SynapseError(404, "No such room alias")
room_id = mapping["room_id"]
servers = mapping["servers"]
# put the server which owns the alias at the front of the server list.
if room_alias.domain in servers:
servers.remove(room_alias.domain)
servers.insert(0, room_alias.domain)
return RoomID.from_string(room_id), servers
async def _get_inviter(self, user_id: str, room_id: str) -> Optional[UserID]:
invite = await self.store.get_invite_for_local_user_in_room(
user_id=user_id, room_id=room_id
)
if invite:
return UserID.from_string(invite.sender)
return None
async def do_3pid_invite(
self,
room_id: str,
inviter: UserID,
medium: str,
address: str,
id_server: str,
requester: Requester,
txn_id: Optional[str],
id_access_token: Optional[str] = None,
) -> int:
"""Invite a 3PID to a room.
Args:
room_id: The room to invite the 3PID to.
inviter: The user sending the invite.
medium: The 3PID's medium.
address: The 3PID's address.
id_server: The identity server to use.
requester: The user making the request.
txn_id: The transaction ID this is part of, or None if this is not
part of a transaction.
id_access_token: The optional identity server access token.
Returns:
The new stream ID.
Raises:
ShadowBanError if the requester has been shadow-banned.
"""
if self.config.server.block_non_admin_invites:
is_requester_admin = await self.auth.is_server_admin(requester.user)
if not is_requester_admin:
raise SynapseError(
403, "Invites have been disabled on this server", Codes.FORBIDDEN
)
if requester.shadow_banned:
# We randomly sleep a bit just to annoy the requester.
await self.clock.sleep(random.randint(1, 10))
raise ShadowBanError()
# We need to rate limit *before* we send out any 3PID invites, so we
# can't just rely on the standard ratelimiting of events.
await self._third_party_invite_limiter.ratelimit(requester)
can_invite = await self.third_party_event_rules.check_threepid_can_be_invited(
medium, address, room_id
)
if not can_invite:
raise SynapseError(
403,
"This third-party identifier can not be invited in this room",
Codes.FORBIDDEN,
)
if not self._enable_lookup:
raise SynapseError(
403, "Looking up third-party identifiers is denied from this server"
)
invitee = await self.identity_handler.lookup_3pid(
id_server, medium, address, id_access_token
)
if invitee:
# Note that update_membership with an action of "invite" can raise
# a ShadowBanError, but this was done above already.
# We don't check the invite against the spamchecker(s) here (through
# user_may_invite) because we'll do it further down the line anyway (in
# update_membership_locked).
_, stream_id = await self.update_membership(
requester, UserID.from_string(invitee), room_id, "invite", txn_id=txn_id
)
else:
# Check if the spamchecker(s) allow this invite to go through.
if not await self.spam_checker.user_may_send_3pid_invite(
inviter_userid=requester.user.to_string(),
medium=medium,
address=address,
room_id=room_id,
):
raise SynapseError(403, "Cannot send threepid invite")
stream_id = await self._make_and_store_3pid_invite(
requester,
id_server,
medium,
address,
room_id,
inviter,
txn_id=txn_id,
id_access_token=id_access_token,
)
return stream_id
async def _make_and_store_3pid_invite(
self,
requester: Requester,
id_server: str,
medium: str,
address: str,
room_id: str,
user: UserID,
txn_id: Optional[str],
id_access_token: Optional[str] = None,
) -> int:
room_state = await self.state_handler.get_current_state(room_id)
inviter_display_name = ""
inviter_avatar_url = ""
member_event = room_state.get((EventTypes.Member, user.to_string()))
if member_event:
inviter_display_name = member_event.content.get("displayname", "")
inviter_avatar_url = member_event.content.get("avatar_url", "")
# if user has no display name, default to their MXID
if not inviter_display_name:
inviter_display_name = user.to_string()
canonical_room_alias = ""
canonical_alias_event = room_state.get((EventTypes.CanonicalAlias, ""))
if canonical_alias_event:
canonical_room_alias = canonical_alias_event.content.get("alias", "")
room_name = ""
room_name_event = room_state.get((EventTypes.Name, ""))
if room_name_event:
room_name = room_name_event.content.get("name", "")
room_type = None
room_create_event = room_state.get((EventTypes.Create, ""))
if room_create_event:
room_type = room_create_event.content.get(EventContentFields.ROOM_TYPE)
room_join_rules = ""
join_rules_event = room_state.get((EventTypes.JoinRules, ""))
if join_rules_event:
room_join_rules = join_rules_event.content.get("join_rule", "")
room_avatar_url = ""
room_avatar_event = room_state.get((EventTypes.RoomAvatar, ""))
if room_avatar_event:
room_avatar_url = room_avatar_event.content.get("url", "")
(
token,
public_keys,
fallback_public_key,
display_name,
) = await self.identity_handler.ask_id_server_for_third_party_invite(
requester=requester,
id_server=id_server,
medium=medium,
address=address,
room_id=room_id,
inviter_user_id=user.to_string(),
room_alias=canonical_room_alias,
room_avatar_url=room_avatar_url,
room_join_rules=room_join_rules,
room_name=room_name,
room_type=room_type,
inviter_display_name=inviter_display_name,
inviter_avatar_url=inviter_avatar_url,
id_access_token=id_access_token,
)
(
event,
stream_id,
) = await self.event_creation_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.ThirdPartyInvite,
"content": {
"display_name": display_name,
"public_keys": public_keys,
# For backwards compatibility:
"key_validity_url": fallback_public_key["key_validity_url"],
"public_key": fallback_public_key["public_key"],
},
"room_id": room_id,
"sender": user.to_string(),
"state_key": token,
},
ratelimit=False,
txn_id=txn_id,
)
return stream_id
async def _is_host_in_room(self, current_state_ids: StateMap[str]) -> bool:
# Have we just created the room, and is this about to be the very
# first member event?
create_event_id = current_state_ids.get(("m.room.create", ""))
if len(current_state_ids) == 1 and create_event_id:
# We can only get here if we're in the process of creating the room
return True
for etype, state_key in current_state_ids:
if etype != EventTypes.Member or not self.hs.is_mine_id(state_key):
continue
event_id = current_state_ids[(etype, state_key)]
event = await self.store.get_event(event_id, allow_none=True)
if not event:
continue
if event.membership == Membership.JOIN:
return True
return False
async def _is_server_notice_room(self, room_id: str) -> bool:
if self._server_notices_mxid is None:
return False
user_ids = await self.store.get_users_in_room(room_id)
return self._server_notices_mxid in user_ids
class RoomMemberMasterHandler(RoomMemberHandler):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.distributor = hs.get_distributor()
self.distributor.declare("user_left_room")
async def _is_remote_room_too_complex(
self, room_id: str, remote_room_hosts: List[str]
) -> Optional[bool]:
"""
Check if complexity of a remote room is too great.
Args:
room_id
remote_room_hosts
        Returns: bool of whether the complexity is too great, or None
            if it could not be fetched
"""
max_complexity = self.hs.config.server.limit_remote_rooms.complexity
complexity = await self.federation_handler.get_room_complexity(
remote_room_hosts, room_id
)
if complexity:
return complexity["v1"] > max_complexity
return None
async def _is_local_room_too_complex(self, room_id: str) -> bool:
"""
Check if the complexity of a local room is too great.
Args:
room_id: The room ID to check for complexity.
"""
max_complexity = self.hs.config.server.limit_remote_rooms.complexity
complexity = await self.store.get_room_complexity(room_id)
return complexity["v1"] > max_complexity
async def _remote_join(
self,
requester: Requester,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
content: dict,
) -> Tuple[str, int]:
"""Implements RoomMemberHandler._remote_join"""
# filter ourselves out of remote_room_hosts: do_invite_join ignores it
# and if it is the only entry we'd like to return a 404 rather than a
# 500.
remote_room_hosts = [
host for host in remote_room_hosts if host != self.hs.hostname
]
if len(remote_room_hosts) == 0:
raise SynapseError(404, "No known servers")
check_complexity = self.hs.config.server.limit_remote_rooms.enabled
if (
check_complexity
and self.hs.config.server.limit_remote_rooms.admins_can_join
):
check_complexity = not await self.auth.is_server_admin(user)
if check_complexity:
# Fetch the room complexity
too_complex = await self._is_remote_room_too_complex(
room_id, remote_room_hosts
)
if too_complex is True:
raise SynapseError(
code=400,
msg=self.hs.config.server.limit_remote_rooms.complexity_error,
errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
)
# We don't do an auth check if we are doing an invite
# join dance for now, since we're kinda implicitly checking
# that we are allowed to join when we decide whether or not we
# need to do the invite/join dance.
event_id, stream_id = await self.federation_handler.do_invite_join(
remote_room_hosts, room_id, user.to_string(), content
)
# Check the room we just joined wasn't too large, if we didn't fetch the
# complexity of it before.
if check_complexity:
if too_complex is False:
# We checked, and we're under the limit.
return event_id, stream_id
# Check again, but with the local state events
too_complex = await self._is_local_room_too_complex(room_id)
if too_complex is False:
# We're under the limit.
return event_id, stream_id
# The room is too large. Leave.
requester = types.create_requester(
user, authenticated_entity=self._server_name
)
await self.update_membership(
requester=requester, target=user, room_id=room_id, action="leave"
)
raise SynapseError(
code=400,
msg=self.hs.config.server.limit_remote_rooms.complexity_error,
errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
)
return event_id, stream_id
async def remote_reject_invite(
self,
invite_event_id: str,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""
Rejects an out-of-band invite received from a remote user
Implements RoomMemberHandler.remote_reject_invite
"""
invite_event = await self.store.get_event(invite_event_id)
room_id = invite_event.room_id
target_user = invite_event.state_key
# first of all, try doing a rejection via the inviting server
fed_handler = self.federation_handler
try:
inviter_id = UserID.from_string(invite_event.sender)
event, stream_id = await fed_handler.do_remotely_reject_invite(
[inviter_id.domain], room_id, target_user, content=content
)
return event.event_id, stream_id
except Exception as e:
# if we were unable to reject the invite, we will generate our own
# leave event.
#
# The 'except' clause is very broad, but we need to
# capture everything from DNS failures upwards
#
logger.warning("Failed to reject invite: %s", e)
return await self._generate_local_out_of_band_leave(
invite_event, txn_id, requester, content
)
async def remote_rescind_knock(
self,
knock_event_id: str,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""
Rescinds a local knock made on a remote room
Args:
knock_event_id: The ID of the knock event to rescind.
txn_id: The transaction ID to use.
requester: The originator of the request.
content: The content of the leave event.
Implements RoomMemberHandler.remote_rescind_knock
"""
# TODO: We don't yet support rescinding knocks over federation
# as we don't know which homeserver to send it to. An obvious
# candidate is the remote homeserver we originally knocked through,
# however we don't currently store that information.
# Just rescind the knock locally
knock_event = await self.store.get_event(knock_event_id)
return await self._generate_local_out_of_band_leave(
knock_event, txn_id, requester, content
)
async def _generate_local_out_of_band_leave(
self,
previous_membership_event: EventBase,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""Generate a local leave event for a room
        This can be called after we e.g. fail to reject an invite via a remote server.
It generates an out-of-band membership event locally.
Args:
previous_membership_event: the previous membership event for this user
txn_id: optional transaction ID supplied by the client
requester: user making the request, according to the access token
content: additional content to include in the leave event.
Normally an empty dict.
Returns:
A tuple containing (event_id, stream_id of the leave event)
"""
room_id = previous_membership_event.room_id
target_user = previous_membership_event.state_key
content["membership"] = Membership.LEAVE
event_dict = {
"type": EventTypes.Member,
"room_id": room_id,
"sender": target_user,
"content": content,
"state_key": target_user,
}
        # the auth events for the new event are the same as those of the previous
        # event, plus the event itself.
#
# the prev_events consist solely of the previous membership event.
prev_event_ids = [previous_membership_event.event_id]
auth_event_ids = (
list(previous_membership_event.auth_event_ids()) + prev_event_ids
)
event, context = await self.event_creation_handler.create_event(
requester,
event_dict,
txn_id=txn_id,
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
outlier=True,
)
event.internal_metadata.out_of_band_membership = True
result_event = await self.event_creation_handler.handle_new_client_event(
requester,
event,
context,
extra_users=[UserID.from_string(target_user)],
)
# we know it was persisted, so must have a stream ordering
assert result_event.internal_metadata.stream_ordering
return result_event.event_id, result_event.internal_metadata.stream_ordering
async def remote_knock(
self,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
content: dict,
) -> Tuple[str, int]:
"""Sends a knock to a room. Attempts to do so via one remote out of a given list.
Args:
remote_room_hosts: A list of homeservers to try knocking through.
room_id: The ID of the room to knock on.
user: The user to knock on behalf of.
content: The content of the knock event.
Returns:
A tuple of (event ID, stream ID).
"""
# filter ourselves out of remote_room_hosts
remote_room_hosts = [
host for host in remote_room_hosts if host != self.hs.hostname
]
if len(remote_room_hosts) == 0:
raise SynapseError(404, "No known servers")
return await self.federation_handler.do_knock(
remote_room_hosts, room_id, user.to_string(), content=content
)
async def _user_left_room(self, target: UserID, room_id: str) -> None:
"""Implements RoomMemberHandler._user_left_room"""
user_left_room(self.distributor, target, room_id)
async def forget(self, user: UserID, room_id: str) -> None:
user_id = user.to_string()
member = await self.state_handler.get_current_state(
room_id=room_id, event_type=EventTypes.Member, state_key=user_id
)
membership = member.membership if member else None
if membership is not None and membership not in [
Membership.LEAVE,
Membership.BAN,
]:
raise SynapseError(400, "User %s in room %s" % (user_id, room_id))
if membership:
await self.store.forget(user_id, room_id)
def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> List[str]:
"""
Return the list of users which can issue invites.
This is done by exploring the joined users and comparing their power levels
    to the necessary power level to issue an invite.
Args:
auth_events: state in force at this point in the room
Returns:
The users which can issue invites.
"""
invite_level = get_named_level(auth_events, "invite", 0)
users_default_level = get_named_level(auth_events, "users_default", 0)
power_level_event = get_power_level_event(auth_events)
# Custom power-levels for users.
if power_level_event:
users = power_level_event.content.get("users", {})
else:
users = {}
result = []
# Check which members are able to invite by ensuring they're joined and have
# the necessary power level.
for (event_type, state_key), event in auth_events.items():
if event_type != EventTypes.Member:
continue
if event.membership != Membership.JOIN:
continue
# Check if the user has a custom power level.
if users.get(state_key, users_default_level) >= invite_level:
result.append(state_key)
return result
def get_servers_from_users(users: List[str]) -> Set[str]:
"""
Resolve a list of users into their servers.
Args:
users: A list of users.
Returns:
A set of servers.
"""
servers = set()
for user in users:
try:
servers.add(get_domain_from_id(user))
except SynapseError:
pass
return servers
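# A minimal illustration (hypothetical values, not part of the handler code):
# resolving two Matrix user IDs keeps only their homeserver domains, and any
# malformed ID that makes get_domain_from_id raise SynapseError is skipped.
#
#   get_servers_from_users(["@alice:example.com", "@bob:matrix.org", "not-a-user-id"])
#   # -> {"example.com", "matrix.org"}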
|
google-research/google-research
|
t5_closed_book_qa/t5_cbqa/tasks.py
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""T5 CBQA tasks."""
import functools
from . import metrics
from . import postprocessors
from . import preprocessors
import seqio
from t5.data import get_default_vocabulary
from t5.data import postprocessors as t5_postprocessors
from t5.data import preprocessors as t5_preprocessors
from t5.evaluation import metrics as t5_metrics
MixtureRegistry = seqio.MixtureRegistry
TaskRegistry = seqio.TaskRegistry
DEFAULT_SPM_PATH = "gs://t5-data/vocabs/cc_all.32000/sentencepiece.model" # GCS
DEFAULT_EXTRA_IDS = 100
NQ_TRAIN_SPLIT_START = 7830
NQ_TRAIN_SPLIT_END = 79168
NQO_TRAIN_SPLIT_END = 79168
WQ_TRAIN_SPLIT_END = 3417
TQA_TRAIN_SPLIT_END = 78785
DEFAULT_OUTPUT_FEATURES = {
"inputs": seqio.Feature(vocabulary=get_default_vocabulary(), add_eos=True),
"targets": seqio.Feature(vocabulary=get_default_vocabulary(), add_eos=True)
}
# ========================== Natural Questions =================================
# Natural Questions open domain variant that most closely matches the official
# evaluation procedure.
# The model is trained to predict all ground-truth answers
# and is only considered correct if it predicts all answers for any one of the
# annotators. As in the official evaluation, we consider questions with fewer
# than two non-null annotations unanswerable (given the context) but because we
# cannot predict unanswerability without the context, we only compute the recall
# metric. Further, because our model does not have access to the oracle context,
# we also normalize predicted and ground-truth answers when comparing them.
# This task uses a portion of the train set for validation.
TaskRegistry.add(
"natural_questions_nocontext",
source=seqio.TfdsDataSource(
tfds_name="natural_questions:0.0.2",
splits={
"train": f"train[{NQ_TRAIN_SPLIT_START}:{NQ_TRAIN_SPLIT_END}]",
"validation": f"train[:{NQ_TRAIN_SPLIT_START}]",
"test": "validation"
}),
preprocessors=[
preprocessors.natural_questions_nocontext,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=postprocessors.natural_questions,
metric_fns=[
functools.partial(
metrics.natural_questions,
# Train set does not contain multiple annotations.
non_null_threshold=1)
])
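# A usage sketch (not part of the task definition above): once this module is
# imported and the task registered, a tf.data pipeline can be requested through
# the standard seqio API. The sequence lengths below are illustrative placeholders.
#
#   import seqio
#   task = seqio.get_mixture_or_task("natural_questions_nocontext")
#   ds = task.get_dataset(
#       sequence_length={"inputs": 512, "targets": 128}, split="validation")
#   for example in ds.take(1):
#       print(example["inputs"], example["targets"])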
# This task uses the full train split and reports metrics on the NQ validation split
# (which is the test set in the open domain setting).
TaskRegistry.add(
"natural_questions_nocontext_test",
source=seqio.TfdsDataSource(tfds_name="natural_questions:0.0.2"),
preprocessors=[
preprocessors.natural_questions_nocontext,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=postprocessors.natural_questions,
metric_fns=[metrics.natural_questions])
# The standard open domain variant of Natural Questions, where:
# 1) the model is only ever trained to output a single answer;
# 2) if a question has multiple answers, it is trained to predict the first;
# 3) any questions with answers longer than five tokens are ignored;
# 4) answers are normalized before being compared;
# This task uses a portion of the train split for validation.
TaskRegistry.add(
"natural_questions_open",
source=seqio.TfdsDataSource(
tfds_name="natural_questions_open:1.0.0",
splits={
# ~90%, matches numbers used by ORQA
"train": f"train[:{NQO_TRAIN_SPLIT_END}]",
# ~10%, matches numbers used by ORQA
"validation": f"train[{NQO_TRAIN_SPLIT_END}:]",
"test": "validation"
}),
preprocessors=[
preprocessors.natural_questions_open,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=t5_postprocessors.qa,
metric_fns=[t5_metrics.squad])
# This is a slight variant of the previous task that selects a random answer
# when multiple are provided instead of using the first.
TaskRegistry.add(
"natural_questions_open_randanswer",
source=seqio.TfdsDataSource(
tfds_name="natural_questions_open:1.0.0",
splits={
"train": f"train[:{NQO_TRAIN_SPLIT_END}]",
"validation": f"train[{NQO_TRAIN_SPLIT_END}:]",
"test": "validation"
}),
preprocessors=[
preprocessors.natural_questions_open,
preprocessors.sample_answer,
seqio.preprocessors.tokenize,
# Do not cache - ensures we are sampling different answers.
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=t5_postprocessors.qa,
metric_fns=[t5_metrics.squad])
# This task uses the full train split and reports metrics on the NQ validation split
# (which is the test set in the open domain setting).
TaskRegistry.add(
"natural_questions_open_test",
source=seqio.TfdsDataSource(tfds_name="natural_questions_open:1.0.0"),
preprocessors=[
preprocessors.natural_questions_open,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=t5_postprocessors.qa,
metric_fns=[t5_metrics.squad])
# ============================ Web Questions ===================================
# This task uses 10% of the train split for validation.
TaskRegistry.add(
"web_questions_open",
source=seqio.TfdsDataSource(
tfds_name="web_questions:1.0.0",
splits={
# ~90%, matches numbers used by ORQA
"train": f"train[:{WQ_TRAIN_SPLIT_END}]",
# ~10%, matches numbers used by ORQA
"validation": f"train[{WQ_TRAIN_SPLIT_END}:]",
"test": "test"
}),
preprocessors=[
preprocessors.web_questions_open,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=t5_postprocessors.qa,
metric_fns=[t5_metrics.squad],
)
# This task trains on the full train split.
TaskRegistry.add(
"web_questions_open_test",
source=seqio.TfdsDataSource(
tfds_name="web_questions:1.0.0",
splits={
"train": "train",
"validation": "test",
}),
preprocessors=[
preprocessors.web_questions_open,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=t5_postprocessors.qa,
metric_fns=[t5_metrics.squad],
)
# =============================== Trivia QA ====================================
TaskRegistry.add(
"trivia_qa_open",
source=seqio.TfdsDataSource(
tfds_name="trivia_qa/unfiltered.nocontext:1.1.0",
splits={
# ~90%, matches numbers used by ORQA
"train": f"train[:{TQA_TRAIN_SPLIT_END}]",
# ~10%, matches numbers used by ORQA
"validation": f"train[{TQA_TRAIN_SPLIT_END}:]",
"test": "validation"
}),
preprocessors=[
preprocessors.trivia_qa_open,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=t5_postprocessors.qa,
metric_fns=[t5_metrics.trivia_qa])
# This task trains on combined train and validation splits.
TaskRegistry.add(
"trivia_qa_open_test",
source=seqio.TfdsDataSource(
tfds_name="trivia_qa/unfiltered.nocontext:1.1.0",
splits={
"train": "train+validation",
"test": "test"
}),
preprocessors=[
preprocessors.trivia_qa_open,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=t5_postprocessors.qa,
metric_fns=[t5_metrics.trivia_qa])
# ============================= CBQA Mixtures ==================================
# This mixture is to be used for hyperparameter tuning. Training happens on
# validation sets (if available) or subsplits of the train set. Evaluation
# happens on the validation (or heldout portion of the train) split.
MixtureRegistry.add(
"closed_book_qa",
[
"trivia_qa_open",
"natural_questions_open",
"web_questions_open"
],
default_rate=seqio.mixing_rate_num_examples
)
# This mixture is to be used at test time. Training happens on the combined
# train and validation splits and evaluation happens on the test split.
MixtureRegistry.add(
"closed_book_qa_test",
[
"trivia_qa_open_test",
"natural_questions_open_test",
"web_questions_open_test"
],
default_rate=seqio.mixing_rate_num_examples
)
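# A usage sketch for the mixtures (illustrative only; sequence lengths and split
# are placeholders): mixtures are fetched through the same seqio entry point as
# tasks, with examples sampled across member tasks per mixing_rate_num_examples.
#
#   import seqio
#   mixture = seqio.get_mixture_or_task("closed_book_qa")
#   ds = mixture.get_dataset(
#       sequence_length={"inputs": 512, "targets": 128}, split="train")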
# ========================= Salient Span Masking ===============================
TaskRegistry.add(
"salient_span_masked_wikipedia",
source=seqio.TfdsDataSource(
tfds_name="salient_span_wikipedia/sentences:1.0.0"),
preprocessors=[
preprocessors.mask_salient_spans,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
metric_fns=[])
TaskRegistry.add(
"span_corrupted_wikipedia",
source=seqio.TfdsDataSource(
tfds_name="salient_span_wikipedia/sentences:1.0.0"),
preprocessors=[
functools.partial(
t5_preprocessors.rekey, key_map={
"inputs": None,
"targets": "text"
}),
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
t5_preprocessors.span_corruption,
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
metric_fns=[])
|
planetlabs/notebooks
|
jupyter-notebooks/landsat-ps-comparison/visual.py
|
import matplotlib.pyplot as plt
import numpy as np
def plot_image(masked_bands, title=None, figsize=(10, 10)):
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(1, 1, 1)
show(ax, masked_bands)
if title:
ax.set_title(title)
ax.set_axis_off()
def show(axis, bands, alpha=True):
"""Show bands as image with option of converting mask to alpha.
Alters axis in place.
"""
# Single band (2d array)
if bands.ndim == 2:
bands = [bands]
elif len(bands) == 3:
bands = [b for b in bands.copy()] # turn into list
else:
raise ValueError("Can only plot 1 or 3 band arrays, not an array with shape: {}".format(bands.shape))
    # Capture the nodata mask before scaling: _scale_bands returns plain
    # (filled) arrays that no longer carry a mask attribute.
    mask = np.ma.getmaskarray(bands[0])
    bands = _scale_bands(bands)
    if alpha and len(bands) == 3:
        bands.append(_mask_to_alpha(mask))
if len(bands) >= 3:
dbands = np.dstack(bands)
else:
dbands = bands[0]
return axis.imshow(dbands)
def _mask_bands(bands, mask):
    return [np.ma.array(b, mask=mask) for b in bands]
def _scale_bands(bands):
def _percentile(bands, percentile):
all_pixels = np.concatenate([b.compressed() for b in bands])
return np.percentile(all_pixels, percentile)
old_min = _percentile(bands, 2)
old_max = _percentile(bands, 98)
new_min = 0
new_max = 1
def _linear_scale(ndarray, old_min, old_max, new_min, new_max):
# https://en.wikipedia.org/wiki/Normalization_(image_processing)
return (ndarray - old_min) * (new_max - new_min) / (old_max - old_min) + new_min
    scaled = [np.clip(_linear_scale(b.astype(float),
old_min, old_max,
new_min, new_max),
new_min, new_max)
for b in bands]
filled = [b.filled(fill_value=0) for b in scaled]
return filled
def _mask_to_alpha(mask):
alpha = np.zeros_like(np.atleast_3d(mask))
alpha[~mask] = 1
return alpha
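# A minimal, self-contained demo of the helpers above (a sketch, assuming only
# numpy and matplotlib are available; the random data stands in for real imagery).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    data = rng.random((3, 64, 64))            # three bands of fake reflectance
    nodata = np.zeros(data.shape, dtype=bool)
    nodata[:, :16, :16] = True                # pretend one corner is nodata
    masked = np.ma.masked_array(data, mask=nodata)
    plot_image(masked, title="Synthetic 3-band image", figsize=(6, 6))
    plt.show()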
|
zhaopu7/models
|
text_classification/infer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import gzip
import paddle.v2 as paddle
import network_conf
import reader
from utils import *
def infer(topology, data_dir, model_path, word_dict_path, label_dict_path,
batch_size):
def _infer_a_batch(inferer, test_batch, ids_2_word, ids_2_label):
probs = inferer.infer(input=test_batch, field=['value'])
assert len(probs) == len(test_batch)
for word_ids, prob in zip(test_batch, probs):
word_text = " ".join([ids_2_word[id] for id in word_ids[0]])
print("%s\t%s\t%s" % (ids_2_label[prob.argmax()],
" ".join(["{:0.4f}".format(p)
for p in prob]), word_text))
logger.info('begin to predict...')
use_default_data = (data_dir is None)
if use_default_data:
word_dict = paddle.dataset.imdb.word_dict()
word_reverse_dict = dict((value, key)
for key, value in word_dict.iteritems())
label_reverse_dict = {0: "positive", 1: "negative"}
test_reader = paddle.dataset.imdb.test(word_dict)
else:
assert os.path.exists(
word_dict_path), 'the word dictionary file does not exist'
assert os.path.exists(
label_dict_path), 'the label dictionary file does not exist'
word_dict = load_dict(word_dict_path)
word_reverse_dict = load_reverse_dict(word_dict_path)
label_reverse_dict = load_reverse_dict(label_dict_path)
test_reader = reader.test_reader(data_dir, word_dict)()
dict_dim = len(word_dict)
class_num = len(label_reverse_dict)
prob_layer = topology(dict_dim, class_num, is_infer=True)
# initialize PaddlePaddle
paddle.init(use_gpu=False, trainer_count=1)
# load the trained models
parameters = paddle.parameters.Parameters.from_tar(
gzip.open(model_path, 'r'))
inferer = paddle.inference.Inference(
output_layer=prob_layer, parameters=parameters)
test_batch = []
for idx, item in enumerate(test_reader):
test_batch.append([item[0]])
if len(test_batch) == batch_size:
_infer_a_batch(inferer, test_batch, word_reverse_dict,
label_reverse_dict)
test_batch = []
if len(test_batch):
_infer_a_batch(inferer, test_batch, word_reverse_dict,
label_reverse_dict)
test_batch = []
if __name__ == '__main__':
model_path = 'dnn_params_pass_00000.tar.gz'
assert os.path.exists(model_path), "the trained model does not exist."
nn_type = 'dnn'
test_dir = None
word_dict = None
label_dict = None
if nn_type == 'dnn':
topology = network_conf.fc_net
elif nn_type == 'cnn':
topology = network_conf.convolution_net
infer(
topology=topology,
data_dir=test_dir,
word_dict_path=word_dict,
label_dict_path=label_dict,
model_path=model_path,
batch_size=10)
|
sbunatyan/tavrida
|
tavrida/proxies.py
|
#!/usr/bin/env python
# Copyright (c) 2015 Sergey Bunatyan <sergey.bunatyan@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import entry_point
import messages
class RCPCallProxy(object):
"""
Proxy class for method call
"""
def __init__(self, postprocessor, service_name, method_name, source,
context, correlation_id, headers, kwargs):
super(RCPCallProxy, self).__init__()
self._postprocessor = postprocessor
self._service_name = service_name
self._method_name = method_name
self._source = source
self._context = context
self._correlation_id = correlation_id
self._headers = copy.copy(headers) or {}
self._kwargs = kwargs
def _make_request(self, context="", correlation_id="", reply_to="",
source=""):
if not source:
source = self._source
if not context:
context = self._context
if not correlation_id:
correlation_id = self._correlation_id
if not reply_to and not isinstance(reply_to, entry_point.EntryPoint):
reply_to = source.service
payload = self._kwargs
dst = entry_point.Destination(self._service_name, self._method_name)
headers = {
"correlation_id": correlation_id,
"reply_to": str(reply_to),
"source": str(source),
"destination": str(dst)
}
request_headers = self._headers.copy()
request_headers.update(headers)
request = messages.Request(request_headers, context, payload)
return request
def call(self, correlation_id="", context="", reply_to="", source=""):
"""
Executes
:param reply_to:
:param source:
:return:
"""
request = self._make_request(context=context,
correlation_id=correlation_id,
reply_to=reply_to,
source=source)
self._postprocessor.process(request)
def cast(self, correlation_id="", context="", source=""):
request = self._make_request(context=context,
correlation_id=correlation_id,
reply_to=entry_point.NullEntryPoint(),
source=source)
self._postprocessor.process(request)
def transfer(self, request, context="", reply_to="", source=""):
if request.context:
context = context or {}
context.update(request.context)
request = self._make_request(correlation_id=request.correlation_id,
reply_to=reply_to,
context=context,
source=source)
self._postprocessor.process(request)
class RPCMethodProxy(object):
def __init__(self, postprocessor, service_name, method_name, source,
context="", correlation_id="", headers=""):
self._postprocessor = postprocessor
self._service_name = service_name
self._method_name = method_name
self._source = source
self._context = context
self._correlation_id = correlation_id
self._headers = copy.copy(headers)
def __call__(self, **kwargs):
self._kwargs = kwargs
return RCPCallProxy(self._postprocessor, self._service_name,
self._method_name, self._source, self._context,
self._correlation_id, self._headers, kwargs)
class RPCServiceProxy(object):
def __init__(self, postprocessor, name, source, context=None,
correlation_id="", headers=None):
self._postprocessor = postprocessor
self._name = name
self._source = source
self._context = context
self._correlation_id = correlation_id
self._headers = copy.copy(headers)
def __getattr__(self, item):
return RPCMethodProxy(self._postprocessor, self._name, item,
self._source, self._context,
self._correlation_id, self._headers)
class RPCProxy(object):
def __init__(self, postprocessor, source, context=None,
correlation_id="", headers=None):
self._postprocessor = postprocessor
self._source = source
self._context = context
self._correlation_id = correlation_id
self._headers = copy.copy(headers) or {}
def _get_discovery_service(self):
return self._postprocessor.discovery_service
def __getattr__(self, item):
disc = self._get_discovery_service()
disc.get_remote(item)
return RPCServiceProxy(self._postprocessor, item, self._source,
self._context, self._correlation_id)
def add_headers(self, headers):
self._headers = copy.copy(headers)
def publish(self, correlation_id="", **kwargs):
headers = {
"correlation_id": correlation_id or self._correlation_id,
"source": str(self._source)
}
notification_headers = self._headers.copy()
notification_headers.update(headers)
publication = messages.Notification(notification_headers,
self._context, kwargs)
self._postprocessor.process(publication)
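# A usage sketch (hypothetical names; a real `postprocessor` and `source`
# EntryPoint come from the tavrida service/dispatcher setup, and the target
# service must be known to the discovery service):
#
#   proxy = RPCProxy(postprocessor, source)
#   proxy.some_service.some_method(param=1).call()   # RPC call, expects a reply
#   proxy.some_service.some_method(param=1).cast()   # fire-and-forget
#   proxy.publish(event="something_happened")        # notification from `source`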
|
scorelab/DroneSym
|
dronesym-python/flask-api/src/main.py
|
"""
Main entry point for the Flask API. The API will provide
"""
# an interface to communicate with Dronekit instances
from flask import jsonify, Flask
from flask import abort, request
from flask import make_response
import dronepool
import threadrunner
app = Flask(__name__)
api_base_url = '/dronesym/api/flask'
# response.headers['X-Content-Type-Options'] = 'nosniff'
# response.headers['X-Frame-Options'] = 'SAMEORIGIN'
@app.after_request
def apply_caching(response):
response.headers["X-Frame-Options"] = "SAMEORIGIN"
response.headers['X-Content-Type-Options'] = 'nosniff'
return response
@app.errorhandler(404)
def send_not_found(error):
return make_response(jsonify({"message": "Resource not found"}), 404)
@app.errorhandler(400)
def send_bad_request(error):
return make_response(jsonify({"message": "Bad request"}), 400)
@app.route(api_base_url + '/spawn', methods=['POST'])
def create_new_drone():
    # This route creates a new Dronekit SITL instance in the drone pool.
    # The initial position needs to be sent along with the request as JSON.
global q
    if not request.json or 'location' not in request.json or 'droneId' not in request.json:
abort(400)
print(request.json)
home = request.json['location']
drone_id = request.json['droneId']
q.put((dronepool.create_new_drone, {"db_key": drone_id, "home": home}))
return jsonify({"status": "OK", "message": "Created new drone"})
@app.route(api_base_url + '/remove/<string:drone_id>', methods=['POST'])
def remove_drone(drone_id):
global q
q.put((dronepool.remove_drone, {"drone_id": drone_id}))
return jsonify({"status": "OK", "message": "Removed drone"})
@app.route(api_base_url + '/<string:drone_id>/takeoff', methods=['POST'])
def send_takeoff(drone_id):
# This route issues a takeoff command to a specific drone
global q
    if request.json and request.json.get('waypoints'):
q.put(
(dronepool.takeoff_drone, {
"drone_id": drone_id, "waypoints": request.json['waypoints']}))
else:
q.put((dronepool.takeoff_drone, {"drone_id": drone_id}))
return jsonify({"status": "taking_off", "drone_id": drone_id})
@app.route(api_base_url + '/<string:drone_id>/land', methods=['POST'])
def send_land(drone_id):
global q
q.put((dronepool.land_drone, {"drone_id": drone_id}))
return jsonify({"status": "landing", "drone_id": drone_id})
@app.route(api_base_url + '/<string:drone_id>/resume', methods=['POST'])
def send_resume(drone_id):
global q
q.put((dronepool.resume_flight, {"drone_id": drone_id}))
return jsonify({"status": "resuming", "drone_id": drone_id})
if __name__ == '__main__':
threadrunner.initialize()
q = threadrunner.q
dronepool.initialize()
app.run(debug=True, use_reloader=False)
|
dkluffy/dkluff-code
|
code/f5tools/configparse_f5.py
|
#!/usr/bin/env python
# parse a brace-delimited F5 config into CSV
import sys
import re
REG_IP=re.compile(r'[1-9][0-9]*(\.[0-9]+){3}')
REG_INDENT=re.compile('^[ ]*')
EOF='{'
FOE='}'
""" config for read argv of subcmd"""
BLOCK_SUBCMD = (
"members",
"origins",
"rules",
)
NONBLOCK_SUBCMD = (
"pool",
"destination",
"originating-address",
"translation-address",
"translation",
)
ALL_SUBCMD = NONBLOCK_SUBCMD + BLOCK_SUBCMD
PREFIX_POOL = "ltm pool "
PREFIX_VSERVER = "ltm virtual "
def ldepth(l,r):
d=r.search(l).group(0)
return (len(d),d)
def readconf(fdir):
f=open(fdir)
b=[]
for l in f:
b.append(l)
f.close()
return b
"""Pop a block by indent"""
def pop_block(f):
b=[ ]
b.append(f[0])
md=ldepth(f[0],r=REG_INDENT)
flag=(' '*md[0]) + FOE
i=1
l=len(f)
while i<l:
b.append(f[i])
i+=1
if f[i-1].startswith(flag):
break
return b,i
def block_to_dict(block):
r_eof = '[^' + EOF + ']*'
rdict={}
k=re.search(r_eof,block[0]).group(0)
rdict[k]=block[1:-1]
return rdict
def nonblock_to_dict(x):
r=x.split()
    length_r=len(r)
    if length_r == 2:
return { strip_key(r[0]):r[1] }
return { strip_key(x):x }
strip_key=lambda x: x.strip().strip(EOF).strip()
def readblock(block):
if block[0].strip().endswith(EOF):
return pop_block(block)
return block[0],1
"""
convert a block of :
"string"
or [sub1,sub2,]
to dict
"""
def parseblock(block):
"""return if a string """
if isinstance(block,str):
return nonblock_to_dict(block)
bdict={}
    length_block = len(block)
    if length_block == 0:
return bdict
b,i = readblock(block)
if isinstance(b,list):
bdict.update({ strip_key(b[0]):parseblock(b[1:-1]) })
else:
bdict.update(nonblock_to_dict(b))
bdict.update(parseblock(block[i:]))
return bdict
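# A small illustration of the parsing helpers (hypothetical input): a fragment
# such as
#
#   ltm pool /Common/web_pool {
#       members {
#           /Common/10.0.0.1:80 { }
#       }
#   }
#
# is consumed line-by-line by readblock()/parseblock() into a nested dict,
# roughly {'ltm pool /Common/web_pool': {'members': {...}}}.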
"""read argv"""
keyformat = lambda k : k.split().pop()
def read_dict_byprefix(dic,n):
vdict={}
for k in dic:
if k.startswith(n):
subdict={}
for kk in dic[k]:
if kk in ALL_SUBCMD:
subdict.update({ kk: dic[k][kk] })
vdict.update( { keyformat(k): subdict } )
return vdict
HEADLINE="VserverName,PublicIP,InternalIP,PoolName"
REG_DEL_PREFIX=re.compile("/Common/")
def printcsv_byvserver(lv,lp):
"""
ltmvserver,destination,member,pool
"""
print HEADLINE
output="%s,%s,%s,%s"
for k in lv:
destination=lv[k]["destination"]
pool=lv[k]["pool"]
members=lp[pool]["members"].keys()
for m in members:
r = (output) % (\
k,destination,m,pool
)
print REG_DEL_PREFIX.sub("",r)
if __name__ == "__main__":
conf = readconf(sys.argv[1])
conf_dict = {}
"""read conf to dict"""
while len(conf) > 0:
b,i = readblock(conf)
conf_dict.update(parseblock(b))
conf=conf[i:]
ltm_pool = read_dict_byprefix(conf_dict,PREFIX_POOL)
ltm_vserver = read_dict_byprefix(conf_dict,PREFIX_VSERVER)
#printcsv_byvserver(ltm_vserver,ltm_pool)
for k in ltm_vserver:
try:
if not "/Common/slowloris_dos_mitigate" in [ i for i in ltm_vserver[k]['rules']]:
print k
continue
except Exception as e:
print k
continue
|
hajicj/MUSCIMarker
|
MUSCIMarker/mlclass_selection.py
|
"""This module implements a class that..."""
from __future__ import print_function, unicode_literals
from builtins import range
import logging
from kivy.app import App
from kivy.core.window import Window
from kivy.lang import Builder
from kivy.properties import StringProperty
from kivy.uix.popup import Popup
from MUSCIMarker.utils import keypress_to_dispatch_key
__version__ = "0.0.1"
__author__ = "Jan Hajic jr."
mlclass_selection_dialog_kv = '''
<MLClassSelectionDialog@Popup>
size_hint: None, None
size: app.root.size[0] * 0.5, app.root.size[1] * 0.2
    pos_hint: {'center_x': 0.5, 'center_y': 0.5}
title: 'Select MLClass by typing its name.'
# on_text: current_name_label.text = self.get_current_name()
GridLayout:
id: grid
cols: 1
padding: '24dp'
TextInput:
id: text_input
size_hint_y: None
height: dp(24)
multiline: False
focus: True
text: ''
on_text: root.text = self.text
BoxLayout:
size_hint_y: None
height: dp(24)
Button:
id: cancel
text: root.cancel_text
on_release: root.cancel()
Button:
id: ok
text: root.ok_text
on_release: root.ok()
Label:
id: available_names_label
size_hint_y: None
height: dp(24)
text: ''
'''
Builder.load_string(mlclass_selection_dialog_kv)
class MLClassSelectionDialog(Popup):
"""The MLClassSelectionDialog class allows for keyboard-based
selection of the current MLClass."""
text = StringProperty('')
ok_text = StringProperty('OK')
cancel_text = StringProperty('Cancel')
__events__ = ('on_ok', 'on_cancel')
def __init__(self, *args, **kwargs):
super(MLClassSelectionDialog, self).__init__(*args, **kwargs)
self.create_bindings()
def ok(self):
self.dispatch('on_ok')
self.dismiss()
def cancel(self):
self.dispatch('on_cancel')
self.dismiss()
def on_ok(self):
#if len(self.available_clsnames) == 0:
# return
name = self.get_current_name()
if name is None:
return
if len(self.available_clsnames) > 1:
logging.info('MLClassSelectionDialog:'
' More than one name possible: {0},'
' picking: {1}.'
''.format(self.available_clsnames, name))
App.get_running_app().currently_selected_mlclass_name = name
def on_cancel(self):
self.dismiss()
def dismiss(self, *largs, **kwargs):
self.remove_bindings()
super(MLClassSelectionDialog, self).dismiss()
def on_text(self, instance, pos):
#self.ids['current_name_label'].text = self.get_current_name()
n = self.get_current_name()
if n is None:
pass
#self.ids['text_input'].suggestion_text = ''
elif len(pos) >= len(n):
pass
# self.ids['text_input'].suggestion_text =
else:
self.ids['text_input'].suggestion_text = self.get_current_name()[len(self.text):]
names = self.currently_available_names
if len(names) > 5:
names = names[:5] + ['...']
name_str = ', '.join(names)
self.ids['available_names_label'].text = name_str
##########################################################################
# Making it possible to operate the popup with Esc to cancel,
# Enter to confirm.
def create_bindings(self):
Window.bind(on_key_down=self.on_key_down)
Window.bind(on_key_up=self.on_key_up)
def remove_bindings(self):
Window.unbind(on_key_down=self.on_key_down)
Window.unbind(on_key_up=self.on_key_up)
def on_key_down(self, window, key, scancode, codepoint, modifier):
# Should control enter to confirm/escape to cancel
dispatch_key = keypress_to_dispatch_key(key, scancode, codepoint, modifier)
logging.info('MLClassSelectionDialog: Handling keypress: {0}'.format(dispatch_key))
is_handled = self.handle_dispatch_key(dispatch_key)
# Don't let the event propagate through the dialog.
return True
def handle_dispatch_key(self, dispatch_key):
"""Does the "heavy lifting" in keyboard controls: responds to a dispatch key.
        Decoupling this into a separate method facilitates giving commands to
the ListView programmatically, not just through user input,
and this way makes automation easier.
:param dispatch_key: A string of the form e.g. ``109+alt,shift``: the ``key``
number, ``+``, and comma-separated modifiers.
:returns: True if the dispatch key got handled, False if there is
no response defined for the given dispatch key.
"""
if dispatch_key == '13': # Enter
logging.info('Confirming MLClassSelectionDialog!')
self.ok()
elif dispatch_key == '27': # Escape
logging.info('Cancelling MLClassSelectionDialog!')
self.cancel()
#elif dispatch_key == '9': # Tab
# pass
# Special keys are handled separately in the TextInput, so
# they would get caught by the "return True". We need to call
# their operations explicitly.
elif dispatch_key == '8': # Backspace
self.ids['text_input'].do_backspace()
elif dispatch_key == '9': # Tab
# Process common prefix
lcp = self._longest_common_prefix
infix = lcp[len(self.text):]
logging.info('MLClassSelectionDialog: Found LCP {0}, inf {1}'
''.format(lcp, infix))
self.ids['text_input'].text = self.text + infix
else:
return False
return True
def on_key_up(self, window, key, scancode, *args, **kwargs):
return False
##########################################################################
# The name selection mechanism
def clsnames_with_prefix(self, prefix):
return [clsname for clsname in self.available_clsnames
if clsname.startswith(prefix)]
@property
def available_clsnames(self):
mlclasses_by_name = App.get_running_app().annot_model.mlclasses_by_name
clsnames = list(mlclasses_by_name.keys())
sorted_clsnames = sorted(clsnames, key=lambda n: mlclasses_by_name[n].clsid)
return sorted_clsnames
@property
def currently_available_names(self):
return self.clsnames_with_prefix(self.text)
@property
def _longest_common_prefix(self):
names = self.currently_available_names
if len(names) == 0:
return ''
if len(names) == 1:
return names[0]
pref = ''
shortest_name_length = min([len(n) for n in names])
for i in range(shortest_name_length):
pref = names[0][:i+1]
for n in names[1:]:
if n[:i+1] != pref: # Unequal at i-th letter
return pref[:-1]
# Shortest word is at the same time the prefix
return names[0][:shortest_name_length]
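    # Illustration (hypothetical class names): with currently available names
    # ['noteheadFull', 'noteheadEmpty'], the longest common prefix is 'notehead',
    # which is what Tab-completion fills into the text input.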
def get_current_name(self):
"""This is the "clever" part of the name selection mechanism.
Right now, it just selects the first available name."""
names = self.currently_available_names
if len(names) == 0:
return None
output = names[0]
# Exact match has preference
for n in names:
if n == self.text:
output = n
return output
##########################################################################
# Feedback mechanism
|
googleapis/python-dialogflow
|
samples/generated_samples/dialogflow_v2_generated_entity_types_batch_create_entities_sync.py
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for BatchCreateEntities
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_v2_generated_EntityTypes_BatchCreateEntities_sync]
from google.cloud import dialogflow_v2
def sample_batch_create_entities():
# Create a client
client = dialogflow_v2.EntityTypesClient()
# Initialize request argument(s)
entities = dialogflow_v2.Entity()
entities.value = "value_value"
entities.synonyms = ['synonyms_value_1', 'synonyms_value_2']
request = dialogflow_v2.BatchCreateEntitiesRequest(
parent="parent_value",
entities=entities,
)
# Make the request
operation = client.batch_create_entities(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END dialogflow_v2_generated_EntityTypes_BatchCreateEntities_sync]
|
suensummit/erjsTesting
|
testServer.py
|
import SimpleHTTPServer
import SocketServer
import sys
PORT = 1548
class MyHTTPHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
log_file = open('logfile.txt', 'w')
def log_message(self, format, *args):
self.log_file.write("%s - - [%s] %s\n" %
(self.client_address[0],
self.log_date_time_string(),
format%args))
Handler = MyHTTPHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
print "serving at port", PORT
httpd.serve_forever()
|
wwitzel3/awx
|
awx/api/urls/job.py
|
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.
from django.conf.urls import url
from awx.api.views import (
JobList,
JobDetail,
JobStart,
JobCancel,
JobRelaunch,
JobCreateSchedule,
JobJobHostSummariesList,
JobJobEventsList,
JobActivityStreamList,
JobStdout,
JobNotificationsList,
JobLabelList,
JobHostSummaryDetail,
)
urls = [
url(r'^$', JobList.as_view(), name='job_list'),
url(r'^(?P<pk>[0-9]+)/$', JobDetail.as_view(), name='job_detail'),
url(r'^(?P<pk>[0-9]+)/start/$', JobStart.as_view(), name='job_start'), # Todo: Remove In 3.3
url(r'^(?P<pk>[0-9]+)/cancel/$', JobCancel.as_view(), name='job_cancel'),
url(r'^(?P<pk>[0-9]+)/relaunch/$', JobRelaunch.as_view(), name='job_relaunch'),
url(r'^(?P<pk>[0-9]+)/create_schedule/$', JobCreateSchedule.as_view(), name='job_create_schedule'),
url(r'^(?P<pk>[0-9]+)/job_host_summaries/$', JobJobHostSummariesList.as_view(), name='job_job_host_summaries_list'),
url(r'^(?P<pk>[0-9]+)/job_events/$', JobJobEventsList.as_view(), name='job_job_events_list'),
url(r'^(?P<pk>[0-9]+)/activity_stream/$', JobActivityStreamList.as_view(), name='job_activity_stream_list'),
url(r'^(?P<pk>[0-9]+)/stdout/$', JobStdout.as_view(), name='job_stdout'),
url(r'^(?P<pk>[0-9]+)/notifications/$', JobNotificationsList.as_view(), name='job_notifications_list'),
url(r'^(?P<pk>[0-9]+)/labels/$', JobLabelList.as_view(), name='job_label_list'),
url(r'^(?P<pk>[0-9]+)/$', JobHostSummaryDetail.as_view(), name='job_host_summary_detail'),
]
__all__ = ['urls']
|
google/citest
|
tests/json_predicate/path_transforms_test.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
"""Tests the citest.json_predicate.path_transforms module."""
import unittest
from citest.base import ExecutionContext
from citest.json_predicate import FieldDifference
class PathTransformTest(unittest.TestCase):
def assertEqual(self, a, b, msg=''):
if not msg:
msg = 'EXPECTED\n{0!r}\nGOT\n{1!r}'.format(a, b)
super(PathTransformTest, self).assertEqual(a, b, msg)
def test_field_difference_eq(self):
orig = FieldDifference('X', 'Y')
same = FieldDifference('X', 'Y')
diff = FieldDifference('Y', 'X')
self.assertEqual(orig, same)
self.assertNotEqual(orig, diff)
def test_field_difference(self):
context = ExecutionContext()
source = {'a': 7, 'b': 4}
xform = FieldDifference('a', 'b')
self.assertEqual(3, xform(context, source))
def test_field_difference_indirect(self):
context = ExecutionContext()
source = {'a': 7, 'b': 4}
xform = FieldDifference(lambda x: 'b', lambda x: 'a')
self.assertEqual(-3, xform(context, source))
if __name__ == '__main__':
unittest.main()
|
Danceiny/HackGirlfriend
|
Platform/ZuiwanCenter/ZuiwanDBManager.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import json
from Libraries.Singleton.Singleton import Singleton
import Libraries.ErrorDefine as ED
from Libraries.DBModel import *
from Platform.ConfigCenter.ConfigCenter import ConfigCenter
from Platform.LogCenter.LogCenter import LogCenter
from Libraries.Utils import *
@Singleton
class ZuiwanDBManager():
def __init__(self):
self.logger = LogCenter.instance().get_logger('ZuiwanCenterLog')
self.db_model = DBModelFactory.instance().get_db_model()
# self.db_model_read = DBModelFactory.instance().get_db_model(readonly=True)
# self.table_name_user = self.conf.get('RDS', 'table_name_zuiwan_user')
# self.table_name_user_count = self.conf.getint('RDS', 'table_name_zuiwan_user_count')
# self.table_name_user_keys = json.loads(self.conf.get('RDS', 'table_name_zuiwan_user_keys'))
self.table_name_user = "zuiwan_user"
self.table_name_user_count = 1
self.table_name_user_keys = ('zuser_id', 'nick_name', 'email', 'create_time', 'avatar_url', 'credits','role','psw','real_name','sex')
self.table_name_meeting = "zuiwan_meeting"
self.table_name_meeting_count = 1
self.table_name_meeting_keys = ('meeting_id','topic','start_time','duration','des','people')
        # School-related information is stored in a separate table, linked by zuser_id
############## user part #################
def get_users_list(self, data):
limit_count = int(data.get('count', 10))
select_sql = DBModel.sql_select(self.table_name_user,
keys=data.get('keys',['zuser_id', 'nick_name', 'email', 'create_time', 'avatar_url', 'credits','role']),
limit='0,%d' % limit_count, order=[{'key': 'credits', 'desc': True}])
records = self.db_model.GetList(select_sql)
return records
def get_user_detail(self, data):
zuser_id = str(data['zuser_id'])
where_condition = DBModel.sql_and({"zuser_id": zuser_id})
sql = DBModel.sql_select(self.table_name_user, where=where_condition)
records = self.db_model.GetOne(sql)
return records
def find_user(self, data):
        '''
        Query zuiwan_user info (without content) by nick_name or zuser_id.
        :param data: t = queryType, c = queryContent
        :return:
        '''
limit_count = int(data.get('count', 3))
select_sql = DBModel.sql_select(self.table_name_user,
keys=self.table_name_user_keys,
where="`%s` like '%%%s%%'" % (data.get('t',''),data.get('c','')),
limit='0,%d' % limit_count, order=[{'key': 'create_time', 'desc': True}])
records = self.db_model.GetList(select_sql)
return records
def add_user(self, data):
'''
:param data {}
:return:
'''
result = {'code': ED.no_err}
if not 'zuser_id' in data or len(data.get('zuser_id','')) <= 0:
data['zuser_id'] = get_now_time_str_ms().replace('.', '') # like '1497257116332'
if not 'create_time' in data or data.get('create_time',0) == 0:
data['create_time'] = get_now_time_int()
else:
return {'code':ED.err_params}
sql = self.db_model.sql_insert(table=self.table_name_user, data=data, keys=self.table_name_user_keys)
flag = self.db_model.execute(sql)
if flag == None or flag.rowcount <= 0:
result['code'] = ED.unknown_err
return result
def delete_user(self,data):
result = {'code':ED.no_err}
try:
sql_delete_user = ""
if 'zuser_id' in data:
sql_delete_user = DBModel.sql_delete(self.table_name_user,where=DBModel.sql_and({'zuser_id':data['zuser_id']}))
elif 'nick_name' in data:
sql_delete_user = DBModel.sql_delete(self.table_name_user,where=DBModel.sql_and({'nick_name':data['nick_name']}))
else:
return {'code':ED.err_params}
ret_del = self.db_model.execute(sql_delete_user)
if ret_del == None:
result['code'] = ED.err_sys
except Exception,e:
self.logger.error("ZuiwanCenter delete user error, sql=[%s],msg=[%s]" % (repr(sql_delete_user),repr(e)))
result['code'] = ED.err_sys
return result
def update_user(self,data,params=None):
result = {'code':ED.no_err}
if params == None or len(params) == 0:
params = self.table_name_user_keys
try:
sql_update_user = DBModel.sql_update(self.table_name_user,data,where=DBModel.sql_and({'zuser_id':data['zuser_id']}),keys=params)
flag = self.db_model.execute(sql_update_user)
if flag == None:
result['code'] = self.add_user(data)['code']
except Exception,e:
self.logger.error("ZuiwanCenter update user error. sql=[%s],msg=[%s]" % (repr(sql_update_user),repr(e)))
result['code'] = ED.err_sys
return result
############ meeting part ################
def get_meetings_list(self,data):
pass
def get_meeting_info(self,data):
pass
def add_meeting(self,data):
result = {'code': ED.no_err}
if not 'meeting_id' in data or len(data.get('meeting_id') or '') <= 0:
data['meeting_id'] = get_now_time_str_ms().replace('.', '') # like '1497257116332'
sql = self.db_model.sql_insert(table=self.table_name_meeting, data=data, keys=self.table_name_meeting_keys)
flag = self.db_model.execute(sql)
if flag == None or flag.rowcount <= 0:
result['code'] = ED.unknown_err
return result
def delete_meeting(self,data):
result = {'code':ED.no_err}
if 'meeting_id' in data:
sql_delete_meeting = DBModel.sql_delete(self.table_name_meeting,where=DBModel.sql_and({'meeting_id':data['meeting_id']}))
else:
return {'code':ED.err_params}
try:
ret_del = self.db_model.execute(sql_delete_meeting)
if ret_del == None:
result['code'] = ED.err_sys
except Exception,e:
self.logger.error("ZuiwanCenter delete meeting error, sql=[%s],msg=[%s]" % (repr(sql_delete_meeting),repr(e)))
result['code'] = ED.err_sys
return result
def update_meeting(self,data,params=None):
result = {'code':ED.no_err}
if params == None or len(params) == 0:
            params = self.table_name_meeting_keys
try:
            sql_update_meeting = DBModel.sql_update(self.table_name_meeting,data,where=DBModel.sql_and({'meeting_id':data['meeting_id']}),keys=params)
flag = self.db_model.execute(sql_update_meeting)
if flag == None:
                result['code'] = self.add_meeting(data)['code']
except Exception,e:
self.logger.error("ZuiwanCenter update meeting error. sql=[%s],msg=[%s]" % (repr(sql_update_meeting),repr(e)))
result['code'] = ED.err_sys
return result
def get_next_meeting_leave_list(self,data):
pass
def get_last_meeting_leave_list(self,data):
pass
def get_leave_list(self,data):
result = {'code':ED.no_err}
pass
|
saltstack/salt
|
tests/integration/utils/test_win_runas.py
|
import inspect
import logging
import os
import socket
import subprocess
import sys
import textwrap
import threading
import time
import traceback
import salt.utils.files
import salt.utils.win_runas
import yaml
from tests.support.case import ModuleCase
from tests.support.helpers import with_system_user
from tests.support.mock import Mock
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
try:
import win32service
import win32serviceutil
import win32event
import servicemanager
import win32api
CODE_DIR = win32api.GetLongPathName(RUNTIME_VARS.CODE_DIR)
HAS_WIN32 = True
except ImportError:
# Mock win32serviceutil object to avoid
# a stacktrace in the _ServiceManager class
win32serviceutil = Mock()
HAS_WIN32 = False
logger = logging.getLogger(__name__)
PASSWORD = "P@ssW0rd"
NOPRIV_STDERR = "ERROR: Logged-on user does not have administrative privilege.\n"
PRIV_STDOUT = (
"\nINFO: The system global flag 'maintain objects list' needs\n "
"to be enabled to see local opened files.\n See Openfiles "
"/? for more information.\n\n\nFiles opened remotely via local share "
"points:\n---------------------------------------------\n\n"
"INFO: No shared open files found.\n"
)
if HAS_WIN32:
RUNAS_PATH = os.path.abspath(os.path.join(CODE_DIR, "runas.py"))
RUNAS_OUT = os.path.abspath(os.path.join(CODE_DIR, "runas.out"))
def default_target(service, *args, **kwargs):
while service.active:
time.sleep(service.timeout)
class _ServiceManager(win32serviceutil.ServiceFramework):
"""
A windows service manager
"""
_svc_name_ = "Service Manager"
_svc_display_name_ = "Service Manager"
_svc_description_ = "A Service Manager"
run_in_foreground = False
target = default_target
def __init__(self, args, target=None, timeout=60, active=True):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
self.timeout = timeout
self.active = active
if target is not None:
self.target = target
@classmethod
def log_error(cls, msg):
if cls.run_in_foreground:
logger.error(msg)
servicemanager.LogErrorMsg(msg)
@classmethod
def log_info(cls, msg):
if cls.run_in_foreground:
logger.info(msg)
servicemanager.LogInfoMsg(msg)
@classmethod
def log_exception(cls, msg):
if cls.run_in_foreground:
logger.exception(msg)
exc_info = sys.exc_info()
tb = traceback.format_tb(exc_info[2])
servicemanager.LogErrorMsg("{} {} {}".format(msg, exc_info[1], tb))
@property
def timeout_ms(self):
return self.timeout * 1000
def SvcStop(self):
"""
        Stop the service: terminate any subprocess call, notify Windows
        internals of the stop event, and set the instance's active attribute
        to False so the run loops stop.
"""
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(self.hWaitStop)
self.active = False
def SvcDoRun(self):
"""
        Run the monitor in a separate thread so the main thread is
free to react to events sent to the windows service.
"""
servicemanager.LogMsg(
servicemanager.EVENTLOG_INFORMATION_TYPE,
servicemanager.PYS_SERVICE_STARTED,
(self._svc_name_, ""),
)
self.log_info("Starting Service {}".format(self._svc_name_))
monitor_thread = threading.Thread(target=self.target_thread)
monitor_thread.start()
while self.active:
rc = win32event.WaitForSingleObject(self.hWaitStop, self.timeout_ms)
if rc == win32event.WAIT_OBJECT_0:
# Stop signal encountered
self.log_info("Stopping Service")
break
if not monitor_thread.is_alive():
self.log_info("Update Thread Died, Stopping Service")
break
def target_thread(self, *args, **kwargs):
"""
Target Thread, handles any exception in the target method and
logs them.
"""
self.log_info("Monitor")
try:
self.target(self, *args, **kwargs)
except Exception as exc: # pylint: disable=broad-except
# TODO: Add traceback info to windows event log objects
self.log_exception("Exception In Target")
@classmethod
def install(cls, username=None, password=None, start_type=None):
if hasattr(cls, "_svc_reg_class_"):
svc_class = cls._svc_reg_class_
else:
svc_class = win32serviceutil.GetServiceClassString(cls)
win32serviceutil.InstallService(
svc_class,
cls._svc_name_,
cls._svc_display_name_,
description=cls._svc_description_,
userName=username,
password=password,
startType=start_type,
)
@classmethod
def remove(cls):
win32serviceutil.RemoveService(cls._svc_name_)
@classmethod
def start(cls):
win32serviceutil.StartService(cls._svc_name_)
@classmethod
def restart(cls):
win32serviceutil.RestartService(cls._svc_name_)
@classmethod
def stop(cls):
win32serviceutil.StopService(cls._svc_name_)
def service_class_factory(
cls_name,
name,
target=default_target,
display_name="",
description="",
run_in_foreground=False,
):
frm = inspect.stack()[1]
mod = inspect.getmodule(frm[0])
return type(
cls_name,
(_ServiceManager, object),
{
"__module__": mod.__name__,
"_svc_name_": name,
"_svc_display_name_": display_name or name,
"_svc_description_": description,
"run_in_foreground": run_in_foreground,
"target": target,
},
)
if HAS_WIN32:
test_service = service_class_factory("test_service", "test service")
SERVICE_SOURCE = """
from __future__ import absolute_import, unicode_literals
import logging
logger = logging.getLogger()
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
from tests.integration.utils.test_win_runas import service_class_factory
import salt.utils.win_runas
import sys
import yaml
OUTPUT = {}
USERNAME = '{}'
PASSWORD = '{}'
def target(service, *args, **kwargs):
service.log_info("target start")
if PASSWORD:
ret = salt.utils.win_runas.runas(
'cmd.exe /C OPENFILES',
username=USERNAME,
password=PASSWORD,
)
else:
ret = salt.utils.win_runas.runas(
'cmd.exe /C OPENFILES',
username=USERNAME,
)
service.log_info("win_runas returned %s" % ret)
with salt.utils.files.fopen(OUTPUT, 'w') as fp:
yaml.dump(ret, fp)
service.log_info("target stop")
# This class will get imported and run as the service
test_service = service_class_factory('test_service', 'test service', target=target)
if __name__ == '__main__':
try:
test_service.stop()
except Exception as exc: # pylint: disable=broad-except
logger.debug("stop service failed, this is ok.")
try:
test_service.remove()
except Exception as exc: # pylint: disable=broad-except
logger.debug("remove service failed, this os ok.")
test_service.install()
sys.exit(0)
"""
def wait_for_service(name, timeout=200):
start = time.time()
while True:
status = win32serviceutil.QueryServiceStatus(name)
if status[1] == win32service.SERVICE_STOPPED:
break
if time.time() - start > timeout:
raise TimeoutError(
"Timeout waiting for service"
) # pylint: disable=undefined-variable
time.sleep(0.3)
@skipIf(not HAS_WIN32, "This test runs only on windows.")
class RunAsTest(ModuleCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.hostname = socket.gethostname()
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas(self, username):
ret = salt.utils.win_runas.runas("cmd.exe /C OPENFILES", username, PASSWORD)
self.assertEqual(ret["stdout"], "")
self.assertEqual(ret["stderr"], NOPRIV_STDERR)
self.assertEqual(ret["retcode"], 1)
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas_no_pass(self, username):
ret = salt.utils.win_runas.runas("cmd.exe /C OPENFILES", username)
self.assertEqual(ret["stdout"], "")
self.assertEqual(ret["stderr"], NOPRIV_STDERR)
self.assertEqual(ret["retcode"], 1)
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_admin(self, username):
ret = salt.utils.win_runas.runas("cmd.exe /C OPENFILES", username, PASSWORD)
self.assertEqual(ret["stdout"], PRIV_STDOUT)
self.assertEqual(ret["stderr"], "")
self.assertEqual(ret["retcode"], 0)
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_admin_no_pass(self, username):
ret = salt.utils.win_runas.runas("cmd.exe /C OPENFILES", username)
self.assertEqual(ret["stdout"], PRIV_STDOUT)
self.assertEqual(ret["stderr"], "")
self.assertEqual(ret["retcode"], 0)
def test_runas_system_user(self):
ret = salt.utils.win_runas.runas("cmd.exe /C OPENFILES", "SYSTEM")
self.assertEqual(ret["stdout"], PRIV_STDOUT)
self.assertEqual(ret["stderr"], "")
self.assertEqual(ret["retcode"], 0)
def test_runas_network_service(self):
ret = salt.utils.win_runas.runas("cmd.exe /C OPENFILES", "NETWORK SERVICE")
self.assertEqual(ret["stdout"], "")
self.assertEqual(ret["stderr"], NOPRIV_STDERR)
self.assertEqual(ret["retcode"], 1)
def test_runas_local_service(self):
ret = salt.utils.win_runas.runas("cmd.exe /C OPENFILES", "LOCAL SERVICE")
self.assertEqual(ret["stdout"], "")
self.assertEqual(ret["stderr"], NOPRIV_STDERR)
self.assertEqual(ret["retcode"], 1)
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas_winrs(self, username):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
password = '{}'
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username, password)['retcode'])
""".format(
username, PASSWORD
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
[
"cmd.exe",
"/C",
"winrs",
"/r:{}".format(self.hostname),
"python",
RUNAS_PATH,
]
)
self.assertEqual(ret, 1)
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas_winrs_no_pass(self, username):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username)['retcode'])
""".format(
username
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
[
"cmd.exe",
"/C",
"winrs",
"/r:{}".format(self.hostname),
"python",
RUNAS_PATH,
]
)
self.assertEqual(ret, 1)
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_winrs_admin(self, username):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
password = '{}'
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username, password)['retcode'])
""".format(
username, PASSWORD
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
[
"cmd.exe",
"/C",
"winrs",
"/r:{}".format(self.hostname),
"python",
RUNAS_PATH,
]
)
self.assertEqual(ret, 0)
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_winrs_admin_no_pass(self, username):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username)['retcode'])
""".format(
username
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
[
"cmd.exe",
"/C",
"winrs",
"/r:{}".format(self.hostname),
"python",
RUNAS_PATH,
]
)
self.assertEqual(ret, 0)
def test_runas_winrs_system_user(self):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', 'SYSTEM')['retcode'])
"""
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
[
"cmd.exe",
"/C",
"winrs",
"/r:{}".format(self.hostname),
"python",
RUNAS_PATH,
]
)
self.assertEqual(ret, 0)
def test_runas_winrs_network_service_user(self):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', 'NETWORK SERVICE')['retcode'])
"""
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
[
"cmd.exe",
"/C",
"winrs",
"/r:{}".format(self.hostname),
"python",
RUNAS_PATH,
]
)
self.assertEqual(ret, 1)
def test_runas_winrs_local_service_user(self):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', 'LOCAL SERVICE')['retcode'])
"""
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
[
"cmd.exe",
"/C",
"winrs",
"/r:{}".format(self.hostname),
"python",
RUNAS_PATH,
]
)
self.assertEqual(ret, 1)
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas_powershell_remoting(self, username):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
password = '{}'
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username, password)['retcode'])
""".format(
username, PASSWORD
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
[
"powershell",
"Invoke-Command",
"-ComputerName",
self.hostname,
"-ScriptBlock",
"{{ python.exe {} }}".format(RUNAS_PATH),
]
)
self.assertEqual(ret, 1)
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas_powershell_remoting_no_pass(self, username):
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username)['retcode'])
""".format(
username
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
ret = subprocess.call(
[
"powershell",
"Invoke-Command",
"-ComputerName",
self.hostname,
"-ScriptBlock",
"{{ python.exe {} }}".format(RUNAS_PATH),
]
)
self.assertEqual(ret, 1)
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_powershell_remoting_admin(self, username):
psrp_wrap = (
"powershell Invoke-Command -ComputerName {} -ScriptBlock {{ {} }}; exit"
" $LASTEXITCODE"
)
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
password = '{}'
ret = salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username, password)
sys.exit(ret['retcode'])
""".format(
username, PASSWORD
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
cmd = "python.exe {}; exit $LASTEXITCODE".format(RUNAS_PATH)
ret = subprocess.call(psrp_wrap.format(self.hostname, cmd), shell=True) # nosec
self.assertEqual(ret, 0)
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_powershell_remoting_admin_no_pass(self, username):
psrp_wrap = (
"powershell Invoke-Command -ComputerName {} -ScriptBlock {{ {} }}; exit"
" $LASTEXITCODE"
)
runaspy = textwrap.dedent(
"""
import sys
import salt.utils.win_runas
username = '{}'
sys.exit(salt.utils.win_runas.runas('cmd.exe /C OPENFILES', username)['retcode'])
""".format(
username
)
)
with salt.utils.files.fopen(RUNAS_PATH, "w") as fp:
fp.write(runaspy)
cmd = "python.exe {}; exit $LASTEXITCODE".format(RUNAS_PATH)
ret = subprocess.call(psrp_wrap.format(self.hostname, cmd), shell=True) # nosec
self.assertEqual(ret, 0)
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas_service(self, username, timeout=200):
if os.path.exists(RUNAS_OUT):
os.remove(RUNAS_OUT)
assert not os.path.exists(RUNAS_OUT)
runaspy = SERVICE_SOURCE.format(repr(RUNAS_OUT), username, PASSWORD)
with salt.utils.files.fopen(RUNAS_PATH, "w", encoding="utf-8") as fp:
fp.write(runaspy)
ret = subprocess.call(["python.exe", RUNAS_PATH])
self.assertEqual(ret, 0)
win32serviceutil.StartService("test service")
wait_for_service("test service")
with salt.utils.files.fopen(RUNAS_OUT, "r") as fp:
ret = yaml.load(fp)
assert ret["retcode"] == 1, ret
@with_system_user(
"test-runas", on_existing="delete", delete=True, password=PASSWORD
)
def test_runas_service_no_pass(self, username, timeout=200):
if os.path.exists(RUNAS_OUT):
os.remove(RUNAS_OUT)
assert not os.path.exists(RUNAS_OUT)
runaspy = SERVICE_SOURCE.format(repr(RUNAS_OUT), username, "")
with salt.utils.files.fopen(RUNAS_PATH, "w", encoding="utf-8") as fp:
fp.write(runaspy)
ret = subprocess.call(["python.exe", RUNAS_PATH])
self.assertEqual(ret, 0)
win32serviceutil.StartService("test service")
wait_for_service("test service")
with salt.utils.files.fopen(RUNAS_OUT, "r") as fp:
ret = yaml.load(fp)
assert ret["retcode"] == 1, ret
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_service_admin(self, username, timeout=200):
if os.path.exists(RUNAS_OUT):
os.remove(RUNAS_OUT)
assert not os.path.exists(RUNAS_OUT)
runaspy = SERVICE_SOURCE.format(repr(RUNAS_OUT), username, PASSWORD)
with salt.utils.files.fopen(RUNAS_PATH, "w", encoding="utf-8") as fp:
fp.write(runaspy)
ret = subprocess.call(["python.exe", RUNAS_PATH])
self.assertEqual(ret, 0)
win32serviceutil.StartService("test service")
wait_for_service("test service")
with salt.utils.files.fopen(RUNAS_OUT, "r") as fp:
ret = yaml.load(fp)
assert ret["retcode"] == 0, ret
@with_system_user(
"test-runas-admin",
on_existing="delete",
delete=True,
password=PASSWORD,
groups=["Administrators"],
)
def test_runas_service_admin_no_pass(self, username, timeout=200):
if os.path.exists(RUNAS_OUT):
os.remove(RUNAS_OUT)
assert not os.path.exists(RUNAS_OUT)
runaspy = SERVICE_SOURCE.format(repr(RUNAS_OUT), username, "")
with salt.utils.files.fopen(RUNAS_PATH, "w", encoding="utf-8") as fp:
fp.write(runaspy)
ret = subprocess.call(["python.exe", RUNAS_PATH])
self.assertEqual(ret, 0)
win32serviceutil.StartService("test service")
wait_for_service("test service")
with salt.utils.files.fopen(RUNAS_OUT, "r") as fp:
ret = yaml.load(fp)
assert ret["retcode"] == 0, ret
def test_runas_service_system_user(self):
if os.path.exists(RUNAS_OUT):
os.remove(RUNAS_OUT)
assert not os.path.exists(RUNAS_OUT)
runaspy = SERVICE_SOURCE.format(repr(RUNAS_OUT), "SYSTEM", "")
with salt.utils.files.fopen(RUNAS_PATH, "w", encoding="utf-8") as fp:
fp.write(runaspy)
ret = subprocess.call(["python.exe", RUNAS_PATH])
self.assertEqual(ret, 0)
win32serviceutil.StartService("test service")
wait_for_service("test service")
with salt.utils.files.fopen(RUNAS_OUT, "r") as fp:
ret = yaml.load(fp)
assert ret["retcode"] == 0, ret
|
fmfi-svt/votr
|
aisikl/components/action.py
|
from aisikl.events import action_event
from .component import Component, is_true
class Action(Component):
def __init__(self, dialog, id, type, parent_id, properties, element):
super().__init__(dialog, id, type, parent_id, properties, element)
self.accessible = properties.get('accessible', True)
self.tool_tip_text = properties.get('toolTipText')
self.shortcut = properties.get('sc')
self.action_list_id = properties.get('actionListId')
self.confirm_question = properties.get('confirmQuestion')
self.component_ids = properties.get('cids')
def get_components(self):
return [self.dialog.components[id] for id in self.component_ids]
def get_buttons_and_menu_items(self):
return [o for o in self.get_components()
if o.component_type in ('button', 'menuItem')]
def execute(self, original_source_name=None, params=None):
'''Executes the action and emits the appropriate event.'''
if not (self.accessible and self.enabled and self.enabled_in_ui and
self.visible and self.visible_in_ui):
# TODO: we should return here, but we can only do that once we
# properly support interactives. for now, the developer knows best.
pass
if not original_source_name:
self.log('action', 'Executing {}'.format(self.id))
ev = action_event(self, None, original_source_name or self.id, params)
# TODO: We should technically ask confirm_question before firing
# (if ev.listening is True), but we probably don't care.
self.dialog.app.send_events(ev)
def _ais_setAccessible(self, value):
self.accessible = is_true(value)
def _ais_setVisibleInUI(self, value):
super()._ais_setVisibleInUI(value)
for o in self.get_buttons_and_menu_items():
o._ais_setVisibleInUI(value)
def _ais_setEnabledInUI(self, value):
super()._ais_setEnabledInUI(value)
for o in self.get_buttons_and_menu_items():
o._ais_setEnabledInUI(value)
def _ais_setTitle(self, value):
for o in self.get_buttons_and_menu_items():
if o.title == self.title:
                o._ais_setTitle(value)
super()._ais_setTitle(value)
def _ais_setToolTipText(self, value):
for o in self.get_buttons_and_menu_items():
if o.tool_tip_text == self.tool_tip_text:
o._ais_setToolTipText(value)
self.tool_tip_text = value
def _ais_setConfirmQuestion(self, value):
for o in self.get_buttons_and_menu_items():
if o.confirm_question == self.confirm_question:
o._ais_setConfirmQuestion(value)
self.confirm_question = value
|
PyThaiNLP/pythainlp
|
pythainlp/util/__init__.py
|
# -*- coding: utf-8 -*-
"""
Utility functions, like date conversion and digit conversion
"""
__all__ = [
"Trie",
"arabic_digit_to_thai_digit",
"bahttext",
"collate",
"countthai",
"delete_tone",
"dict_trie",
"digit_to_text",
"display_thai_char",
"emoji_to_thai",
"eng_to_thai",
"find_keyword",
"is_native_thai",
"isthai",
"isthaichar",
"normalize",
"now_reign_year",
"num_to_thaiword",
"rank",
"reign_year_to_ad",
"remove_dangling",
"remove_dup_spaces",
"remove_repeat_vowels",
"remove_tonemark",
"remove_zw",
"reorder_vowels",
"text_to_arabic_digit",
"text_to_thai_digit",
"thai_digit_to_arabic_digit",
"thai_keyboard_dist",
"thai_strftime",
"thai_time",
"thai_to_eng",
"thaiword_to_date",
"thaiword_to_num",
"thaiword_to_time",
"time_to_thaiword",
"text_to_num",
"words_to_num",
]
from pythainlp.util.collate import collate
from pythainlp.util.date import (
now_reign_year,
reign_year_to_ad,
thaiword_to_date,
)
from pythainlp.util.digitconv import (
arabic_digit_to_thai_digit,
digit_to_text,
text_to_arabic_digit,
text_to_thai_digit,
thai_digit_to_arabic_digit,
)
from pythainlp.util.keyboard import (
eng_to_thai,
thai_keyboard_dist,
thai_to_eng,
)
from pythainlp.util.emojiconv import emoji_to_thai
from pythainlp.util.keywords import find_keyword, rank
from pythainlp.util.normalize import (
delete_tone,
normalize,
maiyamok,
remove_dangling,
remove_dup_spaces,
remove_repeat_vowels,
remove_tonemark,
remove_zw,
reorder_vowels,
)
from pythainlp.util.numtoword import bahttext, num_to_thaiword
from pythainlp.util.strftime import thai_strftime
from pythainlp.util.thai import (
countthai,
display_thai_char,
isthai,
isthaichar,
)
from pythainlp.util.thaiwordcheck import is_native_thai
from pythainlp.util.time import thai_time, thaiword_to_time, time_to_thaiword
from pythainlp.util.trie import Trie, dict_trie
from pythainlp.util.wordtonum import thaiword_to_num, text_to_num, words_to_num
|
pkimber/checkout
|
example_checkout/urls.py
|
# -*- encoding: utf-8 -*-
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.core.urlresolvers import reverse_lazy
from django.views.generic import RedirectView
from .views import (
ExampleRefreshExpiryDatesFormView,
HomeView,
SalesLedgerChargeUpdateView,
SalesLedgerCheckoutSuccessView,
SalesLedgerCheckoutView,
SalesLedgerSessionRedirectView,
SettingsView,
)
admin.autodiscover()
urlpatterns = patterns(
'',
url(regex=r'^$',
view=HomeView.as_view(),
name='project.home'
),
url(regex=r'^settings/$',
view=SettingsView.as_view(),
name='project.settings'
),
url(regex=r'^',
view=include('login.urls')
),
url(regex=r'^admin/',
view=include(admin.site.urls)
),
url(regex=r'^checkout/',
view=include('checkout.urls')
),
url(regex=r'^contact/',
view=include('contact.urls')
),
url(r'^home/user/$',
view=RedirectView.as_view(
url=reverse_lazy('project.home'),
permanent=False
),
name='project.dash'
),
url(regex=r'^example/refresh/card/expiry/dates/$',
view=ExampleRefreshExpiryDatesFormView.as_view(),
name='example.refresh.card.expiry.dates'
),
url(regex=r'^sales/ledger/(?P<pk>\d+)/charge/$',
view=SalesLedgerChargeUpdateView.as_view(),
name='example.sales.ledger.charge'
),
url(regex=r'^sales/ledger/(?P<pk>\d+)/checkout/$',
view=SalesLedgerCheckoutView.as_view(),
name='example.sales.ledger.checkout'
),
url(regex=r'^sales/ledger/(?P<pk>\d+)/checkout/success/$',
view=SalesLedgerCheckoutSuccessView.as_view(),
name='example.sales.ledger.checkout.success'
),
url(regex=r'^sales/ledger/(?P<pk>\d+)/session/redirect/$',
view=SalesLedgerSessionRedirectView.as_view(),
name='example.sales.ledger.session.redirect'
),
)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# ^ helper function to return a URL pattern for serving files in debug mode.
# https://docs.djangoproject.com/en/1.5/howto/static-files/#serving-files-uploaded-by-a-user
urlpatterns += staticfiles_urlpatterns()
|
lukas-ke/faint-graphics-editor
|
build-sys/build_sys/gen_text_expressions.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Lukas Kemmer
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
import build_sys.util.util as util
from build_sys.cpp_writer import Code
"""Generates the simple constant text-expressions and
documentation."""
COMMANDS = [
# Command, character, docs
('alpha', 'utf8_char(0x03b1)', 'Greek small letter alpha'),
('angle', 'utf8_char(0x2220)', 'Angle-symbol'),
('beta', 'utf8_char(0x03b2)', 'Greek small letter beta'),
('deg', 'chars::degree_sign', 'Degree sign'),
('delta', 'chars::greek_capital_letter_delta', 'Greek capital delta'),
('dog', 'utf8_char(0x1f415)', 'Dog symbol'),
('dprime', 'chars::double_prime', 'Double prime'),
('ellipsis', 'utf8_char(0x22ef)', 'Ellipsis symbol'),
('ge', 'utf8_char(0x2265)', 'Greater or equal sign'),
('in', 'chars::double_prime', 'Inch symbol (double prime)'),
('inf', 'utf8_char(0x221e)', 'Infinity symbol'),
('interrobang', 'utf8_char(0x203d)', 'Surprised exclamation'),
('larr', 'utf8_char(0x2190)', 'Left arrow'),
('le', 'utf8_char(0x2264)', 'Less or equal sign'),
('li', 'chars::bullet', 'Bullet point'),
('lrarr', 'utf8_char(0x2194)', 'Left and right arrow'),
('ne', 'utf8_char(0x2260)', 'Not equal sign'),
('notparallel', 'utf8_char(0x2226)', 'Not parallel symbol'),
('parallel', 'utf8_char(0x2225)', 'Parallel symbol'),
('perfect', 'utf8_char(0x1f44c)', 'Perfect hand gesture'),
('pi', 'chars::greek_small_letter_pi', 'Greek small pi'),
('poop', 'utf8_char(0x1f4a9)', 'Pile of poop symbol'),
('prime', 'chars::prime', 'Prime symbol'),
('rarr', 'utf8_char(0x2192)', 'Right arrow'),
('scissors', 'utf8_char(0x2702)', 'A pair of scissors'),
('sq', 'chars::superscript_two', 'Superscript two'),
('sqrt', 'utf8_char(0x221a)', 'Square-root symbol'),
('times', 'utf8_char(0xD7)', 'Multiplication symbol'),
('tm', 'utf8_char(0x2122)', 'Trademark symbol'),
('tprime', 'chars::triple_prime', 'Triple prime'),
('whoa', 'utf8_char(0x1f450)', 'Raised hands')]
def get_header_code():
hh = Code()
hh.line('// Generated by %s\n' % os.path.basename(__file__))
hh.line('namespace faint{')
hh.line('const std::map<utf8_string, utf8_string>& constant_exprs(){')
hh.line('static std::map<utf8_string, utf8_string> constants =')
hh.line('{')
for c in COMMANDS[:-1]:
hh.line('{"%s", utf8_string(%s)},' % (c[0], c[1]))
c = COMMANDS[-1]
hh.line('{"%s", utf8_string(%s)}' % (c[0], c[1]))
hh.line('};')
hh.line('return constants;')
hh.line('}')
hh.line('} // namespace')
return hh
def get_help():
help = '# Generated by %s\n' % os.path.basename(__file__)
help += '||*Command*||*Symbol*||*Description*||\n'
for c in COMMANDS:
help += ('||\\%s||\\image(symbol-%s.png)||%s||\n' % (c[0], c[0], c[2]))
return help
def generate_header(file_path):
with open(file_path, 'w', newline='\n') as f:
        f.write(str(get_header_code()))
def generate_help(file_path):
"""Writes a help-source-file documenting the commands"""
with open(file_path, 'w', newline='\n') as f:
f.write(get_help())
def generate(hh_path, help_path):
if util.changed(__file__, hh_path):
generate_header(hh_path)
if util.changed(__file__, help_path):
generate_help(help_path)
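# Minimal usage sketch (not part of the original script), for previewing the
# two generated outputs without touching the build; the file names in the
# commented-out generate() call are hypothetical examples.
if __name__ == '__main__':
    print(get_help())                 # wiki-style table documenting COMMANDS
    print(str(get_header_code()))     # generated C++ header with the constants
    # generate('text-expression-constants.hh', 'text-expressions-help.txt')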
|
google-research/unique-randomizer
|
unique_randomizer/stochastic_beam_search.py
|
# Copyright 2020 The UniqueRandomizer Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Stochastic Beam Search (SBS).
The technique is described in the following paper:
Wouter Kool, Herke van Hoof, and Max Welling.
Stochastic Beams and Where to Find Them: The Gumbel-Top-k Trick for Sampling
Sequences Without Replacement.
https://arxiv.org/pdf/1903.06059.pdf
The implementation is slightly generalized from the description in the paper,
handling the case where not all leaves are at the same level of the tree.
"""
import typing
from typing import Any, Callable, List, Tuple, Union
import numpy as np
State = Any # Type alias. pylint: disable=invalid-name
Output = Any # Type alias. pylint: disable=invalid-name
BeamNode = typing.NamedTuple('BeamNode', [('output', Output),
('log_probability', float),
('gumbel', float)])
def sample_gumbels_with_maximum(log_probabilities, target_max):
"""Samples a set of gumbels which are conditioned on having a given maximum.
Based on https://gist.github.com/wouterkool/a3bb2aae8d6a80f985daae95252a8aa8.
Args:
log_probabilities: The log probabilities of the items to sample Gumbels for.
target_max: The desired maximum sampled Gumbel.
Returns:
The sampled Gumbels.
"""
gumbels = np.random.gumbel(loc=log_probabilities)
max_gumbel = np.max(gumbels)
# Use equations (23) and (24) in Appendix B.3 of the SBS paper.
# Note: Numpy may warn "divide by zero encountered in log1p" on the next code
# line. This is normal and expected, since one element of
# `gumbels - max_gumbel` should be zero. The math fixes itself later on, and
# that element ends up being shifted to target_max.
v = target_max - gumbels + np.log1p(-np.exp(gumbels - max_gumbel))
return target_max - np.maximum(v, 0) - np.log1p(np.exp(-np.abs(v)))
def stochastic_beam_search(
child_log_probability_fn: Callable[[List[State]], List[np.ndarray]],
child_state_fn: Callable[[List[Tuple[State, int]]],
List[Tuple[Union[State, Output], bool]]],
root_state: State,
k: int) -> List[BeamNode]:
"""Stochastic Beam Search.
Nodes in the beam include "states" which can be anything but must contain
enough information to:
1. Define a consistent ordering of all children of the node.
2. Enumerate the probabilities of all children.
3. Produce the state of the child with a given index.
Args:
child_log_probability_fn: A function that takes a list of states and returns
the log probabilities of the child states of each input state.
child_state_fn: A function that takes a list of (state, i) pairs and maps
each to a (ith_child, is_leaf) pair. If ith_child is a leaf state, is_leaf
should be True, and ith_child will potentially be an actual sampled item
that should be returned by stochastic_beam_search (it may have a different
form than other non-leaf states).
root_state: The state of the root node. This cannot be a leaf node.
k: The desired number of samples.
Returns:
A list of up to k BeamNode objects, corresponding to the sampled leaves.
"""
if k <= 0:
return []
# Data for nodes currently on the beam.
leaf_log_probs = []
leaf_gumbels = []
leaf_outputs = []
internal_states = [root_state]
internal_log_probs = [0.0]
internal_gumbels = [0.0]
# Expand internal nodes until there are none left to expand.
while internal_states:
# Compute child probabilities for all internal nodes.
child_log_probs_list = child_log_probability_fn(internal_states)
# Avoid creating tons of BeamNode objects for children of internal nodes
# (there may be beam_size*node_arity children). Instead pack data into lists
# for efficiency.
all_log_probs = []
all_gumbels = []
all_states = []
all_child_indices = []
# Sample Gumbels for children of internal nodes.
for node_state, node_log_prob, node_gumbel, child_log_probs in zip(
internal_states, internal_log_probs, internal_gumbels,
child_log_probs_list):
# Note: Numpy may warn "divide by zero encountered in log" on the next
# code line. This is normal and expected if a child has zero probability.
# We prevent zero-probability children from being added to the beam.
log_probabilities = child_log_probs + node_log_prob
good_indices = np.where(log_probabilities != np.NINF)[0]
log_probabilities = log_probabilities[good_indices]
gumbels = sample_gumbels_with_maximum(log_probabilities, node_gumbel)
all_log_probs.extend(log_probabilities)
all_gumbels.extend(gumbels)
all_states.extend([node_state] * len(log_probabilities))
all_child_indices.extend(good_indices)
# Select the k best candidates.
num_internal_candidates = len(all_gumbels)
num_leaf_candidates = len(leaf_gumbels)
if k >= num_internal_candidates + num_leaf_candidates:
# No change to leaf nodes, since all are selected.
to_expand_states = list(zip(all_states, all_child_indices))
to_expand_log_probs = all_log_probs
to_expand_gumbels = all_gumbels
else:
# Select the unsorted top k in O(num_candidates) time.
all_gumbels.extend(leaf_gumbels)
top_k_indices = np.argpartition(all_gumbels, -k)[-k:]
to_expand_states = []
to_expand_log_probs = []
to_expand_gumbels = []
leaf_indices = []
for i in top_k_indices:
if i >= num_internal_candidates:
leaf_indices.append(i - num_internal_candidates)
else:
to_expand_states.append((all_states[i], all_child_indices[i]))
to_expand_log_probs.append(all_log_probs[i])
to_expand_gumbels.append(all_gumbels[i])
leaf_log_probs = [leaf_log_probs[i] for i in leaf_indices]
leaf_gumbels = [leaf_gumbels[i] for i in leaf_indices]
leaf_outputs = [leaf_outputs[i] for i in leaf_indices]
# Among selected candidates, expand non-leaf nodes.
internal_log_probs = []
internal_gumbels = []
internal_states = []
child_states = child_state_fn(to_expand_states)
for log_prob, gumbel, (child_state, is_leaf) in zip(
to_expand_log_probs, to_expand_gumbels, child_states):
if is_leaf:
leaf_log_probs.append(log_prob)
leaf_gumbels.append(gumbel)
leaf_outputs.append(child_state)
else:
internal_log_probs.append(log_prob)
internal_gumbels.append(gumbel)
internal_states.append(child_state)
# Pack the leaf data into BeamNode objects.
sampled_nodes = []
for log_prob, gumbel, output in zip(
leaf_log_probs, leaf_gumbels, leaf_outputs):
sampled_nodes.append(BeamNode(output=output, log_probability=log_prob,
gumbel=gumbel))
# Sort the beam in order of decreasing Gumbels. This corresponds to the order
# one would get by sampling one-at-a-time without replacement.
return sorted(sampled_nodes, key=lambda x: x.gumbel, reverse=True)
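# Minimal usage sketch (not part of the original module), assuming a toy tree
# where every internal node has exactly two children and all leaves sit at
# depth 2; the tuple-of-child-indices state encoding is purely illustrative.
if __name__ == '__main__':
  def _toy_child_log_probability_fn(states):
    # Each node has two children with probabilities 0.3 and 0.7.
    return [np.log([0.3, 0.7]) for _ in states]

  def _toy_child_state_fn(state_index_pairs):
    # A child's state is its path from the root; it is a leaf at depth 2.
    return [(state + (i,), len(state) + 1 == 2)
            for state, i in state_index_pairs]

  for node in stochastic_beam_search(
      _toy_child_log_probability_fn, _toy_child_state_fn, root_state=(), k=3):
    print(node.output, node.log_probability, node.gumbel)

  # sample_gumbels_with_maximum shifts the sampled Gumbels so that their
  # maximum is exactly the requested target_max:
  shifted = sample_gumbels_with_maximum(np.log([0.2, 0.3, 0.5]), 0.0)
  assert np.isclose(np.max(shifted), 0.0)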
|
tellproject/helper_scripts
|
mbclient.py
|
#!/usr/bin/env python
import os
from argparse import ArgumentParser
from threaded_ssh import ThreadedClients
from ServerConfig import Storage
from ServerConfig import TellStore
from ServerConfig import Kudu
from ServerConfig import Cassandra
from ServerConfig import Microbench
def startMBClient(populate = False, uoutFile = None):
default_out = ""
if Storage.storage == TellStore:
default_out = "mbench_{0}".format(TellStore.approach)
elif Storage.storage == Kudu:
default_out = "mbench_kudu"
elif Storage.storage == Cassandra:
default_out = "mbench_cassandra"
default_out = '{0}/{1}_sf{2}_N{3}'.format(Microbench.result_dir, default_out, Microbench.scaling, Microbench.numColumns)
if (uoutFile):
outfile = uoutFile
else:
outfile = default_out
appendFile = 0
while os.path.isfile(outfile + ".db"):
appendFile = appendFile + 1
outfile = "{0}_{1}".format(outfile, appendFile)
probabilities = "-i {0} -d {1} -u {2}".format(Microbench.insertProb, Microbench.deleteProb, Microbench.updateProb)
cmd = '{0}/watch/microbench/mbclient -H "{1}" -s {2} -c {3} -t {4} -a {5} -o {6} -b {7} -w {8} {9}'.format(TellStore.builddir, Microbench.getServerList(), Microbench.scaling, Microbench.clients, Microbench.clientThreads, Microbench.analyticalClients, outfile + ".db", Microbench.txBatch, Microbench.oltpWaitTime, probabilities)
if Microbench.onlyQ1:
cmd += ' -q'
if Microbench.noWarmUp:
cmd += " --no-warmup"
if (populate):
cmd += ' -P'
print "Execute {0}".format(cmd)
return os.system(cmd)
if __name__ == "__main__":
default_out = ''
parser = ArgumentParser()
parser.add_argument("-P", dest='populate', help="Populate data", action="store_true")
parser.add_argument("outfile", help="Result database", default=default_out, nargs='?')
args = parser.parse_args()
    if (args.outfile != ''):
        exit(startMBClient(args.populate, args.outfile))
else:
exit(startMBClient(args.populate))
|
tensorflow/benchmarks
|
scripts/tf_cnn_benchmarks/batch_allreduce.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains classes and functions for doing a single-machine batch all-reduce.
An all-reduce is taking the reduction (typically a sum) of a list of tensors,
each on a different device. The result must end up back on each device, which is
where the word "all" comes from. In summary, each device starts with a single
tensor, and ends up with the reduction of all tensors.
A batch all-reduce is doing several independent all-reduces. When doing a batch
all-reduce, care is taken to evenly distribute the reduction computations
across devices and inter-device tensor transfers across device links.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(reedwm): Support distributed all-reduces in this file.
# TODO(reedwm): Merge this code with allreduce.py, which contains some batch
# all-reduce code that this file calls. allreduce.py also supports distributed
# batch-reduce while this file only supports single-machine all-reduce.
import abc
import six
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import data_flow_ops
import allreduce
import constants
def _all_reduce_using_copy(tensors_across_devices, use_mean):
"""Does an all-reduce of a list of tensors by copying to the current device.
The tensors are copied to the current device and then reduced.
Args:
tensors_across_devices: A list of tensors, each on a different device.
    use_mean: Whether to take the mean of the tensors instead of a sum.
Returns:
A reduced tensor on the current device.
"""
reduced_tensor = tf.add_n(tensors_across_devices)
if use_mean:
reduced_tensor *= 1 / len(tensors_across_devices)
return reduced_tensor
@six.add_metaclass(abc.ABCMeta)
class BatchAllReduceAlgorithm(object):
"""Represents an algorithm for performing a batch all-reduce operation."""
def batch_all_reduce(self,
all_device_tensors,
num_splits,
compact_tensors,
defer_tensors,
xla_compile=False):
"""Performs a batch all-reduce.
The reduction done is a sum.
`all_device_tensors` is a list of list of tensors that will be batch
all-reduced. All tensors within a single inner list must be on the same
device. The nth element in each list, for any n, will be reduced together.
The return value is in the same form as `all_device_tensors`, except that
each tensor is reduced.
For example, if `all_device_tensors` is:
[[ A, B ], # A and B are on GPU 0
[ C, D ]] # C and D are on GPU 1
Then the return value will be:
[[ A+C, B+D ], # These two tensors are on GPU 0
[ A+C, B+D ]] # These two tensors are on GPU 1
Args:
all_device_tensors: A list of list of tensors. `all_device_tensors[i][j]`
is a tensor where `i` is the device index and `j` is the tensor index.
num_splits: If not None, tensors will be concatenated and split into this
many pieces during the all-reduce, then split back into their original
shapes afterwards. Has no impact on correctness and can improve
performance. Requires all tensors to be the same type.
      compact_tensors: If True, tensors are cast to fp16 before being all-
reduced. Improves performance, but hurts numerical stability.
defer_tensors: If True, every time the return value
`reduced_all_device_tensors` is evaluated, the result will be the
reduced tensors values of `all_device_tensors` from the previous session
run instead of the current session run, or zero on the first session
run. This can improve performance. When training neural networks,
deferring gradients often does not harm training, so this can be used to
improve performance.
xla_compile: If True, use XLA to compile gradients packing and unpacking
ops.
Returns:
reduced_all_device_tensors: A list in the same form as
`all_device_tensors`, except each tensor has been reduced.
warmup_ops: A list of ops needed to be run once before the all-reduce can
occur.
"""
# Before all-reducing tensors, we do several preprocessing functions that
# can speed up the all-reduce. We undo these functions after all-reducing
# the tensors.
# all_device_packed_tensors is a 2-d list of tensors indexed by
# [device_id][tensor_id], holding packed tensors from all devices involved
# in all-reduce.
all_device_packed_tensors = []
# all_device_warmup_ops is a 2-d list of ops indexed by
# [device_id][tensor_id], holding warmup_ops that need to be run once before
# all-reduce can occur.
all_device_warmup_ops = []
# all_device_put_ops is a 2-d list of ops indexed by
# [device_id][tensor_id], holding put ops for deferred tensors. They will be
# called in each all-reduce step automatically due to control dependency.
all_device_put_ops = []
# packers is a list of _TensorPacker, one for each device involved in
# all-reduce.
packers = [
_TensorPacker(num_splits, compact_tensors) for _ in all_device_tensors
]
for packer, device_tensors in zip(packers, all_device_tensors):
def pack_single_device_tensors(packer=packer,
device_tensors=device_tensors):
"""Pack gradient tensors of a device."""
packed_tensors = packer.maybe_concat_tensors(device_tensors)
packed_tensors = packer.maybe_compact_tensors(packed_tensors)
# When xla_compile=False, defer tensors after concat for better
# performance.
if defer_tensors and not xla_compile:
packed_tensors, put_ops, warmup_ops = defer_single_device_tensors(
packed_tensors)
all_device_put_ops.append(put_ops)
all_device_warmup_ops.append(warmup_ops)
packed_tensors = packer.maybe_split_tensors(packed_tensors)
return packed_tensors
with tf.device(device_tensors[0].device):
if xla_compile:
packed_tensors = tf.xla.experimental.compile(
pack_single_device_tensors)
# When xla_compile=True, intermediate tensors in packing process are
# not materialized. Thus, we defer tensors after packing process is
# completed instead of in the middle of it.
if defer_tensors:
packed_tensors, put_ops, warmup_ops = defer_single_device_tensors(
packed_tensors)
all_device_put_ops.append(put_ops)
all_device_warmup_ops.append(warmup_ops)
else:
packed_tensors = pack_single_device_tensors()
all_device_packed_tensors.append(packed_tensors)
# Perform all-reduce on packed tensors.
all_device_tensors = self._do_batch_all_reduce(all_device_packed_tensors)
all_device_unpacked_tensors = []
for packer, device_tensors in zip(packers, all_device_tensors):
def unpack_single_device_tensors(packer=packer,
device_tensors=device_tensors):
"""Unpack gradient tensors of a device."""
unpacked_tensors = packer.undo_maybe_split_tensors(device_tensors)
unpacked_tensors = packer.undo_maybe_compact_tensors(unpacked_tensors)
unpacked_tensors = packer.undo_maybe_concat_tensors(unpacked_tensors)
return unpacked_tensors
with tf.device(device_tensors[0].device):
if xla_compile:
unpacked_device_tensor = tf.xla.experimental.compile(
unpack_single_device_tensors)
else:
unpacked_device_tensor = unpack_single_device_tensors()
all_device_unpacked_tensors.append(unpacked_device_tensor)
# Note: There is no undo operation for deferring tensors. But we do need to
# call _add_put_op_control_deps at the end if we deferred the tensors.
if defer_tensors:
all_device_unpacked_tensors = _add_put_op_control_deps(
all_device_unpacked_tensors, num_splits, all_device_put_ops)
return all_device_unpacked_tensors, all_device_warmup_ops
@abc.abstractmethod
def _do_batch_all_reduce(self, all_device_tensors):
"""Performs a batch all-reduce.
Unlike `self.batch_all_reduce`, this does not do any preprocessing of the
tensors.
Args:
all_device_tensors: A list of list of tensors. `all_device_tensors[i][j]`
is a tensor where `i` is the device index and `j` is the tensor index.
Returns:
reduced_all_device_tensors: A list in the same form as
`all_device_tensors`, except each tensor has been reduced.
"""
pass
class CopyToDeviceAlgorithm(BatchAllReduceAlgorithm):
"""An algorithm that copies tensors to be reduced to a specific device."""
def __init__(self, devices_to_reduce_on, use_mean=False):
self._devices = devices_to_reduce_on
self._use_mean = use_mean
def _do_batch_all_reduce(self, all_device_tensors):
reduced_tensors = []
for i, tensors_across_devices in enumerate(zip(*all_device_tensors)):
with tf.device(self._devices[i % len(self._devices)]):
reduced_tensor = _all_reduce_using_copy(tensors_across_devices,
self._use_mean)
reduced_tensors.append(reduced_tensor)
# The tensors will be brought back to each device once they are used.
return [reduced_tensors] * len(all_device_tensors)
class HierarchicalCopyAlgorithm(BatchAllReduceAlgorithm):
"""An algorithm that uses hierarchical copies. This is only optimized for
eight devices connected in NetworkTopology.DGX1 or NetworkTopology.GCP_V100
topology.
"""
def __init__(self, network_topology):
"""Initializer for HierarchicalCopyAlgorithm.
Args:
network_topology: An instance of Enum class constants.NetworkTopology.
"""
self._network_topology = network_topology
def _do_batch_all_reduce(self, all_device_tensors):
avail_devices = [device_tensors[0].device
for device_tensors in all_device_tensors]
reduced_tensors = []
num_devices = len(avail_devices)
group_size = num_devices // 2
for i, tensors_across_devices in enumerate(zip(*all_device_tensors)):
group_0_main_device, group_1_main_device = self.__get_main_devices(
i, num_devices)
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Reduce the first group.
group_0_tensors = tensors_across_devices[group_0_begin:
group_0_begin + group_size]
with tf.device(avail_devices[group_0_main_device]):
group_0_reduced_tensor = _all_reduce_using_copy(group_0_tensors, False)
# Reduce the second group.
group_1_tensors = tensors_across_devices[group_1_begin:
group_1_begin + group_size]
with tf.device(avail_devices[group_1_main_device]):
group_1_reduced_tensor = _all_reduce_using_copy(group_1_tensors, False)
# Reduce between the groups.
with tf.device(avail_devices[group_0_main_device]):
total_reduced_tensor = _all_reduce_using_copy(
[group_0_reduced_tensor, group_1_reduced_tensor], False)
# Broadcast the result back into the root of each group.
with tf.device(avail_devices[group_0_main_device]):
group_0_reduced_tensor_bcast = tf.identity(total_reduced_tensor)
with tf.device(avail_devices[group_1_main_device]):
group_1_reduced_tensor_bcast = tf.identity(total_reduced_tensor)
reduced_tensors_bcast = []
for j in range(len(tensors_across_devices)):
with tf.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_tensor = group_0_reduced_tensor_bcast
else:
src_device_tensor = group_1_reduced_tensor_bcast
reduced_tensors_bcast.append(tf.identity(src_device_tensor))
reduced_tensors.append(reduced_tensors_bcast)
reduced_tensors = list(zip(*reduced_tensors))
return reduced_tensors
def __get_main_devices(self, tensor_index, num_devices):
"""Returns the pair of main devices to use for initial reduction.
Args:
tensor_index: Index of the current tensor in the list of tensors to copy.
num_devices: Total number of devices.
Returns:
      A tuple containing a pair of main device indices for the initial
reduction. Then, the first element of the tuple should be used for the
final reduction.
Raises:
ValueError: Invalid input arguments.
"""
if self._network_topology == constants.NetworkTopology.DGX1:
return tensor_index % num_devices, (tensor_index +
(num_devices // 2)) % num_devices
elif self._network_topology == constants.NetworkTopology.GCP_V100:
if num_devices != 8:
raise ValueError('HierarchicalCopy only supports eight devices in %s.' %
self._network_topology)
# TODO(hinsu): Generalize main device indices to handle any other
# isomorphic connection graph that connects two cliques using connections
# other than 0-5 and 2-7.
main_device_pairs = [(0, 5), (2, 7), (5, 0), (7, 2)]
return main_device_pairs[tensor_index % len(main_device_pairs)]
else:
# TODO(reedwm): make this logic more general for arbitrary topology.
raise ValueError(
'HierarchicalCopy is not supported for %s network topology.' %
self._network_topology)
class AllReduceSpecAlgorithm(BatchAllReduceAlgorithm):
"""An algorithm that uses an all reduce spec."""
def __init__(self, all_reduce_spec, gpu_indices, agg_small_grads_max_bytes,
agg_small_grads_max_group):
spec = allreduce.parse_all_reduce_spec(all_reduce_spec)
if len(spec) != 1:
raise ValueError(
'Replicated mode does not support hybrid all-reduce strategies')
self._all_reduce_spec = spec[0]
self._gpu_indices = gpu_indices
self._agg_small_grads_max_bytes = agg_small_grads_max_bytes
self._agg_small_grads_max_group = agg_small_grads_max_group
def _do_batch_all_reduce(self, all_device_tensors):
# TODO(reedwm): Merge allreduce.sum_gradients_all_reduce with the other
# gradient aggregation code, since gradient aggregation is doing an all
# reduce. Currently, we do gradient repacking in two different places.
# TODO(reedwm): Change the allreduce code to reduce tensors instead of
# tower_grads.
tower_grads = [[(t, None) for t in device_tensors]
for device_tensors in all_device_tensors]
aggregated_device_grads = allreduce.sum_gradients_all_reduce(
False, # single_session
['/job:localhost'],
tower_grads,
1,
self._all_reduce_spec.alg,
self._all_reduce_spec.shards,
self._gpu_indices,
agg_small_grads_max_bytes=self._agg_small_grads_max_bytes,
agg_small_grads_max_group=self._agg_small_grads_max_group)
return [[t for t, _ in grad_vars] for grad_vars in aggregated_device_grads]
def algorithm_from_params(params):
"""Returns a BatchAllReduceAlgorithm from a Params tuple."""
if params.all_reduce_spec:
if params.gpu_indices:
gpu_indices = [int(x) for x in params.gpu_indices.split(',')]
else:
gpu_indices = [x for x in range(params.num_gpus)]
return AllReduceSpecAlgorithm(params.all_reduce_spec, gpu_indices,
params.agg_small_grads_max_bytes,
params.agg_small_grads_max_group)
elif params.hierarchical_copy:
return HierarchicalCopyAlgorithm(params.network_topology)
else:
if params.local_parameter_device == 'gpu':
devices_to_reduce_on = ['/gpu:%d' % i for i in range(params.num_gpus)]
else:
devices_to_reduce_on = ['/cpu:0']
return CopyToDeviceAlgorithm(devices_to_reduce_on)
def _apply_to_all_device_tensors(all_device_tensors, apply_func, colocate=True):
"""Applies a function to each tensor in `all_device_tensors`.
A new list of lists of tensors is returned, where every tensor in
`all_device_tensors` has had `apply_func` called on it. `all_device_tensors`
is not modified.
Args:
all_device_tensors: A list of list of tensors. `all_device_tensors[i][j]` is
a tensor where `i` is the device index and `j` is the tensor index.
apply_func: A function taking in three arguments: tensor, device_index,
tensor_index, and returning a modified tensor.
`tensor` is `all_device_tensors[device_index][tensor_index]`.
colocate: If True, apply_func will be run under context manager colocated
      with its input tensor.
Returns:
A list in the same form as `all_device_tensors`, except each tensor has had
`apply_func` called on it.
"""
new_all_device_tensors = []
for device_index, device_tensors in enumerate(all_device_tensors):
new_device_tensors = []
for tensor_index, t in enumerate(device_tensors):
if colocate:
with tf.colocate_with(t):
new_t = apply_func(t, device_index, tensor_index)
else:
new_t = apply_func(t, device_index, tensor_index)
new_device_tensors.append(new_t)
new_all_device_tensors.append(new_device_tensors)
return new_all_device_tensors
def _defer_tensor(tensor):
"""Defers the retrieval of a tensor.
The tensor is put into a StagingArea, and the return value is the
retrieval of the tensor from the StagingArea. The effect is that the
tensor returned from this function is the tensor that was put in the
StagingArea for the previous Session.run() call.
Args:
tensor: The tensor to defer for one step.
Returns:
deferred_tensor: The tensor deferred for one step.
put_op: An op to put `tensor` in the StagingArea. Must be run every step
that `deferred_tensor` is run.
warmup_op: A warmup op that should be called before the first step. Puts
a zero tensor into the StagingArea.
"""
tensor_stage = data_flow_ops.StagingArea([tensor.dtype], [tensor.shape])
put_op = tensor_stage.put([tensor])
warmup_op = tensor_stage.put([tf.zeros(tensor.shape, dtype=tensor.dtype)])
# Fetch the next tensor to use.
(tensor,) = tensor_stage.get()
return tensor, put_op, warmup_op
def defer_single_device_tensors(device_tensors):
"""Defer tensors (gradients in this case) from a single device.
Args:
device_tensors: A list of gradients tensors from a single device to defer.
Returns:
deferred_tensors: A list of tensors deferred for one step.
put_ops: A list of ops that put `tensors` in the StagingAreas. Must be run
every step that `deferred_tensors` is run.
warmup_ops: Warmup ops that should be called before the first step. Puts
zero tensors into the StagingArea.
"""
put_ops = []
warmup_ops = []
deferred_tensors = []
for tensor in device_tensors:
deferred_tensor, put_op, warmup_op = _defer_tensor(tensor)
deferred_tensors.append(deferred_tensor)
put_ops.append(put_op)
warmup_ops.append(warmup_op)
return deferred_tensors, put_ops, warmup_ops
def _add_put_op_control_deps(all_device_tensors, num_splits, put_ops):
"""Add control dependencies from `put_ops` to `all_device_tensors`.
This should only be called when deferred tensors are being used.
The control dependencies are added so that the put ops are run whenever
`all_device_tensors` is run. That way, the caller does not have to explicitly
run the put ops.
Args:
all_device_tensors: A list of list of tensors. `all_device_tensors[i][j]` is
a tensor where `i` is the device index and `j` is the tensor index.
num_splits: The number of splits that were used for the all-reduce.
put_ops: A list of put ops from deferring the tensors.
Returns:
A list in the same form as `all_device_tensors`, except each tensor has a
control dependency on an op in `put_ops`.
"""
def apply_func(tensor, device_index, tensor_index):
if num_splits == 0:
deps = [put_ops[device_index][tensor_index]]
else:
deps = put_ops[device_index]
assert len(deps) == 1
with tf.control_dependencies(deps):
return tf.identity(tensor, name='control_dependency')
return _apply_to_all_device_tensors(all_device_tensors, apply_func)
class _TensorPacker(object):
"""Packs and unpacks tensors into groups.
  This class first concatenates a set of tensors, then splits the concatenated
tensor into a small number of chunks. This is useful for all-reducing tensors,
as doing a small number of all-reduces on large tensors can be faster than
doing a large number of all-reduces on small tensors.
  It also provides an option to compact tensors by casting them to fp16, for better
all-reduce performance.
  This class maintains state about the processed tensors, such as their shapes
  and types, so each packer can only be used to pack and unpack one list of
  tensors. If you need to pack multiple lists of tensors (say from multiple
  devices), then you need multiple _TensorPacker objects, one for each device.
"""
def __init__(self, num_splits, compact):
"""Initializes the _TensorPacker.
Args:
num_splits: The number of tensors to split the concatenated tensor into.
        The batch all-reduce will consist of `num_splits` all-reduces. If None
or zero, tensors are not split or concatenated.
      compact: If True, tensors are cast to fp16 during packing and cast
back to their original dtypes during unpacking.
"""
self._num_splits = num_splits
self._compact = compact
self._before_compact_dtypes = []
def maybe_concat_tensors(self, device_tensors):
"""Concatenate tensors into a single tensor."""
if not self._num_splits:
return device_tensors
flat_tensors = [tf.reshape(t, [-1]) for t in device_tensors]
self._orig_shapes = [t.shape for t in device_tensors]
self._orig_sizes = [s.num_elements() for s in self._orig_shapes]
# All shapes must be fully defined.
assert None not in self._orig_sizes
concatenated_grad = tf.concat(flat_tensors, 0)
return [concatenated_grad]
def maybe_split_tensors(self, concatenated_tensor):
"""Split concatenated tensor into `num_splits` pieces."""
if not self._num_splits:
return concatenated_tensor
if len(concatenated_tensor) != 1:
raise RuntimeError('tensors must be concatenated via '
'maybe_concat_tensors() before splitting')
concatenated_tensor = concatenated_tensor[0]
total_tensor_size = concatenated_tensor.shape.num_elements()
split_size = total_tensor_size // self._num_splits
split_size_last = total_tensor_size - split_size * (self._num_splits - 1)
split_sizes = [split_size] * (self._num_splits - 1) + [split_size_last]
tensor_packs = tf.split(concatenated_tensor, split_sizes)
return tensor_packs
def undo_maybe_split_tensors(self, tensor_packs):
"""Undo maybe_split_tensors()."""
if not self._num_splits:
return tensor_packs
return [tf.concat(tensor_packs, 0)]
def undo_maybe_concat_tensors(self, concatenated_tensor):
"""Undo maybe_concat_tensors()."""
if not self._num_splits:
return concatenated_tensor
if len(concatenated_tensor) != 1:
raise RuntimeError(
'undo_maybe_split_tensors() must be called before '
          'undo_maybe_concat_tensors() when num_splits is greater than 1')
concatenated_tensor = concatenated_tensor[0]
tensors_with_sizes = tf.split(concatenated_tensor,
self._orig_sizes)
tensors_with_shapes = [
tf.reshape(grad, shape) for grad, shape in zip(
tensors_with_sizes, self._orig_shapes)
]
return tensors_with_shapes
def maybe_compact_tensors(self, device_tensors):
"""Cast tensors to fp16 and store their original types."""
if not self._compact:
return device_tensors
if self._before_compact_dtypes:
raise RuntimeError('maybe_compact_tensors can only be called once.')
self._before_compact_dtypes = [t.dtype for t in device_tensors]
compact_tensors = [tf.cast(t, tf.float16) for t in device_tensors]
return compact_tensors
def undo_maybe_compact_tensors(self, compact_tensors):
"""Undo maybe_compact_tensors()."""
if not self._compact:
return compact_tensors
if not self._before_compact_dtypes:
raise RuntimeError('maybe_compact_tensors() must be called before '
'undo_maybe_compact_tensors()')
device_tensors = [
tf.cast(t, dtype)
for t, dtype in zip(compact_tensors, self._before_compact_dtypes)
]
return device_tensors
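# Minimal usage sketch (not part of the original module). It runs the simplest
# algorithm, CopyToDeviceAlgorithm, over two fake "devices" on CPU; the tensor
# values and the single-CPU device list are illustrative, and running the file
# directly assumes the sibling allreduce.py/constants.py modules are importable
# (as they are inside tf_cnn_benchmarks).
if __name__ == '__main__':
  fake_all_device_tensors = [
      [tf.constant([1.0, 2.0]), tf.constant([3.0])],  # tensors from "device" 0
      [tf.constant([4.0, 5.0]), tf.constant([6.0])],  # tensors from "device" 1
  ]
  algorithm = CopyToDeviceAlgorithm(['/cpu:0'])
  reduced, warmup_ops = algorithm.batch_all_reduce(
      fake_all_device_tensors,
      num_splits=None,
      compact_tensors=False,
      defer_tensors=False)
  # Each device's list now holds the elementwise sums [5.0, 7.0] and [9.0].
  print(reduced)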
|
red-hood/calendarserver
|
contrib/od/test/test_live.py
|
##
# Copyright (c) 2014-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
OpenDirectory live service tests.
"""
from __future__ import print_function
from itertools import chain
from uuid import UUID
from twisted.trial import unittest
from twisted.internet.defer import inlineCallbacks, returnValue
try:
from twext.who.opendirectory import DirectoryService
moduleImported = True
except:
moduleImported = False
print("Could not import OpenDirectory")
if moduleImported:
from twext.who.expression import (
CompoundExpression, Operand, MatchExpression, MatchType, MatchFlags
)
from txdav.who.directory import CalendarDirectoryServiceMixin
from txdav.who.opendirectory import DirectoryService as OpenDirectoryService
class CalOpenDirectoryService(OpenDirectoryService, CalendarDirectoryServiceMixin):
pass
LOCAL_SHORTNAMES = "odtestalbert odtestbill odtestcarl odtestdavid odtestsubgroupa".split()
NETWORK_SHORTNAMES = "odtestamanda odtestbetty odtestcarlene odtestdenise odtestsubgroupb odtestgrouptop".split()
def onlyIfPopulated(func):
"""
Only run the decorated test method if the "odtestamanda" record exists
"""
@inlineCallbacks
def checkThenRun(self):
record = yield self.service.recordWithShortName(self.service.recordType.user, u"odtestamanda")
if record is not None:
result = yield func(self)
returnValue(result)
else:
print("OD not populated, skipping {}".format(func.func_name))
return checkThenRun
class LiveOpenDirectoryServiceTestCase(unittest.TestCase):
"""
Live service tests for L{DirectoryService}.
"""
def setUp(self):
self.service = DirectoryService()
def tearDown(self):
self.service._deletePool()
def verifyResults(self, records, expected, unexpected):
shortNames = []
for record in records:
for shortName in record.shortNames:
shortNames.append(shortName)
for name in expected:
self.assertTrue(name in shortNames)
for name in unexpected:
self.assertFalse(name in shortNames)
@onlyIfPopulated
@inlineCallbacks
def test_shortNameStartsWith(self):
records = yield self.service.recordsFromExpression(
MatchExpression(
self.service.fieldName.shortNames, u"odtest",
matchType=MatchType.startsWith
)
)
self.verifyResults(
records,
chain(LOCAL_SHORTNAMES, NETWORK_SHORTNAMES),
["anotherodtestamanda", "anotherodtestalbert"]
)
@onlyIfPopulated
@inlineCallbacks
def test_uid(self):
for uid, name in (
(u"9DC04A71-E6DD-11DF-9492-0800200C9A66", u"odtestbetty"),
(u"9DC04A75-E6DD-11DF-9492-0800200C9A66", u"odtestbill"),
):
record = yield self.service.recordWithUID(uid)
self.assertTrue(record is not None)
self.assertEquals(record.shortNames[0], name)
@onlyIfPopulated
@inlineCallbacks
def test_guid(self):
for guid, name in (
(UUID("9DC04A71-E6DD-11DF-9492-0800200C9A66"), u"odtestbetty"),
(UUID("9DC04A75-E6DD-11DF-9492-0800200C9A66"), u"odtestbill"),
):
record = yield self.service.recordWithGUID(guid)
self.assertTrue(record is not None)
self.assertEquals(record.shortNames[0], name)
@onlyIfPopulated
@inlineCallbacks
def test_compoundWithoutRecordType(self):
expression = CompoundExpression(
[
CompoundExpression(
[
MatchExpression(
self.service.fieldName.fullNames, u"be",
matchType=MatchType.contains
),
MatchExpression(
self.service.fieldName.emailAddresses, u"be",
matchType=MatchType.startsWith
),
],
Operand.OR
),
CompoundExpression(
[
MatchExpression(
self.service.fieldName.fullNames, u"test",
matchType=MatchType.contains
),
MatchExpression(
self.service.fieldName.emailAddresses, u"test",
matchType=MatchType.startsWith
),
],
Operand.OR
),
],
Operand.AND
)
records = yield self.service.recordsFromExpression(expression)
# We should get back users and groups since we did not specify a type:
self.verifyResults(
records,
[
"odtestbetty", "odtestalbert", "anotherodtestalbert",
"odtestgroupbetty", "odtestgroupalbert"
],
["odtestamanda", "odtestbill", "odtestgroupa", "odtestgroupb"]
)
@onlyIfPopulated
@inlineCallbacks
def test_compoundWithExplicitRecordType(self):
expression = CompoundExpression(
[
CompoundExpression(
[
MatchExpression(
self.service.fieldName.fullNames, u"be",
matchType=MatchType.contains
),
MatchExpression(
self.service.fieldName.emailAddresses, u"be",
matchType=MatchType.startsWith
),
],
Operand.OR
),
CompoundExpression(
[
MatchExpression(
self.service.fieldName.fullNames, u"test",
matchType=MatchType.contains
),
MatchExpression(
self.service.fieldName.emailAddresses, u"test",
matchType=MatchType.startsWith
),
],
Operand.OR
),
],
Operand.AND
)
records = yield self.service.recordsFromExpression(
expression, recordTypes=[self.service.recordType.user]
)
# We should get back users but not groups:
self.verifyResults(
records,
["odtestbetty", "odtestalbert", "anotherodtestalbert"],
["odtestamanda", "odtestbill", "odtestgroupa", "odtestgroupb"]
)
@onlyIfPopulated
@inlineCallbacks
def test_compoundWithMultipleExplicitRecordTypes(self):
expression = CompoundExpression(
[
CompoundExpression(
[
MatchExpression(
self.service.fieldName.fullNames, u"be",
matchType=MatchType.contains
),
MatchExpression(
self.service.fieldName.emailAddresses, u"be",
matchType=MatchType.startsWith
),
],
Operand.OR
),
CompoundExpression(
[
MatchExpression(
self.service.fieldName.fullNames, u"test",
matchType=MatchType.contains
),
MatchExpression(
self.service.fieldName.emailAddresses, u"test",
matchType=MatchType.startsWith
),
],
Operand.OR
),
],
Operand.AND
)
records = yield self.service.recordsFromExpression(
expression, recordTypes=[
self.service.recordType.user,
self.service.recordType.group
]
)
# We should get back users and groups:
self.verifyResults(
records,
[
"odtestbetty", "odtestalbert", "anotherodtestalbert",
"odtestgroupbetty", "odtestgroupalbert"
],
["odtestamanda", "odtestbill", "odtestgroupa", "odtestgroupb"]
)
@onlyIfPopulated
@inlineCallbacks
def test_recordsMatchingTokens(self):
self.calService = CalOpenDirectoryService()
records = yield self.calService.recordsMatchingTokens([u"be", u"test"])
self.verifyResults(
records,
[
"odtestbetty", "odtestalbert", "anotherodtestalbert",
"odtestgroupbetty", "odtestgroupalbert"
],
["odtestamanda", "odtestbill", "odtestgroupa", "odtestgroupb"]
)
@onlyIfPopulated
@inlineCallbacks
def test_recordsMatchingTokensWithContextUser(self):
self.calService = CalOpenDirectoryService()
records = yield self.calService.recordsMatchingTokens(
[u"be", u"test"],
context=self.calService.searchContext_user
)
self.verifyResults(
records,
[
"odtestbetty", "odtestalbert", "anotherodtestalbert",
],
[
"odtestamanda", "odtestbill", "odtestgroupa", "odtestgroupb",
"odtestgroupbetty", "odtestgroupalbert"
]
)
@onlyIfPopulated
@inlineCallbacks
def test_recordsMatchingTokensWithContextGroup(self):
self.calService = CalOpenDirectoryService()
records = yield self.calService.recordsMatchingTokens(
[u"be", u"test"],
context=self.calService.searchContext_group
)
self.verifyResults(
records,
[
"odtestgroupbetty", "odtestgroupalbert"
],
[
"odtestamanda", "odtestbill", "odtestgroupa", "odtestgroupb",
"odtestbetty", "odtestalbert", "anotherodtestalbert"
]
)
@onlyIfPopulated
@inlineCallbacks
def test_recordsMatchingMultipleFieldsNoRecordType(self):
self.calService = CalOpenDirectoryService()
fields = (
(u"fullNames", u"be", MatchFlags.caseInsensitive, MatchType.contains),
(u"fullNames", u"test", MatchFlags.caseInsensitive, MatchType.contains),
)
records = (yield self.calService.recordsMatchingFields(
fields, operand=Operand.AND, recordType=None
))
self.verifyResults(
records,
[
"odtestgroupbetty", "odtestgroupalbert",
"odtestbetty", "odtestalbert", "anotherodtestalbert"
],
[
"odtestamanda",
]
)
@onlyIfPopulated
@inlineCallbacks
def test_recordsMatchingSingleFieldNoRecordType(self):
self.calService = CalOpenDirectoryService()
fields = (
(u"fullNames", u"test", MatchFlags.caseInsensitive, MatchType.contains),
)
records = (yield self.calService.recordsMatchingFields(
fields, operand=Operand.AND, recordType=None
))
self.verifyResults(
records,
[
"odtestgroupbetty", "odtestgroupalbert",
"odtestbetty", "odtestalbert", "anotherodtestalbert",
"odtestamanda",
],
[
"nobody",
]
)
@onlyIfPopulated
@inlineCallbacks
def test_recordsMatchingFieldsWithRecordType(self):
self.calService = CalOpenDirectoryService()
fields = (
(u"fullNames", u"be", MatchFlags.caseInsensitive, MatchType.contains),
(u"fullNames", u"test", MatchFlags.caseInsensitive, MatchType.contains),
)
records = (yield self.calService.recordsMatchingFields(
fields, operand=Operand.AND, recordType=self.calService.recordType.user
))
self.verifyResults(
records,
[
"odtestbetty", "odtestalbert", "anotherodtestalbert"
],
[
"odtestamanda", "odtestgroupalbert", "odtestgroupbetty",
]
)
|
nathanbjenx/cairis
|
cairis/gui/ConcernAssociationListCtrl.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from cairis.core.armid import *
from cairis.core.ARM import *
from ConcernAssociationDialog import ConcernAssociationDialog
__author__ = 'Shamal Faily'
class ConcernAssociationListCtrl(wx.ListCtrl):
def __init__(self,parent,winId,dp,boxSize=wx.DefaultSize):
wx.ListCtrl.__init__(self,parent,winId,size=boxSize,style=wx.LC_REPORT)
self.dbProxy = dp
self.theCurrentEnvironment = ''
self.InsertColumn(0,'Source')
self.SetColumnWidth(0,100)
self.InsertColumn(1,'n')
self.SetColumnWidth(1,50)
self.InsertColumn(2,'Link Verb')
self.SetColumnWidth(2,75)
self.InsertColumn(3,'n')
self.SetColumnWidth(3,50)
self.InsertColumn(4,'Target')
self.SetColumnWidth(4,100)
self.theSelectedIdx = -1
self.theDimMenu = wx.Menu()
self.theDimMenu.Append(CONCA_MENUADD_ID,'Add')
self.theDimMenu.Append(CONCA_MENUDELETE_ID,'Delete')
self.Bind(wx.EVT_RIGHT_DOWN,self.OnRightDown)
wx.EVT_MENU(self.theDimMenu,CONCA_MENUADD_ID,self.onAddAssociation)
wx.EVT_MENU(self.theDimMenu,CONCA_MENUDELETE_ID,self.onDeleteAssociation)
self.Bind(wx.EVT_LIST_ITEM_SELECTED,self.OnItemSelected)
self.Bind(wx.EVT_LIST_ITEM_DESELECTED,self.OnItemDeselected)
self.Bind(wx.EVT_LIST_ITEM_ACTIVATED,self.onAssociationActivated)
def setEnvironment(self,environmentName):
self.theCurrentEnvironment = environmentName
def OnRightDown(self,evt):
self.PopupMenu(self.theDimMenu)
def onAddAssociation(self,evt):
dlg = ConcernAssociationDialog(self,self.dbProxy,self.theCurrentEnvironment)
if (dlg.ShowModal() == CONCERNASSOCIATION_BUTTONCOMMIT_ID):
self.theSelectedIdx = self.GetItemCount()
self.InsertStringItem(self.theSelectedIdx,dlg.source())
self.SetStringItem(self.theSelectedIdx,1,dlg.sourceMultiplicity())
self.SetStringItem(self.theSelectedIdx,2,dlg.link())
self.SetStringItem(self.theSelectedIdx,3,dlg.targetMultiplicity())
self.SetStringItem(self.theSelectedIdx,4,dlg.target())
def onDeleteAssociation(self,evt):
if (self.theSelectedIdx == -1):
errorText = 'No association selected'
errorLabel = 'Delete concern'
dlg = wx.MessageDialog(self,errorText,errorLabel,wx.OK)
dlg.ShowModal()
dlg.Destroy()
else:
selectedValue = self.GetItemText(self.theSelectedIdx)
self.DeleteItem(self.theSelectedIdx)
def OnItemSelected(self,evt):
self.theSelectedIdx = evt.GetIndex()
def OnItemDeselected(self,evt):
self.theSelectedIdx = -1
def onAssociationActivated(self,evt):
self.theSelectedIdx = evt.GetIndex()
source = self.GetItemText(self.theSelectedIdx)
sourceMultiplicity = self.GetItem(self.theSelectedIdx,1)
link = self.GetItem(self.theSelectedIdx,2)
targetMultiplicity = self.GetItem(self.theSelectedIdx,3)
target = self.GetItem(self.theSelectedIdx,4)
dlg = ConcernAssociationDialog(self,self.dbProxy,self.theCurrentEnvironment,source,sourceMultiplicity.GetText(),link.GetText(),target.GetText(),targetMultiplicity.GetText())
if (dlg.ShowModal() == CONCERNASSOCIATION_BUTTONCOMMIT_ID):
self.SetStringItem(self.theSelectedIdx,0,dlg.source())
self.SetStringItem(self.theSelectedIdx,1,dlg.sourceMultiplicity())
self.SetStringItem(self.theSelectedIdx,2,dlg.link())
self.SetStringItem(self.theSelectedIdx,3,dlg.targetMultiplicity())
self.SetStringItem(self.theSelectedIdx,4,dlg.target())
def load(self,assocs):
for source,sourceMultiplicity,link,target,targetMultiplicity in assocs:
idx = self.GetItemCount()
self.InsertStringItem(idx,source)
self.SetStringItem(idx,1,sourceMultiplicity)
self.SetStringItem(idx,2,link)
self.SetStringItem(idx,3,targetMultiplicity)
self.SetStringItem(idx,4,target)
def dimensions(self):
assocs = []
for x in range(self.GetItemCount()):
source = self.GetItemText(x)
sourceMultiplicity = self.GetItem(x,1)
link = self.GetItem(x,2)
targetMultiplicity = self.GetItem(x,3)
target = self.GetItem(x,4)
assocs.append((source,sourceMultiplicity.GetText(),link.GetText(),target.GetText(),targetMultiplicity.GetText()))
return assocs
|
vivekpabani/projecteuler
|
python/017/problem_017.py
|
#!/usr/bin/env python
"""
Problem Definition :
If the numbers 1 to 5 are written out in words: one, two, three, four, five, then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.
If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used?
NOTE: Do not count spaces or hyphens. For example, 342 (three hundred and forty-two) contains 23 letters and 115 (one hundred and fifteen) contains 20 letters. The use of "and" when writing out numbers is in compliance with British usage.
"""
def main():
ones = ['', 'One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine']
double = ['Ten', 'Eleven', 'Twelve', 'Thirteen', 'Fourteen', 'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen']
tens = ['', '', 'Twenty', 'Thirty', 'Forty', 'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety']
hundreds = ['', '', '', 'Hundred', 'Thousand']
words = 0
for num in xrange(1, 1001):  # 1 to 1000 inclusive, as the problem requires
word = ''
if len(str(num)) == 4:
digit = int(num/1000)
word = word + ones[digit] + hundreds[4]
num %= 1000
if len(str(num)) == 3:
digit = int(num/100)
word = word + ones[digit] + hundreds[3]
num %= 100
if num:
word += 'And'
if len(str(num)) == 2:
digit = int(num/10)
if digit == 1:
num %= 10
word += double[num]
num = 0
else:
word += tens[digit]
num %= 10
if len(str(num)) == 1:
word += ones[num]
words += len(word)
print words
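# A quick sanity check (illustrative only, not part of the original solution) of the
# counting rule in the problem statement above: spaces and hyphens are ignored, so
# "three hundred and forty-two" has 23 letters and "one hundred and fifteen" has 20.
def letter_count(phrase):
    # count alphabetic characters only, ignoring spaces and hyphens
    return sum(1 for ch in phrase if ch.isalpha())
assert letter_count("three hundred and forty-two") == 23
assert letter_count("one hundred and fifteen") == 20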
if __name__ == '__main__':
main()
|
shengwen1994/calvin-base
|
setup.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import setup
def read_desc(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='calvin',
version='0.1',
url="http://github.com/EricssonResearch/calvin-base",
license="Apache Software License",
author="Ericsson Research",
author_email="N/A",
tests_require=[
'mock>1.0.1',
'pytest>=1.4.25',
'pytest-twisted'
],
install_requires=[
'colorlog>=2.6.0',
'kademlia>=0.4',
'ply>=3.4',
'Twisted>=15.0.0',
'requests >= 2.6.0',
'infi.traceback>=0.3.11',
'wrapt==1.10.2'
],
description="Calvin is a distributed runtime and development framework for an actor based dataflow"
"programming methodology",
long_description=read_desc('README.md'),
packages=["calvin"],
include_package_data=True,
platforms='any',
test_suite="calvin.test.test_calvin",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Framework :: Twisted",
"Natural Language :: English",
"Intended Audience :: Developers",
"Topic :: Software Development",
],
extras_require={
'testing': ['pytest', 'mock']
},
entry_points={
'console_scripts': [
'csdeploy=calvin.Tools.deploy_app:main',
'csdocs=calvin.Tools.calvindoc:main',
'cscompile=calvin.Tools.cscompiler:main',
'csinstall=calvin.Tools.csinstaller:main',
'csweb=calvin.Tools.www.csweb:main'
]
}
)
|
semprix/CTFIgniter
|
payload/CTFd/tests/helpers.py
|
from CTFd import create_app
from CTFd.models import *
from sqlalchemy_utils import database_exists, create_database, drop_database
from sqlalchemy.engine.url import make_url
def create_ctfd(ctf_name="CTFd", name="admin", email="admin@ctfd.io", password="password", setup=True):
app = create_app('CTFd.config.TestingConfig')
url = make_url(app.config['SQLALCHEMY_DATABASE_URI'])
if url.drivername == 'postgres':
url.drivername = 'postgresql'
if database_exists(url):
drop_database(url)
create_database(url)
with app.app_context():
app.db.create_all()
if setup:
with app.app_context():
with app.test_client() as client:
data = {}
r = client.get('/setup') # Populate session with nonce
with client.session_transaction() as sess:
data = {
"ctf_name": ctf_name,
"name": name,
"email": email,
"password": password,
"nonce": sess.get('nonce')
}
client.post('/setup', data=data)
return app
def register_user(app, name="user", email="user@ctfd.io", password="password"):
with app.app_context():
with app.test_client() as client:
r = client.get('/register')
with client.session_transaction() as sess:
data = {
"name": name,
"email": email,
"password": password,
"nonce": sess.get('nonce')
}
client.post('/register', data=data)
def login_as_user(app, name="user", password="password"):
with app.app_context():
with app.test_client() as client:
r = client.get('/login')
with client.session_transaction() as sess:
data = {
"name": name,
"password": password,
"nonce": sess.get('nonce')
}
client.post('/login', data=data)
return client
def gen_challenge(db, name='chal_name', description='chal_description', value=100, category='chal_category', type=0):
chal = Challenges(name, description, value, category)
db.session.add(chal)
db.session.commit()
return chal
def gen_award(db, teamid, name="award_name", value=100):
award = Awards(teamid, name, value)
db.session.add(award)
db.session.commit()
return award
def gen_tag(db, chal, tag='tag_tag'):
tag = Tags(chal, tag)
db.session.add(tag)
db.session.commit()
return tag
def gen_file():
pass
def gen_flag(db, chal, flag='flag', key_type=0):
key = Keys(chal, flag, key_type)
db.session.add(key)
db.session.commit()
return key
def gen_team(db, name='name', email='user@ctfd.io', password='password'):
team = Teams(name, email, password)
db.session.add(team)
db.session.commit()
return team
def gen_solve(db, chalid, teamid, ip='127.0.0.1', flag='rightkey'):
solve = Solves(chalid, teamid, ip, flag)
db.session.add(solve)
db.session.commit()
return solve
def gen_wrongkey(db, teamid, chalid, flag='wrongkey'):
wrongkey = WrongKeys(teamid, chalid, flag)
db.session.add(wrongkey)
db.session.commit()
return wrongkey
def gen_tracking(db, ip, team):
tracking = Tracking(ip, team)
db.session.add(tracking)
db.session.commit()
return tracking
def gen_page(db, route, html):
page = Pages(route, html)
db.session.add(page)
db.session.commit()
return page
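# A minimal usage sketch (hypothetical test body, for illustration only): the helpers
# above are typically combined like this inside a test case.
def example_helper_usage():
    app = create_ctfd()                      # fresh CTFd app, set up with an admin account
    register_user(app)                       # add the default "user" account
    client = login_as_user(app)              # log that user in; returns a test client
    with app.app_context():
        chal = gen_challenge(app.db)         # create a challenge...
        gen_flag(app.db, chal, flag='flag')  # ...and attach a key to it
    return app, client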
|
birdage/ooi-ui-services
|
ooiservices/app/main/c2_mission.py
|
#!/usr/bin/env python
'''
API v1.0 Command and Control (C2) routes for Mission Control
'''
__author__ = 'Edna Donoughe'
from flask import jsonify
from ooiservices.app.main import api
from ooiservices.app.models import Array
import json
from ooiservices.app.main.errors import bad_request
from ooiservices.app.main.authentication import auth
from ooiservices.app.decorators import scope_required
from ooiservices.app.main.c2 import read_store
from ooiservices.app.main.c2 import _get_platform, _get_instrument, _get_instruments
# - - - - - - - - - - - - - - - - - - - - - - - -
# C2 Mission Control - array routes
# - - - - - - - - - - - - - - - - - - - - - - - -
@api.route('/c2/array/<string:array_code>/mission_display', methods=['GET'])
@auth.login_required
@scope_required(u'command_control')
def c2_get_array_mission_display(array_code):
#Get C2 array mission (display), return mission_display (contents of platform Mission tab)
array = Array.query.filter_by(array_code=array_code).first()
if not array:
return bad_request('unknown array (array_code: \'%s\')' % array_code)
mission_display = {}
return jsonify(mission_display=mission_display)
# - - - - - - - - - - - - - - - - - - - - - - - -
# C2 Mission Control - platform
# - - - - - - - - - - - - - - - - - - - - - - - -
@api.route('/c2/platform/<string:reference_designator>/mission/instruments_list', methods=['GET'])
@auth.login_required
@scope_required(u'command_control')
def c2_get_platform_mission_instruments_list(reference_designator):
# C2 get [platform] Mission tab instruments_list, return instruments [{instrument1}, {instrument2}, ...]
# where each instrument dictionary (is a row in instruments list) contains:
# {'reference_designator': reference_designator, 'instrument_deployment_id': id, 'display_name': display_name }
# Samples:
# http://localhost:4000/c2/platform/reference_designator/mission/instruments_list
contents = []
platform_info = {}
platform_deployment = _get_platform(reference_designator)
if platform_deployment:
# get ordered set of instrument_deployments for platform
# Get instruments for this platform
instruments, oinstruments = _get_instruments(reference_designator)
# create list of reference_designators (instruments) and accumulate dict result (key=reference_designator) for output
for instrument_deployment in instruments:
row = {}
row['reference_designator'] = instrument_deployment['reference_designator']
if instrument_deployment['display_name']:
row['display_name'] = instrument_deployment['display_name']
else:
row['display_name'] = instrument_deployment['reference_designator']
platform_info[instrument_deployment['reference_designator']] = row
# Create list of dictionaries representing row(s) for 'data' (ordered by reference_designator)
# 'data' == rows for initial grid ('Current Status')
for instrument_reference_designator in oinstruments:
if instrument_reference_designator in platform_info:
contents.append(platform_info[instrument_reference_designator])
return jsonify(instruments=contents)
@api.route('/c2/platform/<string:reference_designator>/mission_display', methods=['GET'])
@auth.login_required
@scope_required(u'command_control')
def c2_get_platform_mission_display(reference_designator):
#Get C2 platform Mission tab contents, return mission_display
mission_display = {}
platform = _get_platform(reference_designator)
if platform:
mission_display = {} # todo populate display content
return jsonify(mission_display=mission_display)
# - - - - - - - - - - - - - - - - - - - - - - - -
# C2 Mission Control - instrument
# - - - - - - - - - - - - - - - - - - - - - - - -
@api.route('/c2/instrument/<string:reference_designator>/mission_display', methods=['GET'])
@auth.login_required
@scope_required(u'command_control')
def c2_get_instrument_mission_display(reference_designator):
#Get C2 instrument Mission tab contents, return mission_display
mission_display = {}
instrument = _get_instrument(reference_designator)
if instrument:
mission_display = {} # todo populate display content
return jsonify(mission_display=mission_display)
@api.route('/c2/platform/<string:reference_designator>/mission_selections', methods=['GET'])
@auth.login_required
@scope_required(u'command_control')
def c2_get_platform_mission_selections(reference_designator):
# C2 get platform Mission tab mission selections content, return mission_selections [{},{}...]
# return list of platform mission plans
mission_selections = []
platform = _get_platform(reference_designator)
if platform:
mission_selections = _get_mission_selections(reference_designator)
return jsonify(mission_selections=mission_selections)
@api.route('/c2/instrument/<string:reference_designator>/mission_selections', methods=['GET'])
@auth.login_required
@scope_required(u'command_control')
def c2_get_instrument_mission_selections(reference_designator):
# C2 get instrument Mission tab mission selections content, return mission_selections [{},{}...]
# return list of instrument mission plans
mission_selections = []
instrument = _get_instrument(reference_designator)
if instrument:
mission_selections = _get_mission_selections(reference_designator)
return jsonify(mission_selections=mission_selections)
@api.route('/c2/platform/<string:reference_designator>/mission_selection/<string:mission_plan_store>', methods=['GET'])
@auth.login_required
@scope_required(u'command_control')
def c2_get_platform_mission_selection(reference_designator, mission_plan_store):
# C2 get [platform] selected mission_plan content, return mission_plan
if not mission_plan_store:
return bad_request('mission_plan_store parameter is empty')
mission_plan = {}
platform = _get_platform(reference_designator)
if platform:
mission_plan = _get_mission_selection(mission_plan_store)
return jsonify(mission_plan=mission_plan)
@api.route('/c2/instrument/<string:reference_designator>/mission_selection/<string:mission_plan_store>', methods=['GET'])
@auth.login_required
@scope_required(u'command_control')
def c2_get_instrument_mission_selection(reference_designator, mission_plan_store):
# C2 get [instrument] selected mission_plan content from store (file, uframe), return mission_plan
if not mission_plan_store:
return bad_request('mission_plan_store parameter is empty')
mission_plan = {}
instrument = _get_instrument(reference_designator)
if instrument:
mission_plan = _get_mission_selection(mission_plan_store)
return jsonify(mission_plan=mission_plan)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# private helper methods
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def _get_mission_selections(reference_designator):
mission_selections = []
response_text = json_get_uframe_mission_selections(reference_designator)
if response_text:
try:
mission_selections = json.loads(response_text)
except:
return bad_request('Malformed mission_selections; not in valid json format. (reference designator \'%s\')'
% reference_designator)
return mission_selections
def _get_mission_selection(mission_plan_store):
mission_plan = []
response_text = json_get_uframe_mission_selection(mission_plan_store)
if response_text:
try:
mission_plan.append(response_text)
except:
return bad_request('Malformed mission_plan data; not in valid json format.')
return mission_plan
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Private helpers for file data (./ooiuiservices/tests/c2data/*)
# Each of these will be replaced with interface to uframe or other interface (other than file)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def json_get_uframe_mission_selections(reference_designator):
try:
data = None
if reference_designator:
if len(reference_designator) == 27:
mission_type = 'instrument'
elif len(reference_designator) == 14:
mission_type = 'platform'
else:
return []
filename = "_".join([mission_type, 'missions'])
data = read_store(filename)
except:
return None
return data
def json_get_uframe_mission_selection(mission_plan_filename):
try:
data = None
if mission_plan_filename:
data = read_store(mission_plan_filename)
except:
return None
return data
|
sony/nnabla
|
python/test/function/test_broadcast_to.py
|
# Copyright 2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla.functions as F
from nbla_test_utils import (
function_tester,
list_ctx_and_func_name)
def copying_to_leaf(x, y, axis):
return (len(x.shape) - len(y.shape) - axis) == 0
def ref_broadcast_to(x, y, axis):
if axis < 0 or copying_to_leaf(x, y, axis):
# Copy data to leaf
return np.ones(x.shape) * y
# Copy data from specified axis
xs = len(x.shape)
ys = len(y.shape)
if xs == 2:
t = y[:, np.newaxis]
t.transpose()
return np.broadcast_to(t, x.shape)
elif xs == 3:
if ys == 1:
if axis == 0:
t = y[:, np.newaxis, np.newaxis]
t.transpose()
return np.broadcast_to(t, x.shape)
elif axis == 1:
t = y[np.newaxis, :, np.newaxis]
t.transpose()
return np.broadcast_to(t, x.shape)
elif ys == 2:
if axis == 0:
t = y[:, :, np.newaxis]
return np.broadcast_to(t, x.shape)
elif xs == 4:
if ys == 1:
if axis == 0:
t = y[:, np.newaxis, np.newaxis, np.newaxis]
t.transpose()
return np.broadcast_to(t, x.shape)
elif axis == 1:
t = y[np.newaxis, :, np.newaxis, np.newaxis]
t.transpose()
return np.broadcast_to(t, x.shape)
elif axis == 2:
t = y[np.newaxis, np.newaxis, :, np.newaxis]
t.transpose()
return np.broadcast_to(t, x.shape)
elif ys == 2:
if axis == 0:
t = y[:, :, np.newaxis, np.newaxis]
return np.broadcast_to(t, x.shape)
elif axis == 1:
t = y[np.newaxis, :, :, np.newaxis]
return np.broadcast_to(t, x.shape)
elif ys == 3:
if axis == 0:
t = y[:, :, :, np.newaxis]
return np.broadcast_to(t, x.shape)
PARAMS = [
((2, 3), (2), 0),
((2, 3), (3), 1),
((2, 3, 4), (2), 0),
((2, 3, 4), (3), 1),
((2, 3, 4), (4), 2),
((2, 3, 4), (2, 3), 0),
((2, 3, 4), (3, 4), 1),
((2, 3, 4, 5), (2), 0),
((2, 3, 4, 5), (3), 1),
((2, 3, 4, 5), (4), 2),
((2, 3, 4, 5), (5), 3),
((2, 3, 4, 5), (2, 3), 0),
((2, 3, 4, 5), (3, 4), 1),
((2, 3, 4, 5), (4, 5), 2),
((2, 3, 4, 5), (2, 3, 4), 0),
((2, 3, 4, 5), (3, 4, 5), 1),
((2, 3, 4, 5), (5), -1),
((2, 3, 4, 5), (4, 5), -1),
((2, 3, 4, 5), (3, 4, 5), -1),
((2, 3, 4, 5), (2, 3, 4, 5), -1),
((2, 3, 4, 5), (2, 3, 4, 5), -2)
]
@pytest.mark.parametrize("seed", [314])
@pytest.mark.parametrize("fname, ctx, func_name", list_ctx_and_func_name(['broadcast_to']))
@pytest.mark.parametrize("xs, ys, axis", PARAMS)
def test_broadcast_to_forward(xs, ys, axis, seed, fname, ctx, func_name):
rng = np.random.RandomState(seed)
ref_func = eval('ref_' + fname)
func = getattr(F, fname)
inputs = [rng.random_sample(xs), rng.random_sample(ys)]
function_tester(rng, func, ref_func, inputs, [axis],
backward=[False, False],
ctx=ctx, func_name=func_name)
|
RedHatInsights/insights-core
|
insights/parsers/satellite_postgresql_query.py
|
"""
Satellite PostgreSQL database queries
=====================================
This module contains the following parsers:
SatelliteAdminSettings - command ``psql -d foreman -c 'select name, value, "default" from settings where name in (\'destroy_vm_on_host_delete\', \'unregister_delete_host\') --csv'``
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
SatelliteComputeResources - command ``psql -d foreman -c 'select name, type from compute_resources' --csv``
-----------------------------------------------------------------------------------------------------------
SatelliteCoreTaskReservedResourceCount - command ``psql -d pulpcore -c 'select count(*) from core_taskreservedresource' --csv``
-------------------------------------------------------------------------------------------------------------------------------
SatelliteQualifiedCapsules - command ``psql -d foreman -c "select name from smart_proxies where download_policy = 'background'" --csv``
---------------------------------------------------------------------------------------------------------------------------------------
SatelliteQualifiedKatelloRepos - command ``psql -d foreman -c "select id, name, url, download_policy from katello_root_repositories where download_policy = 'background' or url is NULL" --csv``
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
SatelliteSCAStatus - command ``psql -d candlepin -c "select displayname, content_access_mode from cp_owner" --csv``
-------------------------------------------------------------------------------------------------------------------
"""
import os
import yaml
from csv import DictReader
from insights import parser, CommandParser
from insights.specs import Specs
from insights.parsers import SkipException, ParseException
from insights.parsers import keyword_search, calc_offset
from insights.util import deprecated
class SatellitePostgreSQLQuery(CommandParser, list):
"""
Parent class for Satellite PostgreSQL table queries. It cannot be used
directly; a child class that overrides the ``columns`` attribute is required.
It saves the row data into a list, with each row stored as a dict whose
keys are the column names and whose values are the column values.
Resultant data structure::
[
{
'name': 'abc',
'url': '',
'value': 'test'
},
{
'name': 'def',
'url': 'http://xx.com',
'value': ''
}
]
Sample Output::
name,url,value
abc,,test
def,http://xx.com,
Raises:
SkipException: when there isn't data in the table
ParseException: when the output isn't in valid CSV format.
NotImplementedError: when the subclass doesn't override the columns attribute.
"""
# child class should override the columns attribute with its own column names
columns = []
def parse_content(self, content):
if not self.columns:
raise NotImplementedError("Please override the columns attribute.")
start_index = calc_offset(content, self.columns, require_all=True)
valid_lines = content[start_index:]
reader = DictReader(os.linesep.join(valid_lines).splitlines(True))
for row in reader:
self.append(row)
if not self:
raise SkipException("There is no data in the table.")
def search(self, **kwargs):
"""
Get the rows by searching the table with kwargs.
This uses the :py:func:`insights.parsers.keyword_search` function for
searching; see its documentation for usage details. If no search
parameters are given, no rows are returned.
It simplifies the value of the column according to actual usage.
Returns:
list: A list of dictionaries of rows that match the given
search criteria.
"""
return keyword_search(self, **kwargs)
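# A minimal illustration (hypothetical subclass, not a registered parser): a child
# class only needs to override the ``columns`` attribute; ``parse_content`` and
# ``search`` are inherited from SatellitePostgreSQLQuery. Real parsers also need an
# @parser(Specs...) decorator, as in the classes below.
class _ExampleQuery(SatellitePostgreSQLQuery):
    # hypothetical column names, used purely to illustrate the pattern
    columns = ['name', 'url', 'value']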
@parser(Specs.satellite_settings)
class SatelliteAdminSettings(SatellitePostgreSQLQuery):
"""
Parse the output of the command ``psql -d foreman -c 'select name, value, "default" from settings where name in (\'destroy_vm_on_host_delete\', \'unregister_delete_host\') --csv'``.
Sample output::
name,value,default
unregister_delete_host,"--- true
...","--- false
..."
destroy_vm_on_host_delete,,"--- true
..."
Examples:
>>> type(table)
<class 'insights.parsers.satellite_postgresql_query.SatelliteAdminSettings'>
>>> table.get_setting('unregister_delete_host')
True
>>> table.get_setting('destroy_vm_on_host_delete')
True
"""
columns = ['name', 'value', 'default']
def _parse_yaml(self, value):
if value:
try:
return yaml.safe_load(value)
except Exception:
raise ParseException("Bad format value: %s" % value)
return value
def parse_content(self, content):
"""
The "default" and "value" columns must be selected, or else the
settings value can't be determined.
The "default" and "value" column are in yaml format, it is transfer to
python object.
Raises:
SkipException: when value or default column isn't found in the
table.
ParseException: when the value or default is in bad YAML format.
"""
super(SatelliteAdminSettings, self).parse_content(content)
for row in self:
row['default'] = self._parse_yaml(row['default'])
row['value'] = self._parse_yaml(row['value'])
def get_setting(self, setting_name):
"""
Get the actual value of setting_name.
If the value column isn't empty, the value of the setting_name is the
value column, or else it's the default column.
Args:
setting_name (str): the value of name column which is searched in the table.
Returns:
It depends on the setting, maybe boolean, string, int or a list.
None if the setting_name doesn't exist in the table.
"""
rows = self.search(name=setting_name)
if rows:
value = rows[0].get('value')
return rows[0].get('default') if value == '' else value
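# For illustration (hypothetical cell values): the "value" and "default" cells hold
# single-document YAML such as "--- true\n...", so after parse_content they become
# native Python objects, e.g. yaml.safe_load("--- true\n...") evaluates to True and
# yaml.safe_load("--- false\n...") evaluates to False; an empty value stays ''.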
@parser(Specs.satellite_compute_resources)
class SatelliteComputeResources(SatellitePostgreSQLQuery):
"""
Parse the output of the command ``psql -d foreman -c 'select name, type from compute_resources' --csv``.
Sample output::
name,type
test_compute_resource1,Foreman::Model::Libvirt
test_compute_resource2,Foreman::Model::RHV
Examples:
>>> type(resources_table)
<class 'insights.parsers.satellite_postgresql_query.SatelliteComputeResources'>
>>> rows=resources_table.search(type='Foreman::Model::Libvirt')
>>> len(rows)
1
>>> rows[0]['name']
'test_compute_resource1'
"""
columns = ['name', 'type']
@parser(Specs.satellite_core_taskreservedresource_count)
class SatelliteCoreTaskReservedResourceCount(SatellitePostgreSQLQuery):
"""
Parse the output of the command ``psql -d pulpcore -c 'select count(*) from core_taskreservedresource' --csv``.
Sample output::
count
0
Examples:
>>> type(tasks)
<class 'insights.parsers.satellite_postgresql_query.SatelliteCoreTaskReservedResourceCount'>
>>> tasks[0]['count']
'0'
"""
columns = ['count']
@parser(Specs.satellite_katello_empty_url_repositories)
class SatelliteKatelloEmptyURLRepositories(SatellitePostgreSQLQuery):
"""
.. warning::
This parser is deprecated, please use
:py:class:`insights.parsers.satellite_postgresql_query.SatelliteQualifiedKatelloRepos` instead.
Parse the output of the command ``psql -d foreman -c 'select id, name from katello_root_repositories where url is NULL;' --csv``.
Sample output::
id,name
54,testa
55,testb
Examples:
>>> type(katello_root_repositories)
<class 'insights.parsers.satellite_postgresql_query.SatelliteKatelloEmptyURLRepositories'>
>>> len(katello_root_repositories)
2
>>> katello_root_repositories[0]['name']
'testa'
"""
columns = ['id', 'name']
def __init__(self, *args, **kwargs):
deprecated(SatelliteKatelloEmptyURLRepositories, 'Please use the SatelliteQualifiedKatelloRepos parser in the current module.')
super(SatelliteKatelloEmptyURLRepositories, self).__init__(*args, **kwargs)
@parser(Specs.satellite_qualified_katello_repos)
class SatelliteQualifiedKatelloRepos(SatellitePostgreSQLQuery):
"""
Parse the output of the command ``psql -d foreman -c "select id, name, url, download_policy from katello_root_repositories where download_policy = 'background' or url is NULL" --csv``.
Sample output::
id,name,url,download_policy
2,Red Hat Satellite Tools 6.8 for RHEL 7 Server RPMs x86_64,,on_demand
3,Red Hat Enterprise Linux 8 for x86_64 - AppStream RPMs 8,https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os,background
4,Red Hat Enterprise Linux 7 Server RPMs x86_64 7Server,https://cdn.redhat.com/content/dist/rhel/server/7/7Server/x86_64/os,background
Examples:
>>> type(repos)
<class 'insights.parsers.satellite_postgresql_query.SatelliteQualifiedKatelloRepos'>
>>> len(repos)
3
>>> repos[0]['name']
'Red Hat Satellite Tools 6.8 for RHEL 7 Server RPMs x86_64'
"""
columns = ['id', 'name', 'url', 'download_policy']
@parser(Specs.satellite_qualified_capsules)
class SatelliteQualifiedCapsules(SatellitePostgreSQLQuery):
"""
Parse the output of the command ``psql -d foreman -c "select name from smart_proxies where download_policy = 'background'" --csv``.
Sample output::
name
capsule1.test.com
capsule2.test.com
Examples:
>>> type(capsules)
<class 'insights.parsers.satellite_postgresql_query.SatelliteQualifiedCapsules'>
>>> len(capsules)
2
>>> capsules[0]['name']
'capsule1.test.com'
"""
columns = ['name']
@parser(Specs.satellite_sca_status)
class SatelliteSCAStatus(SatellitePostgreSQLQuery):
"""
Parse the output of the command ``psql -d candlepin -c "select displayname, content_access_mode from cp_owner" --csv``.
Sample output::
displayname,content_access_mode
Default Organization,entitlement
Orgq,org_environment
Examples:
>>> type(sat_sca_info)
<class 'insights.parsers.satellite_postgresql_query.SatelliteSCAStatus'>
>>> sat_sca_info.sca_enabled
True
"""
columns = ['displayname', 'content_access_mode']
@property
def sca_enabled(self):
"""
If the value of content_access_mode is "org_environment", SCA is enabled for that organization.
Return True if any organization has SCA enabled on the Satellite, else False.
"""
return bool(len(self.search(content_access_mode='org_environment')))
|
michaupl/braincloud
|
brainblog/tasks.py
|
import datetime
from django.contrib.sessions.models import Session
from celery import task
from celery.utils.log import get_task_logger
from brainindex.index import create_thought, update_thought, delete_thought, CREATE, UPDATE, DELETE
logger = get_task_logger(__name__)
@task(ignore_result = True)
def clear_expired_sessions():
moment = datetime.datetime.now()
Session.objects.filter(expire_date__lte = moment).delete()
@task(ignore_result = True)
def index_operation(thought, op_type):
if op_type == CREATE:
create_thought(thought)
elif op_type == UPDATE:
update_thought(thought)
elif op_type == DELETE:
delete_thought(thought)
else:
logger.warn('Unsupported index operation.')
|
w4n9H/PythonSkillTree
|
Distributed/ProcessThread/LocalProcessQueue.py
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: mango
@contact: w4n9@sina.com
@create: 16/7/1
hail hydra!
"""
__author__ = "mango"
__version__ = "0.1"
from multiprocessing import Process, Queue
import uuid
def process(q):
"""
test process
"""
content = str(uuid.uuid4())
for i in range(4):
q.put({'content': content})
def main():
"""
main process
"""
q = Queue()
plist = []
for i in range(4):
proc = Process(target=process, args=(q,))
plist.append(proc)
for proc in plist:
proc.start()
for proc in plist:
proc.join()
while True:
if q.empty():
print "empty"
break
else:
print q.get()
|
Interoute/API-fun-and-education
|
dcg_member_listing.py
|
#! /usr/bin/env python
# Python script for the Interoute Virtual Data Centre API:
# Name: dcg_member_listing.py
# Purpose: List the properties and membership of Direct Connect Groups
# Requires: class VDCApiCall in the file vdc_api_call.py
# See the repo: https://github.com/Interoute/API-fun-and-education
#
# You can pass options via the command line: type 'python dcg_member_listing.py -h' for usage information
#
# The VDC account used must be able to access the VDC regions in the argument 'regionlist'.
# Use the regionlist argument to change the regions for a limited account (for example, a 14-day trial account is excluded from Asia region)
# Example of passing region names as arguments (do not use braces, quotes or commas): 'python dcg_member_listing.py --regionlist Europe USA -n'
#
# Copyright (C) Interoute Communications Limited, 2017
from __future__ import print_function
import vdc_api_call as vdc
import getpass
import json
import os
import string
import datetime
import argparse
import re
import sys
def print_network_members(vmList,networkid,isProvisioned,prefixChars):
networkmembers = []
for vm in vmList:
for i in range(len(vm['nic'])):
if networkid == vm['nic'][i]['networkid']:
networkmembers.append([int(vm['nic'][i]['ipaddress'].split('.')[-1]),vm['nic'][i]['ipaddress'],vm['name'],vm['id']])
break # Can break out of this loop as soon as the network id is found for a NIC
if len(networkmembers)>0:
networkmembers.sort() # VMs will be sorted by the last segment of their IP address (=first element of each members list)
for i in range(len(networkmembers)):
if i==len(networkmembers)-1: #this is last VM in the network
print(prefixChars + unichr(0x2514)+" %s: '%s'" % (networkmembers[i][1],networkmembers[i][2]))
else:
print(prefixChars + unichr(0x251C)+" %s: '%s'" % (networkmembers[i][1],networkmembers[i][2]))
else:
if isProvisioned:
print(prefixChars + "*(NO MEMBERS)")
else:
print(prefixChars + "*(NOT PROVISIONED)")
if __name__ == '__main__':
# STEP 1: Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", default=os.path.join(os.path.expanduser('~'), '.vdcapi'),
help="path/name of the config file to be used for the API URL and API keys")
parser.add_argument("-d", "--dcgname", help="Show information only for the specified DCG name")
parser.add_argument("-b", "--dcgid", help="Show information only for the specified DCG ID")
parser.add_argument("-n","--netmem",action='store_true',
help="show the VM members of the Private Direct Connect networks")
parser.add_argument("-r","--regionlist",default=['Europe', 'USA', 'Asia'],nargs='+',
help="specify the list of regions to be checked")
# Note : The VDC account used must be able to access all of the VDC regions in the argument 'regionlist'.
# Use this argument to change the list for a limited account (for example, a 14-day trial account is excluded from Asia region)
config_file = parser.parse_args().config
dcgid_requested = parser.parse_args().dcgid
dcgname_requested = parser.parse_args().dcgname
show_netmem = parser.parse_args().netmem
vdcRegions = parser.parse_args().regionlist
# STEP 2: If config file is found, read its content,
# else query user for the URL, API key, Secret key
if os.path.isfile(config_file):
with open(config_file) as fh:
data = fh.read()
config = json.loads(data)
api_url = config['api_url']
apiKey = config['api_key']
secret = config['api_secret']
else:
print('API url (e.g. http://10.220.18.115:8080/client/api):', end='')
api_url = raw_input()
print('API key:', end='')
apiKey = raw_input()
secret = getpass.getpass(prompt='API secret:')
# STEP 3: Create the api access object
api = vdc.VDCApiCall(api_url, apiKey, secret)
# STEP 4: API calls to get the information about DCGs and networks
if dcgid_requested:
dcgList = api.listDirectConnectGroups({'id':dcgid_requested})
if dcgList['count'] == 0:
print("ERROR: The dcgid input did not match a DCG in this VDC account.")
sys.exit("FATAL: Program terminating")
elif dcgname_requested:
dcgList = api.listDirectConnectGroups({'name':dcgname_requested})
if dcgList['count'] == 0:
print("ERROR: The dcgname input did not match a DCG in this VDC account.")
sys.exit("FATAL: Program terminating")
else:
dcgList = api.listDirectConnectGroups({})
networksLists = {}
if show_netmem:
vmLists = {}
for r in vdcRegions:
nlistPDC = api.listNetworks({'region': r, 'subtype': 'privatedirectconnect'})
nlistPDCEgress = api.listNetworks({'region': r, 'subtype': 'privatedirectconnectwithgatewayservicesegress'})
if nlistPDC['count'] == 0 and nlistPDCEgress['count'] == 0: # there are no PrivateDirectConnect networks in this region
networksLists[r] = {'count':0, 'network':[]}
else:
networksLists[r] = {'count': nlistPDC['count'] + nlistPDCEgress['count'], 'network': nlistPDC['network'] + nlistPDCEgress['network']}
if show_netmem:
zonesResponse = api.listZones({'region':r})
zonesList = [z['name'] for z in zonesResponse['zone']]
vmRawList = api.listVirtualMachines({'region':r})
for z in zonesList:
try:
vmLists[z] = [v for v in vmRawList['virtualmachine'] if v['zonename']==z]
except KeyError: # there are no VMs in this region so lookup in the dict will fail
vmLists[z] = []
# STEP 5: Process the information from the API calls
try:
checkTime = datetime.datetime.utcnow() # get the current time (UTC = GMT)
print("\nDirect Connect Group listing for the account '%s' checked at %s:"
% (api.getApiLimit({})['apilimit']['account'], checkTime.strftime("%Y-%m-%d %H:%M:%S UTC")))
if dcgid_requested:
print("\n** Results are shown only for dcgid=%s" % dcgid_requested)
elif dcgname_requested:
print("\n** Results are shown only for dcgname=\'%s\'" % dcgname_requested)
if len(vdcRegions)==3:
print("\n** All VDC regions are being scanned for Private Direct Connect networks")
else:
print("\n** Only these regions will be scanned and their Private Direct Connect networks shown: %s" % (vdcRegions))
print("\n** Networks which have 'isprovisioned' set to False are labelled with '/NotProv/' and are not functional")
print("** Output may not be correct for DCGs and networks that were not created with NetworkAPI functions because\n** they may be missing the information in the listNetworks call which identifies the DCG membership of the network.")
print("** (+E) denotes networks with gateway services for Internet egress\n")
for d in dcgList['directconnectgroups']:
print(" "+unichr(0x2015)+' \'%s\' (dcgid: %s)' % (d['name'], d['id']))
members = []
for r in vdcRegions:
if networksLists[r]['network'] != []:
for n in networksLists[r]['network']:
if n['dcgfriendlyname'] == d['name']:
if 'isprovisioned' not in n:
n['isprovisioned'] = 'Unknown'
members.append([n['cidr'],n['name'],n['zonename'],r,n['id'],n['isprovisioned'],n['displaytext'],n['subtype']])
if len(members)>0:
members = sorted(members, key=lambda x: x[2]) #sort by zonename
members = sorted(members, key=lambda x: x[3]) #sort by region
for i in range(len(members)):
if members[i][7] == 'privatedirectconnectwithgatewayservicesegress':
egressLabel = " (+E)"
else:
egressLabel = ""
if members[i][5] == True:
provisionedLabel = ""
elif members[i][5] == False:
provisionedLabel = "/NotProv/ "
elif members[i][5] == 'Unknown':
provisionedLabel = "/ProvUnknown/ "
if i==len(members)-1: #if this is last item in list
if members[i][1] != members[i][6]: #if network 'name' and 'displaytext' are not the same
print(" "+unichr(0x2514)+" %s%s: %s'%s'|'%s' (%s, %s)" % (members[i][0],egressLabel,provisionedLabel,members[i][1],members[i][6],members[i][2],members[i][3]))
else:
print(" "+unichr(0x2514)+" %s%s: %s'%s' (%s, %s)" % (members[i][0],egressLabel,provisionedLabel,members[i][1],members[i][2],members[i][3]))
if show_netmem:
if vmLists[members[i][2]] != {}:
print_network_members(vmLists[members[i][2]],members[i][4],members[i][5]," ")
else:
print(" " + "*(NO MEMBERS)")
else:
if members[i][1] != members[i][6]: #if network 'name' and 'displaytext' are not the same
print(" "+unichr(0x251C)+" %s%s: %s'%s'|'%s' (%s, %s)" % (members[i][0],egressLabel,provisionedLabel,members[i][1],members[i][6],members[i][2],members[i][3]))
else:
print(" "+unichr(0x251C)+" %s%s: %s'%s' (%s, %s)" % (members[i][0],egressLabel,provisionedLabel,members[i][1],members[i][2],members[i][3]))
if show_netmem:
if vmLists[members[i][2]] != {}:
print_network_members(vmLists[members[i][2]],members[i][4],members[i][5]," "+unichr(0x2502)+" ")
else:
print(" " + "*(NO MEMBERS)")
print(" ")
else:
print(" *(NO NETWORKS)")
print(" ")
except KeyError:
print("Exception: KeyError")
##print('Nothing to do: No Direct Connect Groups found')
|
dek-odoo/python-samples
|
python exercises/dek_program043.py
|
# !/user/bin/python
# -*- coding: utf-8 -*-
#- Author : (DEK) Devendra Kavthekar
# Write a program to generate and print another tuple whose values are
# even numbers in the given tuple (1,2,3,4,5,6,7,8,9,10).
# Hints:
# Use "for" to iterate the tuple
# Use tuple() to generate a tuple from a list.
def main():
val = tuple([value for value in range(1, 11)])
print val
list = []
for number in val:
if number % 2 == 0:
list.append(str(number))
print ','.join(list)
if __name__ == '__main__':
main()
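# An alternative sketch (illustrative only) that follows the hints literally: build a
# list of the even values with a comprehension and convert it to a tuple with tuple().
def even_tuple(values=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)):
    return tuple([value for value in values if value % 2 == 0])
# even_tuple() returns (2, 4, 6, 8, 10)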
|
liosha2007/temporary-groupdocs-python-sdk
|
groupdocs/models/CreateFolderResponse.py
|
#!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class CreateFolderResponse:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'result': 'CreateFolderResult',
'status': 'str',
'error_message': 'str',
'composedOn': 'long'
}
self.result = None # CreateFolderResult
self.status = None # str
self.error_message = None # str
self.composedOn = None # long
|
GoogleCloudDataproc/dataprocmagic
|
googledataprocauthenticator/controllerwidget/createsessionwidget.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates the widget under the Sessions tab within the ``%manage_dataproc widget``"""
import json
import ipyvuetify as v
import sparkmagic.utils.configuration as conf
from sparkmagic.utils.constants import LANG_SCALA, LANG_PYTHON
from sparkmagic.controllerwidget.abstractmenuwidget import AbstractMenuWidget
from googledataprocauthenticator.utils.utils import get_session_id_to_name
from googledataprocauthenticator.utils.constants import WIDGET_WIDTH
class CreateSessionWidget(AbstractMenuWidget):
def __init__(self, spark_controller, ipywidget_factory, ipython_display,
endpoints, refresh_method, state, db):
super(CreateSessionWidget, self).__init__(spark_controller, ipywidget_factory,
ipython_display, True)
self.endpoints = endpoints
self.refresh_method = refresh_method
self.properties = json.dumps(conf.session_configs())
self.state = state
self.db = db
self.delete_pressed = False
backicon = v.Icon(children=['mdi-arrow-left'])
backicon.on_event('click', self._on_back_click)
back_toolbar = v.Toolbar(
elevation="0",
children=[
v.ToolbarItems(children=[backicon]),
v.ToolbarTitle(children=['Create new session']),
v.Spacer()
],
app=True, # If true, the other widgets float under on scroll
)
self.name_textfield = v.TextField(
class_='ma-2',
placeholder='Enter session name',
label='Name',
dense=True,
color='primary',
outlined=True,
v_model=None,
)
self.endpoints_dropdown_widget = v.Select(
class_='ma-2',
placeholder='Select an endpoint',
label='Endpoint',
dense=True,
color='primary',
persistent_hint=True,
hide_selected=True,
outlined=True,
items=list(self.endpoints.keys()),
auto_select_first=True,
v_model=None,
)
self.language_dropdown = v.Select(
class_='ma-2',
label='Language',
placeholder='Select a language',
dense=True,
color='primary',
persistent_hint=True,
hide_selected=False,
outlined=True,
items=[LANG_SCALA, LANG_PYTHON],
auto_select_first=True,
v_model=None,
)
self.properties_textbox = v.TextField(
class_='ma-2',
label='Properties',
dense=True,
color='primary',
outlined=True,
v_model=json.dumps(conf.session_configs()),
)
self.create_session = v.Btn(class_='ma-2', color='primary', children=['Create'])
self.create_session.on_event('click', self._on_create_click)
self.cancel = v.Btn(class_='ma-2', color='primary', children=['Cancel'])
self.cancel.on_event('click', self._on_cancel_click)
self.create_session_container = v.Container(
style_=f'width: {WIDGET_WIDTH};', class_='ma-2',
children=[
back_toolbar,
v.Row(class_='ma-2', children=[
v.Col(children=[self.name_textfield])
]),
v.Row(class_='ma-2', children=[
v.Col(children=[self.endpoints_dropdown_widget])
]),
v.Row(class_='ma-2', children=[
v.Col(children=[self.language_dropdown])
]),
v.Row(class_='ma-2', children=[
v.Col(children=[self.properties_textbox])
]),
v.Row(class_='ma-2', children=[self.create_session, self.cancel]),
]
)
no_back_toolbar = v.Toolbar(
elevation="0",
children=[
v.ToolbarTitle(
titleMarginStart='12dp',
contentInsetStartWithNavigation="56dp",
children=['Sessions']
),
v.Spacer()
],
app=True, # If true, the other widgets float under on scroll
)
new_session = v.Btn(class_='ma-2', color='primary', children=['New Session'])
new_session.on_event('click', self._on_new_session_click)
self.toolbar = v.Row(children=[no_back_toolbar, new_session])
session_table_values = self._generate_session_values()
self.delete_icon = v.Icon(children=['mdi-delete'])
self.delete_icon.on_event('click', self._on_delete_icon_pressed)
self.session_table = v.DataTable(
style_=f'width: {WIDGET_WIDTH};', no_data_text='No sessions', hide_default_footer=True,
disable_pagination=True, item_key='name', headers=[
{'text': 'Session', 'align': 'start', 'sortable': False, 'value': 'name'},
{'text': 'ID', 'sortable': False, 'value': 'id'},
{'text': 'Status', 'sortable': False, 'value': 'status'},
{'text': 'Kind', 'sortable': False, 'value': 'kind'},
{'text': '', 'sortable': False, 'value': 'actions'},
],
items=session_table_values, dense=False, fixedHeader=False, v_slots=[
{'name': 'item.actions', 'children' : [self.delete_icon]},
{'name': 'no-data', 'children': ['No sessions']}
]
)
self.session_table.on_event('click:row', self._remove_row_from_table)
self.toolbar_with_table = v.Container(
style_=f'width: {WIDGET_WIDTH};', class_='mx-auto', children=[
v.Row(class_='mx-auto', children=[self.toolbar]),
v.Row(class_='mx-auto', children=[self.session_table])
]
)
self.children = [self.create_session_container, self.toolbar_with_table]
for child in self.children:
child.parent_widget = self
self._update_view()
def run(self):
pass
def _on_create_click(self, _widget, _event, _data):
try:
properties_json = self.properties_textbox.v_model
if properties_json.strip() != "":
conf.override(
conf.session_configs.__name__,
json.loads(self.properties_textbox.v_model)
)
except ValueError as caught_exc:
self.ipython_display.send_error(
"Session properties must be a valid JSON string. Error:\n{}".format(caught_exc)
)
return
endpoint = self.endpoints[self.endpoints_dropdown_widget.v_model]
language = self.language_dropdown.v_model
alias = self.name_textfield.v_model
skip = False
properties = conf.get_session_properties(language)
try:
self.spark_controller.add_session(alias, endpoint, skip, properties)
# session_id_to_name dict is necessary to restore session name across notebook sessions
# since the livy server does not store the name.
session_id_to_name = get_session_id_to_name(self.db, self.ipython_display)
# add session id -> name to session_id_to_name dict
session_id_to_name[self.spark_controller.session_manager.get_session(alias).id] = alias
self.db['autorestore/' + 'session_id_to_name'] = session_id_to_name
except ValueError as caught_exc:
self.ipython_display.send_error("""Could not add session with
name:
{}
properties:
{}
due to error: '{}'""".format(alias, properties, caught_exc))
return
self.refresh_method(0)
def _on_delete_icon_pressed(self, _widget, _event, _data):
self.delete_pressed = True
def _remove_row_from_table(self, _table, _event, row):
if self.delete_pressed:
session_name = row.get('name')
session_id = row.get('id')
try:
self.spark_controller.delete_session_by_name(session_name)
session_id_to_name = get_session_id_to_name(self.db, self.ipython_display)
session_id_to_name.pop(session_id)
self.db['autorestore/' + 'session_id_to_name'] = session_id_to_name
self.refresh_method(0)
except Exception as caught_exc:
self.ipython_display.send_error("Failed delete session due to the following "\
f"error: {str(caught_exc)}")
def _on_cancel_click(self, _widget, _event, _data):
self.state = 'list'
self._update_view()
def _on_new_session_click(self, _widget, _event, _data):
self.state = 'add'
self._update_view()
def _on_back_click(self, _widget, _event, _data):
self.state = 'list'
self._update_view()
def _generate_session_values(self):
session_table_values = []
for name, session in self.spark_controller.get_managed_clients().items():
session_table_values.append({'name':name, 'id':session.id, \
'status':session.status, 'kind':session.kind})
return session_table_values
def _update_view(self):
if self.state == 'add':
self.toolbar_with_table.layout.display = 'none'
self.create_session_container.layout.display = 'flex'
elif self.state == 'list':
self.create_session_container.layout.display = 'none'
self.toolbar_with_table.layout.display = 'flex'
|
mlflow/mlflow
|
tests/statsmodels/test_statsmodels_model_export.py
|
import pytest
import numpy as np
import pandas as pd
from unittest import mock
import os
import yaml
import mlflow.statsmodels
import mlflow.utils
import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
from mlflow import pyfunc
from mlflow.models.utils import _read_example
from mlflow.models import Model, infer_signature
from mlflow.store.artifact.s3_artifact_repo import S3ArtifactRepository
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.file_utils import TempDir
from mlflow.utils.model_utils import _get_flavor_configuration
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from tests.helper_functions import (
pyfunc_serve_and_score_model,
_compare_conda_env_requirements,
_assert_pip_requirements,
_is_available_on_pypi,
_compare_logged_code_paths,
)
from tests.helper_functions import mock_s3_bucket # pylint: disable=unused-import
from tests.helper_functions import set_boto_credentials # pylint: disable=unused-import
from tests.statsmodels.model_fixtures import (
ols_model,
arma_model,
glsar_model,
gee_model,
glm_model,
gls_model,
recursivels_model,
rolling_ols_model,
rolling_wls_model,
wls_model,
)
EXTRA_PYFUNC_SERVING_TEST_ARGS = [] if _is_available_on_pypi("statsmodels") else ["--no-conda"]
# The code in this file has been adapted from the test cases of the lightgbm flavor.
def _get_dates_from_df(df):
start_date = df["start"][0]
end_date = df["end"][0]
return start_date, end_date
@pytest.fixture
def model_path(tmpdir, subdir="model"):
return os.path.join(str(tmpdir), subdir)
@pytest.fixture
def statsmodels_custom_env(tmpdir):
conda_env = os.path.join(str(tmpdir), "conda_env.yml")
_mlflow_conda_env(conda_env, additional_pip_deps=["pytest", "statsmodels"])
return conda_env
def _test_models_list(tmpdir, func_to_apply):
from statsmodels.tsa.base.tsa_model import TimeSeriesModel
fixtures = [
ols_model,
arma_model,
glsar_model,
gee_model,
glm_model,
gls_model,
recursivels_model,
rolling_ols_model,
rolling_wls_model,
wls_model,
]
for algorithm in fixtures:
name = algorithm.__name__
path = os.path.join(tmpdir, name)
model = algorithm()
if isinstance(model.alg, TimeSeriesModel):
start_date, end_date = _get_dates_from_df(model.inference_dataframe)
func_to_apply(model, path, start_date, end_date)
else:
func_to_apply(model, path, model.inference_dataframe)
def _test_model_save_load(statsmodels_model, model_path, *predict_args):
mlflow.statsmodels.save_model(statsmodels_model=statsmodels_model.model, path=model_path)
reloaded_model = mlflow.statsmodels.load_model(model_uri=model_path)
reloaded_pyfunc = pyfunc.load_model(model_uri=model_path)
if hasattr(statsmodels_model.model, "predict"):
np.testing.assert_array_almost_equal(
statsmodels_model.model.predict(*predict_args),
reloaded_model.predict(*predict_args),
)
np.testing.assert_array_almost_equal(
reloaded_model.predict(*predict_args),
reloaded_pyfunc.predict(statsmodels_model.inference_dataframe),
)
def _test_model_log(statsmodels_model, model_path, *predict_args):
model = statsmodels_model.model
with TempDir(chdr=True, remove_on_exit=True) as tmp:
try:
artifact_path = "model"
conda_env = os.path.join(tmp.path(), "conda_env.yaml")
_mlflow_conda_env(conda_env, additional_pip_deps=["statsmodels"])
model_info = mlflow.statsmodels.log_model(
statsmodels_model=model, artifact_path=artifact_path, conda_env=conda_env
)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
assert model_info.model_uri == model_uri
reloaded_model = mlflow.statsmodels.load_model(model_uri=model_uri)
if hasattr(model, "predict"):
np.testing.assert_array_almost_equal(
model.predict(*predict_args), reloaded_model.predict(*predict_args)
)
model_path = _download_artifact_from_uri(artifact_uri=model_uri)
model_config = Model.load(os.path.join(model_path, "MLmodel"))
assert pyfunc.FLAVOR_NAME in model_config.flavors
assert pyfunc.ENV in model_config.flavors[pyfunc.FLAVOR_NAME]
env_path = model_config.flavors[pyfunc.FLAVOR_NAME][pyfunc.ENV]
assert os.path.exists(os.path.join(model_path, env_path))
finally:
mlflow.end_run()
@pytest.mark.large
def test_models_save_load(tmpdir):
_test_models_list(tmpdir, _test_model_save_load)
@pytest.mark.large
def test_models_log(tmpdir):
_test_models_list(tmpdir, _test_model_log)
def test_signature_and_examples_are_saved_correctly():
model, _, X = ols_model()
signature_ = infer_signature(X)
example_ = X[0:3, :]
for signature in (None, signature_):
for example in (None, example_):
with TempDir() as tmp:
path = tmp.path("model")
mlflow.statsmodels.save_model(
model, path=path, signature=signature, input_example=example
)
mlflow_model = Model.load(path)
assert signature == mlflow_model.signature
if example is None:
assert mlflow_model.saved_input_example_info is None
else:
assert np.array_equal(_read_example(mlflow_model, path), example)
def test_model_load_from_remote_uri_succeeds(model_path, mock_s3_bucket):
model, _, inference_dataframe = arma_model()
mlflow.statsmodels.save_model(statsmodels_model=model, path=model_path)
artifact_root = "s3://{bucket_name}".format(bucket_name=mock_s3_bucket)
artifact_path = "model"
artifact_repo = S3ArtifactRepository(artifact_root)
artifact_repo.log_artifacts(model_path, artifact_path=artifact_path)
model_uri = artifact_root + "/" + artifact_path
reloaded_model = mlflow.statsmodels.load_model(model_uri=model_uri)
start_date, end_date = _get_dates_from_df(inference_dataframe)
np.testing.assert_array_almost_equal(
model.predict(start=start_date, end=end_date),
reloaded_model.predict(start=start_date, end=end_date),
)
def test_log_model_calls_register_model():
# Adapted from lightgbm tests
ols = ols_model()
artifact_path = "model"
register_model_patch = mock.patch("mlflow.register_model")
with mlflow.start_run(), register_model_patch, TempDir(chdr=True, remove_on_exit=True) as tmp:
conda_env = os.path.join(tmp.path(), "conda_env.yaml")
_mlflow_conda_env(conda_env, additional_pip_deps=["statsmodels"])
mlflow.statsmodels.log_model(
statsmodels_model=ols.model,
artifact_path=artifact_path,
conda_env=conda_env,
registered_model_name="OLSModel1",
)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
mlflow.register_model.assert_called_once_with(
model_uri, "OLSModel1", await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS
)
def test_log_model_no_registered_model_name():
ols = ols_model()
artifact_path = "model"
register_model_patch = mock.patch("mlflow.register_model")
with mlflow.start_run(), register_model_patch, TempDir(chdr=True, remove_on_exit=True) as tmp:
conda_env = os.path.join(tmp.path(), "conda_env.yaml")
_mlflow_conda_env(conda_env, additional_pip_deps=["statsmodels"])
mlflow.statsmodels.log_model(
statsmodels_model=ols.model, artifact_path=artifact_path, conda_env=conda_env
)
mlflow.register_model.assert_not_called()
def test_model_save_persists_specified_conda_env_in_mlflow_model_directory(
model_path, statsmodels_custom_env
):
ols = ols_model()
mlflow.statsmodels.save_model(
statsmodels_model=ols.model, path=model_path, conda_env=statsmodels_custom_env
)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
assert saved_conda_env_path != statsmodels_custom_env
with open(statsmodels_custom_env, "r") as f:
statsmodels_custom_env_parsed = yaml.safe_load(f)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == statsmodels_custom_env_parsed
def test_model_save_persists_requirements_in_mlflow_model_directory(
model_path, statsmodels_custom_env
):
ols = ols_model()
mlflow.statsmodels.save_model(
statsmodels_model=ols.model, path=model_path, conda_env=statsmodels_custom_env
)
saved_pip_req_path = os.path.join(model_path, "requirements.txt")
_compare_conda_env_requirements(statsmodels_custom_env, saved_pip_req_path)
@pytest.mark.large
def test_log_model_with_pip_requirements(tmpdir):
ols = ols_model()
# Path to a requirements file
req_file = tmpdir.join("requirements.txt")
req_file.write("a")
with mlflow.start_run():
mlflow.statsmodels.log_model(ols.model, "model", pip_requirements=req_file.strpath)
_assert_pip_requirements(mlflow.get_artifact_uri("model"), ["mlflow", "a"], strict=True)
# List of requirements
with mlflow.start_run():
mlflow.statsmodels.log_model(
ols.model, "model", pip_requirements=[f"-r {req_file.strpath}", "b"]
)
_assert_pip_requirements(
mlflow.get_artifact_uri("model"), ["mlflow", "a", "b"], strict=True
)
# Constraints file
with mlflow.start_run():
mlflow.statsmodels.log_model(
ols.model, "model", pip_requirements=[f"-c {req_file.strpath}", "b"]
)
_assert_pip_requirements(
mlflow.get_artifact_uri("model"),
["mlflow", "b", "-c constraints.txt"],
["a"],
strict=True,
)
@pytest.mark.large
def test_log_model_with_extra_pip_requirements(tmpdir):
ols = ols_model()
default_reqs = mlflow.statsmodels.get_default_pip_requirements()
# Path to a requirements file
req_file = tmpdir.join("requirements.txt")
req_file.write("a")
with mlflow.start_run():
mlflow.statsmodels.log_model(ols.model, "model", extra_pip_requirements=req_file.strpath)
_assert_pip_requirements(mlflow.get_artifact_uri("model"), ["mlflow", *default_reqs, "a"])
# List of requirements
with mlflow.start_run():
mlflow.statsmodels.log_model(
ols.model, "model", extra_pip_requirements=[f"-r {req_file.strpath}", "b"]
)
_assert_pip_requirements(
mlflow.get_artifact_uri("model"), ["mlflow", *default_reqs, "a", "b"]
)
# Constraints file
with mlflow.start_run():
mlflow.statsmodels.log_model(
ols.model, "model", extra_pip_requirements=[f"-c {req_file.strpath}", "b"]
)
_assert_pip_requirements(
mlflow.get_artifact_uri("model"),
["mlflow", *default_reqs, "b", "-c constraints.txt"],
["a"],
)
def test_model_save_accepts_conda_env_as_dict(model_path):
ols = ols_model()
conda_env = dict(mlflow.statsmodels.get_default_conda_env())
conda_env["dependencies"].append("pytest")
mlflow.statsmodels.save_model(statsmodels_model=ols.model, path=model_path, conda_env=conda_env)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == conda_env
def test_model_log_persists_specified_conda_env_in_mlflow_model_directory(statsmodels_custom_env):
ols = ols_model()
artifact_path = "model"
with mlflow.start_run():
mlflow.statsmodels.log_model(
statsmodels_model=ols.model,
artifact_path=artifact_path,
conda_env=statsmodels_custom_env,
)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
model_path = _download_artifact_from_uri(artifact_uri=model_uri)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
assert saved_conda_env_path != statsmodels_custom_env
with open(statsmodels_custom_env, "r") as f:
statsmodels_custom_env_parsed = yaml.safe_load(f)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == statsmodels_custom_env_parsed
def test_model_log_persists_requirements_in_mlflow_model_directory(statsmodels_custom_env):
ols = ols_model()
artifact_path = "model"
with mlflow.start_run():
mlflow.statsmodels.log_model(
statsmodels_model=ols.model,
artifact_path=artifact_path,
conda_env=statsmodels_custom_env,
)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
model_path = _download_artifact_from_uri(artifact_uri=model_uri)
saved_pip_req_path = os.path.join(model_path, "requirements.txt")
_compare_conda_env_requirements(statsmodels_custom_env, saved_pip_req_path)
def test_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies(
model_path,
):
ols = ols_model()
mlflow.statsmodels.save_model(statsmodels_model=ols.model, path=model_path)
_assert_pip_requirements(model_path, mlflow.statsmodels.get_default_pip_requirements())
def test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies():
ols = ols_model()
artifact_path = "model"
with mlflow.start_run():
mlflow.statsmodels.log_model(statsmodels_model=ols.model, artifact_path=artifact_path)
model_uri = mlflow.get_artifact_uri(artifact_path)
_assert_pip_requirements(model_uri, mlflow.statsmodels.get_default_pip_requirements())
def test_pyfunc_serve_and_score():
model, _, inference_dataframe = ols_model()
artifact_path = "model"
with mlflow.start_run():
mlflow.statsmodels.log_model(model, artifact_path)
model_uri = mlflow.get_artifact_uri(artifact_path)
resp = pyfunc_serve_and_score_model(
model_uri,
data=pd.DataFrame(inference_dataframe),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
extra_args=EXTRA_PYFUNC_SERVING_TEST_ARGS,
)
scores = pd.read_json(resp.content.decode("utf-8"), orient="records").values.squeeze()
np.testing.assert_array_almost_equal(scores, model.predict(inference_dataframe))
def test_log_model_with_code_paths():
artifact_path = "model"
ols = ols_model()
with mlflow.start_run(), mock.patch(
"mlflow.statsmodels._add_code_from_conf_to_system_path"
) as add_mock:
mlflow.statsmodels.log_model(ols.model, artifact_path, code_paths=[__file__])
model_uri = mlflow.get_artifact_uri(artifact_path)
_compare_logged_code_paths(__file__, model_uri, mlflow.statsmodels.FLAVOR_NAME)
mlflow.statsmodels.load_model(model_uri)
add_mock.assert_called()
|
stanley-cheung/grpc
|
tools/buildgen/extract_metadata_from_bazel_xml.py
|
#!/usr/bin/env python3
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script to extract build metadata from bazel BUILD.
# To avoid having two sources of truth for the build metadata (build
# targets, source files, header files etc.), this script analyzes the contents
# of bazel BUILD files and generates a YAML file (currently called
# build_autogenerated.yaml). The format and semantics of the generated YAML file
# are chosen to match the format of a "build.yaml" file, which used
# to be the source of truth for the gRPC build before bazel became
# the primary build system.
# A good basic overview of the "build.yaml" format is available here:
# https://github.com/grpc/grpc/blob/master/templates/README.md. Note that
# while useful as an overview, the doc does not act as a formal spec
# (in fact, no formal spec exists) and it can be incomplete,
# inaccurate or slightly out of date.
# TODO(jtattermusch): In the future we want to get rid of the legacy build.yaml
# format entirely or simplify it to a point where it becomes self-explanatory
# and doesn't need any detailed documentation.
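# Rough sketch of the generated build_autogenerated.yaml layout (the entries shown
# are illustrative only; the real contents are computed from the bazel query
# results by the code below):
#
#   libs:
#   - name: grpc
#     language: c
#     build: all
#     public_headers: [include/grpc/byte_buffer.h, ...]
#     headers: [...]
#     src: [...]
#     deps: [gpr, address_sorting, ...]
#   filegroups: []
#   targets:
#   - name: grpc_cpp_plugin
#     build: protoc
#     ...
#   tests:
#   - ...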
import collections
import os
import re
import subprocess
import sys
from typing import Any, Dict, Iterable, List, Optional
import xml.etree.ElementTree as ET
import build_cleaner
import yaml
BuildMetadata = Dict[str, Any]
BuildDict = Dict[str, BuildMetadata]
BuildYaml = Dict[str, Any]
def _bazel_query_xml_tree(query: str) -> ET.Element:
"""Get xml output of bazel query invocation, parsed as XML tree"""
output = subprocess.check_output(
['tools/bazel', 'query', '--noimplicit_deps', '--output', 'xml', query])
return ET.fromstring(output)
def _rule_dict_from_xml_node(rule_xml_node):
"""Converts XML node representing a rule (obtained from "bazel query --output xml") to a dictionary that contains all the metadata we will need."""
result = {
'class': rule_xml_node.attrib.get('class'),
'name': rule_xml_node.attrib.get('name'),
'srcs': [],
'hdrs': [],
'deps': [],
'data': [],
'tags': [],
'args': [],
'generator_function': None,
'size': None,
'flaky': False,
}
for child in rule_xml_node:
# all the metadata we want is stored under "list" tags
if child.tag == 'list':
list_name = child.attrib['name']
if list_name in ['srcs', 'hdrs', 'deps', 'data', 'tags', 'args']:
result[list_name] += [item.attrib['value'] for item in child]
if child.tag == 'string':
string_name = child.attrib['name']
if string_name in ['generator_function', 'size']:
result[string_name] = child.attrib['value']
if child.tag == 'boolean':
bool_name = child.attrib['name']
if bool_name in ['flaky']:
result[bool_name] = child.attrib['value'] == 'true'
return result
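# A minimal sketch (hypothetical rule, not invoked by the script) of the XML shape
# consumed by _rule_dict_from_xml_node and the dictionary it produces.
def _example_rule_dict_from_xml_node() -> BuildMetadata:
    demo_xml = ('<rule class="cc_library" name="//:gpr">'
                '<list name="srcs"><label value="//:src/core/lib/gpr/alloc.cc"/></list>'
                '<list name="deps"><label value="//:gpr_platform"/></list>'
                '<boolean name="flaky" value="false"/>'
                '</rule>')
    rule = _rule_dict_from_xml_node(ET.fromstring(demo_xml))
    # rule == {'class': 'cc_library', 'name': '//:gpr',
    #          'srcs': ['//:src/core/lib/gpr/alloc.cc'], 'hdrs': [],
    #          'deps': ['//:gpr_platform'], 'data': [], 'tags': [], 'args': [],
    #          'generator_function': None, 'size': None, 'flaky': False}
    return rule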
def _extract_rules_from_bazel_xml(xml_tree):
"""Extract bazel rules from an XML tree node obtained from "bazel query --output xml" command."""
result = {}
for child in xml_tree:
if child.tag == 'rule':
rule_dict = _rule_dict_from_xml_node(child)
rule_clazz = rule_dict['class']
rule_name = rule_dict['name']
if rule_clazz in [
'cc_library',
'cc_binary',
'cc_test',
'cc_proto_library',
'proto_library',
'upb_proto_library',
'upb_proto_reflection_library',
]:
if rule_name in result:
raise Exception('Rule %s already present' % rule_name)
result[rule_name] = rule_dict
return result
def _get_bazel_label(target_name: str) -> str:
if ':' in target_name:
return '//%s' % target_name
else:
return '//:%s' % target_name
def _extract_source_file_path(label: str) -> str:
"""Gets relative path to source file from bazel deps listing"""
if label.startswith('//'):
label = label[len('//'):]
# labels in form //:src/core/lib/surface/call_test_only.h
if label.startswith(':'):
label = label[len(':'):]
# labels in form //test/core/util:port.cc
label = label.replace(':', '/')
return label
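# Quick sketch (not called anywhere) showing how target names and labels are
# normalized by _get_bazel_label and _extract_source_file_path; the concrete
# labels are just examples.
def _example_label_handling() -> None:
    assert _get_bazel_label('gpr') == '//:gpr'
    assert _get_bazel_label('src/compiler:grpc_plugin_support') == '//src/compiler:grpc_plugin_support'
    assert _extract_source_file_path('//:src/core/lib/surface/call_test_only.h') == \
        'src/core/lib/surface/call_test_only.h'
    assert _extract_source_file_path('//test/core/util:port.cc') == 'test/core/util/port.cc'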
def _extract_public_headers(bazel_rule: BuildMetadata) -> List[str]:
"""Gets list of public headers from a bazel rule"""
result = []
for dep in bazel_rule['hdrs']:
if dep.startswith('//:include/') and dep.endswith('.h'):
result.append(_extract_source_file_path(dep))
return list(sorted(result))
def _extract_nonpublic_headers(bazel_rule: BuildMetadata) -> List[str]:
"""Gets list of non-public headers from a bazel rule"""
result = []
for dep in bazel_rule['hdrs']:
if dep.startswith('//') and not dep.startswith(
'//:include/') and dep.endswith('.h'):
result.append(_extract_source_file_path(dep))
return list(sorted(result))
def _extract_sources(bazel_rule: BuildMetadata) -> List[str]:
"""Gets list of source files from a bazel rule"""
result = []
for dep in bazel_rule['srcs']:
if dep.startswith('//') and (dep.endswith('.cc') or dep.endswith('.c')
or dep.endswith('.proto')):
result.append(_extract_source_file_path(dep))
return list(sorted(result))
def _extract_deps(bazel_rule: BuildMetadata,
                  bazel_rules: BuildDict) -> List[str]:
    """Gets list of deps from a bazel rule"""
return list(sorted(bazel_rule['deps']))
def _create_target_from_bazel_rule(target_name: str,
bazel_rules: BuildDict) -> BuildMetadata:
"""Create build.yaml-like target definition from bazel metadata"""
bazel_rule = bazel_rules[_get_bazel_label(target_name)]
# Create a template for our target from the bazel rule. Initially we only
# populate some "private" fields with the original info we got from bazel
# and only later we will populate the public fields (once we do some extra
# postprocessing).
result = {
'name': target_name,
'_PUBLIC_HEADERS_BAZEL': _extract_public_headers(bazel_rule),
'_HEADERS_BAZEL': _extract_nonpublic_headers(bazel_rule),
'_SRC_BAZEL': _extract_sources(bazel_rule),
'_DEPS_BAZEL': _extract_deps(bazel_rule, bazel_rules),
'public_headers': bazel_rule['_COLLAPSED_PUBLIC_HEADERS'],
'headers': bazel_rule['_COLLAPSED_HEADERS'],
'src': bazel_rule['_COLLAPSED_SRCS'],
'deps': bazel_rule['_COLLAPSED_DEPS'],
}
return result
def _external_dep_name_from_bazel_dependency(bazel_dep: str) -> Optional[str]:
"""Returns name of dependency if external bazel dependency is provided or None"""
if bazel_dep.startswith('@com_google_absl//'):
        # special case for a dependency on one of the absl libraries (there is not just one absl library)
prefixlen = len('@com_google_absl//')
return bazel_dep[prefixlen:]
elif bazel_dep == '//external:upb_lib':
return 'upb'
elif bazel_dep == '//external:benchmark':
return 'benchmark'
elif bazel_dep == '//external:libssl':
return 'libssl'
else:
# all the other external deps such as protobuf, cares, zlib
# don't need to be listed explicitly, they are handled automatically
# by the build system (make, cmake)
return None
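# A small sketch (not invoked) of how bazel labels map to external dependency
# names; the '@com_google_absl//absl/strings' label is only an illustrative example.
def _example_external_dep_name() -> None:
    assert _external_dep_name_from_bazel_dependency('@com_google_absl//absl/strings') == 'absl/strings'
    assert _external_dep_name_from_bazel_dependency('//external:upb_lib') == 'upb'
    # protobuf, cares, zlib etc. need no explicit entry; make/cmake handle them implicitly
    assert _external_dep_name_from_bazel_dependency('@com_google_protobuf//:protobuf') is None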
def _compute_transitive_metadata(
rule_name: str, bazel_rules: Any,
bazel_label_to_dep_name: Dict[str, str]) -> None:
"""Computes the final build metadata for Bazel target with rule_name.
The dependencies that will appear on the deps list are:
* Public build targets including binaries and tests;
* External targets, like absl, re2.
All other intermediate dependencies will be merged, which means their
source file, headers, etc. will be collected into one build target. This
step of processing will greatly reduce the complexity of the generated
build specifications for other build systems, like CMake, Make, setuptools.
The final build metadata are:
* _TRANSITIVE_DEPS: all the transitive dependencies including intermediate
targets;
    * _COLLAPSED_DEPS: dependencies that fit the requirement above; duplicated
                       items are removed and the shortest possible dependency
                       list is produced, in alphabetical order;
* _COLLAPSED_SRCS: the merged source files;
* _COLLAPSED_PUBLIC_HEADERS: the merged public headers;
* _COLLAPSED_HEADERS: the merged non-public headers;
* _EXCLUDE_DEPS: intermediate targets to exclude when performing collapsing
of sources and dependencies.
    For the collapsed_deps, the algorithm improves cases like the following:
The result in the past:
end2end_tests -> [grpc_test_util, grpc, gpr, address_sorting, upb]
grpc_test_util -> [grpc, gpr, address_sorting, upb, ...]
grpc -> [gpr, address_sorting, upb, ...]
The result of the algorithm:
end2end_tests -> [grpc_test_util]
grpc_test_util -> [grpc]
grpc -> [gpr, address_sorting, upb, ...]
"""
bazel_rule = bazel_rules[rule_name]
direct_deps = _extract_deps(bazel_rule, bazel_rules)
transitive_deps = set()
collapsed_deps = set()
exclude_deps = set()
collapsed_srcs = set(_extract_sources(bazel_rule))
collapsed_public_headers = set(_extract_public_headers(bazel_rule))
collapsed_headers = set(_extract_nonpublic_headers(bazel_rule))
for dep in direct_deps:
external_dep_name_maybe = _external_dep_name_from_bazel_dependency(dep)
if dep in bazel_rules:
# Descend recursively, but no need to do that for external deps
if external_dep_name_maybe is None:
if "_PROCESSING_DONE" not in bazel_rules[dep]:
# This item is not processed before, compute now
_compute_transitive_metadata(dep, bazel_rules,
bazel_label_to_dep_name)
transitive_deps.update(bazel_rules[dep].get(
'_TRANSITIVE_DEPS', []))
collapsed_deps.update(
collapsed_deps, bazel_rules[dep].get('_COLLAPSED_DEPS', []))
exclude_deps.update(bazel_rules[dep].get('_EXCLUDE_DEPS', []))
# This dep is a public target, add it as a dependency
if dep in bazel_label_to_dep_name:
transitive_deps.update([bazel_label_to_dep_name[dep]])
collapsed_deps.update(collapsed_deps,
[bazel_label_to_dep_name[dep]])
                # Add all the transitive deps of every public dep to the exclude
# list since we want to avoid building sources that are already
# built by our dependencies
exclude_deps.update(bazel_rules[dep]['_TRANSITIVE_DEPS'])
continue
# This dep is an external target, add it as a dependency
if external_dep_name_maybe is not None:
transitive_deps.update([external_dep_name_maybe])
collapsed_deps.update(collapsed_deps, [external_dep_name_maybe])
continue
# Direct dependencies are part of transitive dependencies
transitive_deps.update(direct_deps)
# Calculate transitive public deps (needed for collapsing sources)
transitive_public_deps = set(
[x for x in transitive_deps if x in bazel_label_to_dep_name])
# Remove intermediate targets that our public dependencies already depend
    # on. This is the step that further shortens the deps list.
collapsed_deps = set([x for x in collapsed_deps if x not in exclude_deps])
# Compute the final source files and headers for this build target whose
# name is `rule_name` (input argument of this function).
#
    # Imagine a public target PX with transitive deps [IA, IB, PY, IC, PZ]. PX,
# PY and PZ are public build targets. And IA, IB, IC are intermediate
# targets. In addition, PY depends on IC.
#
# Translate the condition into dependency graph:
# PX -> [IA, IB, PY, IC, PZ]
# PY -> [IC]
# Public targets: [PX, PY, PZ]
#
# The collapsed dependencies of PX: [PY, PZ].
    # The excluded dependencies of PX: [PY, IC, PZ].
# (IC is excluded as a dependency of PX. It is already included in PY, hence
# it would be redundant to include it again.)
#
# Target PX should include source files and headers of [PX, IA, IB] as final
# build metadata.
for dep in transitive_deps:
if dep not in exclude_deps and dep not in transitive_public_deps:
if dep in bazel_rules:
collapsed_srcs.update(_extract_sources(bazel_rules[dep]))
collapsed_public_headers.update(
_extract_public_headers(bazel_rules[dep]))
collapsed_headers.update(
_extract_nonpublic_headers(bazel_rules[dep]))
# This item is a "visited" flag
bazel_rule['_PROCESSING_DONE'] = True
    # The following items are described in the docstring.
bazel_rule['_TRANSITIVE_DEPS'] = list(sorted(transitive_deps))
bazel_rule['_COLLAPSED_DEPS'] = list(sorted(collapsed_deps))
bazel_rule['_COLLAPSED_SRCS'] = list(sorted(collapsed_srcs))
bazel_rule['_COLLAPSED_PUBLIC_HEADERS'] = list(
sorted(collapsed_public_headers))
bazel_rule['_COLLAPSED_HEADERS'] = list(sorted(collapsed_headers))
bazel_rule['_EXCLUDE_DEPS'] = list(sorted(exclude_deps))
# TODO(jtattermusch): deduplicate with transitive_dependencies.py (which has a slightly different logic)
# TODO(jtattermusch): This is done to avoid introducing too many intermediate
# libraries into the build.yaml-based builds (which might in cause issues
# building language-specific artifacts) and also because the libraries
# in build.yaml-based build are generally considered units of distributions
# (= public libraries that are visible to the user and are installable),
# while in bazel builds it is customary to define larger number of smaller
# "sublibraries". The need for elision (and expansion)
# of intermediate libraries can be re-evaluated in the future.
def _populate_transitive_metadata(bazel_rules: Any,
public_dep_names: Iterable[str]) -> None:
"""Add 'transitive_deps' field for each of the rules"""
# Create the map between Bazel label and public dependency name
bazel_label_to_dep_name = {}
for dep_name in public_dep_names:
bazel_label_to_dep_name[_get_bazel_label(dep_name)] = dep_name
# Make sure we reached all the Bazel rules
# TODO(lidiz) potentially we could only update a subset of rules
for rule_name in bazel_rules:
if '_PROCESSING_DONE' not in bazel_rules[rule_name]:
_compute_transitive_metadata(rule_name, bazel_rules,
bazel_label_to_dep_name)
def update_test_metadata_with_transitive_metadata(
all_extra_metadata: BuildDict, bazel_rules: BuildDict) -> None:
"""Patches test build metadata with transitive metadata."""
for lib_name, lib_dict in list(all_extra_metadata.items()):
        # Skip if it isn't a test target
if lib_dict.get('build') != 'test' or lib_dict.get('_TYPE') != 'target':
continue
bazel_rule = bazel_rules[_get_bazel_label(lib_name)]
if '//external:benchmark' in bazel_rule['_TRANSITIVE_DEPS']:
lib_dict['benchmark'] = True
lib_dict['defaults'] = 'benchmark'
if '//external:gtest' in bazel_rule['_TRANSITIVE_DEPS']:
lib_dict['gtest'] = True
lib_dict['language'] = 'c++'
def _get_transitive_protos(bazel_rules, t):
que = [
t,
]
visited = set()
ret = []
while que:
name = que.pop(0)
rule = bazel_rules.get(name, None)
if rule:
for dep in rule['deps']:
if dep not in visited:
visited.add(dep)
que.append(dep)
for src in rule['srcs']:
if src.endswith('.proto'):
ret.append(src)
return list(set(ret))
def _expand_upb_proto_library_rules(bazel_rules):
# Expand the .proto files from UPB proto library rules into the pre-generated
# upb.h and upb.c files.
GEN_UPB_ROOT = '//:src/core/ext/upb-generated/'
GEN_UPBDEFS_ROOT = '//:src/core/ext/upbdefs-generated/'
EXTERNAL_LINKS = [('@com_google_protobuf//', ':src/'),
('@com_google_googleapis//', ''),
('@com_github_cncf_udpa//', ''),
('@com_envoyproxy_protoc_gen_validate//', ''),
('@envoy_api//', ''), ('@opencensus_proto//', '')]
for name, bazel_rule in bazel_rules.items():
gen_func = bazel_rule.get('generator_function', None)
if gen_func in ('grpc_upb_proto_library',
'grpc_upb_proto_reflection_library'):
# get proto dependency
deps = bazel_rule['deps']
if len(deps) != 1:
raise Exception(
'upb rule "{0}" should have 1 proto dependency but has "{1}"'
.format(name, deps))
# deps is not properly fetched from bazel query for upb_proto_library target
# so add the upb dependency manually
bazel_rule['deps'] = [
'//external:upb_lib', '//external:upb_lib_descriptor',
'//external:upb_generated_code_support__only_for_generated_code_do_not_use__i_give_permission_to_break_me'
]
# populate the upb_proto_library rule with pre-generated upb headers
# and sources using proto_rule
protos = _get_transitive_protos(bazel_rules, deps[0])
if len(protos) == 0:
raise Exception(
'upb rule "{0}" should have at least one proto file.'.
format(name))
srcs = []
hdrs = []
for proto_src in protos:
for external_link in EXTERNAL_LINKS:
if proto_src.startswith(external_link[0]):
proto_src = proto_src[len(external_link[0]) +
len(external_link[1]):]
break
if proto_src.startswith('@'):
raise Exception('"{0}" is unknown workspace.'.format(name))
proto_src = _extract_source_file_path(proto_src)
ext = '.upb' if gen_func == 'grpc_upb_proto_library' else '.upbdefs'
root = GEN_UPB_ROOT if gen_func == 'grpc_upb_proto_library' else GEN_UPBDEFS_ROOT
srcs.append(root + proto_src.replace('.proto', ext + '.c'))
hdrs.append(root + proto_src.replace('.proto', ext + '.h'))
bazel_rule['srcs'] = srcs
bazel_rule['hdrs'] = hdrs
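# Illustrative mapping performed above (the proto path is hypothetical): a
# grpc_upb_proto_library rule depending on //src/proto/grpc/health/v1:health.proto
# ends up with
#   srcs: ['//:src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c']
#   hdrs: ['//:src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.h']
# while grpc_upb_proto_reflection_library rules use the '.upbdefs' suffix and the
# upbdefs-generated root instead.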
def _generate_build_metadata(build_extra_metadata: BuildDict,
                             bazel_rules: BuildDict) -> BuildDict:
    """Generate build metadata in build.yaml-like format from bazel build metadata and build.yaml-specific "extra metadata"."""
lib_names = list(build_extra_metadata.keys())
result = {}
for lib_name in lib_names:
lib_dict = _create_target_from_bazel_rule(lib_name, bazel_rules)
# populate extra properties from the build.yaml-specific "extra metadata"
lib_dict.update(build_extra_metadata.get(lib_name, {}))
# store to results
result[lib_name] = lib_dict
# Rename targets marked with "_RENAME" extra metadata.
# This is mostly a cosmetic change to ensure that we end up with build.yaml target
# names we're used to from the past (and also to avoid too long target names).
    # The rename step needs to happen after we're done with most of the processing
    # logic, otherwise the already-renamed libraries would end up with different
    # names than expected.
for lib_name in lib_names:
to_name = build_extra_metadata.get(lib_name, {}).get('_RENAME', None)
if to_name:
# store lib under the new name and also change its 'name' property
if to_name in result:
raise Exception('Cannot rename target ' + str(lib_name) + ', ' +
str(to_name) + ' already exists.')
lib_dict = result.pop(lib_name)
lib_dict['name'] = to_name
result[to_name] = lib_dict
# dep names need to be updated as well
for lib_dict_to_update in list(result.values()):
lib_dict_to_update['deps'] = list([
to_name if dep == lib_name else dep
for dep in lib_dict_to_update['deps']
])
return result
def _convert_to_build_yaml_like(lib_dict: BuildMetadata) -> BuildYaml:
lib_names = [
lib_name for lib_name in list(lib_dict.keys())
if lib_dict[lib_name].get('_TYPE', 'library') == 'library'
]
target_names = [
lib_name for lib_name in list(lib_dict.keys())
if lib_dict[lib_name].get('_TYPE', 'library') == 'target'
]
test_names = [
lib_name for lib_name in list(lib_dict.keys())
if lib_dict[lib_name].get('_TYPE', 'library') == 'test'
]
# list libraries and targets in predefined order
lib_list = [lib_dict[lib_name] for lib_name in lib_names]
target_list = [lib_dict[lib_name] for lib_name in target_names]
test_list = [lib_dict[lib_name] for lib_name in test_names]
# get rid of temporary private fields prefixed with "_" and some other useless fields
for lib in lib_list:
for field_to_remove in [
k for k in list(lib.keys()) if k.startswith('_')
]:
lib.pop(field_to_remove, None)
for target in target_list:
for field_to_remove in [
k for k in list(target.keys()) if k.startswith('_')
]:
target.pop(field_to_remove, None)
target.pop('public_headers',
None) # public headers make no sense for targets
for test in test_list:
for field_to_remove in [
k for k in list(test.keys()) if k.startswith('_')
]:
test.pop(field_to_remove, None)
test.pop('public_headers',
None) # public headers make no sense for tests
build_yaml_like = {
'libs': lib_list,
'filegroups': [],
'targets': target_list,
'tests': test_list,
}
return build_yaml_like
def _extract_cc_tests(bazel_rules: BuildDict) -> List[str]:
"""Gets list of cc_test tests from bazel rules"""
result = []
for bazel_rule in list(bazel_rules.values()):
if bazel_rule['class'] == 'cc_test':
test_name = bazel_rule['name']
if test_name.startswith('//'):
prefixlen = len('//')
result.append(test_name[prefixlen:])
return list(sorted(result))
def _exclude_unwanted_cc_tests(tests: List[str]) -> List[str]:
    """Filters out bazel tests that we don't want to run with other build systems or that we cannot reasonably build with them"""
# most qps tests are autogenerated, we are fine without them
tests = [test for test in tests if not test.startswith('test/cpp/qps:')]
# microbenchmarks aren't needed for checking correctness
tests = [
test for test in tests
if not test.startswith('test/cpp/microbenchmarks:')
]
tests = [
test for test in tests
if not test.startswith('test/core/promise/benchmark:')
]
# we have trouble with census dependency outside of bazel
tests = [
test for test in tests
if not test.startswith('test/cpp/ext/filters/census:') and
not test.startswith('test/core/xds:xds_channel_stack_modifier_test')
]
# missing opencensus/stats/stats.h
tests = [
test for test in tests if not test.startswith(
'test/cpp/end2end:server_load_reporting_end2end_test')
]
tests = [
test for test in tests if not test.startswith(
'test/cpp/server/load_reporter:lb_load_reporter_test')
]
# The test uses --running_under_bazel cmdline argument
# To avoid the trouble needing to adjust it, we just skip the test
tests = [
test for test in tests if not test.startswith(
'test/cpp/naming:resolver_component_tests_runner_invoker')
]
# the test requires 'client_crash_test_server' to be built
tests = [
test for test in tests
if not test.startswith('test/cpp/end2end:time_change_test')
]
# the test requires 'client_crash_test_server' to be built
tests = [
test for test in tests
if not test.startswith('test/cpp/end2end:client_crash_test')
]
# the test requires 'server_crash_test_client' to be built
tests = [
test for test in tests
if not test.startswith('test/cpp/end2end:server_crash_test')
]
# test never existed under build.yaml and it fails -> skip it
tests = [
test for test in tests
if not test.startswith('test/core/tsi:ssl_session_cache_test')
]
# the binary of this test does not get built with cmake
tests = [
test for test in tests
if not test.startswith('test/cpp/util:channelz_sampler_test')
]
# we don't need to generate fuzzers outside of bazel
tests = [test for test in tests if not test.endswith('_fuzzer')]
return tests
def _generate_build_extra_metadata_for_tests(
tests: List[str], bazel_rules: BuildDict) -> BuildDict:
"""For given tests, generate the "extra metadata" that we need for our "build.yaml"-like output. The extra metadata is generated from the bazel rule metadata by using a bunch of heuristics."""
test_metadata = {}
for test in tests:
test_dict = {'build': 'test', '_TYPE': 'target'}
bazel_rule = bazel_rules[_get_bazel_label(test)]
bazel_tags = bazel_rule['tags']
if 'manual' in bazel_tags:
# don't run the tests marked as "manual"
test_dict['run'] = False
if bazel_rule['flaky']:
# don't run tests that are marked as "flaky" under bazel
# because that would only add noise for the run_tests.py tests
# and seeing more failures for tests that we already know are flaky
# doesn't really help anything
test_dict['run'] = False
if 'no_uses_polling' in bazel_tags:
test_dict['uses_polling'] = False
if 'grpc_fuzzer' == bazel_rule['generator_function']:
# currently we hand-list fuzzers instead of generating them automatically
# because there's no way to obtain maxlen property from bazel BUILD file.
print(('skipping fuzzer ' + test))
continue
if 'bazel_only' in bazel_tags:
continue
# if any tags that restrict platform compatibility are present,
# generate the "platforms" field accordingly
# TODO(jtattermusch): there is also a "no_linux" tag, but we cannot take
# it into account as it is applied by grpc_cc_test when poller expansion
# is made (for tests where uses_polling=True). So for now, we just
# assume all tests are compatible with linux and ignore the "no_linux" tag
# completely.
known_platform_tags = set(['no_windows', 'no_mac'])
if set(bazel_tags).intersection(known_platform_tags):
platforms = []
# assume all tests are compatible with linux and posix
platforms.append('linux')
platforms.append(
'posix') # there is no posix-specific tag in bazel BUILD
if not 'no_mac' in bazel_tags:
platforms.append('mac')
if not 'no_windows' in bazel_tags:
platforms.append('windows')
test_dict['platforms'] = platforms
cmdline_args = bazel_rule['args']
if cmdline_args:
test_dict['args'] = list(cmdline_args)
if test.startswith('test/cpp'):
test_dict['language'] = 'c++'
elif test.startswith('test/core'):
test_dict['language'] = 'c'
else:
            raise Exception('wrong test: ' + test)
# short test name without the path.
# There can be name collisions, but we will resolve them later
simple_test_name = os.path.basename(_extract_source_file_path(test))
test_dict['_RENAME'] = simple_test_name
test_metadata[test] = test_dict
# detect duplicate test names
tests_by_simple_name = {}
for test_name, test_dict in list(test_metadata.items()):
simple_test_name = test_dict['_RENAME']
if not simple_test_name in tests_by_simple_name:
tests_by_simple_name[simple_test_name] = []
tests_by_simple_name[simple_test_name].append(test_name)
# choose alternative names for tests with a name collision
for collision_list in list(tests_by_simple_name.values()):
if len(collision_list) > 1:
for test_name in collision_list:
long_name = test_name.replace('/', '_').replace(':', '_')
print((
'short name of "%s" collides with another test, renaming to %s'
% (test_name, long_name)))
test_metadata[test_name]['_RENAME'] = long_name
return test_metadata
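# Sketch of the renaming heuristic above (the labels are illustrative): a test
# 'test/core/slice:slice_test' is first shortened to 'slice_test'; if another
# package defined a test with the same short name, both would fall back to long
# names derived from the full label, e.g. 'test_core_slice_slice_test'.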
def _detect_and_print_issues(build_yaml_like: BuildYaml) -> None:
"""Try detecting some unusual situations and warn about them."""
for tgt in build_yaml_like['targets']:
if tgt['build'] == 'test':
for src in tgt['src']:
if src.startswith('src/') and not src.endswith('.proto'):
print(('source file from under "src/" tree used in test ' +
tgt['name'] + ': ' + src))
# extra metadata that will be used to construct build.yaml
# there are mostly extra properties that we weren't able to obtain from the bazel build
# _TYPE: whether this is library, target or test
# _RENAME: whether this target should be renamed to a different name (to match expectations of make and cmake builds)
_BUILD_EXTRA_METADATA = {
'third_party/address_sorting:address_sorting': {
'language': 'c',
'build': 'all',
'_RENAME': 'address_sorting'
},
'gpr': {
'language': 'c',
'build': 'all',
},
'grpc': {
'language': 'c',
'build': 'all',
'baselib': True,
'generate_plugin_registry': True
},
'grpc++': {
'language': 'c++',
'build': 'all',
'baselib': True,
},
'grpc++_alts': {
'language': 'c++',
'build': 'all',
'baselib': True
},
'grpc++_error_details': {
'language': 'c++',
'build': 'all'
},
'grpc++_reflection': {
'language': 'c++',
'build': 'all'
},
'grpc++_unsecure': {
'language': 'c++',
'build': 'all',
'baselib': True,
},
# TODO(jtattermusch): do we need to set grpc_csharp_ext's LDFLAGS for wrapping memcpy in the same way as in build.yaml?
'grpc_csharp_ext': {
'language': 'c',
'build': 'all',
},
'grpc_unsecure': {
'language': 'c',
'build': 'all',
'baselib': True,
'generate_plugin_registry': True
},
'grpcpp_channelz': {
'language': 'c++',
'build': 'all'
},
'grpc++_test': {
'language': 'c++',
'build': 'private',
},
'src/compiler:grpc_plugin_support': {
'language': 'c++',
'build': 'protoc',
'_RENAME': 'grpc_plugin_support'
},
'src/compiler:grpc_cpp_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_cpp_plugin'
},
'src/compiler:grpc_csharp_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_csharp_plugin'
},
'src/compiler:grpc_node_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_node_plugin'
},
'src/compiler:grpc_objective_c_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_objective_c_plugin'
},
'src/compiler:grpc_php_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_php_plugin'
},
'src/compiler:grpc_python_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_python_plugin'
},
'src/compiler:grpc_ruby_plugin': {
'language': 'c++',
'build': 'protoc',
'_TYPE': 'target',
'_RENAME': 'grpc_ruby_plugin'
},
# TODO(jtattermusch): consider adding grpc++_core_stats
# test support libraries
'test/core/util:grpc_test_util': {
'language': 'c',
'build': 'private',
'_RENAME': 'grpc_test_util'
},
'test/core/util:grpc_test_util_unsecure': {
'language': 'c',
'build': 'private',
'_RENAME': 'grpc_test_util_unsecure'
},
# TODO(jtattermusch): consider adding grpc++_test_util_unsecure - it doesn't seem to be used by bazel build (don't forget to set secure: False)
'test/cpp/util:test_config': {
'language': 'c++',
'build': 'private',
'_RENAME': 'grpc++_test_config'
},
'test/cpp/util:test_util': {
'language': 'c++',
'build': 'private',
'_RENAME': 'grpc++_test_util'
},
# end2end test support libraries
'test/core/end2end:end2end_tests': {
'language': 'c',
'build': 'private',
'_RENAME': 'end2end_tests'
},
'test/core/end2end:end2end_nosec_tests': {
'language': 'c',
'build': 'private',
'_RENAME': 'end2end_nosec_tests'
},
# benchmark support libraries
'test/cpp/microbenchmarks:helpers': {
'language': 'c++',
'build': 'test',
'defaults': 'benchmark',
'_RENAME': 'benchmark_helpers'
},
'test/cpp/interop:interop_client': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'interop_client'
},
'test/cpp/interop:interop_server': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'interop_server'
},
'test/cpp/interop:xds_interop_client': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'xds_interop_client'
},
'test/cpp/interop:xds_interop_server': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'xds_interop_server'
},
'test/cpp/interop:http2_client': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'http2_client'
},
'test/cpp/qps:qps_json_driver': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'qps_json_driver'
},
'test/cpp/qps:qps_worker': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'qps_worker'
},
'test/cpp/util:grpc_cli': {
'language': 'c++',
'build': 'test',
'run': False,
'_TYPE': 'target',
'_RENAME': 'grpc_cli'
},
# TODO(jtattermusch): create_jwt and verify_jwt breaks distribtests because it depends on grpc_test_utils and thus requires tests to be built
# For now it's ok to disable them as these binaries aren't very useful anyway.
#'test/core/security:create_jwt': { 'language': 'c', 'build': 'tool', '_TYPE': 'target', '_RENAME': 'grpc_create_jwt' },
#'test/core/security:verify_jwt': { 'language': 'c', 'build': 'tool', '_TYPE': 'target', '_RENAME': 'grpc_verify_jwt' },
# TODO(jtattermusch): add remaining tools such as grpc_print_google_default_creds_token (they are not used by bazel build)
# TODO(jtattermusch): these fuzzers had no build.yaml equivalent
# test/core/compression:message_compress_fuzzer
# test/core/compression:message_decompress_fuzzer
# test/core/compression:stream_compression_fuzzer
# test/core/compression:stream_decompression_fuzzer
# test/core/slice:b64_decode_fuzzer
# test/core/slice:b64_encode_fuzzer
}
# We need a complete picture of all the targets and dependencies we're interested in
# so we run multiple bazel queries and merge the results.
_BAZEL_DEPS_QUERIES = [
'deps("//test/...")',
'deps("//:all")',
'deps("//src/compiler/...")',
'deps("//src/proto/...")',
# The ^ is needed to differentiate proto_library from go_proto_library
'deps(kind("^proto_library", @envoy_api//envoy/...))',
]
# Step 1: run a bunch of "bazel query --output xml" queries to collect
# the raw build metadata from the bazel build.
# At the end of this step we will have a dictionary of bazel rules
# that are interesting to us (libraries, binaries, etc.) along
# with their most important metadata (sources, headers, dependencies)
#
# Example of a single bazel rule after being populated:
# '//:grpc' : { 'class': 'cc_library',
# 'hdrs': ['//:include/grpc/byte_buffer.h', ... ],
# 'srcs': ['//:src/core/lib/surface/init.cc', ... ],
# 'deps': ['//:grpc_common', ...],
# ... }
bazel_rules = {}
for query in _BAZEL_DEPS_QUERIES:
bazel_rules.update(
_extract_rules_from_bazel_xml(_bazel_query_xml_tree(query)))
# Step 1.5: The sources for UPB protos are pre-generated, so we want
# to expand the UPB proto library bazel rules into the generated
# .upb.h and .upb.c files.
_expand_upb_proto_library_rules(bazel_rules)
# Step 2: Extract the known bazel cc_test tests. While most tests
# will be buildable with other build systems just fine, some of these tests
# would be too difficult to build and run with other build systems,
# so we simply exclude the ones we don't want.
# Note that while making tests buildable with other build systems
# than just bazel is extra effort, we still need to do that for these
# reasons:
# - If our cmake build doesn't have any tests at all, it's hard to make
# sure that what it built actually works (we need at least some "smoke tests").
# This is quite important because the build flags between bazel / non-bazel builds might differ
# (sometimes it's for interesting reasons that are not easy to overcome)
# which makes it even more important to have at least some tests for cmake/make
# - Our portability suite actually runs cmake tests and migration of portability
# suite fully towards bazel might be intricate (e.g. it's unclear whether it's
# possible to get a good enough coverage of different compilers / distros etc.
# with bazel)
# - some things that are considered "tests" in build.yaml-based builds are actually binaries
# we'd want to be able to build anyway (qps_json_worker, interop_client, interop_server, grpc_cli)
# so it's unclear how much make/cmake simplification we would gain by removing just some (but not all) tests.
# TODO(jtattermusch): Investigate feasibility of running portability suite with bazel.
tests = _exclude_unwanted_cc_tests(_extract_cc_tests(bazel_rules))
# Step 3: Generate the "extra metadata" for all our build targets.
# While the bazel rules give us most of the information we need,
# the legacy "build.yaml" format requires some additional fields that
# we cannot get just from bazel alone (we call that "extra metadata").
# In this step, we basically analyze the build metadata we have from bazel
# and use heuristics to determine (and sometimes guess) the right
# extra metadata to use for each target.
#
# - For some targets (such as the public libraries, helper libraries
# and executables) determining the right extra metadata is hard to do
# automatically. For these targets, the extra metadata is supplied "manually"
# in form of the _BUILD_EXTRA_METADATA dictionary. That allows us to match
# the semantics of the legacy "build.yaml" as closely as possible.
#
# - For test binaries, it is possible to generate the "extra metadata" mostly
# automatically using a rule-based heuristic approach because most tests
# look and behave alike from the build's perspective.
#
# TODO(jtattermusch): Of course neither "_BUILD_EXTRA_METADATA" or
# the heuristic approach used for tests are ideal and they cannot be made
# to cover all possible situations (and are tailored to work with the way
# the grpc build currently works), but the idea was to start with something
# reasonably simple that matches the "build.yaml"-like semantics as closely
# as possible (to avoid changing too many things at once) and gradually get
# rid of the legacy "build.yaml"-specific fields one by one. Once that is done,
# only very little "extra metadata" would be needed and/or it would be trivial
# to generate it automatically.
all_extra_metadata = {}
all_extra_metadata.update(_BUILD_EXTRA_METADATA)
all_extra_metadata.update(
_generate_build_extra_metadata_for_tests(tests, bazel_rules))
# Step 4: Compute the build metadata that will be used in the final build.yaml.
# The final build metadata includes transitive dependencies, and sources/headers
# expanded without intermediate dependencies.
# Example:
# '//:grpc' : { ...,
# '_TRANSITIVE_DEPS': ['//:gpr_base', ...],
# '_COLLAPSED_DEPS': ['gpr', ...],
# '_COLLAPSED_SRCS': [...],
# '_COLLAPSED_PUBLIC_HEADERS': [...],
# '_COLLAPSED_HEADERS': [...]
# }
_populate_transitive_metadata(bazel_rules, list(all_extra_metadata.keys()))
# Step 4a: Update the existing test metadata with the updated build metadata.
# Certain build metadata of certain test targets depend on the transitive
# metadata that wasn't available earlier.
update_test_metadata_with_transitive_metadata(all_extra_metadata, bazel_rules)
# Step 5: Generate the final metadata for all the targets.
# This is done by combining the bazel build metadata and the "extra metadata"
# we obtained in the previous step.
# In this step, we also perform some interesting massaging of the target metadata
# to end up with a result that is as similar to the legacy build.yaml data
# as possible.
# - Some targets get renamed (to match the legacy build.yaml target names)
# - Some intermediate libraries get elided ("expanded") to better match the set
# of targets provided by the legacy build.yaml build
#
# Originally the target renaming was introduced to address these concerns:
# - avoid changing too many things at the same time and avoid people getting
#   confused by some well-known targets suddenly being missing
# - Makefile/cmake and also language-specific generators rely on some build
#   targets being called exactly the way they are. Some of our testing
#   scripts also invoke executables (e.g. "qps_json_driver") by their name.
# - The autogenerated test name from bazel includes the package path
# (e.g. "test_cpp_TEST_NAME"). Without renaming, the target names would
# end up pretty ugly (e.g. test_cpp_qps_qps_json_driver).
# TODO(jtattermusch): reevaluate the need for target renaming in the future.
#
# Example of a single generated target:
# 'grpc' : { 'language': 'c',
# 'public_headers': ['include/grpc/byte_buffer.h', ... ],
# 'headers': ['src/core/ext/filters/client_channel/client_channel.h', ... ],
# 'src': ['src/core/lib/surface/init.cc', ... ],
# 'deps': ['gpr', 'address_sorting', ...],
# ... }
all_targets_dict = _generate_build_metadata(all_extra_metadata, bazel_rules)
# Step 6: convert the dictionary with all the targets to a dict that has
# the desired "build.yaml"-like layout.
# TODO(jtattermusch): We use the custom "build.yaml"-like layout because
# currently all other build systems use that format as their source of truth.
# In the future, we can get rid of this custom & legacy format entirely,
# but we would need to update the generators for other build systems
# at the same time.
#
# Layout of the result:
# { 'libs': { TARGET_DICT_FOR_LIB_XYZ, ... },
# 'targets': { TARGET_DICT_FOR_BIN_XYZ, ... },
# 'tests': { TARGET_DICT_FOR_TEST_XYZ, ...} }
build_yaml_like = _convert_to_build_yaml_like(all_targets_dict)
# detect and report some suspicious situations we've seen before
_detect_and_print_issues(build_yaml_like)
# Step 7: Store the build_autogenerated.yaml in a deterministic (=sorted)
# and cleaned-up form.
# A basic overview of the resulting "build.yaml"-like format is here:
# https://github.com/grpc/grpc/blob/master/templates/README.md
# TODO(jtattermusch): The "cleanup" function is taken from the legacy
# build system (which used build.yaml) and can be eventually removed.
build_yaml_string = build_cleaner.cleaned_build_yaml_dict_as_string(
build_yaml_like)
with open('build_autogenerated.yaml', 'w') as file:
file.write(build_yaml_string)
|
genius1611/horizon
|
django-openstack/django_openstack/tests/api_tests.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cloudfiles
import httplib
import json
import mox
from django import http
from django.conf import settings
from django_openstack import api
from glance import client as glance_client
from mox import IsA
from novaclient import service_catalog, client as base_client
from novaclient.keystone import client as keystone_client
from novaclient.v1_1 import client as nova_client
from openstack import compute as OSCompute
from openstackx import admin as OSAdmin
from openstackx import auth as OSAuth
from openstackx import extras as OSExtras
from django_openstack import test
from django_openstack.middleware import keystone
TEST_CONSOLE_KIND = 'vnc'
TEST_EMAIL = 'test@test.com'
TEST_HOSTNAME = 'hostname'
TEST_INSTANCE_ID = '2'
TEST_PASSWORD = '12345'
TEST_PORT = 8000
TEST_RETURN = 'retValue'
TEST_TENANT_DESCRIPTION = 'tenantDescription'
TEST_TENANT_ID = '1234'
TEST_TENANT_NAME = 'foo'
TEST_TOKEN = 'aToken'
TEST_TOKEN_ID = 'userId'
TEST_URL = 'http://%s:%s/something/v1.0' % (TEST_HOSTNAME, TEST_PORT)
TEST_USERNAME = 'testUser'
class Server(object):
""" More or less fakes what the api is looking for """
def __init__(self, id, image, attrs=None):
self.id = id
self.image = image
if attrs is not None:
self.attrs = attrs
def __eq__(self, other):
if self.id != other.id or \
self.image['id'] != other.image['id']:
return False
for k in self.attrs:
            if other.attrs.__getattr__(k) != self.attrs[k]:
return False
return True
def __ne__(self, other):
return not self == other
class Tenant(object):
""" More or less fakes what the api is looking for """
def __init__(self, id, description, enabled):
self.id = id
self.description = description
self.enabled = enabled
def __eq__(self, other):
return self.id == other.id and \
self.description == other.description and \
self.enabled == other.enabled
def __ne__(self, other):
return not self == other
class Token(object):
""" More or less fakes what the api is looking for """
def __init__(self, id, username, tenant_id, tenant_name,
serviceCatalog=None):
self.id = id
self.user = {'name': username}
self.tenant = {'id': tenant_id, 'name': tenant_name}
self.serviceCatalog = serviceCatalog
def __eq__(self, other):
return self.id == other.id and \
self.user['name'] == other.user['name'] and \
            self.tenant['id'] == other.tenant['id'] and \
self.serviceCatalog == other.serviceCatalog
def __ne__(self, other):
return not self == other
class APIResource(api.APIResourceWrapper):
""" Simple APIResource for testing """
_attrs = ['foo', 'bar', 'baz']
@staticmethod
def get_instance(innerObject=None):
if innerObject is None:
class InnerAPIResource(object):
pass
innerObject = InnerAPIResource()
innerObject.foo = 'foo'
innerObject.bar = 'bar'
return APIResource(innerObject)
class APIDict(api.APIDictWrapper):
""" Simple APIDict for testing """
_attrs = ['foo', 'bar', 'baz']
@staticmethod
def get_instance(innerDict=None):
if innerDict is None:
innerDict = {'foo': 'foo',
'bar': 'bar'}
return APIDict(innerDict)
class APITestCase(test.TestCase):
def setUp(self):
def fake_keystoneclient(request, username=None, password=None,
tenant_id=None, token_id=None, endpoint=None):
return self.stub_keystoneclient()
super(APITestCase, self).setUp()
self._original_keystoneclient = api.keystoneclient
self._original_novaclient = api.novaclient
api.keystoneclient = fake_keystoneclient
api.novaclient = lambda request: self.stub_novaclient()
def stub_novaclient(self):
if not hasattr(self, "novaclient"):
self.mox.StubOutWithMock(nova_client, 'Client')
self.novaclient = self.mox.CreateMock(nova_client.Client)
return self.novaclient
def stub_keystoneclient(self):
if not hasattr(self, "keystoneclient"):
self.mox.StubOutWithMock(keystone_client, 'Client')
self.keystoneclient = self.mox.CreateMock(keystone_client.Client)
return self.keystoneclient
def tearDown(self):
super(APITestCase, self).tearDown()
api.novaclient = self._original_novaclient
api.keystoneclient = self._original_keystoneclient
class APIResourceWrapperTests(test.TestCase):
def test_get_attribute(self):
resource = APIResource.get_instance()
self.assertEqual(resource.foo, 'foo')
def test_get_invalid_attribute(self):
resource = APIResource.get_instance()
self.assertNotIn('missing', resource._attrs,
msg="Test assumption broken. Find new missing attribute")
with self.assertRaises(AttributeError):
resource.missing
def test_get_inner_missing_attribute(self):
resource = APIResource.get_instance()
with self.assertRaises(AttributeError):
resource.baz
class APIDictWrapperTests(test.TestCase):
# APIDict allows for both attribute access and dictionary style [element]
# style access. Test both
def test_get_item(self):
resource = APIDict.get_instance()
self.assertEqual(resource.foo, 'foo')
self.assertEqual(resource['foo'], 'foo')
def test_get_invalid_item(self):
resource = APIDict.get_instance()
self.assertNotIn('missing', resource._attrs,
msg="Test assumption broken. Find new missing attribute")
with self.assertRaises(AttributeError):
resource.missing
with self.assertRaises(KeyError):
resource['missing']
def test_get_inner_missing_attribute(self):
resource = APIDict.get_instance()
with self.assertRaises(AttributeError):
resource.baz
with self.assertRaises(KeyError):
resource['baz']
def test_get_with_default(self):
resource = APIDict.get_instance()
self.assertEqual(resource.get('foo'), 'foo')
self.assertIsNone(resource.get('baz'))
self.assertEqual('retValue', resource.get('baz', 'retValue'))
# Wrapper classes that only define _attrs don't need extra testing.
# Wrapper classes that have other attributes or methods need testing
class ImageWrapperTests(test.TestCase):
dict_with_properties = {
'properties':
{'image_state': 'running'},
'size': 100,
}
dict_without_properties = {
'size': 100,
}
def test_get_properties(self):
image = api.Image(self.dict_with_properties)
image_props = image.properties
self.assertIsInstance(image_props, api.ImageProperties)
self.assertEqual(image_props.image_state, 'running')
def test_get_other(self):
image = api.Image(self.dict_with_properties)
self.assertEqual(image.size, 100)
def test_get_properties_missing(self):
image = api.Image(self.dict_without_properties)
with self.assertRaises(AttributeError):
image.properties
def test_get_other_missing(self):
image = api.Image(self.dict_without_properties)
        self.assertNotIn('missing', image._attrs,
                msg="Test assumption broken. Find new missing attribute")
        with self.assertRaises(AttributeError):
            image.missing
class ServerWrapperTests(test.TestCase):
HOST = 'hostname'
ID = '1'
IMAGE_NAME = 'imageName'
IMAGE_OBJ = {'id': '3', 'links': [{'href': '3', u'rel': u'bookmark'}]}
def setUp(self):
super(ServerWrapperTests, self).setUp()
# these are all objects "fetched" from the api
self.inner_attrs = {'host': self.HOST}
self.inner_server = Server(self.ID, self.IMAGE_OBJ, self.inner_attrs)
self.inner_server_no_attrs = Server(self.ID, self.IMAGE_OBJ)
#self.request = self.mox.CreateMock(http.HttpRequest)
def test_get_attrs(self):
server = api.Server(self.inner_server, self.request)
attrs = server.attrs
# for every attribute in the "inner" object passed to the api wrapper,
# see if it can be accessed through the api.ServerAttribute instance
for k in self.inner_attrs:
self.assertEqual(attrs.__getattr__(k), self.inner_attrs[k])
def test_get_other(self):
server = api.Server(self.inner_server, self.request)
self.assertEqual(server.id, self.ID)
def test_get_attrs_missing(self):
server = api.Server(self.inner_server_no_attrs, self.request)
with self.assertRaises(AttributeError):
server.attrs
def test_get_other_missing(self):
server = api.Server(self.inner_server, self.request)
with self.assertRaises(AttributeError):
self.assertNotIn('missing', server._attrs,
msg="Test assumption broken. Find new missing attribute")
server.missing
def test_image_name(self):
self.mox.StubOutWithMock(api, 'image_get')
api.image_get(IsA(http.HttpRequest),
self.IMAGE_OBJ['id']
).AndReturn(api.Image({'name': self.IMAGE_NAME}))
server = api.Server(self.inner_server, self.request)
self.mox.ReplayAll()
image_name = server.image_name
self.assertEqual(image_name, self.IMAGE_NAME)
self.mox.VerifyAll()
class ApiHelperTests(test.TestCase):
""" Tests for functions that don't use one of the api objects """
def test_url_for(self):
GLANCE_URL = 'http://glance/glanceapi/'
NOVA_URL = 'http://nova/novapi/'
url = api.url_for(self.request, 'image')
self.assertEqual(url, GLANCE_URL + 'internal')
url = api.url_for(self.request, 'image', admin=False)
self.assertEqual(url, GLANCE_URL + 'internal')
url = api.url_for(self.request, 'image', admin=True)
self.assertEqual(url, GLANCE_URL + 'admin')
url = api.url_for(self.request, 'compute')
self.assertEqual(url, NOVA_URL + 'internal')
url = api.url_for(self.request, 'compute', admin=False)
self.assertEqual(url, NOVA_URL + 'internal')
url = api.url_for(self.request, 'compute', admin=True)
self.assertEqual(url, NOVA_URL + 'admin')
self.assertNotIn('notAnApi', self.request.user.service_catalog,
'Select a new nonexistent service catalog key')
with self.assertRaises(api.ServiceCatalogException):
url = api.url_for(self.request, 'notAnApi')
class TenantAPITests(APITestCase):
def test_tenant_create(self):
DESCRIPTION = 'aDescription'
ENABLED = True
keystoneclient = self.stub_keystoneclient()
keystoneclient.tenants = self.mox.CreateMockAnything()
keystoneclient.tenants.create(TEST_TENANT_ID, DESCRIPTION,
ENABLED).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.tenant_create(self.request, TEST_TENANT_ID,
DESCRIPTION, ENABLED)
self.assertIsInstance(ret_val, api.Tenant)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_tenant_get(self):
keystoneclient = self.stub_keystoneclient()
keystoneclient.tenants = self.mox.CreateMockAnything()
keystoneclient.tenants.get(TEST_TENANT_ID).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.tenant_get(self.request, TEST_TENANT_ID)
self.assertIsInstance(ret_val, api.Tenant)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_tenant_list(self):
tenants = (TEST_RETURN, TEST_RETURN + '2')
keystoneclient = self.stub_keystoneclient()
keystoneclient.tenants = self.mox.CreateMockAnything()
keystoneclient.tenants.list().AndReturn(tenants)
self.mox.ReplayAll()
ret_val = api.tenant_list(self.request)
self.assertEqual(len(ret_val), len(tenants))
for tenant in ret_val:
self.assertIsInstance(tenant, api.Tenant)
self.assertIn(tenant._apiresource, tenants)
self.mox.VerifyAll()
def test_tenant_update(self):
DESCRIPTION = 'aDescription'
ENABLED = True
keystoneclient = self.stub_keystoneclient()
keystoneclient.tenants = self.mox.CreateMockAnything()
keystoneclient.tenants.update(TEST_TENANT_ID, TEST_TENANT_NAME,
DESCRIPTION, ENABLED).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.tenant_update(self.request, TEST_TENANT_ID,
TEST_TENANT_NAME, DESCRIPTION, ENABLED)
self.assertIsInstance(ret_val, api.Tenant)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
class UserAPITests(APITestCase):
def test_user_create(self):
keystoneclient = self.stub_keystoneclient()
keystoneclient.users = self.mox.CreateMockAnything()
keystoneclient.users.create(TEST_USERNAME, TEST_PASSWORD, TEST_EMAIL,
TEST_TENANT_ID, True).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.user_create(self.request, TEST_USERNAME, TEST_EMAIL,
TEST_PASSWORD, TEST_TENANT_ID, True)
self.assertIsInstance(ret_val, api.User)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_user_delete(self):
keystoneclient = self.stub_keystoneclient()
keystoneclient.users = self.mox.CreateMockAnything()
keystoneclient.users.delete(TEST_USERNAME).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.user_delete(self.request, TEST_USERNAME)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_user_get(self):
keystoneclient = self.stub_keystoneclient()
keystoneclient.users = self.mox.CreateMockAnything()
keystoneclient.users.get(TEST_USERNAME).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.user_get(self.request, TEST_USERNAME)
self.assertIsInstance(ret_val, api.User)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_user_list(self):
users = (TEST_USERNAME, TEST_USERNAME + '2')
keystoneclient = self.stub_keystoneclient()
keystoneclient.users = self.mox.CreateMockAnything()
keystoneclient.users.list(tenant_id=None).AndReturn(users)
self.mox.ReplayAll()
ret_val = api.user_list(self.request)
self.assertEqual(len(ret_val), len(users))
for user in ret_val:
self.assertIsInstance(user, api.User)
self.assertIn(user._apiresource, users)
self.mox.VerifyAll()
def test_user_update_email(self):
keystoneclient = self.stub_keystoneclient()
keystoneclient.users = self.mox.CreateMockAnything()
keystoneclient.users.update_email(TEST_USERNAME,
TEST_EMAIL).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.user_update_email(self.request, TEST_USERNAME,
TEST_EMAIL)
self.assertIsInstance(ret_val, api.User)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_user_update_password(self):
keystoneclient = self.stub_keystoneclient()
keystoneclient.users = self.mox.CreateMockAnything()
keystoneclient.users.update_password(TEST_USERNAME,
TEST_PASSWORD).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.user_update_password(self.request, TEST_USERNAME,
TEST_PASSWORD)
self.assertIsInstance(ret_val, api.User)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_user_update_tenant(self):
keystoneclient = self.stub_keystoneclient()
keystoneclient.users = self.mox.CreateMockAnything()
keystoneclient.users.update_tenant(TEST_USERNAME,
TEST_TENANT_ID).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.user_update_tenant(self.request, TEST_USERNAME,
TEST_TENANT_ID)
self.assertIsInstance(ret_val, api.User)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
class RoleAPITests(APITestCase):
def test_role_add_for_tenant_user(self):
keystoneclient = self.stub_keystoneclient()
role = api.Role(APIResource.get_instance())
role.id = TEST_RETURN
role.name = TEST_RETURN
keystoneclient.roles = self.mox.CreateMockAnything()
keystoneclient.roles.add_user_to_tenant(TEST_TENANT_ID,
TEST_USERNAME,
TEST_RETURN).AndReturn(role)
api._get_role = self.mox.CreateMockAnything()
api._get_role(IsA(http.HttpRequest), IsA(str)).AndReturn(role)
self.mox.ReplayAll()
ret_val = api.role_add_for_tenant_user(self.request,
TEST_TENANT_ID,
TEST_USERNAME,
TEST_RETURN)
self.assertEqual(ret_val, role)
self.mox.VerifyAll()
class AdminApiTests(APITestCase):
def stub_admin_api(self, count=1):
self.mox.StubOutWithMock(api, 'admin_api')
admin_api = self.mox.CreateMock(OSAdmin.Admin)
for i in range(count):
api.admin_api(IsA(http.HttpRequest)).AndReturn(admin_api)
return admin_api
def test_get_admin_api(self):
self.mox.StubOutClassWithMocks(OSAdmin, 'Admin')
OSAdmin.Admin(auth_token=TEST_TOKEN, management_url=TEST_URL)
self.mox.StubOutWithMock(api, 'url_for')
api.url_for(IsA(http.HttpRequest), 'compute', True).AndReturn(TEST_URL)
api.url_for(IsA(http.HttpRequest), 'compute', True).AndReturn(TEST_URL)
self.mox.ReplayAll()
self.assertIsNotNone(api.admin_api(self.request))
self.mox.VerifyAll()
def test_flavor_create(self):
FLAVOR_DISK = 1000
FLAVOR_ID = 6
FLAVOR_MEMORY = 1024
FLAVOR_NAME = 'newFlavor'
FLAVOR_VCPU = 2
admin_api = self.stub_admin_api()
admin_api.flavors = self.mox.CreateMockAnything()
admin_api.flavors.create(FLAVOR_NAME, FLAVOR_MEMORY, FLAVOR_VCPU,
FLAVOR_DISK, FLAVOR_ID).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.flavor_create(self.request, FLAVOR_NAME,
str(FLAVOR_MEMORY), str(FLAVOR_VCPU),
str(FLAVOR_DISK), FLAVOR_ID)
self.assertIsInstance(ret_val, api.Flavor)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_flavor_delete(self):
FLAVOR_ID = 6
admin_api = self.stub_admin_api(count=2)
admin_api.flavors = self.mox.CreateMockAnything()
admin_api.flavors.delete(FLAVOR_ID, False).AndReturn(TEST_RETURN)
admin_api.flavors.delete(FLAVOR_ID, True).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.flavor_delete(self.request, FLAVOR_ID)
self.assertIsNone(ret_val)
ret_val = api.flavor_delete(self.request, FLAVOR_ID, purge=True)
self.assertIsNone(ret_val)
def test_service_get(self):
NAME = 'serviceName'
admin_api = self.stub_admin_api()
admin_api.services = self.mox.CreateMockAnything()
admin_api.services.get(NAME).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.service_get(self.request, NAME)
self.assertIsInstance(ret_val, api.Services)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_service_list(self):
services = (TEST_RETURN, TEST_RETURN + '2')
admin_api = self.stub_admin_api()
admin_api.services = self.mox.CreateMockAnything()
admin_api.services.list().AndReturn(services)
self.mox.ReplayAll()
ret_val = api.service_list(self.request)
for service in ret_val:
self.assertIsInstance(service, api.Services)
self.assertIn(service._apiresource, services)
self.mox.VerifyAll()
def test_service_update(self):
ENABLED = True
NAME = 'serviceName'
admin_api = self.stub_admin_api()
admin_api.services = self.mox.CreateMockAnything()
admin_api.services.update(NAME, ENABLED).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.service_update(self.request, NAME, ENABLED)
self.assertIsInstance(ret_val, api.Services)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
class TokenApiTests(APITestCase):
def setUp(self):
super(TokenApiTests, self).setUp()
self._prev_OPENSTACK_KEYSTONE_URL = getattr(settings,
'OPENSTACK_KEYSTONE_URL',
None)
settings.OPENSTACK_KEYSTONE_URL = TEST_URL
def tearDown(self):
super(TokenApiTests, self).tearDown()
settings.OPENSTACK_KEYSTONE_URL = self._prev_OPENSTACK_KEYSTONE_URL
def test_token_create(self):
catalog = {
'access': {
'token': {
'id': TEST_TOKEN_ID,
},
'user': {
'roles': [],
}
}
}
test_token = Token(TEST_TOKEN_ID, TEST_USERNAME,
TEST_TENANT_ID, TEST_TENANT_NAME)
keystoneclient = self.stub_keystoneclient()
keystoneclient.tokens = self.mox.CreateMockAnything()
keystoneclient.tokens.authenticate(username=TEST_USERNAME,
password=TEST_PASSWORD,
tenant=TEST_TENANT_ID
).AndReturn(test_token)
self.mox.ReplayAll()
ret_val = api.token_create(self.request, TEST_TENANT_ID,
TEST_USERNAME, TEST_PASSWORD)
self.assertEqual(test_token.tenant['id'], ret_val.tenant['id'])
self.mox.VerifyAll()
class ComputeApiTests(APITestCase):
def stub_compute_api(self, count=1):
self.mox.StubOutWithMock(api, 'compute_api')
compute_api = self.mox.CreateMock(OSCompute.Compute)
for i in range(count):
api.compute_api(IsA(http.HttpRequest)).AndReturn(compute_api)
return compute_api
def test_get_compute_api(self):
class ComputeClient(object):
__slots__ = ['auth_token', 'management_url']
self.mox.StubOutClassWithMocks(OSCompute, 'Compute')
compute_api = OSCompute.Compute(auth_token=TEST_TOKEN,
management_url=TEST_URL)
compute_api.client = ComputeClient()
self.mox.StubOutWithMock(api, 'url_for')
# called three times? Looks like a good place for optimization
api.url_for(IsA(http.HttpRequest), 'compute').AndReturn(TEST_URL)
api.url_for(IsA(http.HttpRequest), 'compute').AndReturn(TEST_URL)
api.url_for(IsA(http.HttpRequest), 'compute').AndReturn(TEST_URL)
self.mox.ReplayAll()
compute_api = api.compute_api(self.request)
self.assertIsNotNone(compute_api)
self.assertEqual(compute_api.client.auth_token, TEST_TOKEN)
self.assertEqual(compute_api.client.management_url, TEST_URL)
self.mox.VerifyAll()
def test_flavor_get(self):
FLAVOR_ID = 6
novaclient = self.stub_novaclient()
novaclient.flavors = self.mox.CreateMockAnything()
novaclient.flavors.get(FLAVOR_ID).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.flavor_get(self.request, FLAVOR_ID)
self.assertIsInstance(ret_val, api.Flavor)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_server_delete(self):
INSTANCE = 'anInstance'
compute_api = self.stub_compute_api()
compute_api.servers = self.mox.CreateMockAnything()
compute_api.servers.delete(INSTANCE).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.server_delete(self.request, INSTANCE)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_server_reboot(self):
INSTANCE_ID = '2'
HARDNESS = 'diamond'
self.mox.StubOutWithMock(api, 'server_get')
server = self.mox.CreateMock(OSCompute.Server)
server.reboot(OSCompute.servers.REBOOT_HARD).AndReturn(TEST_RETURN)
api.server_get(IsA(http.HttpRequest), INSTANCE_ID).AndReturn(server)
server = self.mox.CreateMock(OSCompute.Server)
server.reboot(HARDNESS).AndReturn(TEST_RETURN)
api.server_get(IsA(http.HttpRequest), INSTANCE_ID).AndReturn(server)
self.mox.ReplayAll()
ret_val = api.server_reboot(self.request, INSTANCE_ID)
self.assertIsNone(ret_val)
ret_val = api.server_reboot(self.request, INSTANCE_ID,
hardness=HARDNESS)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_server_create(self):
NAME = 'server'
IMAGE = 'anImage'
FLAVOR = 'cherry'
USER_DATA = {'nuts': 'berries'}
KEY = 'user'
SECGROUP = self.mox.CreateMock(api.SecurityGroup)
server = self.mox.CreateMock(OSCompute.Server)
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.create(NAME, IMAGE, FLAVOR, userdata=USER_DATA,
security_groups=[SECGROUP], key_name=KEY)\
.AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.server_create(self.request, NAME, IMAGE, FLAVOR,
KEY, USER_DATA, [SECGROUP])
self.assertIsInstance(ret_val, api.Server)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
class ExtrasApiTests(APITestCase):
def stub_extras_api(self, count=1):
self.mox.StubOutWithMock(api, 'extras_api')
extras_api = self.mox.CreateMock(OSExtras.Extras)
for i in range(count):
api.extras_api(IsA(http.HttpRequest)).AndReturn(extras_api)
return extras_api
def test_get_extras_api(self):
self.mox.StubOutClassWithMocks(OSExtras, 'Extras')
OSExtras.Extras(auth_token=TEST_TOKEN, management_url=TEST_URL)
self.mox.StubOutWithMock(api, 'url_for')
api.url_for(IsA(http.HttpRequest), 'compute').AndReturn(TEST_URL)
api.url_for(IsA(http.HttpRequest), 'compute').AndReturn(TEST_URL)
self.mox.ReplayAll()
self.assertIsNotNone(api.extras_api(self.request))
self.mox.VerifyAll()
def test_console_create(self):
extras_api = self.stub_extras_api(count=2)
extras_api.consoles = self.mox.CreateMockAnything()
extras_api.consoles.create(
TEST_INSTANCE_ID, TEST_CONSOLE_KIND).AndReturn(TEST_RETURN)
extras_api.consoles.create(
TEST_INSTANCE_ID, 'text').AndReturn(TEST_RETURN + '2')
self.mox.ReplayAll()
ret_val = api.console_create(self.request,
TEST_INSTANCE_ID,
TEST_CONSOLE_KIND)
self.assertIsInstance(ret_val, api.Console)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
ret_val = api.console_create(self.request, TEST_INSTANCE_ID)
self.assertIsInstance(ret_val, api.Console)
self.assertEqual(ret_val._apiresource, TEST_RETURN + '2')
self.mox.VerifyAll()
def test_flavor_list(self):
flavors = (TEST_RETURN, TEST_RETURN + '2')
novaclient = self.stub_novaclient()
novaclient.flavors = self.mox.CreateMockAnything()
novaclient.flavors.list().AndReturn(flavors)
self.mox.ReplayAll()
ret_val = api.flavor_list(self.request)
self.assertEqual(len(ret_val), len(flavors))
for flavor in ret_val:
self.assertIsInstance(flavor, api.Flavor)
self.assertIn(flavor._apiresource, flavors)
self.mox.VerifyAll()
def test_server_list(self):
servers = (TEST_RETURN, TEST_RETURN + '2')
extras_api = self.stub_extras_api()
extras_api.servers = self.mox.CreateMockAnything()
extras_api.servers.list().AndReturn(servers)
self.mox.ReplayAll()
ret_val = api.server_list(self.request)
self.assertEqual(len(ret_val), len(servers))
for server in ret_val:
self.assertIsInstance(server, api.Server)
self.assertIn(server._apiresource, servers)
self.mox.VerifyAll()
def test_usage_get(self):
extras_api = self.stub_extras_api()
extras_api.usage = self.mox.CreateMockAnything()
extras_api.usage.get(TEST_TENANT_ID, 'start',
'end').AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.usage_get(self.request, TEST_TENANT_ID, 'start', 'end')
self.assertIsInstance(ret_val, api.Usage)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_usage_list(self):
usages = (TEST_RETURN, TEST_RETURN + '2')
extras_api = self.stub_extras_api()
extras_api.usage = self.mox.CreateMockAnything()
extras_api.usage.list('start', 'end').AndReturn(usages)
self.mox.ReplayAll()
ret_val = api.usage_list(self.request, 'start', 'end')
self.assertEqual(len(ret_val), len(usages))
for usage in ret_val:
self.assertIsInstance(usage, api.Usage)
self.assertIn(usage._apiresource, usages)
self.mox.VerifyAll()
def test_server_get(self):
INSTANCE_ID = '2'
extras_api = self.stub_extras_api()
extras_api.servers = self.mox.CreateMockAnything()
extras_api.servers.get(INSTANCE_ID).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.server_get(self.request, INSTANCE_ID)
self.assertIsInstance(ret_val, api.Server)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
class VolumeTests(APITestCase):
def setUp(self):
super(VolumeTests, self).setUp()
volume = api.Volume(APIResource.get_instance())
volume.id = 1
volume.displayName = "displayName"
volume.attachments = [{"device": "/dev/vdb",
"serverId": 1,
"id": 1,
"volumeId": 1}]
self.volume = volume
self.volumes = [volume, ]
self.novaclient = self.stub_novaclient()
self.novaclient.volumes = self.mox.CreateMockAnything()
def test_volume_list(self):
self.novaclient.volumes.list().AndReturn(self.volumes)
self.mox.ReplayAll()
volumes = api.volume_list(self.request)
self.assertIsInstance(volumes[0], api.Volume)
self.mox.VerifyAll()
def test_volume_get(self):
self.novaclient.volumes.get(IsA(int)).AndReturn(self.volume)
self.mox.ReplayAll()
volume = api.volume_get(self.request, 1)
self.assertIsInstance(volume, api.Volume)
self.mox.VerifyAll()
def test_volume_instance_list(self):
self.novaclient.volumes.get_server_volumes(IsA(int)).AndReturn(
self.volume.attachments)
self.mox.ReplayAll()
attachments = api.volume_instance_list(self.request, 1)
self.assertEqual(attachments, self.volume.attachments)
self.mox.VerifyAll()
def test_volume_create(self):
self.novaclient.volumes.create(IsA(int), IsA(str), IsA(str)).AndReturn(
self.volume)
self.mox.ReplayAll()
new_volume = api.volume_create(self.request,
10,
"new volume",
"new description")
self.assertIsInstance(new_volume, api.Volume)
self.mox.VerifyAll()
def test_volume_delete(self):
self.novaclient.volumes.delete(IsA(int))
self.mox.ReplayAll()
ret_val = api.volume_delete(self.request, 1)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_volume_attach(self):
self.novaclient.volumes.create_server_volume(
IsA(int), IsA(int), IsA(str))
self.mox.ReplayAll()
ret_val = api.volume_attach(self.request, 1, 1, "/dev/vdb")
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_volume_detach(self):
self.novaclient.volumes.delete_server_volume(IsA(int), IsA(int))
self.mox.ReplayAll()
ret_val = api.volume_detach(self.request, 1, 1)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
class APIExtensionTests(APITestCase):
def setUp(self):
super(APIExtensionTests, self).setUp()
keypair = api.KeyPair(APIResource.get_instance())
keypair.id = 1
keypair.name = TEST_RETURN
self.keypair = keypair
self.keypairs = [keypair, ]
floating_ip = api.FloatingIp(APIResource.get_instance())
floating_ip.id = 1
floating_ip.fixed_ip = '10.0.0.4'
floating_ip.instance_id = 1
floating_ip.ip = '58.58.58.58'
self.floating_ip = floating_ip
self.floating_ips = [floating_ip, ]
server = api.Server(APIResource.get_instance(), self.request)
server.id = 1
self.server = server
self.servers = [server, ]
def test_server_snapshot_create(self):
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.create_image(IsA(int), IsA(str)).\
AndReturn(self.server)
self.mox.ReplayAll()
server = api.snapshot_create(self.request, 1, 'test-snapshot')
self.assertIsInstance(server, api.Server)
self.mox.VerifyAll()
def test_tenant_floating_ip_list(self):
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.list().AndReturn(self.floating_ips)
self.mox.ReplayAll()
floating_ips = api.tenant_floating_ip_list(self.request)
self.assertEqual(len(floating_ips), len(self.floating_ips))
self.assertIsInstance(floating_ips[0], api.FloatingIp)
self.mox.VerifyAll()
def test_tenant_floating_ip_get(self):
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.get(IsA(int)).AndReturn(self.floating_ip)
self.mox.ReplayAll()
floating_ip = api.tenant_floating_ip_get(self.request, 1)
self.assertIsInstance(floating_ip, api.FloatingIp)
self.mox.VerifyAll()
def test_tenant_floating_ip_allocate(self):
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.create().AndReturn(self.floating_ip)
self.mox.ReplayAll()
floating_ip = api.tenant_floating_ip_allocate(self.request)
self.assertIsInstance(floating_ip, api.FloatingIp)
self.mox.VerifyAll()
def test_tenant_floating_ip_release(self):
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.floating_ips.delete(1).AndReturn(self.floating_ip)
self.mox.ReplayAll()
floating_ip = api.tenant_floating_ip_release(self.request, 1)
self.assertIsInstance(floating_ip, api.FloatingIp)
self.mox.VerifyAll()
def test_server_remove_floating_ip(self):
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.servers.get(IsA(int)).AndReturn(self.server)
novaclient.floating_ips.get(IsA(int)).AndReturn(self.floating_ip)
novaclient.servers.remove_floating_ip(IsA(self.server.__class__),
IsA(self.floating_ip.__class__)) \
.AndReturn(self.server)
self.mox.ReplayAll()
server = api.server_remove_floating_ip(self.request, 1, 1)
self.assertIsInstance(server, api.Server)
self.mox.VerifyAll()
def test_server_add_floating_ip(self):
novaclient = self.stub_novaclient()
novaclient.floating_ips = self.mox.CreateMockAnything()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.get(IsA(int)).AndReturn(self.server)
novaclient.floating_ips.get(IsA(int)).AndReturn(self.floating_ip)
novaclient.servers.add_floating_ip(IsA(self.server.__class__),
IsA(self.floating_ip.__class__)) \
.AndReturn(self.server)
self.mox.ReplayAll()
server = api.server_add_floating_ip(self.request, 1, 1)
self.assertIsInstance(server, api.Server)
self.mox.VerifyAll()
def test_keypair_create(self):
novaclient = self.stub_novaclient()
novaclient.keypairs = self.mox.CreateMockAnything()
novaclient.keypairs.create(IsA(str)).AndReturn(self.keypair)
self.mox.ReplayAll()
ret_val = api.keypair_create(self.request, TEST_RETURN)
self.assertIsInstance(ret_val, api.KeyPair)
self.assertEqual(ret_val.name, self.keypair.name)
self.mox.VerifyAll()
def test_keypair_import(self):
novaclient = self.stub_novaclient()
novaclient.keypairs = self.mox.CreateMockAnything()
novaclient.keypairs.create(IsA(str), IsA(str)).AndReturn(self.keypair)
self.mox.ReplayAll()
ret_val = api.keypair_import(self.request, TEST_RETURN, TEST_RETURN)
self.assertIsInstance(ret_val, api.KeyPair)
self.assertEqual(ret_val.name, self.keypair.name)
self.mox.VerifyAll()
def test_keypair_delete(self):
novaclient = self.stub_novaclient()
novaclient.keypairs = self.mox.CreateMockAnything()
novaclient.keypairs.delete(IsA(int))
self.mox.ReplayAll()
ret_val = api.keypair_delete(self.request, self.keypair.id)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_keypair_list(self):
novaclient = self.stub_novaclient()
novaclient.keypairs = self.mox.CreateMockAnything()
novaclient.keypairs.list().AndReturn(self.keypairs)
self.mox.ReplayAll()
ret_val = api.keypair_list(self.request)
self.assertEqual(len(ret_val), len(self.keypairs))
for keypair in ret_val:
self.assertIsInstance(keypair, api.KeyPair)
self.mox.VerifyAll()
class GlanceApiTests(APITestCase):
def stub_glance_api(self, count=1):
self.mox.StubOutWithMock(api, 'glance_api')
glance_api = self.mox.CreateMock(glance_client.Client)
glance_api.token = TEST_TOKEN
for i in range(count):
api.glance_api(IsA(http.HttpRequest)).AndReturn(glance_api)
return glance_api
def test_get_glance_api(self):
self.mox.StubOutClassWithMocks(glance_client, 'Client')
client_instance = glance_client.Client(TEST_HOSTNAME, TEST_PORT,
auth_tok=TEST_TOKEN)
# Normally ``auth_tok`` is set in ``Client.__init__``, but mox doesn't
# duplicate that behavior so we set it manually.
client_instance.auth_tok = TEST_TOKEN
self.mox.StubOutWithMock(api, 'url_for')
api.url_for(IsA(http.HttpRequest), 'image').AndReturn(TEST_URL)
self.mox.ReplayAll()
ret_val = api.glance_api(self.request)
self.assertIsNotNone(ret_val)
self.assertEqual(ret_val.auth_tok, TEST_TOKEN)
self.mox.VerifyAll()
def test_image_create(self):
IMAGE_FILE = 'someData'
IMAGE_META = {'metadata': 'foo'}
glance_api = self.stub_glance_api()
glance_api.add_image(IMAGE_META, IMAGE_FILE).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.image_create(self.request, IMAGE_META, IMAGE_FILE)
self.assertIsInstance(ret_val, api.Image)
self.assertEqual(ret_val._apidict, TEST_RETURN)
self.mox.VerifyAll()
def test_image_delete(self):
IMAGE_ID = '1'
glance_api = self.stub_glance_api()
glance_api.delete_image(IMAGE_ID).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.image_delete(self.request, IMAGE_ID)
self.assertEqual(ret_val, TEST_RETURN)
self.mox.VerifyAll()
def test_image_get(self):
IMAGE_ID = '1'
glance_api = self.stub_glance_api()
glance_api.get_image(IMAGE_ID).AndReturn([TEST_RETURN])
self.mox.ReplayAll()
ret_val = api.image_get(self.request, IMAGE_ID)
self.assertIsInstance(ret_val, api.Image)
self.assertEqual(ret_val._apidict, TEST_RETURN)
def test_image_list_detailed(self):
images = (TEST_RETURN, TEST_RETURN + '2')
glance_api = self.stub_glance_api()
glance_api.get_images_detailed().AndReturn(images)
self.mox.ReplayAll()
ret_val = api.image_list_detailed(self.request)
self.assertEqual(len(ret_val), len(images))
for image in ret_val:
self.assertIsInstance(image, api.Image)
self.assertIn(image._apidict, images)
self.mox.VerifyAll()
def test_image_update(self):
IMAGE_ID = '1'
IMAGE_META = {'metadata': 'foobar'}
glance_api = self.stub_glance_api(count=2)
glance_api.update_image(IMAGE_ID, image_meta={}).AndReturn(TEST_RETURN)
glance_api.update_image(IMAGE_ID,
image_meta=IMAGE_META).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.image_update(self.request, IMAGE_ID)
self.assertIsInstance(ret_val, api.Image)
self.assertEqual(ret_val._apidict, TEST_RETURN)
ret_val = api.image_update(self.request,
IMAGE_ID,
image_meta=IMAGE_META)
self.assertIsInstance(ret_val, api.Image)
self.assertEqual(ret_val._apidict, TEST_RETURN)
self.mox.VerifyAll()
class SwiftApiTests(APITestCase):
def setUp(self):
self.mox = mox.Mox()
self.request = http.HttpRequest()
self.request.session = dict()
self.request.session['token'] = TEST_TOKEN
def tearDown(self):
self.mox.UnsetStubs()
def stub_swift_api(self, count=1):
self.mox.StubOutWithMock(api, 'swift_api')
swift_api = self.mox.CreateMock(cloudfiles.connection.Connection)
for i in range(count):
api.swift_api(IsA(http.HttpRequest)).AndReturn(swift_api)
return swift_api
def test_swift_get_containers(self):
containers = (TEST_RETURN, TEST_RETURN + '2')
swift_api = self.stub_swift_api()
swift_api.get_all_containers(limit=10000,
marker=None).AndReturn(containers)
self.mox.ReplayAll()
ret_val = api.swift_get_containers(self.request)
self.assertEqual(len(ret_val), len(containers))
for container in ret_val:
self.assertIsInstance(container, api.Container)
self.assertIn(container._apiresource, containers)
self.mox.VerifyAll()
def test_swift_create_container(self):
NAME = 'containerName'
swift_api = self.stub_swift_api()
self.mox.StubOutWithMock(api, 'swift_container_exists')
api.swift_container_exists(self.request,
NAME).AndReturn(False)
swift_api.create_container(NAME).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.swift_create_container(self.request, NAME)
self.assertIsInstance(ret_val, api.Container)
self.assertEqual(ret_val._apiresource, TEST_RETURN)
self.mox.VerifyAll()
def test_swift_delete_container(self):
NAME = 'containerName'
swift_api = self.stub_swift_api()
swift_api.delete_container(NAME).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.swift_delete_container(self.request, NAME)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_swift_get_objects(self):
NAME = 'containerName'
swift_objects = (TEST_RETURN, TEST_RETURN + '2')
container = self.mox.CreateMock(cloudfiles.container.Container)
container.get_objects(limit=10000,
marker=None,
prefix=None).AndReturn(swift_objects)
swift_api = self.stub_swift_api()
swift_api.get_container(NAME).AndReturn(container)
self.mox.ReplayAll()
ret_val = api.swift_get_objects(self.request, NAME)
self.assertEqual(len(ret_val), len(swift_objects))
for swift_object in ret_val:
self.assertIsInstance(swift_object, api.SwiftObject)
self.assertIn(swift_object._apiresource, swift_objects)
self.mox.VerifyAll()
def test_swift_get_objects_with_prefix(self):
NAME = 'containerName'
PREFIX = 'prefacedWith'
swift_objects = (TEST_RETURN, TEST_RETURN + '2')
container = self.mox.CreateMock(cloudfiles.container.Container)
container.get_objects(limit=10000,
marker=None,
prefix=PREFIX).AndReturn(swift_objects)
swift_api = self.stub_swift_api()
swift_api.get_container(NAME).AndReturn(container)
self.mox.ReplayAll()
ret_val = api.swift_get_objects(self.request,
NAME,
prefix=PREFIX)
self.assertEqual(len(ret_val), len(swift_objects))
for swift_object in ret_val:
self.assertIsInstance(swift_object, api.SwiftObject)
self.assertIn(swift_object._apiresource, swift_objects)
self.mox.VerifyAll()
def test_swift_upload_object(self):
CONTAINER_NAME = 'containerName'
OBJECT_NAME = 'objectName'
OBJECT_DATA = 'someData'
swift_api = self.stub_swift_api()
container = self.mox.CreateMock(cloudfiles.container.Container)
swift_object = self.mox.CreateMock(cloudfiles.storage_object.Object)
swift_api.get_container(CONTAINER_NAME).AndReturn(container)
container.create_object(OBJECT_NAME).AndReturn(swift_object)
swift_object.write(OBJECT_DATA).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.swift_upload_object(self.request,
CONTAINER_NAME,
OBJECT_NAME,
OBJECT_DATA)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_swift_delete_object(self):
CONTAINER_NAME = 'containerName'
OBJECT_NAME = 'objectName'
swift_api = self.stub_swift_api()
container = self.mox.CreateMock(cloudfiles.container.Container)
swift_api.get_container(CONTAINER_NAME).AndReturn(container)
container.delete_object(OBJECT_NAME).AndReturn(TEST_RETURN)
self.mox.ReplayAll()
ret_val = api.swift_delete_object(self.request,
CONTAINER_NAME,
OBJECT_NAME)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
def test_swift_get_object_data(self):
CONTAINER_NAME = 'containerName'
OBJECT_NAME = 'objectName'
OBJECT_DATA = 'objectData'
swift_api = self.stub_swift_api()
container = self.mox.CreateMock(cloudfiles.container.Container)
swift_object = self.mox.CreateMock(cloudfiles.storage_object.Object)
swift_api.get_container(CONTAINER_NAME).AndReturn(container)
container.get_object(OBJECT_NAME).AndReturn(swift_object)
swift_object.stream().AndReturn(OBJECT_DATA)
self.mox.ReplayAll()
ret_val = api.swift_get_object_data(self.request,
CONTAINER_NAME,
OBJECT_NAME)
self.assertEqual(ret_val, OBJECT_DATA)
self.mox.VerifyAll()
def test_swift_object_exists(self):
CONTAINER_NAME = 'containerName'
OBJECT_NAME = 'objectName'
swift_api = self.stub_swift_api()
container = self.mox.CreateMock(cloudfiles.container.Container)
swift_object = self.mox.CreateMock(cloudfiles.Object)
swift_api.get_container(CONTAINER_NAME).AndReturn(container)
container.get_object(OBJECT_NAME).AndReturn(swift_object)
self.mox.ReplayAll()
ret_val = api.swift_object_exists(self.request,
CONTAINER_NAME,
OBJECT_NAME)
self.assertTrue(ret_val)
self.mox.VerifyAll()
def test_swift_copy_object(self):
CONTAINER_NAME = 'containerName'
OBJECT_NAME = 'objectName'
swift_api = self.stub_swift_api()
container = self.mox.CreateMock(cloudfiles.container.Container)
self.mox.StubOutWithMock(api, 'swift_object_exists')
swift_object = self.mox.CreateMock(cloudfiles.Object)
swift_api.get_container(CONTAINER_NAME).AndReturn(container)
api.swift_object_exists(self.request,
CONTAINER_NAME,
OBJECT_NAME).AndReturn(False)
container.get_object(OBJECT_NAME).AndReturn(swift_object)
swift_object.copy_to(CONTAINER_NAME, OBJECT_NAME)
self.mox.ReplayAll()
ret_val = api.swift_copy_object(self.request, CONTAINER_NAME,
OBJECT_NAME, CONTAINER_NAME,
OBJECT_NAME)
self.assertIsNone(ret_val)
self.mox.VerifyAll()
|
Onager/dftimewolf
|
dftimewolf/lib/collectors/grr_hosts.py
|
# -*- coding: utf-8 -*-
"""Definition of modules for collecting data from GRR hosts."""
import datetime
import os
import re
import threading
import time
import zipfile
from grr_api_client import errors as grr_errors
from grr_response_proto import flows_pb2, timeline_pb2
from dftimewolf.lib.collectors.grr_base import GRRBaseModule
from dftimewolf.lib.containers import containers
from dftimewolf.lib.errors import DFTimewolfError
from dftimewolf.lib.modules import manager as modules_manager
# TODO: GRRFlow should be extended by classes that actually implement
# the Process() method.
class GRRFlow(GRRBaseModule): # pylint: disable=abstract-method
"""Launches and collects GRR flows.
Modules that use GRR flows or interact with hosts should extend this class.
Attributes:
keepalive (bool): True if the GRR keepalive functionality should be used.
"""
_CHECK_APPROVAL_INTERVAL_SEC = 10
_CHECK_FLOW_INTERVAL_SEC = 10
_CLIENT_ID_REGEX = re.compile(r'^c\.[0-9a-f]{16}$', re.IGNORECASE)
def __init__(self, state, critical=False):
"""Initializes a GRR flow module.
Args:
state (DFTimewolfState): recipe state.
critical (Optional[bool]): True if the module is critical, which causes
the entire recipe to fail if the module encounters an error.
"""
super(GRRFlow, self).__init__(state, critical=critical)
self.keepalive = False
# TODO: change object to more specific GRR type information.
def _GetClientByHostname(self, hostname):
"""Searches GRR by hostname and get the latest active client.
Args:
hostname (str): hostname to search for.
Returns:
object: GRR API Client object
Raises:
DFTimewolfError: if no client ID found for hostname.
"""
# Search for the hostname in GRR
print('Searching for client: {0:s}'.format(hostname))
try:
search_result = self.grr_api.SearchClients(hostname)
except grr_errors.UnknownError as exception:
self.state.AddError('Could not search for host {0:s}: {1!s}'.format(
hostname, exception
), critical=True)
return None
result = []
for client in search_result:
if hostname.lower() in client.data.os_info.fqdn.lower():
result.append((client.data.last_seen_at, client))
if not result:
self.state.AddError('Could not get client_id for {0:s}'.format(
hostname), critical=True)
return None
last_seen, client = sorted(result, key=lambda x: x[0], reverse=True)[0]
# Remove microseconds and create datetime object
last_seen_datetime = datetime.datetime.utcfromtimestamp(
last_seen / 1000000)
# Timedelta between now and when the client was last seen, in minutes.
# First, count total seconds. This will return a float.
last_seen_seconds = (
datetime.datetime.utcnow() - last_seen_datetime).total_seconds()
last_seen_minutes = int(round(last_seen_seconds / 60))
print('{0:s}: Found active client'.format(client.client_id))
print('Client last seen: {0:s} ({1:d} minutes ago)'.format(
last_seen_datetime.strftime('%Y-%m-%dT%H:%M:%S+0000'),
last_seen_minutes))
return client
# TODO: change object to more specific GRR type information.
def _FindClients(self, hosts):
"""Finds GRR clients given a list of hosts.
Args:
hosts (list[str]): FQDNs of hosts.
Returns:
list[object]: GRR client objects.
"""
# TODO(tomchop): Thread this
clients = []
for host in hosts:
clients.append(self._GetClientByHostname(host))
return [client for client in clients if client is not None]
# TODO: change object to more specific GRR type information.
def _LaunchFlow(self, client, name, args):
"""Creates the specified flow, setting KeepAlive if requested.
Args:
client (object): GRR Client object on which to launch the flow.
name (str): name of the GRR flow.
args (object): arguments specific for type of flow, as defined in GRR
flow proto (FlowArgs).
Returns:
str: GRR identifier for launched flow, or an empty string if flow could
not be launched.
"""
# Start the flow and get the flow ID
flow = self._WrapGRRRequestWithApproval(
client, client.CreateFlow, name=name, args=args)
if not flow:
return ''
flow_id = flow.flow_id
print('{0:s}: Scheduled'.format(flow_id))
if self.keepalive:
keepalive_flow = client.CreateFlow(
name='KeepAlive', args=flows_pb2.KeepAliveArgs())
print('KeepAlive Flow:{0:s} scheduled'.format(keepalive_flow.flow_id))
return flow_id
# TODO: change object to more specific GRR type information.
def _AwaitFlow(self, client, flow_id):
"""Waits for a specific GRR flow to complete.
Args:
client (object): GRR Client object in which to await the flow.
flow_id (str): GRR identifier of the flow to await.
Raises:
DFTimewolfError: if flow error encountered.
"""
print('{0:s}: Waiting to finish'.format(flow_id))
while True:
try:
status = client.Flow(flow_id).Get().data
except grr_errors.UnknownError:
msg = 'Unable to stat flow {0:s} for host {1:s}'.format(
flow_id, client.data.os_info.fqdn.lower())
self.state.AddError(msg)
raise DFTimewolfError(msg)
if status.state == flows_pb2.FlowContext.ERROR:
# TODO(jbn): If one artifact fails, what happens? Test.
message = status.context.backtrace
if 'ArtifactNotRegisteredError' in status.context.backtrace:
message = status.context.backtrace.split('\n')[-2]
raise DFTimewolfError(
'{0:s}: FAILED! Message from GRR:\n{1:s}'.format(
flow_id, message))
if status.state == flows_pb2.FlowContext.TERMINATED:
print('{0:s}: Complete'.format(flow_id))
break
time.sleep(self._CHECK_FLOW_INTERVAL_SEC)
# TODO: change object to more specific GRR type information.
def _DownloadFiles(self, client, flow_id):
"""Download files from the specified flow.
Args:
client (object): GRR Client object to which to download flow data from.
flow_id (str): GRR identifier of the flow.
Returns:
str: path of downloaded files.
"""
output_file_path = os.path.join(
self.output_path, '.'.join((flow_id, 'zip')))
if os.path.exists(output_file_path):
print('{0:s} already exists: Skipping'.format(output_file_path))
return None
flow = client.Flow(flow_id)
file_archive = flow.GetFilesArchive()
file_archive.WriteToFile(output_file_path)
# Unzip archive for processing and remove redundant zip
fqdn = client.data.os_info.fqdn.lower()
client_output_file = os.path.join(self.output_path, fqdn)
if not os.path.isdir(client_output_file):
os.makedirs(client_output_file)
with zipfile.ZipFile(output_file_path) as archive:
archive.extractall(path=client_output_file)
os.remove(output_file_path)
return client_output_file
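# Orientation sketch: the collector subclasses below chain the GRRFlow helpers
# above for each client, mirroring GRRArtifactCollector._ProcessThread:
#
#   flow_id = self._LaunchFlow(client, 'ArtifactCollectorFlow', flow_args)
#   if flow_id:
#     self._AwaitFlow(client, flow_id)
#     path = self._DownloadFiles(client, flow_id)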
class GRRArtifactCollector(GRRFlow):
"""Artifact collector for GRR flows.
Attributes:
artifacts (list[str]): artifact definition names.
extra_artifacts (list[str]): extra artifact definition names.
hostnames (list[str]): FQDNs of the GRR client hosts.
use_tsk (bool): True if GRR should use Sleuthkit (TSK) to collect file
system artifacts.
"""
_DEFAULT_ARTIFACTS_LINUX = [
'LinuxAuditLogs', 'LinuxAuthLogs', 'LinuxCronLogs', 'LinuxWtmp',
'AllUsersShellHistory', 'ZeitgeistDatabase'
]
_DEFAULT_ARTIFACTS_DARWIN = [
'MacOSRecentItems', 'MacOSBashHistory', 'MacOSLaunchAgentsPlistFiles',
'MacOSAuditLogFiles', 'MacOSSystemLogFiles', 'MacOSAppleSystemLogFiles',
'MacOSMiscLogs', 'MacOSSystemInstallationTime', 'MacOSQuarantineEvents',
'MacOSLaunchDaemonsPlistFiles', 'MacOSInstallationHistory',
'MacOSUserApplicationLogs', 'MacOSInstallationLogFile'
]
_DEFAULT_ARTIFACTS_WINDOWS = [
'WindowsAppCompatCache', 'WindowsEventLogs', 'WindowsPrefetchFiles',
'WindowsScheduledTasks', 'WindowsSearchDatabase',
'WindowsSuperFetchFiles', 'WindowsSystemRegistryFiles',
'WindowsUserRegistryFiles', 'WindowsXMLEventLogTerminalServices'
]
artifact_registry = {
'Linux': _DEFAULT_ARTIFACTS_LINUX,
'Darwin': _DEFAULT_ARTIFACTS_DARWIN,
'Windows': _DEFAULT_ARTIFACTS_WINDOWS
}
def __init__(self, state):
super(GRRArtifactCollector, self).__init__(state)
self._clients = []
self.artifacts = []
self.extra_artifacts = []
self.hostnames = None
self.use_tsk = False
# pylint: disable=arguments-differ,too-many-arguments
def SetUp(self,
hosts, artifacts, extra_artifacts, use_tsk,
reason, grr_server_url, grr_username, grr_password, approvers=None,
verify=True):
"""Initializes a GRR artifact collector.
Args:
hosts (str): comma-separated hostnames to launch the flow on.
artifacts (str): comma-separated artifact definition names.
extra_artifacts (str): comma-separated extra artifact definition names.
use_tsk (bool): True if GRR should use Sleuthkit (TSK) to collect file
system artifacts.
reason (str): justification for GRR access.
grr_server_url (str): GRR server URL.
grr_username (str): GRR username.
grr_password (str): GRR password.
approvers (Optional[str]): list of GRR approval recipients.
verify (Optional[bool]): True to indicate GRR server's x509 certificate
should be verified.
"""
super(GRRArtifactCollector, self).SetUp(
reason, grr_server_url, grr_username, grr_password, approvers=approvers,
verify=verify)
if artifacts is not None:
self.artifacts = [item.strip() for item in artifacts.strip().split(',')]
if extra_artifacts is not None:
self.extra_artifacts = [item.strip() for item
in extra_artifacts.strip().split(',')]
self.hostnames = [item.strip() for item in hosts.strip().split(',')]
self.use_tsk = use_tsk
# TODO: change object to more specific GRR type information.
def _ProcessThread(self, client):
"""Processes a single GRR client.
This function is used as a callback for the processing thread.
Args:
client (object): a GRR client object.
"""
system_type = client.data.os_info.system
print('System type: {0:s}'.format(system_type))
# If the list is supplied by the user via a flag, honor that.
artifact_list = []
if self.artifacts:
print('Artifacts to be collected: {0!s}'.format(self.artifacts))
artifact_list = self.artifacts
else:
default_artifacts = self.artifact_registry.get(system_type, None)
if default_artifacts:
print('Collecting default artifacts for {0:s}: {1:s}'.format(
system_type, ', '.join(default_artifacts)))
artifact_list.extend(default_artifacts)
if self.extra_artifacts:
print('Throwing in an extra {0!s}'.format(self.extra_artifacts))
artifact_list.extend(self.extra_artifacts)
artifact_list = list(set(artifact_list))
if not artifact_list:
return
flow_args = flows_pb2.ArtifactCollectorFlowArgs(
artifact_list=artifact_list,
use_tsk=self.use_tsk,
ignore_interpolation_errors=True,
apply_parsers=False)
flow_id = self._LaunchFlow(client, 'ArtifactCollectorFlow', flow_args)
if not flow_id:
msg = 'Flow could not be launched on {0:s}.'.format(client.client_id)
msg += '\nArtifactCollectorFlow args: {0!s}'.format(flow_args)
self.state.AddError(msg, critical=True)
return
self._AwaitFlow(client, flow_id)
collected_flow_data = self._DownloadFiles(client, flow_id)
if collected_flow_data:
print('{0!s}: Downloaded: {1:s}'.format(flow_id, collected_flow_data))
container = containers.File(
name=client.data.os_info.fqdn.lower(),
path=collected_flow_data
)
self.state.StoreContainer(container)
def Process(self):
"""Collects artifacts from a host with GRR.
Raises:
DFTimewolfError: if no artifacts were specified or resolved for the platform.
"""
threads = []
for client in self._FindClients(self.hostnames):
print(client)
thread = threading.Thread(target=self._ProcessThread, args=(client, ))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
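# Illustrative only (the argument values are hypothetical, not from a real
# recipe): a dftimewolf recipe would configure this collector roughly as
# follows before its Process() method runs:
#
#   collector = GRRArtifactCollector(state)
#   collector.SetUp(
#       hosts='host1.example.com', artifacts='BrowserHistory',
#       extra_artifacts=None, use_tsk=False, reason='ticket-1234',
#       grr_server_url='https://grr.example.com', grr_username='analyst',
#       grr_password='password', approvers='approver@example.com')
#   collector.Process()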
class GRRFileCollector(GRRFlow):
"""File collector for GRR flows.
Attributes:
files (list[str]): file paths.
hostnames (list[str]): FQDNs of the GRR client hosts.
use_tsk (bool): True if GRR should use Sleuthkit (TSK) to collect files.
action (FileFinderAction): Enum denoting action to take.
"""
_ACTIONS = {'download': flows_pb2.FileFinderAction.DOWNLOAD,
'hash': flows_pb2.FileFinderAction.HASH,
'stat': flows_pb2.FileFinderAction.STAT,
}
def __init__(self, state):
super(GRRFileCollector, self).__init__(state)
self._clients = []
self.files = []
self.hostnames = None
self.use_tsk = False
self.action = None
# pylint: disable=arguments-differ,too-many-arguments
def SetUp(self,
hosts, files, use_tsk,
reason, grr_server_url, grr_username, grr_password, approvers=None,
verify=True, action='download'):
"""Initializes a GRR file collector.
Args:
hosts (str): comma-separated hostnames to launch the flow on.
files (str): comma-separated file paths.
use_tsk (bool): True if GRR should use Sleuthkit (TSK) to collect files.
reason (str): justification for GRR access.
grr_server_url (str): GRR server URL.
grr_username (str): GRR username.
grr_password (str): GRR password.
approvers (Optional[str]): list of GRR approval recipients.
verify (Optional[bool]): True to indicate GRR server's x509 certificate
should be verified.
action (Optional[str]): Action (download/hash/stat) (default: download).
"""
super(GRRFileCollector, self).SetUp(
reason, grr_server_url, grr_username, grr_password,
approvers=approvers, verify=verify)
if files is not None:
self.files = [item.strip() for item in files.strip().split(',')]
self.hostnames = [item.strip() for item in hosts.strip().split(',')]
self.use_tsk = use_tsk
if action.lower() in self._ACTIONS:
self.action = self._ACTIONS[action.lower()]
if self.action is None:
self.state.AddError("Invalid action {0!s}".format(action),
critical=True)
# TODO: change object to more specific GRR type information.
def _ProcessThread(self, client):
"""Processes a single client.
This function is used as a callback for the processing thread.
Args:
client (object): GRR client object to act on.
"""
file_list = self.files
if not file_list:
return
print('Filefinder to collect {0:d} items'.format(len(file_list)))
flow_action = flows_pb2.FileFinderAction(
action_type=self.action)
flow_args = flows_pb2.FileFinderArgs(
paths=file_list,
action=flow_action,)
flow_id = self._LaunchFlow(client, 'FileFinder', flow_args)
self._AwaitFlow(client, flow_id)
collected_flow_data = self._DownloadFiles(client, flow_id)
if collected_flow_data:
print('{0!s}: Downloaded: {1:s}'.format(flow_id, collected_flow_data))
container = containers.File(
name=client.data.os_info.fqdn.lower(),
path=collected_flow_data
)
self.state.StoreContainer(container)
def Process(self):
"""Collects files from a host with GRR.
Raises:
DFTimewolfError: if no files specified.
"""
threads = []
for client in self._FindClients(self.hostnames):
thread = threading.Thread(target=self._ProcessThread, args=(client, ))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
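# Illustrative only (hypothetical values): collecting hashes instead of file
# contents amounts to passing action='hash', which the _ACTIONS table above
# maps to flows_pb2.FileFinderAction.HASH:
#
#   collector = GRRFileCollector(state)
#   collector.SetUp(
#       hosts='host1.example.com', files='/etc/passwd,/var/log/auth.log',
#       use_tsk=False, reason='ticket-1234',
#       grr_server_url='https://grr.example.com', grr_username='analyst',
#       grr_password='password', action='hash')
#   collector.Process()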
class GRRFlowCollector(GRRFlow):
"""Flow collector.
Attributes:
client_id (str): GRR identifier of the client.
flow_id (str): GRR identifier of the flow to retrieve.
host (str): Target of GRR collection.
"""
def __init__(self, state):
super(GRRFlowCollector, self).__init__(state)
self.client_id = None
self.flow_id = None
self.host = None
# pylint: disable=arguments-differ
def SetUp(self,
host, flow_id,
reason, grr_server_url, grr_username, grr_password, approvers=None,
verify=True):
"""Initializes a GRR flow collector.
Args:
host (str): hostname of machine.
flow_id (str): GRR identifier of the flow to retrieve.
reason (str): justification for GRR access.
grr_server_url (str): GRR server URL.
grr_username (str): GRR username.
grr_password (str): GRR password.
approvers (Optional[str]): list of GRR approval recipients.
verify (Optional[bool]): True to indicate GRR server's x509 certificate
should be verified.
"""
super(GRRFlowCollector, self).SetUp(
reason, grr_server_url, grr_username, grr_password,
approvers=approvers, verify=verify)
self.flow_id = flow_id
self.host = host
def Process(self):
"""Downloads the results of a GRR collection flow.
Raises:
DFTimewolfError: if no files specified.
"""
client = self._GetClientByHostname(self.host)
self._AwaitFlow(client, self.flow_id)
collected_flow_data = self._DownloadFiles(client, self.flow_id)
if collected_flow_data:
print('{0:s}: Downloaded: {1:s}'.format(
self.flow_id, collected_flow_data))
container = containers.File(
name=client.data.os_info.fqdn.lower(),
path=collected_flow_data
)
self.state.StoreContainer(container)
class GRRTimelineCollector(GRRFlow):
"""Timeline collector for GRR flows.
Attributes:
root_path (str): root path.
hostnames (list[str]): FQDNs of the GRR client hosts.
"""
def __init__(self, state):
super(GRRTimelineCollector, self).__init__(state)
self._clients = []
self.root_path = None
self.hostnames = None
self._timeline_format = None
# We're overriding the behavior of GRRFlow's SetUp function to include new
# parameters.
# pylint: disable=arguments-differ
def SetUp(self,
hosts, root_path,
reason, timeline_format, grr_server_url, grr_username, grr_password,
approvers=None, verify=True):
"""Initializes a GRR timeline collector.
Args:
hosts (str): comma-separated hostnames to launch the flow on.
root_path (str): path to start the recursive timeline.
reason (str): justification for GRR access.
timeline_format (str): Timeline format (1 is BODY, 2 is RAW).
grr_server_url (str): GRR server URL.
grr_username (str): GRR username.
grr_password (str): GRR password.
approvers (Optional[str]): list of GRR approval recipients.
verify (Optional[bool]): True to indicate GRR server's x509 certificate
should be verified.
"""
super(GRRTimelineCollector, self).SetUp(
reason, grr_server_url, grr_username, grr_password,
approvers=approvers, verify=verify)
if root_path is not None:
self.root_path = root_path.strip().encode()
self.hostnames = [item.strip() for item in hosts.strip().split(',')]
self._timeline_format = int(timeline_format)
if self._timeline_format not in [1, 2]:
self.state.AddError('Timeline format must be 1 (BODY) or 2 (RAW).', True)
# TODO: change object to more specific GRR type information.
def _ProcessThread(self, client):
"""Processes a single client.
This function is used as a callback for the processing thread.
Args:
client (object): GRR client object to act on.
"""
root_path = self.root_path
if not root_path:
return
print('Timeline to start from \'{0:s}\''.format(root_path.decode()))
timeline_args = timeline_pb2.TimelineArgs(root=root_path,)
flow_id = self._LaunchFlow(client, 'TimelineFlow', timeline_args)
self._AwaitFlow(client, flow_id)
collected_flow_data = self._DownloadTimeline(client, flow_id)
if collected_flow_data:
print('{0!s}: Downloaded: {1:s}'.format(flow_id, collected_flow_data))
container = containers.File(
name=client.data.os_info.fqdn.lower(),
path=collected_flow_data
)
self.state.StoreContainer(container)
def Process(self):
"""Collects a timeline from a host with GRR.
Raises:
DFTimewolfError: if no files specified.
"""
threads = []
for client in self._FindClients(self.hostnames):
thread = threading.Thread(target=self._ProcessThread, args=(client, ))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
def _DownloadTimeline(self, client, flow_id):
"""Download a timeline in BODY format from the specified flow.
Args:
client (object): GRR Client object to which to download flow data from.
flow_id (str): GRR identifier of the flow.
Returns:
str: path of downloaded files.
"""
extension = 'body' if self._timeline_format == 1 else 'raw'
output_file_path = os.path.join(
self.output_path, '.'.join((flow_id, extension)))
if os.path.exists(output_file_path):
print('{0:s} already exists: Skipping'.format(output_file_path))
return None
flow = client.Flow(flow_id)
timeline = flow.GetCollectedTimeline(self._timeline_format)
timeline.WriteToFile(output_file_path)
return output_file_path
modules_manager.ModulesManager.RegisterModules([
GRRArtifactCollector,
GRRFileCollector,
GRRFlowCollector,
GRRTimelineCollector])
|