# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides an interface for working with multiple event files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import six
import tensorflow as tf
from tensorboard.backend.event_processing import directory_watcher
from tensorboard.backend.event_processing import event_accumulator
from tensorboard.backend.event_processing import io_wrapper
class EventMultiplexer(object):
"""An `EventMultiplexer` manages access to multiple `EventAccumulator`s.
Each `EventAccumulator` is associated with a `run`, which is a self-contained
TensorFlow execution. The `EventMultiplexer` provides methods for extracting
information about events from multiple `run`s.
Example usage for loading specific runs from files:
```python
x = EventMultiplexer({'run1': 'path/to/run1', 'run2': 'path/to/run2'})
x.Reload()
```
Example usage for loading a directory where each subdirectory is a run
```python
  (e.g.) /parent/directory/path/
/parent/directory/path/run1/
/parent/directory/path/run1/events.out.tfevents.1001
/parent/directory/path/run1/events.out.tfevents.1002
/parent/directory/path/run2/
/parent/directory/path/run2/events.out.tfevents.9232
/parent/directory/path/run3/
/parent/directory/path/run3/events.out.tfevents.9232
x = EventMultiplexer().AddRunsFromDirectory('/parent/directory/path')
(which is equivalent to:)
  x = EventMultiplexer({'run1': '/parent/directory/path/run1', 'run2': ...})
```
If you would like to watch `/parent/directory/path`, wait for it to be created
(if necessary) and then periodically pick up new runs, use
`AutoloadingMultiplexer`
@@Tensors
"""
def __init__(self,
run_path_map=None,
size_guidance=None,
purge_orphaned_data=True):
"""Constructor for the `EventMultiplexer`.
Args:
run_path_map: Dict `{run: path}` which specifies the
name of a run, and the path to find the associated events. If it is
None, then the EventMultiplexer initializes without any runs.
size_guidance: A dictionary mapping from `tagType` to the number of items
to store for each tag of that type. See
`event_accumulator.EventAccumulator` for details.
purge_orphaned_data: Whether to discard any events that were "orphaned" by
a TensorFlow restart.
"""
tf.logging.info('Event Multiplexer initializing.')
self._accumulators_mutex = threading.Lock()
self._accumulators = {}
self._paths = {}
self._reload_called = False
self._size_guidance = (size_guidance or
event_accumulator.DEFAULT_SIZE_GUIDANCE)
self.purge_orphaned_data = purge_orphaned_data
if run_path_map is not None:
      tf.logging.info('Event Multiplexer doing initialization load for %s',
run_path_map)
for (run, path) in six.iteritems(run_path_map):
self.AddRun(path, run)
tf.logging.info('Event Multiplexer done initializing')
def AddRun(self, path, name=None):
"""Add a run to the multiplexer.
If the name is not specified, it is the same as the path.
If a run by that name exists, and we are already watching the right path,
do nothing. If we are watching a different path, replace the event
accumulator.
If `Reload` has been called, it will `Reload` the newly created
accumulators.
Args:
path: Path to the event files (or event directory) for given run.
name: Name of the run to add. If not provided, is set to path.
Returns:
The `EventMultiplexer`.
"""
name = name or path
accumulator = None
with self._accumulators_mutex:
if name not in self._accumulators or self._paths[name] != path:
if name in self._paths and self._paths[name] != path:
# TODO(@dandelionmane) - Make it impossible to overwrite an old path
# with a new path (just give the new path a distinct name)
tf.logging.warning('Conflict for name %s: old path %s, new path %s',
name, self._paths[name], path)
tf.logging.info('Constructing EventAccumulator for %s', path)
accumulator = event_accumulator.EventAccumulator(
path,
size_guidance=self._size_guidance,
purge_orphaned_data=self.purge_orphaned_data)
self._accumulators[name] = accumulator
self._paths[name] = path
if accumulator:
if self._reload_called:
accumulator.Reload()
return self
def AddRunsFromDirectory(self, path, name=None):
"""Load runs from a directory; recursively walks subdirectories.
If path doesn't exist, no-op. This ensures that it is safe to call
`AddRunsFromDirectory` multiple times, even before the directory is made.
If path is a directory, load event files in the directory (if any exist) and
    recursively call AddRunsFromDirectory on any subdirectories. This means you
can call AddRunsFromDirectory at the root of a tree of event logs and
TensorBoard will load them all.
If the `EventMultiplexer` is already loaded this will cause
the newly created accumulators to `Reload()`.
Args:
path: A string path to a directory to load runs from.
name: Optionally, what name to apply to the runs. If name is provided
and the directory contains run subdirectories, the name of each subrun
is the concatenation of the parent name and the subdirectory name. If
        name is provided and the directory contains event files, then a run
        named "name" is added, containing the events from the path.
Raises:
ValueError: If the path exists and isn't a directory.
Returns:
The `EventMultiplexer`.
"""
tf.logging.info('Starting AddRunsFromDirectory: %s', path)
for subdir in GetLogdirSubdirectories(path):
tf.logging.info('Adding events from directory %s', subdir)
rpath = os.path.relpath(subdir, path)
subname = os.path.join(name, rpath) if name else rpath
self.AddRun(subdir, name=subname)
tf.logging.info('Done with AddRunsFromDirectory: %s', path)
return self
def Reload(self):
"""Call `Reload` on every `EventAccumulator`."""
tf.logging.info('Beginning EventMultiplexer.Reload()')
self._reload_called = True
    # Build a list so we're safe even if the list of accumulators is modified
    # while we're reloading.
with self._accumulators_mutex:
items = list(self._accumulators.items())
names_to_delete = set()
for name, accumulator in items:
try:
accumulator.Reload()
except (OSError, IOError) as e:
tf.logging.error("Unable to reload accumulator '%s': %s", name, e)
except directory_watcher.DirectoryDeletedError:
names_to_delete.add(name)
with self._accumulators_mutex:
for name in names_to_delete:
tf.logging.warning("Deleting accumulator '%s'", name)
del self._accumulators[name]
tf.logging.info('Finished with EventMultiplexer.Reload()')
return self
def PluginAssets(self, plugin_name):
"""Get index of runs and assets for a given plugin.
Args:
plugin_name: Name of the plugin we are checking for.
Returns:
A dictionary that maps from run_name to a list of plugin
assets for that run.
"""
with self._accumulators_mutex:
# To avoid nested locks, we construct a copy of the run-accumulator map
items = list(six.iteritems(self._accumulators))
return {run: accum.PluginAssets(plugin_name) for run, accum in items}
def RetrievePluginAsset(self, run, plugin_name, asset_name):
"""Return the contents for a specific plugin asset from a run.
Args:
run: The string name of the run.
plugin_name: The string name of a plugin.
asset_name: The string name of an asset.
Returns:
The string contents of the plugin asset.
Raises:
KeyError: If the asset is not available.
"""
accumulator = self.GetAccumulator(run)
return accumulator.RetrievePluginAsset(plugin_name, asset_name)
def FirstEventTimestamp(self, run):
"""Return the timestamp of the first event of the given run.
This may perform I/O if no events have been loaded yet for the run.
Args:
run: A string name of the run for which the timestamp is retrieved.
Returns:
The wall_time of the first event of the run, which will typically be
seconds since the epoch.
Raises:
KeyError: If the run is not found.
ValueError: If the run has no events loaded and there are no events on
disk to load.
"""
accumulator = self.GetAccumulator(run)
return accumulator.FirstEventTimestamp()
def Scalars(self, run, tag):
"""Retrieve the scalar events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.ScalarEvents`.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Scalars(tag)
def Graph(self, run):
"""Retrieve the graph associated with the provided run.
Args:
run: A string name of a run to load the graph for.
Raises:
KeyError: If the run is not found.
ValueError: If the run does not have an associated graph.
Returns:
The `GraphDef` protobuf data structure.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Graph()
def MetaGraph(self, run):
"""Retrieve the metagraph associated with the provided run.
Args:
run: A string name of a run to load the graph for.
Raises:
KeyError: If the run is not found.
ValueError: If the run does not have an associated graph.
Returns:
The `MetaGraphDef` protobuf data structure.
"""
accumulator = self.GetAccumulator(run)
return accumulator.MetaGraph()
def RunMetadata(self, run, tag):
"""Get the session.run() metadata associated with a TensorFlow run and tag.
Args:
run: A string name of a TensorFlow run.
tag: A string name of the tag associated with a particular session.run().
Raises:
KeyError: If the run is not found, or the tag is not available for the
given run.
Returns:
The metadata in the form of `RunMetadata` protobuf data structure.
"""
accumulator = self.GetAccumulator(run)
return accumulator.RunMetadata(tag)
def Histograms(self, run, tag):
"""Retrieve the histogram events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.HistogramEvents`.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Histograms(tag)
def CompressedHistograms(self, run, tag):
"""Retrieve the compressed histogram events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.CompressedHistogramEvents`.
"""
accumulator = self.GetAccumulator(run)
return accumulator.CompressedHistograms(tag)
def Images(self, run, tag):
"""Retrieve the image events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.ImageEvents`.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Images(tag)
def Audio(self, run, tag):
"""Retrieve the audio events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.AudioEvents`.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Audio(tag)
def Tensors(self, run, tag):
"""Retrieve the tensor events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.TensorEvent`s.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Tensors(tag)
def PluginRunToTagToContent(self, plugin_name):
"""Returns a 2-layer dictionary of the form {run: {tag: content}}.
    The `content` referred to above is the content field of the PluginData proto
for the specified plugin within a Summary.Value proto.
Args:
plugin_name: The name of the plugin for which to fetch content.
Returns:
A dictionary of the form {run: {tag: content}}.
"""
mapping = {}
for run in self.Runs():
try:
tag_to_content = self.GetAccumulator(run).PluginTagToContent(
plugin_name)
except KeyError:
# This run lacks content for the plugin. Try the next run.
continue
mapping[run] = tag_to_content
return mapping
def SummaryMetadata(self, run, tag):
"""Return the summary metadata for the given tag on the given run.
Args:
run: A string name of the run for which summary metadata is to be
retrieved.
tag: A string name of the tag whose summary metadata is to be
retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
A `tf.SummaryMetadata` protobuf.
"""
accumulator = self.GetAccumulator(run)
return accumulator.SummaryMetadata(tag)
def Runs(self):
"""Return all the run names in the `EventMultiplexer`.
Returns:
```
{runName: { images: [tag1, tag2, tag3],
scalarValues: [tagA, tagB, tagC],
histograms: [tagX, tagY, tagZ],
compressedHistograms: [tagX, tagY, tagZ],
graph: true, meta_graph: true}}
```
"""
with self._accumulators_mutex:
# To avoid nested locks, we construct a copy of the run-accumulator map
items = list(six.iteritems(self._accumulators))
return {run_name: accumulator.Tags() for run_name, accumulator in items}
def RunPaths(self):
"""Returns a dict mapping run names to event file paths."""
return self._paths
def GetAccumulator(self, run):
"""Returns EventAccumulator for a given run.
Args:
run: String name of run.
Returns:
An EventAccumulator object.
Raises:
KeyError: If run does not exist.
"""
with self._accumulators_mutex:
return self._accumulators[run]
def GetLogdirSubdirectories(path):
"""Returns subdirectories with event files on path."""
if tf.gfile.Exists(path) and not tf.gfile.IsDirectory(path):
raise ValueError('GetLogdirSubdirectories: path exists and is not a '
'directory, %s' % path)
# ListRecursively just yields nothing if the path doesn't exist.
return (
subdir
for (subdir, files) in io_wrapper.ListRecursively(path)
if list(filter(event_accumulator.IsTensorFlowEventsFile, files))
)
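# A minimal usage sketch (the logdir, run name and tag name below are
# hypothetical):
#
#   multiplexer = EventMultiplexer()
#   multiplexer.AddRunsFromDirectory('/tmp/logdir')  # one subdirectory per run
#   multiplexer.Reload()                             # read event files from disk
#   print(multiplexer.Runs())                        # {run_name: {tag_type: [tags]}}
#   events = multiplexer.Scalars('run1', 'loss')     # list of ScalarEvents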
#
# Tool to add an OATH token to the yhsm-validation-server database.
#
# Copyright (c) 2011 Yubico AB
# See the file COPYING for licence statement.
#
import sys
import struct
import sqlite3
import argparse
import pyhsm
import pyhsm.oath_hotp
from hashlib import sha1
default_device = "/dev/ttyACM0"
default_db_file = "/var/yubico/yhsm-validation-server.db"
def parse_args():
"""
Parse the command line arguments
"""
global default_device
parser = argparse.ArgumentParser(description = 'Initialize OATH token for use with yhsm-validation-server',
add_help=True,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument('-D', '--device',
dest='device',
default=default_device,
required=False,
help='YubiHSM device',
)
parser.add_argument('-v', '--verbose',
dest='verbose',
action='store_true', default=False,
help='Enable verbose operation',
)
parser.add_argument('--debug',
dest='debug',
action='store_true', default=False,
help='Enable debug operation',
)
parser.add_argument('--force',
dest='force',
action='store_true', default=False,
help='Overwrite any present entry',
)
parser.add_argument('--key-handle',
dest='key_handle',
required=True,
help='Key handle to create AEAD',
metavar='HANDLE',
)
parser.add_argument('--uid',
dest='uid',
required=True,
help='User ID',
metavar='STR',
)
parser.add_argument('--oath-c',
dest='oath_c',
required=False,
default = 0,
help='Initial OATH counter value',
metavar='INT',
)
parser.add_argument('--test-oath-window',
dest='look_ahead',
required=False,
default = 10,
help='Number of codes to search with --test-code',
metavar='INT',
)
parser.add_argument('--test-code',
dest='test_code',
type=int, required=False,
help='Optional OTP from token for verification',
metavar='INT',
)
parser.add_argument('--oath-k',
dest='oath_k',
required=False,
help='The secret key of the token, hex encoded',
metavar='HEXSTR',
)
parser.add_argument('--db-file',
dest='db_file',
default=default_db_file,
required=False,
help='DB file for storing AEAD\'s for --pwhash and --oath in the yhsm-validation-server',
metavar='FN',
)
args = parser.parse_args()
return args
def args_fixup(args):
keyhandles_fixup(args)
def keyhandles_fixup(args):
args.key_handle = pyhsm.util.key_handle_to_int(args.key_handle)
def generate_aead(hsm, args):
""" Protect the oath-k in an AEAD. """
key = get_oath_k(args)
# Enabled flags 00010000 = YSM_HMAC_SHA1_GENERATE
flags = struct.pack("< I", 0x10000)
hsm.load_secret(key + flags)
nonce = hsm.get_nonce().nonce
aead = hsm.generate_aead(nonce, args.key_handle)
if args.debug:
print "AEAD: %s (%s)" % (aead.data.encode('hex'), aead)
return nonce, aead
def validate_oath_c(hsm, args, nonce, aead):
if args.test_code:
if args.verbose:
print "Trying to validate the OATH counter value in the range %i..%i." \
% (args.oath_c, args.oath_c + args.look_ahead)
counter = pyhsm.oath_hotp.search_for_oath_code(hsm, args.key_handle, nonce, aead, \
args.oath_c, args.test_code, args.look_ahead \
)
if type(counter) != int:
sys.stderr.write("Failed validating OTP %s (in range %i..%i) using supplied key.\n" \
% (args.test_code, args.oath_c, args.oath_c + args.look_ahead))
sys.exit(1)
if args.verbose:
print "OATH C==%i validated with code %s" % (counter - 1, args.test_code)
return counter
return args.oath_c
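# For reference only (an illustrative sketch, not used by this tool): the OTPs
# searched for above follow RFC 4226 HOTP. The HMAC-SHA1 step is normally done
# inside the YubiHSM using the AEAD-protected key; a pure-software equivalent
# looks roughly like this.
def _hotp_reference(key, counter, digits=6):
    """ Compute an RFC 4226 HOTP code in software (illustration only). """
    import hmac
    digest = hmac.new(key, struct.pack("> Q", counter), sha1).digest()
    # Dynamic truncation: low 4 bits of the last byte give the offset.
    offset = ord(digest[19:20]) & 0xf
    code = struct.unpack("> I", digest[offset:offset + 4])[0] & 0x7fffffff
    return code % (10 ** digits)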
def get_oath_k(args):
""" Get the OATH K value (secret key), either from args or by prompting. """
if args.oath_k:
decoded = args.oath_k.decode('hex')
else:
t = raw_input("Enter OATH key (hex encoded) : ")
decoded = t.decode('hex')
if len(decoded) > 20:
decoded = sha1(decoded).digest()
decoded = decoded.ljust(20, '\0')
return decoded
class ValOathDb():
""" Provides access to database with AEAD's and other information. """
def __init__(self, filename):
self.filename = filename
self.conn = sqlite3.connect(self.filename)
self.create_table()
def create_table(self):
c = self.conn.cursor()
c.execute("CREATE TABLE IF NOT EXISTS oath " \
"(key TEXT PRIMARY KEY, nonce TEXT, key_handle INTEGER, aead TEXT, oath_C INTEGER, oath_T INTEGER)")
def add(self, entry):
""" Add entry to database. """
c = self.conn.cursor()
c.execute("INSERT INTO oath (key, aead, nonce, key_handle, oath_C, oath_T) VALUES (?, ?, ?, ?, ?, ?)",
(entry.data["key"], \
entry.data["aead"], \
entry.data["nonce"], \
entry.data["key_handle"], \
entry.data["oath_C"], \
entry.data["oath_T"],))
self.conn.commit()
return c.rowcount == 1
def delete(self, entry):
""" Delete entry from database. """
c = self.conn.cursor()
c.execute("DELETE FROM oath WHERE key = ?", (entry.data["key"],))
class ValOathEntry():
""" Class to hold a row of ValOathDb. """
def __init__(self, row):
if row:
self.data = row
def store_oath_entry(args, nonce, aead, oath_c):
""" Store the AEAD in the database. """
data = {"key": args.uid,
"aead": aead.data.encode('hex'),
"nonce": nonce.encode('hex'),
"key_handle": args.key_handle,
"oath_C": oath_c,
"oath_T": None,
}
entry = ValOathEntry(data)
db = ValOathDb(args.db_file)
try:
if args.force:
db.delete(entry)
db.add(entry)
except sqlite3.IntegrityError, e:
sys.stderr.write("ERROR: %s\n" % (e))
return False
return True
def main():
args = parse_args()
args_fixup(args)
print "Key handle : %s" % (args.key_handle)
print "YHSM device : %s" % (args.device)
print ""
hsm = pyhsm.YHSM(device = args.device, debug=args.debug)
nonce, aead = generate_aead(hsm, args)
oath_c = validate_oath_c(hsm, args, nonce, aead)
if not store_oath_entry(args, nonce, aead, oath_c):
return 1
if __name__ == '__main__':
sys.exit(main())
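# Example invocation (all values hypothetical; the installed script name may
# differ). The OATH key shown is the RFC 4226 test secret, whose OTP at
# counter 0 is 755224:
#
#   yhsm-init-oath-token --key-handle 1 --uid user@example.org \
#       --oath-k 3132333435363738393031323334353637383930 \
#       --test-code 755224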
# Copyright 2002-2011 Nick Mathewson. See LICENSE for licensing information.
"""mixminion.MMTPClient
This module contains a single, synchronous implementation of the client
side of the Mixminion Transfer protocol. You can use this client to
upload packets to any conforming Mixminion server.
(We don't use this module for transferring packets between servers;
in fact, MMTPServer makes it redundant. We only keep this module
around [A] so that clients have an easy (blocking) interface to
introduce packets into the system, and [B] so that we've got an
easy-to-verify reference implementation of the protocol.)
"""
__all__ = [ "MMTPClientConnection", "sendPackets", "DeliverableMessage" ]
import logging
import socket
import sys
import time
import mixminion._minionlib as _ml
import mixminion.NetUtils
import mixminion.ServerInfo
import mixminion.TLSConnection
from mixminion.Crypto import sha1, getCommonPRNG
from mixminion.Common import MixProtocolError, MixProtocolReject, \
MixProtocolBadAuth, MixError, formatBase64, stringContains, \
TimeoutError
from mixminion.Packet import IPV4Info, MMTPHostInfo
log = logging.getLogger(__name__)
def _noop(*k,**v): pass
class EventStatsDummy:
def __getattr__(self,a):
return _noop
EventStats = EventStatsDummy()
EventStats.elog = EventStats
def useEventStats():
import mixminion.server.EventStats
global EventStats
EventStats = mixminion.server.EventStats
class DeliverableMessage:
"""Interface to be implemented by messages deliverable by MMTP"""
def getContents(self):
raise NotImplementedError
def isJunk(self):
raise NotImplementedError
def succeeded(self):
raise NotImplementedError
def failed(self,retriable=0):
raise NotImplementedError
class MMTPClientConnection(mixminion.TLSConnection.TLSConnection):
"""A nonblocking MMTP connection sending packets and padding to a single
server."""
# Which MMTP versions do we understand?
PROTOCOL_VERSIONS = ['0.3']
# If we've written WRITEAHEAD packets without receiving any acks, we wait
# for an ack before sending any more.
WRITEAHEAD = 6
# Length of a single transmission unit (control string, packet, checksum)
MESSAGE_LEN = 6 + (1<<15) + 20
# Length of a single acknowledgment (control string, digest)
ACK_LEN = 10+20
## Fields:
# targetAddr, targetPort, targetKeyID: the address and keyid of the
# server we're trying to connect to.
# certCache: an instance of PeerCertificateCache to use to check the
# peer server's certificate
# packets: a list of DeliverableMessage objects that have not yet been
# sent to the TLS connection, in the order they should be sent.
# pendingPackets: a list of DeliverableMessage objects that have been
# sent to the TLS connection, but which have not yet been acknowledged.
# nPacketsTotal: total number of packets we've ever been asked to send.
# nPacketsSent: total number of packets sent across the TLS connection
# nPacketsAcked: total number of acks received from the TLS connection
# expectedAcks: list of acceptAck,rejectAck tuples for the packets
# that we've sent but haven't gotten acks for.
    # _isConnected: flag: true if the TLS connection has been completed,
# and no errors have been encountered.
# _isFailed: flag: has this connection encountered any errors?
# _isAlive: flag: if we put another packet on this connection, will the
# packet maybe get delivered?
####
# External interface
####
def __init__(self, targetFamily, targetAddr, targetPort, targetKeyID,
serverName=None, context=None, certCache=None):
"""Initialize a new MMTPClientConnection."""
assert targetFamily in (mixminion.NetUtils.AF_INET,
mixminion.NetUtils.AF_INET6)
if context is None:
context = _ml.TLSContext_new()
if serverName is None:
serverName = mixminion.ServerInfo.displayServerByRouting(
IPV4Info(targetAddr, targetPort, targetKeyID))
if certCache is None:
certCache = PeerCertificateCache()
self.targetAddr = targetAddr
self.targetPort = targetPort
sock = socket.socket(targetFamily, socket.SOCK_STREAM)
serverName += " (fd %s)"%sock.fileno()
sock.setblocking(0)
try:
sock.connect((targetAddr, targetPort))
except socket.error, e:
# This will always raise an error, since we're nonblocking. That's
# okay... but it had better be EINPROGRESS or the local equivalent.
if e[0] not in mixminion.NetUtils.IN_PROGRESS_ERRNOS:
raise e
tls = context.sock(sock)
mixminion.TLSConnection.TLSConnection.__init__(self, tls, sock,
serverName)
if targetKeyID != '\x00' * 20:
self.targetKeyID = targetKeyID
else:
self.targetKeyID = None
self.certCache = certCache
self.packets = []
self.pendingPackets = []
self.expectedAcks = []
self.nPacketsSent = self.nPacketsAcked = self.nPacketsTotal =0
self._isConnected = 0
self._isFailed = 0
self._isAlive = 1
EventStats.elog.attemptedConnect()
log.debug("Opening client connection to %s",self.address)
self.beginConnecting()
def addPacket(self, deliverableMessage):
"""Queue 'deliverableMessage' for transmission. When it has been
acknowledged, deliverableMessage.succeeded will be called. On
failure, deliverableMessage.failed will be called."""
assert hasattr(deliverableMessage, 'getContents')
self.packets.append(deliverableMessage)
self.nPacketsTotal += 1
# If we're connected, maybe start sending the packet we just added.
self._updateRWState()
####
# Implementation
####
def _startSendingNextPacket(self):
"Helper: begin transmitting the next available packet."
# There _is_ a next available packet, right?
assert self.packets and self._isConnected
pkt = self.packets.pop(0)
if pkt.isJunk():
control = "JUNK\r\n"
serverControl = "RECEIVED\r\n"
hashExtra = "JUNK"
serverHashExtra = "RECEIVED JUNK"
else:
control = "SEND\r\n"
serverControl = "RECEIVED\r\n"
hashExtra = "SEND"
serverHashExtra = "RECEIVED"
EventStats.elog.attemptedRelay()
m = pkt.getContents()
if m == 'RENEGOTIATE':
# Renegotiate has been removed from the spec.
return
data = "".join([control, m, sha1(m+hashExtra)])
assert len(data) == self.MESSAGE_LEN
acceptedAck = serverControl + sha1(m+serverHashExtra)
rejectedAck = "REJECTED\r\n" + sha1(m+"REJECTED")
assert len(acceptedAck) == len(rejectedAck) == self.ACK_LEN
self.expectedAcks.append( (acceptedAck, rejectedAck) )
self.pendingPackets.append(pkt)
self.beginWriting(data)
self.nPacketsSent += 1
def _updateRWState(self):
"""Helper: if we have any queued packets that haven't been sent yet,
and we aren't waiting for WRITEAHEAD acks, and we're connected,
start sending the pending packets.
"""
if not self._isConnected: return
while self.nPacketsSent < self.nPacketsAcked + self.WRITEAHEAD:
if not self.packets:
break
log.trace("Queueing new packet for %s",self.address)
self._startSendingNextPacket()
if self.nPacketsAcked == self.nPacketsSent:
log.debug("Successfully relayed all packets to %s",self.address)
self.allPacketsSent()
self._isConnected = 0
self._isAlive = 0
self.startShutdown()
def _failPendingPackets(self):
"Helper: tell all unacknowledged packets to fail."
self._isConnected = 0
self._isFailed = 1
self._isAlive = 0
pkts = self.pendingPackets + self.packets
self.pendingPackets = []
self.packets = []
for p in pkts:
if p.isJunk():
EventStats.elog.failedRelay()
p.failed(1)
####
# Implementation: hooks
####
def onConnected(self):
log.debug("Completed MMTP client connection to %s",self.address)
# Is the certificate correct?
try:
self.certCache.check(self.tls, self.targetKeyID, self.address)
except MixProtocolBadAuth, e:
log.warn("Certificate error: %s. Shutting down connection.", e)
self._failPendingPackets()
self.startShutdown()
return
else:
log.debug("KeyID is valid from %s", self.address)
EventStats.elog.successfulConnect()
# The certificate is fine; start protocol negotiation.
self.beginWriting("MMTP %s\r\n" % ",".join(self.PROTOCOL_VERSIONS))
self.onWrite = self.onProtocolWritten
def onProtocolWritten(self,n):
if self.outbuf:
# Not done writing outgoing data.
return
log.debug("Sent MMTP protocol string to %s", self.address)
self.stopWriting()
self.beginReading()
self.onRead = self.onProtocolRead
def onProtocolRead(self):
# Pull the contents of the buffer up to the first CRLF
s = self.getInbufLine(4096,clear=1)
if s is None:
# We have <4096 bytes, and no CRLF yet
return
elif s == -1:
# We got 4096 bytes with no CRLF, or a CRLF with more data
# after it.
self._failPendingPackets()
self.startShutdown()
return
# Find which protocol the server chose.
self.protocol = None
for p in self.PROTOCOL_VERSIONS:
if s == "MMTP %s\r\n"%p:
self.protocol = p
break
if not self.protocol:
log.warn("Protocol negotiation failed with %s", self.address)
self._failPendingPackets()
self.startShutdown()
return
log.debug("MMTP protocol negotiated with %s: version %s",
self.address, self.protocol)
# Now that we're connected, optimize for throughput.
mixminion.NetUtils.optimizeThroughput(self.sock)
self.onRead = self.onDataRead
self.onWrite = self.onDataWritten
self.beginReading()
self._isConnected = 1
# Now that we're connected, start sending packets.
self._updateRWState()
def onDataRead(self):
# We got some data from the server: it'll be 0 or more acks.
if self.inbuflen < self.ACK_LEN:
# If we have no acks at all, do nothing.
return
while self.inbuflen >= self.ACK_LEN:
if not self.expectedAcks:
log.warn("Received acknowledgment from %s with no corresponding message", self.address)
self._failPendingPackets()
self.startShutdown()
return
ack = self.getInbuf(self.ACK_LEN, clear=1)
good, bad = self.expectedAcks.pop(0)
if ack == good:
log.debug("Packet delivered to %s",self.address)
self.nPacketsAcked += 1
if not self.pendingPackets[0].isJunk():
EventStats.elog.successfulRelay()
self.pendingPackets[0].succeeded()
del self.pendingPackets[0]
elif ack == bad:
log.warn("Packet rejected by %s", self.address)
self.nPacketsAcked += 1
if not self.pendingPackets[0].isJunk():
EventStats.elog.failedRelay()
self.pendingPackets[0].failed(1)
del self.pendingPackets[0]
else:
# The control string and digest are wrong for an accepted
# or rejected packet!
log.warn("Bad acknowledgement received from %s",self.address)
self._failPendingPackets()
self.startShutdown()
return
# Start sending more packets, if we were waiting for an ACK to do so.
self._updateRWState()
def onDataWritten(self,n):
# If we wrote some data, maybe we'll be ready to write more.
self._updateRWState()
def onTLSError(self):
# If we got an error, fail all our packets and don't accept any more.
if not self._isConnected:
EventStats.elog.failedConnect()
self._isConnected = 0
self._failPendingPackets()
def onTimeout(self):
self.onTLSError()
def onClosed(self): pass
def doneWriting(self): pass
def receivedShutdown(self):
log.warn("Received unexpected shutdown from %s", self.address)
self._failPendingPackets()
def shutdownFinished(self): pass
def allPacketsSent(self):
"""Hook: called when we've received acks for all our pending packets"""
pass
def getAddr(self):
"""Return a 3-tuple of address,port,keyid for this connection"""
return self.targetAddr, self.targetPort, self.targetKeyID
def isActive(self):
"""Return true iff packets sent with this connection may be delivered.
"""
return self._isAlive
class DeliverableString(DeliverableMessage):
"""Subclass of DeliverableMessage suitable for use by ClientMain and
sendPackets. Sends str(s) for some object s; invokes a callback on
success."""
def __init__(self, s=None, isJunk=0, callback=None):
if isJunk:
self.s = getCommonPRNG().getBytes(1<<15)
else:
self.s = s
self.j = isJunk
self.cb = callback
self._failed = 0
self._succeeded = 0
def getContents(self):
return str(self.s)
def isJunk(self):
return self.j
def succeeded(self):
self.s = None
if self.cb is not None:
self.cb()
self._succeeded = 1
def failed(self,retriable):
self.s = None
self._failed = 1
def sendPackets(routing, packetList, timeout=300, callback=None):
"""Sends a list of packets to a server. Raise MixProtocolError on
failure.
routing -- an instance of mixminion.Packet.IPV4Info or
mixminion.Packet.MMTPHostInfo.
If routing.keyinfo == '\000'*20, we ignore the server's
keyid.
packetList -- a list of 32KB packets and control strings. Control
strings must be one of "JUNK" to send a 32KB padding chunk,
or "RENEGOTIATE" to renegotiate the connection key.
       timeout -- None, or a number of seconds to wait for data
on the connection before raising TimeoutError.
callback -- None, or a function to call with a index into packetList
after each successful packet delivery.
"""
# Find out where we're connecting to.
serverName = mixminion.ServerInfo.displayServerByRouting(routing)
if isinstance(routing, IPV4Info):
family, addr = socket.AF_INET, routing.ip
else:
assert isinstance(routing, MMTPHostInfo)
log.trace("Looking up %s...",routing.hostname)
family, addr, _ = mixminion.NetUtils.getIP(routing.hostname)
if family == "NOENT":
raise MixProtocolError("Couldn't resolve hostname %s: %s" % (
routing.hostname, addr))
# Create an MMTPClientConnection
try:
con = MMTPClientConnection(
family, addr, routing.port, routing.keyinfo, serverName=serverName)
except socket.error, e:
raise MixProtocolError(str(e))
# Queue the items on the list.
deliverables = []
for idx in xrange(len(packetList)):
p = packetList[idx]
if p == 'JUNK':
pkt = DeliverableString(isJunk=1)
elif p == 'RENEGOTIATE':
continue #XXXX no longer supported.
else:
if callback is not None:
def cb(idx=idx,callback=callback): callback(idx)
else:
cb = None
pkt = DeliverableString(s=p,callback=cb)
deliverables.append(pkt)
con.addPacket(pkt)
# Use select to run the connection until it's done.
import select
fd = con.fileno()
wr,ww,isopen = con.getStatus()
while isopen:
if wr:
rfds = [fd]
else:
rfds = []
if ww:
wfds = [fd]
else:
wfds = []
if ww==2:
xfds = [fd]
else:
xfds = []
rfds,wfds,xfds=select.select(rfds,wfds,xfds,3)
now = time.time()
wr,ww,isopen,_=con.process(fd in rfds, fd in wfds, 0)
if isopen:
if con.tryTimeout(now-timeout):
isopen = 0
# If anything wasn't delivered, raise MixProtocolError.
for d in deliverables:
if d._failed:
raise MixProtocolError("Error occurred while delivering packets to %s"%
serverName)
# If the connection failed, raise MixProtocolError.
if con._isFailed:
raise MixProtocolError("Error occurred on connection to %s"%serverName)
def pingServer(routing, timeout=60):
"""Try to connect to a server and send a junk packet.
May raise MixProtocolBadAuth, or other MixProtocolError if server
isn't up."""
sendPackets(routing, ["JUNK"], timeout=timeout)
class PeerCertificateCache:
"""A PeerCertificateCache validates certificate chains from MMTP servers,
and remembers which chains we've already seen and validated."""
## Fields
# cache: A map from peer (temporary) KeyID's to a (signing) KeyID.
def __init__(self):
self.cache = {}
def check(self, tls, targetKeyID, serverName):
"""Check whether the certificate chain on the TLS connection 'tls'
is valid, current, and matches the keyID 'targetKeyID'. If so,
return. If not, raise MixProtocolBadAuth. Display all messages
using the server 'serverName'.
"""
# First, make sure the certificate is neither premature nor expired.
try:
tls.check_cert_alive()
except _ml.TLSError, e:
s = str(e)
skewed=0
notBefore,notAfter = tls.get_cert_lifetime()
# XXXX 'stringContains' is not the best possible check here...
if stringContains(s, "expired"):
s += " [expired at %s]"%notAfter
skewed = 1
elif stringContains(s,"not yet valid"):
s += " [not valid until %s]"%notBefore
skewed = 1
if skewed:
s +=" (One of you may have a skewed clock or wrong time zone)"
raise MixProtocolBadAuth("Invalid certificate from %s: %s " % (
serverName, s))
# If we don't care whom we're talking to, we don't need to check
# them out.
if targetKeyID is None:
return
# Get the KeyID for the peer (temporary) key.
hashed_peer_pk = sha1(tls.get_peer_cert_pk().encode_key(public=1))
# Before 0.0.4alpha, a server's keyID was a hash of its current
# TLS public key. In 0.0.4alpha, we allowed this for backward
# compatibility. As of 0.0.4alpha2, since we've dropped backward
# compatibility with earlier packet formats, we drop certificate
# compatibility as well.
if targetKeyID == hashed_peer_pk:
raise MixProtocolBadAuth(
"Pre-0.0.4 (non-rotatable) certificate from %s" % serverName)
try:
if targetKeyID == self.cache[hashed_peer_pk]:
# We recognize the key, and have already seen it to be
# signed by the target identity.
log.trace("Got a cached certificate from %s", serverName)
return # All is well.
else:
# We recognize the key, but some other identity signed it.
raise MixProtocolBadAuth(
"Mismatch between expected and actual key ID")
except KeyError:
pass
# We haven't found an identity for this pk yet. Try to check the
# signature on it.
try:
identity = tls.verify_cert_and_get_identity_pk()
except _ml.TLSError, e:
raise MixProtocolBadAuth("Invalid KeyID (allegedly) from %s: %s"
                                     % (serverName, e))
# Okay, remember who has signed this certificate.
hashed_identity = sha1(identity.encode_key(public=1))
log.trace("Remembering valid certificate for %s", serverName)
self.cache[hashed_peer_pk] = hashed_identity
# Note: we don't need to worry about two identities signing the
# same certificate. While this *is* possible to do, it's useless:
# You could get someone else's certificate and sign it, but you
# couldn't start up a TLS connection with that certificate without
# stealing their private key too.
# Was the signer the right person?
if hashed_identity != targetKeyID:
raise MixProtocolBadAuth("Invalid KeyID for %s" % serverName)
import textwrap
import copy
from collections import OrderedDict
__all__ = ['get_header_from_yaml', 'get_yaml_from_header', 'get_yaml_from_table']
class ColumnOrderList(list):
"""
List of tuples that sorts in a specific order that makes sense for
astropy table column attributes.
"""
def sort(self, *args, **kwargs):
super().sort()
column_keys = ['name', 'unit', 'datatype', 'format', 'description', 'meta']
in_dict = dict(self)
out_list = []
for key in column_keys:
if key in in_dict:
out_list.append((key, in_dict[key]))
for key, val in self:
if key not in column_keys:
out_list.append((key, val))
# Clear list in-place
del self[:]
self.extend(out_list)
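# For example (illustrative):
#   c = ColumnOrderList([('meta', {}), ('unit', 'm'), ('name', 'a')])
#   c.sort()
#   list(c)  # -> [('name', 'a'), ('unit', 'm'), ('meta', {})]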
class ColumnDict(dict):
"""
Specialized dict subclass to represent attributes of a Column
and return items() in a preferred order. This is only for use
in generating a YAML map representation that has a fixed order.
"""
def items(self):
"""
Return items as a ColumnOrderList, which sorts in the preferred
way for column attributes.
"""
return ColumnOrderList(super().items())
def _construct_odict(load, node):
"""
Construct OrderedDict from !!omap in yaml safe load.
Source: https://gist.github.com/weaver/317164
License: Unspecified
This is the same as SafeConstructor.construct_yaml_omap(),
except the data type is changed to OrderedDict() and setitem is
used instead of append in the loop
Examples
--------
::
>>> yaml.load(''' # doctest: +SKIP
... !!omap
... - foo: bar
... - mumble: quux
... - baz: gorp
... ''')
OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
>>> yaml.load('''!!omap [ foo: bar, mumble: quux, baz : gorp ]''') # doctest: +SKIP
OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
"""
import yaml
omap = OrderedDict()
yield omap
if not isinstance(node, yaml.SequenceNode):
raise yaml.constructor.ConstructorError(
"while constructing an ordered map", node.start_mark,
f"expected a sequence, but found {node.id}", node.start_mark)
for subnode in node.value:
if not isinstance(subnode, yaml.MappingNode):
raise yaml.constructor.ConstructorError(
"while constructing an ordered map", node.start_mark,
f"expected a mapping of length 1, but found {subnode.id}",
subnode.start_mark)
if len(subnode.value) != 1:
raise yaml.constructor.ConstructorError(
"while constructing an ordered map", node.start_mark,
f"expected a single mapping item, but found {len(subnode.value)} items",
subnode.start_mark)
key_node, value_node = subnode.value[0]
key = load.construct_object(key_node)
value = load.construct_object(value_node)
omap[key] = value
def _repr_pairs(dump, tag, sequence, flow_style=None):
"""
This is the same code as BaseRepresenter.represent_sequence(),
but the value passed to dump.represent_data() in the loop is a
dictionary instead of a tuple.
Source: https://gist.github.com/weaver/317164
License: Unspecified
"""
import yaml
value = []
node = yaml.SequenceNode(tag, value, flow_style=flow_style)
if dump.alias_key is not None:
dump.represented_objects[dump.alias_key] = node
best_style = True
for (key, val) in sequence:
item = dump.represent_data({key: val})
if not (isinstance(item, yaml.ScalarNode) and not item.style):
best_style = False
value.append(item)
if flow_style is None:
if dump.default_flow_style is not None:
node.flow_style = dump.default_flow_style
else:
node.flow_style = best_style
return node
def _repr_odict(dumper, data):
"""
Represent OrderedDict in yaml dump.
Source: https://gist.github.com/weaver/317164
License: Unspecified
>>> data = OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
>>> yaml.dump(data, default_flow_style=False) # doctest: +SKIP
'!!omap\\n- foo: bar\\n- mumble: quux\\n- baz: gorp\\n'
>>> yaml.dump(data, default_flow_style=True) # doctest: +SKIP
'!!omap [foo: bar, mumble: quux, baz: gorp]\\n'
"""
return _repr_pairs(dumper, 'tag:yaml.org,2002:omap', data.items())
def _repr_column_dict(dumper, data):
"""
Represent ColumnDict in yaml dump.
This is the same as an ordinary mapping except that the keys
are written in a fixed order that makes sense for astropy table
columns.
"""
return dumper.represent_mapping('tag:yaml.org,2002:map', data)
def _get_col_attributes(col):
"""
Extract information from a column (apart from the values) that is required
to fully serialize the column.
"""
attrs = ColumnDict()
attrs['name'] = col.info.name
type_name = col.info.dtype.type.__name__
if type_name.startswith(('bytes', 'str')):
type_name = 'string'
if type_name.endswith('_'):
type_name = type_name[:-1] # string_ and bool_ lose the final _ for ECSV
attrs['datatype'] = type_name
# Set the output attributes
for attr, nontrivial, xform in (('unit', lambda x: x is not None, str),
('format', lambda x: x is not None, None),
('description', lambda x: x is not None, None),
('meta', lambda x: x, None)):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
attrs[attr] = xform(col_attr) if xform else col_attr
return attrs
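# For example (illustrative), a float64 column named 'x' with unit 'm' and no
# other attributes set yields a ColumnDict equivalent to
# {'name': 'x', 'unit': 'm', 'datatype': 'float64'}, whose items() come back in
# the fixed name/unit/datatype/... order defined by ColumnOrderList above.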
def get_yaml_from_table(table):
"""
Return lines with a YAML representation of header content from the ``table``.
Parameters
----------
table : `~astropy.table.Table` object
Table for which header content is output
Returns
-------
lines : list
List of text lines with YAML header content
"""
header = {'cols': list(table.columns.values())}
if table.meta:
header['meta'] = table.meta
return get_yaml_from_header(header)
def get_yaml_from_header(header):
"""
Return lines with a YAML representation of header content from a Table.
The ``header`` dict must contain these keys:
- 'cols' : list of table column objects (required)
- 'meta' : table 'meta' attribute (optional)
Other keys included in ``header`` will be serialized in the output YAML
representation.
Parameters
----------
header : dict
Table header content
Returns
-------
lines : list
List of text lines with YAML header content
"""
try:
import yaml
except ImportError:
raise ImportError('`import yaml` failed, PyYAML package is '
'required for serializing mixin columns')
from astropy.io.misc.yaml import AstropyDumper
class TableDumper(AstropyDumper):
"""
Custom Dumper that represents OrderedDict as an !!omap object.
"""
def represent_mapping(self, tag, mapping, flow_style=None):
"""
This is a combination of the Python 2 and 3 versions of this method
in the PyYAML library to allow the required key ordering via the
ColumnOrderList object. The Python 3 version insists on turning the
items() mapping into a list object and sorting, which results in
alphabetical order for the column keys.
"""
value = []
node = yaml.MappingNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
if hasattr(mapping, 'items'):
mapping = mapping.items()
if hasattr(mapping, 'sort'):
mapping.sort()
else:
mapping = list(mapping)
try:
mapping = sorted(mapping)
except TypeError:
pass
for item_key, item_value in mapping:
node_key = self.represent_data(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, yaml.ScalarNode) and not node_key.style):
best_style = False
if not (isinstance(node_value, yaml.ScalarNode) and not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
TableDumper.add_representer(OrderedDict, _repr_odict)
TableDumper.add_representer(ColumnDict, _repr_column_dict)
header = copy.copy(header) # Don't overwrite original
header['datatype'] = [_get_col_attributes(col) for col in header['cols']]
del header['cols']
lines = yaml.dump(header, default_flow_style=None,
Dumper=TableDumper, width=130).splitlines()
return lines
class YamlParseError(Exception):
pass
def get_header_from_yaml(lines):
"""
Get a header dict from input ``lines`` which should be valid YAML. This
input will typically be created by get_yaml_from_header. The output is a
dictionary which describes all the table and column meta.
The get_cols() method in the io/ascii/ecsv.py file should be used as a
guide to using the information when constructing a table using this
header dict information.
Parameters
----------
lines : list
List of text lines with YAML header content
Returns
-------
header : dict
Dictionary describing table and column meta
"""
try:
import yaml
except ImportError:
raise ImportError('`import yaml` failed, PyYAML package '
'is required for serializing mixin columns')
from astropy.io.misc.yaml import AstropyLoader
class TableLoader(AstropyLoader):
"""
Custom Loader that constructs OrderedDict from an !!omap object.
This does nothing but provide a namespace for adding the
custom odict constructor.
"""
TableLoader.add_constructor('tag:yaml.org,2002:omap', _construct_odict)
# Now actually load the YAML data structure into `meta`
header_yaml = textwrap.dedent('\n'.join(lines))
try:
header = yaml.load(header_yaml, Loader=TableLoader)
except Exception as err:
raise YamlParseError(str(err))
return header
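# Minimal roundtrip sketch (assumes astropy and PyYAML are installed; the table
# contents are hypothetical):
#
#   from astropy.table import Table
#   t = Table({'a': [1, 2]}, meta={'origin': 'example'})
#   lines = get_yaml_from_table(t)        # YAML header as a list of text lines
#   header = get_header_from_yaml(lines)  # dict with 'datatype' and 'meta' keys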
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import main, TestCase
from tempfile import mkstemp
from json import loads, dumps
from datetime import datetime
from os import close, remove
from os.path import exists
from tornado.web import HTTPError
import numpy.testing as npt
import pandas as pd
from qiita_core.testing import wait_for_processing_job
from qiita_db.handlers.tests.oauthbase import OauthTestingBase
import qiita_db as qdb
from qiita_db.handlers.processing_job import _get_job
class UtilTests(TestCase):
def test_get_job(self):
obs = _get_job('6d368e16-2242-4cf8-87b4-a5dc40bb890b')
exp = qdb.processing_job.ProcessingJob(
'6d368e16-2242-4cf8-87b4-a5dc40bb890b')
self.assertEqual(obs, exp)
with self.assertRaises(HTTPError):
_get_job('do-not-exist')
class JobHandlerTests(OauthTestingBase):
def test_get_job_does_not_exists(self):
obs = self.get('/qiita_db/jobs/do-not-exist', headers=self.header)
self.assertEqual(obs.code, 404)
def test_get(self):
obs = self.get('/qiita_db/jobs/6d368e16-2242-4cf8-87b4-a5dc40bb890b',
headers=self.header)
self.assertEqual(obs.code, 200)
cmd = 'Split libraries FASTQ'
params = {"max_bad_run_length": 3,
"min_per_read_length_fraction": 0.75, "sequence_max_n": 0,
"rev_comp_barcode": False,
"rev_comp_mapping_barcodes": False, "rev_comp": False,
"phred_quality_threshold": 3, "barcode_type": "golay_12",
"max_barcode_errors": 1.5, "input_data": 1,
'phred_offset': 'auto'}
exp = {'command': cmd, 'parameters': params, 'status': 'success'}
self.assertEqual(loads(obs.body), exp)
def test_get_no_header(self):
obs = self.get('/qiita_db/jobs/6d368e16-2242-4cf8-87b4-a5dc40bb890b')
self.assertEqual(obs.code, 400)
class HeartbeatHandlerTests(OauthTestingBase):
def test_post_job_does_not_exists(self):
obs = self.post('/qiita_db/jobs/do-not-exist/heartbeat/', '',
headers=self.header)
self.assertEqual(obs.code, 404)
def test_post_job_already_finished(self):
obs = self.post(
'/qiita_db/jobs/6d368e16-2242-4cf8-87b4-a5dc40bb890b/heartbeat/',
'', headers=self.header)
self.assertEqual(obs.code, 403)
self.assertEqual(obs.reason,
"Can't execute heartbeat on job: already completed")
def test_post(self):
before = datetime.now()
obs = self.post(
'/qiita_db/jobs/bcc7ebcd-39c1-43e4-af2d-822e3589f14d/heartbeat/',
'', headers=self.header)
self.assertEqual(obs.code, 200)
job = qdb.processing_job.ProcessingJob(
'bcc7ebcd-39c1-43e4-af2d-822e3589f14d')
self.assertTrue(before < job.heartbeat < datetime.now())
def test_post_no_header(self):
obs = self.post(
'/qiita_db/jobs/bcc7ebcd-39c1-43e4-af2d-822e3589f14d/heartbeat/',
'')
self.assertEqual(obs.code, 400)
def test_post_first_heartbeat(self):
before = datetime.now()
job = qdb.processing_job.ProcessingJob(
'063e553b-327c-4818-ab4a-adfe58e49860')
self.assertEqual(job.status, 'queued')
obs = self.post(
'/qiita_db/jobs/063e553b-327c-4818-ab4a-adfe58e49860/heartbeat/',
'', headers=self.header)
self.assertEqual(obs.code, 200)
self.assertTrue(before < job.heartbeat < datetime.now())
self.assertEqual(job.status, 'running')
class ActiveStepHandlerTests(OauthTestingBase):
def test_post_no_header(self):
obs = self.post(
'/qiita_db/jobs/063e553b-327c-4818-ab4a-adfe58e49860/step/', '')
self.assertEqual(obs.code, 400)
def test_post_job_does_not_exists(self):
obs = self.post('/qiita_db/jobs/do-not-exist/step/', '',
headers=self.header)
self.assertEqual(obs.code, 404)
def test_post_non_running_job(self):
payload = dumps({'step': 'Step 1 of 4: demultiplexing'})
obs = self.post(
'/qiita_db/jobs/063e553b-327c-4818-ab4a-adfe58e49860/step/',
payload, headers=self.header)
self.assertEqual(obs.code, 403)
self.assertEqual(obs.reason, "Cannot change the step of a job whose "
"status is not 'running'")
def test_post(self):
payload = dumps({'step': 'Step 1 of 4: demultiplexing'})
obs = self.post(
'/qiita_db/jobs/bcc7ebcd-39c1-43e4-af2d-822e3589f14d/step/',
payload, headers=self.header)
self.assertEqual(obs.code, 200)
job = qdb.processing_job.ProcessingJob(
'bcc7ebcd-39c1-43e4-af2d-822e3589f14d')
self.assertEqual(job.step, 'Step 1 of 4: demultiplexing')
class CompleteHandlerTests(OauthTestingBase):
def setUp(self):
self._clean_up_files = []
super(CompleteHandlerTests, self).setUp()
def tearDown(self):
super(CompleteHandlerTests, self).tearDown()
for fp in self._clean_up_files:
if exists(fp):
remove(fp)
def test_post_no_header(self):
obs = self.post(
'/qiita_db/jobs/063e553b-327c-4818-ab4a-adfe58e49860/complete/',
'')
self.assertEqual(obs.code, 400)
def test_post_job_does_not_exists(self):
obs = self.post('/qiita_db/jobs/do-not-exist/complete/', '',
headers=self.header)
self.assertEqual(obs.code, 404)
def test_post_job_not_running(self):
payload = dumps({'success': True, 'artifacts': []})
obs = self.post(
'/qiita_db/jobs/063e553b-327c-4818-ab4a-adfe58e49860/complete/',
payload, headers=self.header)
self.assertEqual(obs.code, 403)
self.assertEqual(obs.body.decode('ascii'),
"Can't complete job: not in a running state")
def test_post_job_failure(self):
pt = npt.assert_warns(
qdb.exceptions.QiitaDBWarning,
qdb.metadata_template.prep_template.PrepTemplate.create,
pd.DataFrame({'new_col': {'1.SKD6.640190': 1}}),
qdb.study.Study(1), '16S')
job = qdb.processing_job.ProcessingJob.create(
qdb.user.User('test@foo.bar'),
qdb.software.Parameters.load(
qdb.software.Command.get_validator('BIOM'),
values_dict={'template': pt.id, 'files':
dumps({'BIOM': ['file']}),
'artifact_type': 'BIOM'}))
job._set_status('running')
payload = dumps({'success': False, 'error': 'Job failure'})
obs = self.post(
'/qiita_db/jobs/%s/complete/' % job.id,
payload, headers=self.header)
self.assertEqual(obs.code, 200)
wait_for_processing_job(job.id)
self.assertEqual(job.status, 'error')
self.assertEqual(job.log,
qdb.logger.LogEntry.newest_records(numrecords=1)[0])
self.assertEqual(job.log.msg, 'Job failure')
def test_post_job_success(self):
pt = npt.assert_warns(
qdb.exceptions.QiitaDBWarning,
qdb.metadata_template.prep_template.PrepTemplate.create,
pd.DataFrame({'new_col': {'1.SKD6.640190': 1}}),
qdb.study.Study(1), '16S')
job = qdb.processing_job.ProcessingJob.create(
qdb.user.User('test@foo.bar'),
qdb.software.Parameters.load(
qdb.software.Command.get_validator('BIOM'),
values_dict={'template': pt.id, 'files':
dumps({'BIOM': ['file']}),
'artifact_type': 'BIOM'}))
job._set_status('running')
fd, fp = mkstemp(suffix='_table.biom')
close(fd)
with open(fp, 'w') as f:
f.write('\n')
self._clean_up_files.append(fp)
exp_artifact_count = qdb.util.get_count('qiita.artifact') + 1
payload = dumps(
{'success': True, 'error': '',
'artifacts': {'OTU table': {'filepaths': [(fp, 'biom')],
'artifact_type': 'BIOM'}}})
obs = self.post(
'/qiita_db/jobs/%s/complete/' % job.id,
payload, headers=self.header)
wait_for_processing_job(job.id)
self.assertEqual(obs.code, 200)
self.assertEqual(job.status, 'success')
self.assertEqual(qdb.util.get_count('qiita.artifact'),
exp_artifact_count)
def test_post_job_success_with_archive(self):
pt = npt.assert_warns(
qdb.exceptions.QiitaDBWarning,
qdb.metadata_template.prep_template.PrepTemplate.create,
pd.DataFrame({'new_col': {'1.SKD6.640190': 1}}),
qdb.study.Study(1), '16S')
job = qdb.processing_job.ProcessingJob.create(
qdb.user.User('test@foo.bar'),
qdb.software.Parameters.load(
qdb.software.Command.get_validator('BIOM'),
values_dict={'template': pt.id, 'files':
dumps({'BIOM': ['file']}),
'artifact_type': 'BIOM'}))
job._set_status('running')
fd, fp = mkstemp(suffix='_table.biom')
close(fd)
with open(fp, 'w') as f:
f.write('\n')
self._clean_up_files.append(fp)
payload = dumps(
{'success': True, 'error': '',
'artifacts': {'OTU table': {'filepaths': [(fp, 'biom')],
'artifact_type': 'BIOM'}},
'archive': {'AAAA': 'AAA', 'CCC': 'CCC'}})
obs = self.post(
'/qiita_db/jobs/%s/complete/' % job.id,
payload, headers=self.header)
wait_for_processing_job(job.id)
self.assertEqual(obs.code, 200)
class ProcessingJobAPItestHandlerTests(OauthTestingBase):
def test_post_processing_job(self):
data = {
'user': 'demo@microbio.me',
'command': dumps(['QIIME', '1.9.1', 'Pick closed-reference OTUs']),
'parameters': dumps({"reference": 1,
"sortmerna_e_value": 1,
"sortmerna_max_pos": 10000,
"similarity": 0.97,
"sortmerna_coverage": 0.97,
"threads": 1,
"input_data": 1})
}
obs = self.post('/apitest/processing_job/', headers=self.header,
data=data)
self.assertEqual(obs.code, 200)
obs = loads(obs.body)
self.assertCountEqual(obs.keys(), ['job'])
self.assertIsNotNone(obs['job'])
def test_post_processing_job_status(self):
data = {
'user': 'demo@microbio.me',
'command': dumps(['QIIME', '1.9.1', 'Pick closed-reference OTUs']),
'status': 'running',
'parameters': dumps({"reference": 1,
"sortmerna_e_value": 1,
"sortmerna_max_pos": 10000,
"similarity": 0.97,
"sortmerna_coverage": 0.97,
"threads": 1,
"input_data": 1})
}
obs = self.post('/apitest/processing_job/', headers=self.header,
data=data)
self.assertEqual(obs.code, 200)
obs = loads(obs.body)
self.assertCountEqual(obs.keys(), ['job'])
job_id = obs['job']
self.assertTrue(qdb.processing_job.ProcessingJob.exists(job_id))
self.assertEqual(qdb.processing_job.ProcessingJob(job_id).status,
'running')
if __name__ == '__main__':
main()
# -*- coding: utf-8 -*-
#
"""
Python library for the Withings API
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Withings Body metrics Services API
<https://developer.health.nokia.com/api>
Uses OAuth 1.0 to authenticate. You need to obtain a consumer key
and consumer secret from Withings by creating an application
here: <https://oauth.withings.com/partner/add>
Usage:
auth = WithingsAuth(CONSUMER_KEY, CONSUMER_SECRET)
authorize_url = auth.get_authorize_url()
print "Go to %s allow the app and copy your oauth_verifier" % authorize_url
oauth_verifier = raw_input('Please enter your oauth_verifier: ')
creds = auth.get_credentials(oauth_verifier)
client = WithingsApi(creds)
measures = client.get_measures(limit=1)
print "Your last measured weight: %skg" % measures[0].weight
"""
from __future__ import unicode_literals
__title__ = 'withings'
__version__ = '0.1'
__author__ = 'Maxime Bouroumeau-Fuseau'
__license__ = 'MIT'
__copyright__ = 'Copyright 2012 Maxime Bouroumeau-Fuseau'
__all__ = [str('WithingsCredentials'), str('WithingsAuth'), str('WithingsApi'),
str('WithingsMeasures'), str('WithingsMeasureGroup')]
import requests
from requests_oauthlib import OAuth1, OAuth1Session
import json
import datetime
class WithingsCredentials(object):
def __init__(self, access_token=None, access_token_secret=None,
consumer_key=None, consumer_secret=None, user_id=None):
self.access_token = access_token
self.access_token_secret = access_token_secret
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.user_id = user_id
class WithingsError(Exception):
STATUS_CODES = {
# Response status codes as defined in documentation
# https://developer.health.nokia.com/api/doc
0: u"Operation was successful",
247: u"The userid provided is absent, or incorrect",
250: u"The provided userid and/or Oauth credentials do not match",
286: u"No such subscription was found",
293: u"The callback URL is either absent or incorrect",
294: u"No such subscription could be deleted",
304: u"The comment is either absent or incorrect",
305: u"Too many notifications are already set",
342: u"The signature (using Oauth) is invalid",
343: u"Wrong Notification Callback Url don't exist",
601: u"Too Many Request",
2554: u"Wrong action or wrong webservice",
2555: u"An unknown error occurred",
2556: u"Service is not defined",
}
def __init__(self, status):
super(WithingsError, self).__init__(u'{}: {}'.format(status, WithingsError.STATUS_CODES[status]))
self.status = status
class WithingsAuth(object):
URL = 'https://developer.health.nokia.com/account'
def __init__(self, consumer_key, consumer_secret, callback_uri=None):
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.oauth_token = None
self.oauth_secret = None
self.callback_uri = callback_uri
def get_authorize_url(self):
oauth = OAuth1Session(self.consumer_key,
client_secret=self.consumer_secret,
callback_uri=self.callback_uri)
tokens = oauth.fetch_request_token('%s/request_token' % self.URL)
self.oauth_token = tokens['oauth_token']
self.oauth_secret = tokens['oauth_token_secret']
return oauth.authorization_url('%s/authorize' % self.URL)
def get_credentials(self, oauth_verifier):
oauth = OAuth1Session(self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.oauth_token,
resource_owner_secret=self.oauth_secret,
verifier=oauth_verifier)
tokens = oauth.fetch_access_token('%s/access_token' % self.URL)
return WithingsCredentials(access_token=tokens['oauth_token'],
access_token_secret=tokens['oauth_token_secret'],
consumer_key=self.consumer_key,
consumer_secret=self.consumer_secret,
user_id=tokens['userid'])
class WithingsApi(object):
URL = 'https://api.health.nokia.com'
def __init__(self, credentials):
self.credentials = credentials
self.oauth = OAuth1(credentials.consumer_key,
credentials.consumer_secret,
credentials.access_token,
credentials.access_token_secret,
signature_type='query')
self.client = requests.Session()
self.client.auth = self.oauth
self.client.params.update({'userid': credentials.user_id})
def request(self, service, action, params=None, method='GET'):
if params is None:
params = {}
params['action'] = action
r = self.client.request(method, '%s/%s' % (self.URL, service), params=params)
response = json.loads(r.content.decode())
if response['status'] != 0:
raise WithingsError(response['status'])
return response.get('body', None)
def get_user(self):
return self.request('user', 'getbyuserid')
def get_measures(self, **kwargs):
r = self.request('measure', 'getmeas', kwargs)
return WithingsMeasures(r)
def subscribe(self, callback_url, comment, appli=1):
params = {'callbackurl': callback_url,
'comment': comment,
'appli': appli}
self.request('notify', 'subscribe', params)
def unsubscribe(self, callback_url, appli=1):
params = {'callbackurl': callback_url, 'appli': appli}
self.request('notify', 'revoke', params)
def is_subscribed(self, callback_url, appli=1):
params = {'callbackurl': callback_url, 'appli': appli}
try:
self.request('notify', 'get', params)
return True
except WithingsError:
return False
def list_subscriptions(self, appli=1):
r = self.request('notify', 'list', {'appli': appli})
return r['profiles']
class WithingsMeasures(list):
def __init__(self, data):
super(WithingsMeasures, self).__init__([WithingsMeasureGroup(g) for g in data['measuregrps']])
self.updatetime = datetime.datetime.fromtimestamp(data['updatetime'])
class WithingsMeasureGroup(object):
MEASURE_TYPES = (('weight', 1), ('height', 4), ('fat_free_mass', 5),
('fat_ratio', 6), ('fat_mass_weight', 8),
('diastolic_blood_pressure', 9), ('systolic_blood_pressure', 10),
('heart_pulse', 11))
def __init__(self, data):
self.data = data
self.grpid = data['grpid']
self.attrib = data['attrib']
self.category = data['category']
self.date = datetime.datetime.fromtimestamp(data['date'])
self.measures = data['measures']
for n, t in self.MEASURE_TYPES:
self.__setattr__(n, self.get_measure(t))
def is_ambiguous(self):
return self.attrib == 1 or self.attrib == 4
def is_measure(self):
return self.category == 1
def is_target(self):
return self.category == 2
def get_measure(self, measure_type):
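# Withings returns each measure as an integer mantissa ('value') plus a
# power-of-ten exponent ('unit'); the real value is value * 10**unit
# (e.g. value=7214 with unit=-2 means 72.14).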
for m in self.measures:
if m['type'] == measure_type:
return m['value'] * pow(10, m['unit'])
return None
|
|
#!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvdb_api
#repository:http://github.com/dbr/tvdb_api
#license:unlicense (http://unlicense.org/)
"""Simple-to-use Python interface to The TVDB's API (www.thetvdb.com)
Example usage:
>>> from tvdb_api import Tvdb
>>> t = Tvdb()
>>> t['Lost'][4][11]['episodename']
u'Cabin Fever'
"""
__author__ = "dbr/Ben"
__version__ = "1.6.4"
import os
import urllib
import urllib2
import StringIO
import tempfile
import warnings
import logging
import datetime
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
import xml.etree.ElementTree as ElementTree
try:
import gzip
except ImportError:
gzip = None
from tvdb_cache import CacheHandler
from tvdb_ui import BaseUI, ConsoleUI
from tvdb_exceptions import (tvdb_error, tvdb_userabort, tvdb_shownotfound,
tvdb_seasonnotfound, tvdb_episodenotfound, tvdb_attributenotfound)
lastTimeout = None
def log():
return logging.getLogger("tvdb_api")
class ShowContainer(dict):
"""Simple dict that holds a series of Show instances
"""
pass
class Show(dict):
"""Holds a dict of seasons, and show data.
"""
def __init__(self):
dict.__init__(self)
self.data = {}
def __repr__(self):
return "<Show %s (containing %s seasons)>" % (
self.data.get(u'seriesname', 'instance'),
len(self)
)
def __getitem__(self, key):
if key in self:
# Key is an episode, return it
return dict.__getitem__(self, key)
if key in self.data:
# Non-numeric request is for show-data
return dict.__getitem__(self.data, key)
# Data wasn't found, raise appropriate error
if isinstance(key, int) or key.isdigit():
# Season number x was not found
raise tvdb_seasonnotfound("Could not find season %s" % (repr(key)))
else:
# If it's not numeric, it must be an attribute name, which
# doesn't exist, so attribute error.
raise tvdb_attributenotfound("Cannot find attribute %s" % (repr(key)))
def airedOn(self, date):
ret = self.search(str(date), 'firstaired')
if len(ret) == 0:
raise tvdb_episodenotfound("Could not find any episodes that aired on %s" % date)
return ret
def search(self, term = None, key = None):
"""
Search all episodes in show. Can search all data, or a specific key (for
example, episodename)
Always returns an array (can be empty). First index contains the first
match, and so on.
Each array index is an Episode() instance, so doing
search_results[0]['episodename'] will retrieve the episode name of the
first match.
Search terms are converted to lower case (unicode) strings.
# Examples
These examples assume t is an instance of Tvdb():
>>> t = Tvdb()
>>>
To search for all episodes of Scrubs with a bit of data
containing "my first day":
>>> t['Scrubs'].search("my first day")
[<Episode 01x01 - My First Day>]
>>>
Search for "My Name Is Earl" episode named "Faked His Own Death":
>>> t['My Name Is Earl'].search('Faked His Own Death', key = 'episodename')
[<Episode 01x04 - Faked His Own Death>]
>>>
To search Scrubs for all episodes with "mentor" in the episode name:
>>> t['scrubs'].search('mentor', key = 'episodename')
[<Episode 01x02 - My Mentor>, <Episode 03x15 - My Tormented Mentor>]
>>>
# Using search results
>>> results = t['Scrubs'].search("my first")
>>> print results[0]['episodename']
My First Day
>>> for x in results: print x['episodename']
My First Day
My First Step
My First Kill
>>>
"""
results = []
for cur_season in self.values():
searchresult = cur_season.search(term = term, key = key)
if len(searchresult) != 0:
results.extend(searchresult)
#end for cur_season
return results
class Season(dict):
def __init__(self, show = None):
"""The show attribute points to the parent show
"""
self.show = show
def __repr__(self):
return "<Season instance (containing %s episodes)>" % (
len(self.keys())
)
def __getitem__(self, episode_number):
if episode_number not in self:
raise tvdb_episodenotfound("Could not find episode %s" % (repr(episode_number)))
else:
return dict.__getitem__(self, episode_number)
def search(self, term = None, key = None):
"""Search all episodes in season, returns a list of matching Episode
instances.
>>> t = Tvdb()
>>> t['scrubs'][1].search('first day')
[<Episode 01x01 - My First Day>]
>>>
See Show.search documentation for further information on search
"""
results = []
for ep in self.values():
searchresult = ep.search(term = term, key = key)
if searchresult is not None:
results.append(
searchresult
)
return results
class Episode(dict):
def __init__(self, season = None):
"""The season attribute points to the parent season
"""
self.season = season
def __repr__(self):
seasno = int(self.get(u'seasonnumber', 0))
epno = int(self.get(u'episodenumber', 0))
epname = self.get(u'episodename')
if epname is not None:
return "<Episode %02dx%02d - %s>" % (seasno, epno, epname)
else:
return "<Episode %02dx%02d>" % (seasno, epno)
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
raise tvdb_attributenotfound("Cannot find attribute %s" % (repr(key)))
def search(self, term = None, key = None):
"""Search episode data for term, if it matches, return the Episode (self).
The key parameter can be used to limit the search to a specific element,
for example, episodename.
This is primarily for use by Show.search and Season.search. See
Show.search for further information on search
Simple example:
>>> e = Episode()
>>> e['episodename'] = "An Example"
>>> e.search("examp")
<Episode 00x00 - An Example>
>>>
Limiting by key:
>>> e.search("examp", key = "episodename")
<Episode 00x00 - An Example>
>>>
"""
if term is None:
raise TypeError("must supply string to search for (contents)")
term = unicode(term).lower()
for cur_key, cur_value in self.items():
cur_key, cur_value = unicode(cur_key).lower(), unicode(cur_value).lower()
if key is not None and cur_key != key:
# Do not search this key
continue
if cur_value.find( unicode(term).lower() ) > -1:
return self
#end if cur_value.find()
#end for cur_key, cur_value
class Actors(list):
"""Holds all Actor instances for a show
"""
pass
class Actor(dict):
"""Represents a single actor. Should contain..
id,
image,
name,
role,
sortorder
"""
def __repr__(self):
return "<Actor \"%s\">" % (self.get("name"))
class Tvdb:
"""Create easy-to-use interface to name of season/episode name
>>> t = Tvdb()
>>> t['Scrubs'][1][24]['episodename']
u'My Last Day'
"""
def __init__(self,
interactive = False,
select_first = False,
debug = False,
cache = True,
banners = False,
actors = False,
custom_ui = None,
language = None,
search_all_languages = False,
apikey = None,
forceConnect=False):
"""interactive (True/False):
When True, the built-in console UI is used to select the correct show.
When False, the first search result is used.
select_first (True/False):
Automatically selects the first series search result (rather
than showing the user a list of more than one series).
Is overridden by interactive = False, or specifying a custom_ui
debug (True/False) DEPRECATED:
Replaced with proper use of logging module. To show debug messages:
>>> import logging
>>> logging.basicConfig(level = logging.DEBUG)
cache (True/False/str/unicode/urllib2 opener):
Retrieved XML is persisted to disk. If True, it is stored in a
tvdb_api folder under your system's TEMP_DIR; if set to a
str/unicode instance, that is used as the cache
location. If False, caching is disabled. Can also be passed
an arbitrary Python object, which is used as a urllib2
opener, which should be created by urllib2.build_opener
banners (True/False):
Retrieves the banners for a show. These are accessed
via the _banners key of a Show(), for example:
>>> Tvdb(banners=True)['scrubs']['_banners'].keys()
['fanart', 'poster', 'series', 'season']
actors (True/False):
Retrieves a list of the actors for a show. These are accessed
via the _actors key of a Show(), for example:
>>> t = Tvdb(actors=True)
>>> t['scrubs']['_actors'][0]['name']
u'Zach Braff'
custom_ui (tvdb_ui.BaseUI subclass):
A callable subclass of tvdb_ui.BaseUI (overrides interactive option)
language (2 character language abbreviation):
The language of the returned data. Is also the language search
uses. Default is "en" (English). For full list, run..
>>> Tvdb().config['valid_languages'] #doctest: +ELLIPSIS
['da', 'fi', 'nl', ...]
search_all_languages (True/False):
By default, Tvdb will only search in the language specified using
the language option. When this is True, it will search for the
show in any language
apikey (str/unicode):
Override the default thetvdb.com API key. By default it will use
tvdb_api's own key (fine for small scripts), but you can use your
own key if desired - this is recommended if you are embedding
tvdb_api in a larger application.
See http://thetvdb.com/?tab=apiregister to get your own key
forceConnect (bool):
If true it will always try to connect to theTVDB.com even if we
recently timed out. By default it will wait one minute before
trying again, and any requests within that one minute window will
return an exception immediately.
"""
global lastTimeout
# if we're given a lastTimeout that is less than 1 min just give up
if not forceConnect and lastTimeout != None and datetime.datetime.now() - lastTimeout < datetime.timedelta(minutes=1):
raise tvdb_error("We recently timed out, so giving up early this time")
self.shows = ShowContainer() # Holds all Show classes
self.corrections = {} # Holds show-name to show_id mapping
self.config = {}
if apikey is not None:
self.config['apikey'] = apikey
else:
self.config['apikey'] = "0629B785CE550C8D" # tvdb_api's API key
self.config['debug_enabled'] = debug # show debugging messages
self.config['custom_ui'] = custom_ui
self.config['interactive'] = interactive # prompt for correct series?
self.config['select_first'] = select_first
self.config['search_all_languages'] = search_all_languages
if cache is True:
self.config['cache_enabled'] = True
self.config['cache_location'] = self._getTempDir()
self.urlopener = urllib2.build_opener(
CacheHandler(self.config['cache_location'])
)
elif cache is False:
self.config['cache_enabled'] = False
self.urlopener = urllib2.build_opener() # default opener with no caching
elif isinstance(cache, basestring):
self.config['cache_enabled'] = True
self.config['cache_location'] = cache
self.urlopener = urllib2.build_opener(
CacheHandler(self.config['cache_location'])
)
elif isinstance(cache, urllib2.OpenerDirector):
# If passed something from urllib2.build_opener, use that
log().debug("Using %r as urlopener" % cache)
self.config['cache_enabled'] = True
self.urlopener = cache
else:
raise ValueError("Invalid value for Cache %r (type was %s)" % (cache, type(cache)))
self.config['banners_enabled'] = banners
self.config['actors_enabled'] = actors
if self.config['debug_enabled']:
warnings.warn("The debug argument to tvdb_api.__init__ will be removed in the next version. "
"To enable debug messages, use the following code before importing: "
"import logging; logging.basicConfig(level=logging.DEBUG)")
logging.basicConfig(level=logging.DEBUG)
# List of languages from http://www.thetvdb.com/api/0629B785CE550C8D/languages.xml
# Hard-coded here as it is relatively static, and saves another HTTP request, as
# recommended on http://thetvdb.com/wiki/index.php/API:languages.xml
self.config['valid_languages'] = [
"da", "fi", "nl", "de", "it", "es", "fr","pl", "hu","el","tr",
"ru","he","ja","pt","zh","cs","sl", "hr","ko","en","sv","no"
]
# thetvdb.com should be based around numeric language codes,
# but to link to a series like http://thetvdb.com/?tab=series&id=79349&lid=16
# requires the language ID, thus this mapping is required (mainly
# for usage in tvdb_ui - internally tvdb_api will use the language abbreviations)
self.config['langabbv_to_id'] = {'el': 20, 'en': 7, 'zh': 27,
'it': 15, 'cs': 28, 'es': 16, 'ru': 22, 'nl': 13, 'pt': 26, 'no': 9,
'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31, 'de': 14, 'da': 10, 'fi': 11,
'hu': 19, 'ja': 25, 'he': 24, 'ko': 32, 'sv': 8, 'sl': 30}
if language is None:
self.config['language'] = 'en'
else:
if language not in self.config['valid_languages']:
raise ValueError("Invalid language %s, options are: %s" % (
language, self.config['valid_languages']
))
else:
self.config['language'] = language
# The following url_ configs are based of the
# http://thetvdb.com/wiki/index.php/Programmers_API
self.config['base_url'] = "http://www.thetvdb.com"
if self.config['search_all_languages']:
self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php?seriesname=%%s&language=all" % self.config
else:
self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php?seriesname=%%s&language=%(language)s" % self.config
self.config['url_epInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.xml" % self.config
self.config['url_seriesInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/%%s.xml" % self.config
self.config['url_actorsInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/actors.xml" % self.config
self.config['url_seriesBanner'] = u"%(base_url)s/api/%(apikey)s/series/%%s/banners.xml" % self.config
self.config['url_artworkPrefix'] = u"%(base_url)s/banners/%%s" % self.config
#end __init__
def _getTempDir(self):
"""Returns the [system temp dir]/tvdb_api
"""
return os.path.join(tempfile.gettempdir(), "tvdb_api")
def _loadUrl(self, url, recache = False):
global lastTimeout
try:
log().debug("Retrieving URL %s" % url)
resp = self.urlopener.open(url)
if 'x-local-cache' in resp.headers:
log().debug("URL %s was cached in %s" % (
url,
resp.headers['x-local-cache'])
)
if recache:
log().debug("Attempting to recache %s" % url)
resp.recache()
except (IOError, urllib2.URLError), errormsg:
if not str(errormsg).startswith('HTTP Error'):
lastTimeout = datetime.datetime.now()
raise tvdb_error("Could not connect to server: %s" % (errormsg))
#end try
# handle gzipped content,
# http://dbr.lighthouseapp.com/projects/13342/tickets/72-gzipped-data-patch
if 'gzip' in resp.headers.get("Content-Encoding", ''):
if gzip:
stream = StringIO.StringIO(resp.read())
gz = gzip.GzipFile(fileobj=stream)
return gz.read()
raise tvdb_error("Received gzip data from thetvdb.com, but could not correctly handle it")
return resp.read()
def _getetsrc(self, url):
"""Loads a URL using caching, returns an ElementTree of the source
"""
src = self._loadUrl(url)
try:
# TVDB doesn't sanitize \r (CR) from user input in some fields,
# remove it to avoid errors. Change from SickBeard, from will14m
return ElementTree.fromstring(src.rstrip("\r"))
except SyntaxError:
src = self._loadUrl(url, recache=True)
try:
return ElementTree.fromstring(src.rstrip("\r"))
except SyntaxError, exceptionmsg:
errormsg = "There was an error with the XML retrieved from thetvdb.com:\n%s" % (
exceptionmsg
)
if self.config['cache_enabled']:
errormsg += "\nFirst try emptying the cache folder at..\n%s" % (
self.config['cache_location']
)
errormsg += "\nIf this does not resolve the issue, please try again later. If the error persists, report a bug on"
errormsg += "\nhttp://dbr.lighthouseapp.com/projects/13342-tvdb_api/overview\n"
raise tvdb_error(errormsg)
#end _getetsrc
def _setItem(self, sid, seas, ep, attrib, value):
"""Creates a new episode, creating Show(), Season() and
Episode()s as required. Called by _getShowData to populate show
Since the nice-to-use tvdb[1][24]['name'] interface
makes it impossible to do tvdb[1][24]['name'] = "name"
and still be capable of checking if an episode exists
so we can raise tvdb_shownotfound, we have a slightly
less pretty method of setting items.. but since the API
is supposed to be read-only, this is the best way to
do it!
The problem is that calling tvdb[1][24]['episodename'] = "name"
calls __getitem__ on tvdb[1], there is no way to check if
tvdb.__dict__ should have a key "1" before we auto-create it
"""
if sid not in self.shows:
self.shows[sid] = Show()
if seas not in self.shows[sid]:
self.shows[sid][seas] = Season(show = self.shows[sid])
if ep not in self.shows[sid][seas]:
self.shows[sid][seas][ep] = Episode(season = self.shows[sid][seas])
self.shows[sid][seas][ep][attrib] = value
#end _set_item
def _setShowData(self, sid, key, value):
"""Sets self.shows[sid] to a new Show instance, or sets the data
"""
if sid not in self.shows:
self.shows[sid] = Show()
self.shows[sid].data[key] = value
def _cleanData(self, data):
"""Cleans up strings returned by TheTVDB.com
Issues corrected:
- Replaces &amp; with &
- Trailing whitespace
"""
data = data.replace(u"&", u"&")
data = data.strip()
return data
#end _cleanData
def _getSeries(self, series):
"""This searches TheTVDB.com for the series name,
If a custom_ui UI is configured, it uses this to select the correct
series. If not, and interactive == True, ConsoleUI is used, if not
BaseUI is used to select the first result.
"""
series = urllib.quote(series.encode("utf-8"))
log().debug("Searching for show %s" % series)
seriesEt = self._getetsrc(self.config['url_getSeries'] % (series))
allSeries = []
for series in seriesEt:
result = dict((k.tag.lower(), k.text) for k in series.getchildren())
result['id'] = int(result['id'])
result['lid'] = self.config['langabbv_to_id'][result['language']]
log().debug('Found series %(seriesname)s' % result)
allSeries.append(result)
#end for series
if len(allSeries) == 0:
log().debug('Series result returned zero')
raise tvdb_shownotfound("Show-name search returned zero results (cannot find show on TVDB)")
if self.config['custom_ui'] is not None:
log().debug("Using custom UI %s" % (repr(self.config['custom_ui'])))
ui = self.config['custom_ui'](config = self.config)
else:
if not self.config['interactive']:
log().debug('Auto-selecting first search result using BaseUI')
ui = BaseUI(config = self.config)
else:
log().debug('Interactively selecting show using ConsoleUI')
ui = ConsoleUI(config = self.config)
#end if config['interactive]
#end if custom_ui != None
return ui.selectSeries(allSeries)
#end _getSeries
def _parseBanners(self, sid):
"""Parses banners XML, from
http://www.thetvdb.com/api/[APIKEY]/series/[SERIES ID]/banners.xml
Banners are retrieved using t['show name']['_banners'], for example:
>>> t = Tvdb(banners = True)
>>> t['scrubs']['_banners'].keys()
['fanart', 'poster', 'series', 'season']
>>> t['scrubs']['_banners']['poster']['680x1000']['35308']['_bannerpath']
u'http://www.thetvdb.com/banners/posters/76156-2.jpg'
>>>
Any key starting with an underscore has been processed (not the raw
data from the XML)
This interface will be improved in future versions.
"""
log().debug('Getting season banners for %s' % (sid))
bannersEt = self._getetsrc( self.config['url_seriesBanner'] % (sid) )
banners = {}
for cur_banner in bannersEt.findall('Banner'):
bid = cur_banner.find('id').text
btype = cur_banner.find('BannerType')
btype2 = cur_banner.find('BannerType2')
if btype is None or btype2 is None:
continue
btype, btype2 = btype.text, btype2.text
if not btype in banners:
banners[btype] = {}
if not btype2 in banners[btype]:
banners[btype][btype2] = {}
if not bid in banners[btype][btype2]:
banners[btype][btype2][bid] = {}
for cur_element in cur_banner.getchildren():
tag = cur_element.tag.lower()
value = cur_element.text
if tag is None or value is None:
continue
tag, value = tag.lower(), value.lower()
banners[btype][btype2][bid][tag] = value
for k, v in banners[btype][btype2][bid].items():
if k.endswith("path"):
new_key = "_%s" % (k)
log().debug("Transforming %s to %s" % (k, new_key))
new_url = self.config['url_artworkPrefix'] % (v)
banners[btype][btype2][bid][new_key] = new_url
self._setShowData(sid, "_banners", banners)
def _parseActors(self, sid):
"""Parsers actors XML, from
http://www.thetvdb.com/api/[APIKEY]/series/[SERIES ID]/actors.xml
Actors are retrieved using t['show name']['_actors'], for example:
>>> t = Tvdb(actors = True)
>>> actors = t['scrubs']['_actors']
>>> type(actors)
<class 'tvdb_api.Actors'>
>>> type(actors[0])
<class 'tvdb_api.Actor'>
>>> actors[0]
<Actor "Zach Braff">
>>> sorted(actors[0].keys())
['id', 'image', 'name', 'role', 'sortorder']
>>> actors[0]['name']
u'Zach Braff'
>>> actors[0]['image']
u'http://www.thetvdb.com/banners/actors/43640.jpg'
Any key starting with an underscore has been processed (not the raw
data from the XML)
"""
log().debug("Getting actors for %s" % (sid))
actorsEt = self._getetsrc(self.config['url_actorsInfo'] % (sid))
cur_actors = Actors()
for curActorItem in actorsEt.findall("Actor"):
curActor = Actor()
for curInfo in curActorItem:
tag = curInfo.tag.lower()
value = curInfo.text
if value is not None:
if tag == "image":
value = self.config['url_artworkPrefix'] % (value)
else:
value = self._cleanData(value)
curActor[tag] = value
cur_actors.append(curActor)
self._setShowData(sid, '_actors', cur_actors)
def _getShowData(self, sid, language):
"""Takes a series ID, gets the epInfo URL and parses the TVDB
XML file into the shows dict in layout:
shows[series_id][season_number][episode_number]
"""
if self.config['language'] is None:
log().debug('Config language is none, using show language')
if language is None:
raise tvdb_error("config['language'] was None, this should not happen")
getShowInLanguage = language
else:
log().debug(
'Configured language %s overrides show language of %s' % (
self.config['language'],
language
)
)
getShowInLanguage = self.config['language']
# Parse show information
log().debug('Getting all series data for %s' % (sid))
seriesInfoEt = self._getetsrc(
self.config['url_seriesInfo'] % (sid, getShowInLanguage)
)
for curInfo in seriesInfoEt.findall("Series")[0]:
tag = curInfo.tag.lower()
value = curInfo.text
if value is not None:
if tag in ['banner', 'fanart', 'poster']:
value = self.config['url_artworkPrefix'] % (value)
else:
value = self._cleanData(value)
self._setShowData(sid, tag, value)
#end for series
# Parse banners
if self.config['banners_enabled']:
self._parseBanners(sid)
# Parse actors
if self.config['actors_enabled']:
self._parseActors(sid)
# Parse episode data
log().debug('Getting all episodes of %s' % (sid))
epsEt = self._getetsrc( self.config['url_epInfo'] % (sid, language) )
for cur_ep in epsEt.findall("Episode"):
seas_no = int(cur_ep.find('SeasonNumber').text)
ep_no = int(cur_ep.find('EpisodeNumber').text)
for cur_item in cur_ep.getchildren():
tag = cur_item.tag.lower()
value = cur_item.text
if value is not None:
if tag == 'filename':
value = self.config['url_artworkPrefix'] % (value)
else:
value = self._cleanData(value)
self._setItem(sid, seas_no, ep_no, tag, value)
#end for cur_ep
#end _getShowData
def _nameToSid(self, name):
"""Takes show name, returns the correct series ID (if the show has
already been grabbed), or grabs all episodes and returns
the correct SID.
"""
if name in self.corrections:
log().debug('Correcting %s to %s' % (name, self.corrections[name]) )
sid = self.corrections[name]
else:
log().debug('Getting show %s' % (name))
selected_series = self._getSeries( name )
sname, sid = selected_series['seriesname'], selected_series['id']
log().debug('Got %(seriesname)s, id %(id)s' % selected_series)
self.corrections[name] = sid
self._getShowData(selected_series['id'], selected_series['language'])
#end if name in self.corrections
return sid
#end _nameToSid
def __getitem__(self, key):
"""Handles tvdb_instance['seriesname'] calls.
The dict index should be the show id
"""
if isinstance(key, (int, long)):
# Item is integer, treat as show id
if key not in self.shows:
self._getShowData(key, self.config['language'])
return self.shows[key]
key = key.lower() # make key lower case
sid = self._nameToSid(key)
log().debug('Got series id %s' % (sid))
return self.shows[sid]
#end __getitem__
def __repr__(self):
return str(self.shows)
#end __repr__
#end Tvdb
def main():
"""Simple example of using tvdb_api - it just
grabs an episode name interactively.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
tvdb_instance = Tvdb(interactive=True, cache=False)
print tvdb_instance['Lost']['seriesname']
print tvdb_instance['Lost'][1][4]['episodename']
if __name__ == '__main__':
main()
|
|
# Built-in modules #
import os, platform, base64, shutil, gzip
from io import BytesIO
# Internal modules #
from plumbing.common import camel_to_snake
from plumbing.cache import property_cached
from plumbing.databases.sqlite_database import SQLiteDatabase
# First party modules #
from autopaths.file_path import FilePath
from autopaths.tmp_path import new_temp_path
# Third party modules #
import pandas
################################################################################
class AccessDatabase(FilePath):
"""
A wrapper for a Microsoft Access database via pyodbc.
On Ubuntu 18 you would install the dependencies like this:
$ sudo apt install python3-pip
$ sudo apt install unixodbc-dev
$ pip install --user pyodbc
"""
# Enable this to change `ThisName` to `this_name` on all columns #
convert_col_names_to_snake = False
# ------------------------------ Constructor ---------------------------- #
def __init__(self, path,
username = 'admin',
password = None):
"""
* The path of the database comes first.
* The username.
* The password.
"""
# Path attribute #
super(AccessDatabase, self).__init__(path)
# Attributes #
self.username = username
self.password = password
# Check the database exists #
self.must_exist()
# ------------------------------ Properties ----------------------------- #
@property_cached
def conn_string(self):
# Get current system #
system = platform.system()
# macOS #
if system == "Darwin":
string = "Driver={Microsoft Access Driver (*.mdb)};User Id='%s';DBQ=%s"
return string % (self.username, self.path)
# Linux #
if os.name == "posix":
string = "Driver={MDBTools};User Id='%s';DBQ=%s"
return string % (self.username, self.path)
# Windows #
if os.name == "nt":
string = "Driver={Microsoft Access Driver (*.mdb, *.accdb)};User Id='%s';DBQ=%s"
return string % (self.username, self.path)
else:
raise Exception("Unrecognized platform.")
@property_cached
def conn(self):
"""To be used externally by the user."""
return self.new_conn()
@property_cached
def own_conn(self):
"""To be used internally in this object."""
return self.new_conn()
@property_cached
def cursor(self):
"""To be used externally by the user."""
return self.conn.cursor()
@property_cached
def own_cursor(self):
"""To be used internally in this object."""
return self.own_conn.cursor()
@property
def tables(self):
"""The complete list of tables."""
# If we are on unix use mdbtools instead #
if os.name == "posix":
import pbs3
mdb_tables = pbs3.Command("mdb-tables")
tables_list = mdb_tables('-1', self.path).split('\n')
condition = lambda t: t and not t.startswith('MSys')
return [t.lower() for t in tables_list if condition(t)]
# Default case #
return [table[2].lower() for table in self.own_cursor.tables()
if not table[2].startswith('MSys')]
@property
def real_tables(self):
"""The complete list of tables excluding views and query tables."""
return [table for table in self.tables if self.test_table(table)]
# ------------------------------- Methods ------------------------------- #
def __getitem__(self, key):
"""Called when evaluating ``database[0] or database['P81239A']``."""
return self.table_as_df(key)
def __contains__(self, key):
"""Called when evaluating ``'students' in database``."""
return key.lower() in self.tables
def test_table(self, table_name):
"""Can the table be read from?"""
import pyodbc
try:
query = "SELECT COUNT (*) FROM `%s`" % table_name.lower()
self.own_cursor.execute(query)
self.own_cursor.fetchone()
except pyodbc.Error:
return False
return True
def new_conn(self):
"""Make a new connection."""
import pyodbc
return pyodbc.connect(self.conn_string)
def close(self):
self.cursor.close()
self.conn.close()
self.own_cursor.close()
self.own_conn.close()
def table_must_exist(self, table_name):
"""Return a table as a dataframe."""
if table_name.lower() not in self.tables:
raise Exception("The table '%s' does not seem to exist." % table_name)
def table_as_df(self, table_name):
"""
Return a table as a dataframe.
There is a library that can do this, but it has a bug.
See https://github.com/jbn/pandas_access/issues/3
import pandas_access
return pandas_access.read_table(self.path, table_name)
This is also a possibility https://github.com/gilesc/mdbread
but it is not in PyPI.
"""
# Check #
self.table_must_exist(table_name)
# If we are on unix use mdb-tools instead #
if os.name == "posix": df = self.table_as_df_via_mdbtools(table_name)
# Default case #
else: df = self.table_as_df_via_query(table_name)
# Optionally rename columns #
if self.convert_col_names_to_snake: df = df.rename(columns=camel_to_snake)
# Return #
return df
def table_as_df_via_query(self, table_name):
"""Use an SQL query to create the dataframe."""
query = "SELECT * FROM `%s`" % table_name.lower()
return pandas.read_sql(query, self.own_conn)
def table_as_df_via_mdbtools(self, table_name, *args, **kwargs):
"""Use an mdbtools executable to create the dataframe."""
import subprocess
cmd = ['mdb-export', self.path, table_name]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
return pandas.read_csv(proc.stdout, *args, **kwargs)
def insert_df(self, table_name, df):
"""Create a table and populate it with data from a dataframe."""
df.to_sql(table_name, con=self.own_conn)
def count_rows(self, table_name):
"""Return the number of entries in a table by counting them."""
self.table_must_exist(table_name)
query = "SELECT COUNT (*) FROM `%s`" % table_name.lower()
self.own_cursor.execute(query)
return int(self.own_cursor.fetchone()[0])
def count_rows_fast(self, table_name):
"""Return the number of entries in a table by using the quick inaccurate method."""
pass
def tables_with_counts(self):
"""Return the number of entries in all table."""
table_to_count = lambda t: self.count_rows(t)
return zip(self.tables, map(table_to_count, self.tables))
def drop_table(self, table_name):
if table_name not in self.tables:
raise Exception("The table '%s' does not seem to exist." % table_name)
query = "DROP TABLE %s" % table_name
self.own_conn.execute(query)
# ------------------------------- Convert ------------------------------- #
def convert_to_sqlite(self, destination=None, method="shell", progress=False):
"""Who wants to use Access when you can deal with SQLite databases instead?"""
# Display progress bar #
if progress:
import tqdm
progress = tqdm.tqdm
else:
progress = lambda x:x
# Default path #
if destination is None: destination = self.replace_extension('sqlite')
# Delete if it exists #
destination.remove()
# Method with shell and a temp file #
if method == 'shell': return self.sqlite_by_shell(destination)
# Method without a temp file #
if method == 'object': return self.sqlite_by_object(destination, progress)
# Method with dataframe #
if method == 'dataframe': return self.sqlite_by_df(destination, progress)
def sqlite_by_shell(self, destination):
"""Method with shell and a temp file. This is hopefully fast."""
script_path = new_temp_path()
self.sqlite_dump_shell(script_path)
from shell_command import shell_output
shell_output('sqlite3 -bail -init "%s" "%s" .quit' % (script_path, destination))
script_path.remove()
def sqlite_by_object(self, destination, progress):
"""This is probably not very fast."""
db = SQLiteDatabase(destination)
db.create()
for script in self.sqlite_dump_string(progress): db.cursor.executescript(script)
db.close()
def sqlite_by_df(self, destination, progress):
"""Is this fast?"""
db = SQLiteDatabase(destination)
db.create()
for table in progress(self.real_tables): self[table].to_sql(table, con=db.connection)
db.close()
def sqlite_dump_shell(self, script_path):
"""Generate a text dump compatible with SQLite by using
shell commands. Place this script at *script_path*."""
# First the schema #
from shell_command import shell_output
shell_output('mdb-schema "%s" sqlite >> "%s"' % (self.path, script_path))
# Start a transaction, speeds things up when importing #
script_path.append("\n\n\nBEGIN TRANSACTION;\n")
# Then export every table #
for table in self.tables:
command = 'mdb-export -I sqlite "%s" "%s" >> "%s"'
shell_output(command % (self.path, table, script_path))
# End the transaction
script_path.append("\n\n\nEND TRANSACTION;\n")
def sqlite_dump_string(self, progress):
"""Generate a text dump compatible with SQLite.
By yielding every table one by one as a byte string."""
# First the schema #
import pbs3
mdb_schema = pbs3.Command("mdb-schema")
yield mdb_schema(self.path, "sqlite").encode('utf8')
# Start a transaction, speeds things up when importing #
yield "BEGIN TRANSACTION;\n"
# Then export every table #
mdb_export = pbs3.Command("mdb-export")
for table in progress(self.tables):
yield mdb_export('-I', 'sqlite', self.path, table).encode('utf8')
# End the transaction
yield "END TRANSACTION;\n"
# --------------------------- Multi-database ---------------------------- #
def import_table(self, source, table_name):
"""Copy a table from another Access database to this one.
Requires that you have mdbtools command line executables installed
in a Windows Subsystem for Linux environment."""
# Run commands #
import pbs3
wsl = pbs3.Command("wsl.exe")
table_schema = wsl("-e", "mdb-schema", "-T", table_name, source.wsl_style, "access")
table_contents = wsl("-e", "mdb-export", "-I", "access", source.wsl_style, table_name)
# Filter #
table_schema = ' '.join(l for l in table_schema.split('\n') if not l.startswith("--"))
# Execute statements #
self.cursor.execute(str(table_schema))
self.cursor.execute(str(table_contents))
# -------------------------------- Create ------------------------------- #
@classmethod
def create(cls, destination):
"""Create a new empty MDB at destination."""
mdb_gz_b64 = """\
H4sICIenn1gC/25ldzIwMDMubWRiAO2de2wcRx3Hf7O7Pt/d3u6eLyEtVaOaqg+EkjQvuVVDwa9a
jWXHdZxQQlCJ7fOrfp3OTpqkhVxTItFWIhVQVFBRVNIKRaColVpAUKGKRwwFqUAhKiBIpUaoVWP+
qKgIIHL8Znb39u72znWJiWP3+9l473fzm/nNY3cdf2fmbBJEPdO9E+nebLq+fWC6vrWZOImen9D7
9sR+vPPNE0PZxo/TE5879mj+yNc3/OzAD2bXv3DmV9/o/8PZnxxr+/fDL2w79ulzN7e+/sS/zvzz
w3+N1z28p3PTfQ3nfn/m2YmeFS2no89uWnvqwO5HUvd/5Phr938tes3j/zm5+qT41J8/P/iZx87/
+qHrjgyduubG1t/+7eWB2XztTNuT+1clZt9c2/e7HRGizevWEwAAAAAAAACAhUEIwvE+PoRIO8K7
FzT6obPPwTMBAAAAAAAAAABcfpzPXwya+Ispo1xlEO2KEEX9eaGyWnrqyKQ60tQ0AcNZRcR1RYuy
+XZCxoqRzmaMI6cKGRJuJVrIEZUOQ9UrHStUYpyzKkdNmSPFDkM6aguhXMdVHCMuHXE2Suu4IFQJ
l6CErNWUDouDlbdKOZIcrKLD4S5WdNhqIEodqlVaofKgVTHpiBQ6uLG0uaKsuYbf3IS8BmV1qFAm
j1Z5Hbp06GWDKC+DTS00SRN8DFA/TXNfW6mXX3upj7+mOHWllzLAObN8du0gdSdlKO3ZcWqjMbaH
uOQqtidViRF+P0HbOH2c3xm0lfMb1EH7uHZ5vp32c+ks+5PqfSeXS9NejjTAvZQpd7J3kuuJFqLE
qYvuVa3Ocqk7OVXWNMFxZPRVtJ1zSXuCBrlkh+rjEF1Zlt5Dw6qN0xx5Bx3gGgbowVo56EIjkc9T
xX9Jdd+5PKDOD6q3VQvwv7qiZ8st419cdYHlo6iuriF8X4HA590AsodXhvrsj0yMDPnAuI+ZvOrq
1o7K51Hdy7a8cdXNm5AedbfG5W3j3lOybxFZKb6zAgAAAAAAsNzQxAlbvnYJV3VcUU3/S2luBIKF
ha+IlWp+wxW4IiRXRSXxKeNU1eOxUuUbSOIINbEM7WT506ZE3LASgCOeYJWCMcnCsI/u8eSsFEYR
lnlbWa6+u0jTYqSkvuQL9G5CLFwTRBMAAAAAAAAAgMtW/79lyVdLKxW7oqDF3bXOniib0UD/m/xq
loWqvFwt3DX/mrLNALIu3V35NkpK1JDmL+2XOmr9pf1gKiFY4I672wc0mveaf6zaenyKmljPT6t5
hT7a6y13y0XqjFpwneJjRC0oRwvL3eUL2fHCcuyGIntjhTkDuZCd5Vc5j+HNUMyx+myYcpHW5YG5
ZijUdbg2VFu4ZzzcHFM3seQLAAAAAAAAAMtc//9S6cm1emX97ytK1v81rHelhtfVfAFnseZXRdV9
Ad7+dhGS5kbl3eqe/K8pU/nnYwX5X2VeoLbCZwHi7txD6aTELabnoLJ5AfPFC8JmFd3Pun+MlfM4
q/846/4s62i5+8Dmc7EvSVN0UG2tL00p1uPXqZTt/G5QqX+5lbufz+mSctVzFce6upBrTG3Fd+cn
pmiYrUyw8+GNfL4hn8/k83qZrVlyGzgPeqbhjcOqx7KMEZRpU/MPQ+rsldEtuYm8vExkznoMS+6b
KC5TZRt8wVf4xEkFX4V5D/X2vYz1/EcR8yMAAAAAAACAJY0Qf/d3vLPUlb//b4Nzzv6W3Wevtl+1
vmxts2LWTxOHErcm3jGfMUfNG0yMGQAAAAAAeJ/8rLwAMXIYRgCARFv8IIaYtKpGqCdqlN/2kupD
/ob67qXhsi0lDh2Vp6728faO9tHuUflfWJ1wE0e6724f35XuG71r16Dr0FwH573by6rKi0N7RveN
tnd6aTVBWrpjd3fnuJtsBMnDk90ju7zckSA5XGGtdGrK2dWhUnRcMgAAAAAAAAD4v2CIV6vqf82I
Jusbcwsy7wkWSf/n1JQNq/Oc+uQGq/ecmsphYZ6Tn6XwRLjwxb7mTxDoakLgURUFshwAAAAAAAAA
ljpCrHZ8W/f2/2NUAAAAAAAAAAAAhXH5RLm4IIbotqot7hbW/0MGWCp46/+pgpHwjZS3IyAlfMPy
tgakNN+wfcPxNgukdN9I+kadt30gZfhGjW+s8I2V3s6CVNTbWZCK+Eatb3zAN1Z5mw5SMd+I+wZ+
+QQAAAAAAAAA/K8IcdT27Zqi3/+HkQEAAAAAAAAAsGgkMQQLjSHqbQPDAAAAAAAAAAAALGuw/g8A
AAAAAAAA4DJUqwsQI7cQDWlcLiMq1/9rcGMBAAAAAAAAAADLGuh/AAAAAAAAAAAA+h8AAAAAAAAA
AABLHyHusDTPjtLzTtoxnRftUftqe8YatDA+AAAAAAAAAPDeqJN/KVt+et0R9PYnzz7W8PrZRv+V
HblO6qEDNEXbaYDGqJemaYQmaYJThtnK8Gvzb1opfDRTPZmUlxUY86qgm/ZyFVkOOqCC3kLhoyEI
qs8raBO10O0q3EYKH+uDcNq8wnVRH93D7evnYZhHG5kkB3a0OYO2ctCWV9ZR+FhT0l2HCzl6xVBz
XZyPUvi4taTjcwRuVUF7uYW9HMy9MJspfGwMAoo5A+5Qwca8UHN2WogeU/fu0ito1vmjM+M85zzp
fNG5zxl2djrNzk3O9+0m+yWrx2q0fpH4buJ4Yk3ig4lvmkfxx9gBAAAAAAC4OAylQfJ5h5pfSVCc
f853gqSmWPSZux6xjUznltH2HT/flNu7++0NZ7/07cg/vnPbVu30y6d/NLvlabPh+j81v/Xc5g9l
1h2f+epn9+VPdN90OHHvU50fm94y/ZXvWQ/tP/yJG/NH3llz8A79tlNPG72DHSePHdzz2s3XPzVj
vzSUvSHjVys1Rv5CSUv8pEvcEqkbV/KX35JaQ+npikmRS9o4rtYIt8RYnJa4Ou6SV6stTm+l7rcX
q9qSy+23pCVIcgV/SZKuJj5CSRc4Y/PpkiesLJcI53J37NvFuQzv4peGL0/SypP+C+45xVAAMAEA
"""
pristine = BytesIO()
pristine.write(base64.b64decode(mdb_gz_b64))
pristine.seek(0)
pristine = gzip.GzipFile(fileobj=pristine, mode='rb')
with open(destination, 'wb') as handle: shutil.copyfileobj(pristine, handle)
return cls(destination)
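# A minimal usage sketch based on the methods above (the path and table name
# are illustrative placeholders, not part of the library):
#
#     db = AccessDatabase('inventory.mdb')
#     print(db.tables)                      # list the table names
#     df = db['products']                   # read one table as a pandas DataFrame
#     db.convert_to_sqlite(method='shell')  # write an .sqlite copy next to the .mdb
#     db.close()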
|
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test compact blocks (BIP 152).
Version 1 compact blocks are pre-segwit (txids)
Version 2 compact blocks are post-segwit (wtxids)
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
from test_framework.script import CScript, OP_TRUE
# TestNode: A peer we use to send messages to bitcoind, and store responses.
class TestNode(NodeConnCB):
def __init__(self):
super().__init__()
self.last_sendcmpct = []
self.block_announced = False
# Store the hashes of blocks we've seen announced.
# This is for synchronizing the p2p message traffic,
# so we can eg wait until a particular block is announced.
self.announced_blockhashes = set()
def on_sendcmpct(self, conn, message):
self.last_sendcmpct.append(message)
def on_cmpctblock(self, conn, message):
self.block_announced = True
self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256)
def on_headers(self, conn, message):
self.block_announced = True
for x in self.last_message["headers"].headers:
x.calc_sha256()
self.announced_blockhashes.add(x.sha256)
def on_inv(self, conn, message):
for x in self.last_message["inv"].inv:
if x.type == 2:
self.block_announced = True
self.announced_blockhashes.add(x.hash)
# Requires caller to hold mininode_lock
def received_block_announcement(self):
return self.block_announced
def clear_block_announcement(self):
with mininode_lock:
self.block_announced = False
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
self.last_message.pop("cmpctblock", None)
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.connection.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
def request_headers_and_sync(self, locator, hashstop=0):
self.clear_block_announcement()
self.get_headers(locator, hashstop)
wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock)
self.clear_block_announcement()
# Block until a block announcement for a particular block hash is
# received.
def wait_for_block_announcement(self, block_hash, timeout=30):
def received_hash():
return (block_hash in self.announced_blockhashes)
wait_until(received_hash, timeout=timeout, lock=mininode_lock)
def send_await_disconnect(self, message, timeout=30):
"""Sends a message to the node and wait for disconnect.
This is used when we want to send a message into the node that we expect
will get us disconnected, eg an invalid block."""
self.send_message(message)
wait_until(lambda: not self.connected, timeout=timeout, lock=mininode_lock)
class CompactBlocksTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
# Node0 = pre-segwit, node1 = segwit-aware
self.num_nodes = 2
self.extra_args = [["-vbparams=segwit:0:0"], ["-txindex"]]
self.utxos = []
def build_block_on_tip(self, node, segwit=False):
height = node.getblockcount()
tip = node.getbestblockhash()
mtp = node.getblockheader(tip)['mediantime']
block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
block.nVersion = 4
if segwit:
add_witness_commitment(block)
block.solve()
return block
# Create 10 more anyone-can-spend utxos for testing.
def make_utxos(self):
# Doesn't matter which node we use, just use node0.
block = self.build_block_on_tip(self.nodes[0])
self.test_node.send_and_ping(msg_block(block))
assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256)
self.nodes[0].generate(100)
total_value = block.vtx[0].vout[0].nValue
out_value = total_value // 10
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
for i in range(10):
tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
tx.rehash()
block2 = self.build_block_on_tip(self.nodes[0])
block2.vtx.append(tx)
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.solve()
self.test_node.send_and_ping(msg_block(block2))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
return
# Test "sendcmpct" (between peers preferring the same version):
# - No compact block announcements unless sendcmpct is sent.
# - If sendcmpct is sent with version > preferred_version, the message is ignored.
# - If sendcmpct is sent with boolean 0, then block announcements are not
# made with compact blocks.
# - If sendcmpct is then sent with boolean 1, then new block announcements
# are made with compact blocks.
# If old_node is passed in, request compact blocks with version=preferred-1
# and verify that it receives block announcements via compact block.
def test_sendcmpct(self, node, test_node, preferred_version, old_node=None):
# Make sure we get a SENDCMPCT message from our peer
def received_sendcmpct():
return (len(test_node.last_sendcmpct) > 0)
wait_until(received_sendcmpct, timeout=30, lock=mininode_lock)
with mininode_lock:
# Check that the first version received is the preferred one
assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
# And that we receive versions down to 1.
assert_equal(test_node.last_sendcmpct[-1].version, 1)
test_node.last_sendcmpct = []
tip = int(node.getbestblockhash(), 16)
def check_announcement_of_new_block(node, peer, predicate):
peer.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
peer.wait_for_block_announcement(block_hash, timeout=30)
assert(peer.block_announced)
with mininode_lock:
assert predicate(peer), (
"block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None)))
# We shouldn't get any block announcements via cmpctblock yet.
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Try one more time, this time after requesting headers.
test_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message)
# Test a few ways of using sendcmpct that should NOT
# result in compact block announcements.
# Before each test, sync the headers chain.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with too-high version
sendcmpct = msg_sendcmpct()
sendcmpct.version = preferred_version+1
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with valid version, but announce=False
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Finally, try a SENDCMPCT message with announce=True
sendcmpct.version = preferred_version
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time (no headers sync should be needed!)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after turning on sendheaders
test_node.send_and_ping(msg_sendheaders())
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after sending a version-1, announce=false message.
sendcmpct.version = preferred_version-1
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Now turn off announcements
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message)
if old_node is not None:
# Verify that a peer using an older protocol version can receive
# announcements from this node.
sendcmpct.version = preferred_version-1
sendcmpct.announce = True
old_node.send_and_ping(sendcmpct)
# Header sync
old_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, old_node, lambda p: "cmpctblock" in p.last_message)
# This test actually causes bitcoind to (reasonably!) disconnect us, so do this last.
def test_invalid_cmpctblock_message(self):
self.nodes[0].generate(101)
block = self.build_block_on_tip(self.nodes[0])
cmpct_block = P2PHeaderAndShortIDs()
cmpct_block.header = CBlockHeader(block)
cmpct_block.prefilled_txn_length = 1
# This index will be too high
prefilled_txn = PrefilledTransaction(1, block.vtx[0])
cmpct_block.prefilled_txn = [prefilled_txn]
self.test_node.send_await_disconnect(msg_cmpctblock(cmpct_block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
# Compare the generated shortids to what we expect based on BIP 152, given
# bitcoind's choice of nonce.
def test_compactblock_construction(self, node, test_node, version, use_witness_address):
# Generate a bunch of transactions.
node.generate(101)
num_transactions = 25
address = node.getnewaddress()
if use_witness_address:
# Want at least one segwit spend, so move all funds to
# a witness address.
address = node.addwitnessaddress(address)
value_to_send = node.getbalance()
node.sendtoaddress(address, satoshi_round(value_to_send-Decimal(0.1)))
node.generate(1)
segwit_tx_generated = False
for i in range(num_transactions):
txid = node.sendtoaddress(address, 0.1)
hex_tx = node.gettransaction(txid)["hex"]
tx = FromHex(CTransaction(), hex_tx)
if not tx.wit.is_null():
segwit_tx_generated = True
if use_witness_address:
assert(segwit_tx_generated) # check that our test is not broken
# Wait until we've seen the block announcement for the resulting tip
tip = int(node.getbestblockhash(), 16)
test_node.wait_for_block_announcement(tip)
# Make sure we will receive a fast-announce compact block
self.request_cb_announcements(test_node, node, version)
# Now mine a block, and look at the resulting compact block.
test_node.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
# Store the raw block in our internal format.
block = FromHex(CBlock(), node.getblock("%02x" % block_hash, False))
for tx in block.vtx:
tx.calc_sha256()
block.rehash()
# Wait until the block was announced (via compact blocks)
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert("cmpctblock" in test_node.last_message)
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
# Now fetch the compact block using a normal non-announce getdata
with mininode_lock:
test_node.clear_block_announcement()
inv = CInv(4, block_hash) # 4 == "CompactBlock"
test_node.send_message(msg_getdata([inv]))
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert("cmpctblock" in test_node.last_message)
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
def check_compactblock_construction_from_block(self, version, header_and_shortids, block_hash, block):
# Check that we got the right block!
header_and_shortids.header.calc_sha256()
assert_equal(header_and_shortids.header.sha256, block_hash)
# Make sure the prefilled_txn appears to have included the coinbase
assert(len(header_and_shortids.prefilled_txn) >= 1)
assert_equal(header_and_shortids.prefilled_txn[0].index, 0)
# Check that all prefilled_txn entries match what's in the block.
for entry in header_and_shortids.prefilled_txn:
entry.tx.calc_sha256()
# This checks the non-witness parts of the tx agree
assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)
# And this checks the witness
wtxid = entry.tx.calc_sha256(True)
if version == 2:
assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True))
else:
# Shouldn't have received a witness
assert(entry.tx.wit.is_null())
# Check that the cmpctblock message announced all the transactions.
assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))
# And now check that all the shortids are as expected as well.
# Determine the siphash keys to use.
[k0, k1] = header_and_shortids.get_siphash_keys()
index = 0
while index < len(block.vtx):
if (len(header_and_shortids.prefilled_txn) > 0 and
header_and_shortids.prefilled_txn[0].index == index):
# Already checked prefilled transactions above
header_and_shortids.prefilled_txn.pop(0)
else:
tx_hash = block.vtx[index].sha256
if version == 2:
tx_hash = block.vtx[index].calc_sha256(True)
shortid = calculate_shortid(k0, k1, tx_hash)
assert_equal(shortid, header_and_shortids.shortids[0])
header_and_shortids.shortids.pop(0)
index += 1
# Test that bitcoind requests compact blocks when we announce new blocks
# via header or inv, and that responding to getblocktxn causes the block
# to be successfully reconstructed.
# Post-segwit: upgraded nodes would only make this request of cb-version-2,
# NODE_WITNESS peers. Unupgraded nodes would still make this request of
# any cb-version-1-supporting peer.
def test_compactblock_requests(self, node, test_node, version, segwit):
# Try announcing a block with an inv or header, expect a compactblock
# request
for announce in ["inv", "header"]:
block = self.build_block_on_tip(node, segwit=segwit)
with mininode_lock:
test_node.last_message.pop("getdata", None)
if announce == "inv":
test_node.send_message(msg_inv([CInv(2, block.sha256)]))
wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock)
test_node.send_header_for_blocks([block])
else:
test_node.send_header_for_blocks([block])
wait_until(lambda: "getdata" in test_node.last_message, timeout=30, lock=mininode_lock)
assert_equal(len(test_node.last_message["getdata"].inv), 1)
assert_equal(test_node.last_message["getdata"].inv[0].type, 4)
assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)
# Send back a compactblock message that omits the coinbase
comp_block = HeaderAndShortIDs()
comp_block.header = CBlockHeader(block)
comp_block.nonce = 0
[k0, k1] = comp_block.get_siphash_keys()
coinbase_hash = block.vtx[0].sha256
if version == 2:
coinbase_hash = block.vtx[0].calc_sha256(True)
comp_block.shortids = [
calculate_shortid(k0, k1, coinbase_hash) ]
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
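            # The node cannot reconstruct the block without the coinbase, so
            # the tip should not advance until we answer the getblocktxn
            # request below.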
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# Expect a getblocktxn message.
with mininode_lock:
assert("getblocktxn" in test_node.last_message)
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [0]) # should be a coinbase request
# Send the coinbase, and verify that the tip advances.
if version == 2:
msg = msg_witness_blocktxn()
else:
msg = msg_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = [block.vtx[0]]
test_node.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
# Create a chain of transactions from given utxo, and add to a new block.
def build_block_with_transactions(self, node, utxo, num_transactions):
block = self.build_block_on_tip(node)
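        # Build a chain where each new transaction spends the single output
        # of the previous one; all of them are appended to the same block.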
for i in range(num_transactions):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE])))
tx.rehash()
utxo = [tx.sha256, 0, tx.vout[0].nValue]
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
return block
# Test that we only receive getblocktxn requests for transactions that the
# node needs, and that responding to them causes the block to be
# reconstructed.
def test_getblocktxn_requests(self, node, test_node, version):
with_witness = (version==2)
def test_getblocktxn_response(compact_block, peer, expected_result):
msg = msg_cmpctblock(compact_block.to_p2p())
peer.send_and_ping(msg)
with mininode_lock:
assert("getblocktxn" in peer.last_message)
absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, expected_result)
def test_tip_after_message(node, peer, msg, tip):
peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), tip)
# First try announcing compactblocks that won't reconstruct, and verify
# that we receive getblocktxn messages back.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])
msg_bt = msg_blocktxn()
if with_witness:
msg_bt = msg_witness_blocktxn() # serialize with witnesses
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Now try interspersing the prefilled transactions
comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now try giving one transaction ahead of time.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
test_node.send_and_ping(msg_tx(block.vtx[1]))
assert(block.vtx[1].hash in node.getrawmempool())
# Prefill 4 out of the 6 transactions, and verify that only the one
# that was not in the mempool is requested.
comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [5])
msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now provide all transactions to the node before the block is
# announced and verify reconstruction happens immediately.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
for tx in block.vtx[1:]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert(tx.hash in mempool)
# Clear out last request.
with mininode_lock:
test_node.last_message.pop("getblocktxn", None)
# Send compact block
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness)
test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
with mininode_lock:
# Shouldn't have gotten a request for any transaction
assert("getblocktxn" not in test_node.last_message)
# Incorrectly responding to a getblocktxn shouldn't cause the block to be
# permanently failed.
def test_incorrect_blocktxn_response(self, node, test_node, version):
if (len(self.utxos) == 0):
self.make_utxos()
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Relay the first 5 transactions from the block in advance
for tx in block.vtx[1:6]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in block.vtx[1:6]:
assert(tx.hash in mempool)
# Send compact block
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
absolute_indexes = []
with mininode_lock:
assert("getblocktxn" in test_node.last_message)
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [6, 7, 8, 9, 10])
# Now give an incorrect response.
# Note that it's possible for bitcoind to be smart enough to know we're
# lying, since it could check to see if the shortid matches what we're
# sending, and eg disconnect us for misbehavior. If that behavior
# change were made, we could just modify this test by having a
# different peer provide the block further down, so that we're still
# verifying that the block isn't marked bad permanently. This is good
# enough for now.
msg = msg_blocktxn()
if version==2:
msg = msg_witness_blocktxn()
msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
test_node.send_and_ping(msg)
# Tip should not have updated
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# We should receive a getdata request
wait_until(lambda: "getdata" in test_node.last_message, timeout=10, lock=mininode_lock)
assert_equal(len(test_node.last_message["getdata"].inv), 1)
assert(test_node.last_message["getdata"].inv[0].type == 2 or test_node.last_message["getdata"].inv[0].type == 2|MSG_WITNESS_FLAG)
assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)
# Deliver the block
if version==2:
test_node.send_and_ping(msg_witness_block(block))
else:
test_node.send_and_ping(msg_block(block))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def test_getblocktxn_handler(self, node, test_node, version):
        # bitcoind will not send blocktxn responses for blocks that are more
        # than 10 blocks deep from the tip.
MAX_GETBLOCKTXN_DEPTH = 10
chain_height = node.getblockcount()
current_height = chain_height
while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
block_hash = node.getblockhash(current_height)
block = FromHex(CBlock(), node.getblock(block_hash, False))
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
num_to_request = random.randint(1, len(block.vtx))
msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
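            # from_absolute() converts to the differential index encoding
            # that BIP 152 uses on the wire for getblocktxn requests.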
test_node.send_message(msg)
wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock)
[tx.calc_sha256() for tx in block.vtx]
with mininode_lock:
assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16))
all_indices = msg.block_txn_request.to_absolute()
for index in all_indices:
tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0)
tx.calc_sha256()
assert_equal(tx.sha256, block.vtx[index].sha256)
if version == 1:
# Witnesses should have been stripped
assert(tx.wit.is_null())
else:
# Check that the witness matches
assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
test_node.last_message.pop("blocktxn", None)
current_height -= 1
# Next request should send a full block response, as we're past the
# allowed depth for a blocktxn response.
block_hash = node.getblockhash(current_height)
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
with mininode_lock:
test_node.last_message.pop("block", None)
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with mininode_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16))
assert "blocktxn" not in test_node.last_message
def test_compactblocks_not_at_tip(self, node, test_node):
# Test that requesting old compactblocks doesn't work.
MAX_CMPCTBLOCK_DEPTH = 5
new_blocks = []
for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
test_node.clear_block_announcement()
new_blocks.append(node.generate(1)[0])
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
node.generate(1)
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
with mininode_lock:
test_node.last_message.pop("block", None)
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock)
with mininode_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16))
# Generate an old compactblock, and verify that it's not accepted.
cur_height = node.getblockcount()
hashPrevBlock = int(node.getblockhash(cur_height-5), 16)
block = self.build_block_on_tip(node)
block.hashPrevBlock = hashPrevBlock
block.solve()
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block)
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
tips = node.getchaintips()
found = False
for x in tips:
if x["hash"] == block.hash:
assert_equal(x["status"], "headers-only")
found = True
break
assert(found)
# Requesting this block via getblocktxn should silently fail
# (to avoid fingerprinting attacks).
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
with mininode_lock:
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with mininode_lock:
assert "blocktxn" not in test_node.last_message
def activate_segwit(self, node):
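        # On regtest the BIP9 confirmation window is 144 blocks, so three
        # full periods are enough for segwit to reach the "active" state.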
node.generate(144*3)
assert_equal(get_bip9_status(node, "segwit")["status"], 'active')
def test_end_to_end_block_relay(self, node, listeners):
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
[l.clear_block_announcement() for l in listeners]
# ToHex() won't serialize with witness, but this block has no witnesses
# anyway. TODO: repeat this test with witness tx's to a segwit node.
node.submitblock(ToHex(block))
for l in listeners:
wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock)
with mininode_lock:
for l in listeners:
assert "cmpctblock" in l.last_message
l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256)
    # Test that we don't get disconnected if we relay a compact block with a
    # valid header, but invalid transactions.
def test_invalid_tx_in_compactblock(self, node, test_node, use_segwit):
assert(len(self.utxos))
utxo = self.utxos[0]
block = self.build_block_with_transactions(node, utxo, 5)
del block.vtx[3]
block.hashMerkleRoot = block.calc_merkle_root()
if use_segwit:
# If we're testing with segwit, also drop the coinbase witness,
# but include the witness commitment.
add_witness_commitment(block)
block.vtx[0].wit.vtxinwit = []
block.solve()
# Now send the compact block with all transactions prefilled, and
# verify that we don't get disconnected.
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit)
msg = msg_cmpctblock(comp_block.to_p2p())
test_node.send_and_ping(msg)
# Check that the tip didn't advance
        assert(int(node.getbestblockhash(), 16) != block.sha256)
test_node.sync_with_ping()
# Helper for enabling cb announcements
# Send the sendcmpct request and sync headers
def request_cb_announcements(self, peer, node, version):
tip = node.getbestblockhash()
peer.get_headers(locator=[int(tip, 16)], hashstop=0)
msg = msg_sendcmpct()
msg.version = version
msg.announce = True
peer.send_and_ping(msg)
def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer):
assert(len(self.utxos))
def announce_cmpct_block(node, peer):
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
cmpct_block = HeaderAndShortIDs()
cmpct_block.initialize_from_block(block)
msg = msg_cmpctblock(cmpct_block.to_p2p())
peer.send_and_ping(msg)
with mininode_lock:
assert "getblocktxn" in peer.last_message
return block, cmpct_block
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert(tx.hash in mempool)
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Now test that delivering an invalid compact block won't break relay
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [ CTxInWitness() ]
cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
cmpct_block.use_witness = True
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
assert(int(node.getbestblockhash(), 16) != block.sha256)
msg = msg_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = block.vtx[1:]
stalling_peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def run_test(self):
# Setup the p2p connections and start up the network thread.
self.test_node = TestNode()
self.segwit_node = TestNode()
self.old_node = TestNode() # version 1 peer <--> segwit node
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
self.segwit_node, services=NODE_NETWORK|NODE_WITNESS))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
self.old_node, services=NODE_NETWORK))
self.test_node.add_connection(connections[0])
self.segwit_node.add_connection(connections[1])
self.old_node.add_connection(connections[2])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
self.test_node.wait_for_verack()
# We will need UTXOs to construct transactions in later tests.
self.make_utxos()
self.log.info("Running tests, pre-segwit activation:")
self.log.info("Testing SENDCMPCT p2p message... ")
self.test_sendcmpct(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_sendcmpct(self.nodes[1], self.segwit_node, 2, old_node=self.old_node)
sync_blocks(self.nodes)
self.log.info("Testing compactblock construction...")
self.test_compactblock_construction(self.nodes[0], self.test_node, 1, False)
sync_blocks(self.nodes)
self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, False)
sync_blocks(self.nodes)
self.log.info("Testing compactblock requests... ")
self.test_compactblock_requests(self.nodes[0], self.test_node, 1, False)
sync_blocks(self.nodes)
self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, False)
sync_blocks(self.nodes)
self.log.info("Testing getblocktxn requests...")
self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
sync_blocks(self.nodes)
self.log.info("Testing getblocktxn handler...")
self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
sync_blocks(self.nodes)
self.log.info("Testing compactblock requests/announcements not at chain tip...")
self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node)
sync_blocks(self.nodes)
self.test_compactblocks_not_at_tip(self.nodes[1], self.segwit_node)
self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node)
sync_blocks(self.nodes)
self.log.info("Testing handling of incorrect blocktxn responses...")
self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_incorrect_blocktxn_response(self.nodes[1], self.segwit_node, 2)
sync_blocks(self.nodes)
# End-to-end block relay tests
self.log.info("Testing end-to-end block relay...")
self.request_cb_announcements(self.test_node, self.nodes[0], 1)
self.request_cb_announcements(self.old_node, self.nodes[1], 1)
self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
self.test_end_to_end_block_relay(self.nodes[0], [self.segwit_node, self.test_node, self.old_node])
self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
self.log.info("Testing handling of invalid compact blocks...")
self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, False)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, False)
self.log.info("Testing reconstructing compact blocks from all peers...")
self.test_compactblock_reconstruction_multiple_peers(self.nodes[1], self.segwit_node, self.old_node)
sync_blocks(self.nodes)
# Advance to segwit activation
self.log.info("Advancing to segwit activation")
self.activate_segwit(self.nodes[1])
self.log.info("Running tests, post-segwit activation...")
self.log.info("Testing compactblock construction...")
self.test_compactblock_construction(self.nodes[1], self.old_node, 1, True)
self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, True)
sync_blocks(self.nodes)
self.log.info("Testing compactblock requests (unupgraded node)... ")
self.test_compactblock_requests(self.nodes[0], self.test_node, 1, True)
self.log.info("Testing getblocktxn requests (unupgraded node)...")
self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
# Need to manually sync node0 and node1, because post-segwit activation,
# node1 will not download blocks from node0.
self.log.info("Syncing nodes...")
assert(self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash())
while (self.nodes[0].getblockcount() > self.nodes[1].getblockcount()):
block_hash = self.nodes[0].getblockhash(self.nodes[1].getblockcount()+1)
self.nodes[1].submitblock(self.nodes[0].getblock(block_hash, False))
assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
self.log.info("Testing compactblock requests (segwit node)... ")
self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, True)
self.log.info("Testing getblocktxn requests (segwit node)...")
self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
sync_blocks(self.nodes)
self.log.info("Testing getblocktxn handler (segwit node should return witnesses)...")
self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
# Test that if we submitblock to node1, we'll get a compact block
# announcement to all peers.
# (Post-segwit activation, blocks won't propagate from node0 to node1
# automatically, so don't bother testing a block announced to node0.)
self.log.info("Testing end-to-end block relay...")
self.request_cb_announcements(self.test_node, self.nodes[0], 1)
self.request_cb_announcements(self.old_node, self.nodes[1], 1)
self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
self.log.info("Testing handling of invalid compact blocks...")
self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, True)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, True)
self.log.info("Testing invalid index in cmpctblock message...")
self.test_invalid_cmpctblock_message()
if __name__ == '__main__':
CompactBlocksTest().main()
|
|
"""
Support for Australian BOM (Bureau of Meteorology) weather service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.bom/
"""
import datetime
import ftplib
import gzip
import io
import json
import logging
import os
import re
import zipfile
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_MONITORED_CONDITIONS, TEMP_CELSIUS, CONF_NAME, ATTR_ATTRIBUTION,
CONF_LATITUDE, CONF_LONGITUDE)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_RESOURCE = 'http://www.bom.gov.au/fwo/{}/{}.{}.json'
_LOGGER = logging.getLogger(__name__)
ATTR_LAST_UPDATE = 'last_update'
ATTR_SENSOR_ID = 'sensor_id'
ATTR_STATION_ID = 'station_id'
ATTR_STATION_NAME = 'station_name'
ATTR_ZONE_ID = 'zone_id'
CONF_ATTRIBUTION = "Data provided by the Australian Bureau of Meteorology"
CONF_STATION = 'station'
CONF_ZONE_ID = 'zone_id'
CONF_WMO_ID = 'wmo_id'
MIN_TIME_BETWEEN_UPDATES = datetime.timedelta(seconds=60)
SENSOR_TYPES = {
'wmo': ['wmo', None],
'name': ['Station Name', None],
'history_product': ['Zone', None],
'local_date_time': ['Local Time', None],
'local_date_time_full': ['Local Time Full', None],
'aifstime_utc': ['UTC Time Full', None],
'lat': ['Lat', None],
'lon': ['Long', None],
'apparent_t': ['Feels Like C', TEMP_CELSIUS],
'cloud': ['Cloud', None],
'cloud_base_m': ['Cloud Base', None],
'cloud_oktas': ['Cloud Oktas', None],
'cloud_type_id': ['Cloud Type ID', None],
'cloud_type': ['Cloud Type', None],
'delta_t': ['Delta Temp C', TEMP_CELSIUS],
'gust_kmh': ['Wind Gust kmh', 'km/h'],
'gust_kt': ['Wind Gust kt', 'kt'],
'air_temp': ['Air Temp C', TEMP_CELSIUS],
'dewpt': ['Dew Point C', TEMP_CELSIUS],
'press': ['Pressure mb', 'mbar'],
'press_qnh': ['Pressure qnh', 'qnh'],
'press_msl': ['Pressure msl', 'msl'],
'press_tend': ['Pressure Tend', None],
'rain_trace': ['Rain Today', 'mm'],
'rel_hum': ['Relative Humidity', '%'],
'sea_state': ['Sea State', None],
'swell_dir_worded': ['Swell Direction', None],
'swell_height': ['Swell Height', 'm'],
'swell_period': ['Swell Period', None],
    'vis_km': ['Visibility km', 'km'],
'weather': ['Weather', None],
'wind_dir': ['Wind Direction', None],
'wind_spd_kmh': ['Wind Speed kmh', 'km/h'],
'wind_spd_kt': ['Wind Speed kt', 'kt']
}
def validate_station(station):
"""Check that the station ID is well-formed."""
if station is None:
return
station = station.replace('.shtml', '')
if not re.fullmatch(r'ID[A-Z]\d\d\d\d\d\.\d\d\d\d\d', station):
raise vol.error.Invalid('Malformed station ID')
return station
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Inclusive(CONF_ZONE_ID, 'Deprecated partial station ID'): cv.string,
vol.Inclusive(CONF_WMO_ID, 'Deprecated partial station ID'): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_STATION): validate_station,
vol.Required(CONF_MONITORED_CONDITIONS, default=[]):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
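# Illustrative YAML configuration for this platform (station ID and names are
# example values only):
#
#   sensor:
#     - platform: bom
#       station: IDN60901.94767
#       name: Sydney
#       monitored_conditions:
#         - air_temp
#         - rel_hum
#         - wind_spd_kmh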
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the BOM sensor."""
station = config.get(CONF_STATION)
zone_id, wmo_id = config.get(CONF_ZONE_ID), config.get(CONF_WMO_ID)
if station is not None:
if zone_id and wmo_id:
_LOGGER.warning(
"Using config %s, not %s and %s for BOM sensor",
CONF_STATION, CONF_ZONE_ID, CONF_WMO_ID)
elif zone_id and wmo_id:
station = '{}.{}'.format(zone_id, wmo_id)
else:
station = closest_station(
config.get(CONF_LATITUDE), config.get(CONF_LONGITUDE),
hass.config.config_dir)
if station is None:
_LOGGER.error("Could not get BOM weather station from lat/lon")
return
bom_data = BOMCurrentData(station)
try:
bom_data.update()
except ValueError as err:
_LOGGER.error("Received error from BOM Current: %s", err)
return
add_entities([BOMCurrentSensor(bom_data, variable, config.get(CONF_NAME))
for variable in config[CONF_MONITORED_CONDITIONS]])
class BOMCurrentSensor(Entity):
"""Implementation of a BOM current sensor."""
def __init__(self, bom_data, condition, stationname):
"""Initialize the sensor."""
self.bom_data = bom_data
self._condition = condition
self.stationname = stationname
@property
def name(self):
"""Return the name of the sensor."""
if self.stationname is None:
return 'BOM {}'.format(SENSOR_TYPES[self._condition][0])
return 'BOM {} {}'.format(
self.stationname, SENSOR_TYPES[self._condition][0])
@property
def state(self):
"""Return the state of the sensor."""
return self.bom_data.get_reading(self._condition)
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = {
ATTR_ATTRIBUTION: CONF_ATTRIBUTION,
ATTR_LAST_UPDATE: self.bom_data.last_updated,
ATTR_SENSOR_ID: self._condition,
ATTR_STATION_ID: self.bom_data.latest_data['wmo'],
ATTR_STATION_NAME: self.bom_data.latest_data['name'],
ATTR_ZONE_ID: self.bom_data.latest_data['history_product'],
}
return attr
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return SENSOR_TYPES[self._condition][1]
def update(self):
"""Update current conditions."""
self.bom_data.update()
class BOMCurrentData:
"""Get data from BOM."""
def __init__(self, station_id):
"""Initialize the data object."""
self._zone_id, self._wmo_id = station_id.split('.')
self._data = None
self.last_updated = None
def _build_url(self):
"""Build the URL for the requests."""
url = _RESOURCE.format(self._zone_id, self._zone_id, self._wmo_id)
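        # For a station of the form 'IDXnnnnn.wwwww' this yields
        # http://www.bom.gov.au/fwo/IDXnnnnn/IDXnnnnn.wwwww.json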
_LOGGER.debug("BOM URL: %s", url)
return url
@property
def latest_data(self):
"""Return the latest data object."""
if self._data:
return self._data[0]
return None
def get_reading(self, condition):
"""Return the value for the given condition.
BOM weather publishes condition readings for weather (and a few other
conditions) at intervals throughout the day. To avoid a `-` value in
the frontend for these conditions, we traverse the historical data
for the latest value that is not `-`.
Iterators are used in this method to avoid iterating needlessly
through the entire BOM provided dataset.
"""
condition_readings = (entry[condition] for entry in self._data)
return next((x for x in condition_readings if x != '-'), None)
def should_update(self):
"""Determine whether an update should occur.
        BOM provides updated data every 30 minutes. We define the refresh
        logic here manually, rather than relying on a throttle, so that
        updates stay in lock-step with BOM.
        If 35 minutes have passed since the last BOM data update, then
        an update should be done.
"""
if self.last_updated is None:
# Never updated before, therefore an update should occur.
return True
now = datetime.datetime.now()
update_due_at = self.last_updated + datetime.timedelta(minutes=35)
return now > update_due_at
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from BOM."""
if not self.should_update():
_LOGGER.debug(
"BOM was updated %s minutes ago, skipping update as"
" < 35 minutes, Now: %s, LastUpdate: %s",
(datetime.datetime.now() - self.last_updated),
datetime.datetime.now(), self.last_updated)
return
try:
result = requests.get(self._build_url(), timeout=10).json()
self._data = result['observations']['data']
            # Set last_updated from self._data[0], since the first element in
            # the array is the most recent observation in the JSON.
self.last_updated = datetime.datetime.strptime(
str(self._data[0]['local_date_time_full']), '%Y%m%d%H%M%S')
return
except ValueError as err:
_LOGGER.error("Check BOM %s", err.args)
self._data = None
raise
def _get_bom_stations():
"""Return {CONF_STATION: (lat, lon)} for all stations, for auto-config.
This function does several MB of internet requests, so please use the
caching version to minimise latency and hit-count.
"""
latlon = {}
with io.BytesIO() as file_obj:
with ftplib.FTP('ftp.bom.gov.au') as ftp:
ftp.login()
ftp.cwd('anon2/home/ncc/metadata/sitelists')
ftp.retrbinary('RETR stations.zip', file_obj.write)
file_obj.seek(0)
with zipfile.ZipFile(file_obj) as zipped:
with zipped.open('stations.txt') as station_txt:
for _ in range(4):
station_txt.readline() # skip header
while True:
line = station_txt.readline().decode().strip()
if len(line) < 120:
break # end while loop, ignoring any footer text
wmo, lat, lon = (line[a:b].strip() for a, b in
[(128, 134), (70, 78), (79, 88)])
if wmo != '..':
latlon[wmo] = (float(lat), float(lon))
zones = {}
pattern = (r'<a href="/products/(?P<zone>ID[A-Z]\d\d\d\d\d)/'
r'(?P=zone)\.(?P<wmo>\d\d\d\d\d).shtml">')
for state in ('nsw', 'vic', 'qld', 'wa', 'tas', 'nt'):
url = 'http://www.bom.gov.au/{0}/observations/{0}all.shtml'.format(
state)
for zone_id, wmo_id in re.findall(pattern, requests.get(url).text):
zones[wmo_id] = zone_id
return {'{}.{}'.format(zones[k], k): latlon[k]
for k in set(latlon) & set(zones)}
def bom_stations(cache_dir):
"""Return {CONF_STATION: (lat, lon)} for all stations, for auto-config.
Results from internet requests are cached as compressed JSON, making
subsequent calls very much faster.
"""
cache_file = os.path.join(cache_dir, '.bom-stations.json.gz')
if not os.path.isfile(cache_file):
stations = _get_bom_stations()
with gzip.open(cache_file, 'wt') as cache:
json.dump(stations, cache, sort_keys=True)
return stations
with gzip.open(cache_file, 'rt') as cache:
return {k: tuple(v) for k, v in json.load(cache).items()}
def closest_station(lat, lon, cache_dir):
"""Return the ZONE_ID.WMO_ID of the closest station to our lat/lon."""
if lat is None or lon is None or not os.path.isdir(cache_dir):
return
stations = bom_stations(cache_dir)
def comparable_dist(wmo_id):
"""Create a psudeo-distance from latitude/longitude."""
station_lat, station_lon = stations[wmo_id]
return (lat - station_lat) ** 2 + (lon - station_lon) ** 2
return min(stations, key=comparable_dist)
|
|
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
FLAGS = tf.compat.v1.flags.FLAGS
# Number of steps to train the model.
# Setting this to 0 means no training at all: the weights simply keep their
# initial values. This helps keep the test small.
TRAIN_STEPS = 0
class UnidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
def __init__(self, *args, **kwargs):
super(UnidirectionalSequenceRnnTest, self).__init__(*args, **kwargs)
# Define constants
# Unrolled through 28 time steps
self.time_steps = 28
# Rows of 28 pixels
self.n_input = 28
# Learning rate for Adam optimizer
self.learning_rate = 0.001
    # MNIST is meant to be classified into 10 classes (0-9).
self.n_classes = 10
# Batch size
self.batch_size = 16
# Rnn Units.
self.num_units = 16
def setUp(self):
super(UnidirectionalSequenceRnnTest, self).setUp()
# Import MNIST dataset
data_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
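    # fake_data=True makes read_data_sets return small synthetic batches
    # instead of downloading the real MNIST archives, keeping the test
    # hermetic.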
self.mnist = input_data.read_data_sets(
data_dir, fake_data=True, one_hot=True)
def buildRnnLayer(self):
return tf.keras.layers.StackedRNNCells([
tf.compat.v1.lite.experimental.nn.TfLiteRNNCell(
self.num_units, name="rnn1"),
tf.compat.v1.lite.experimental.nn.TfLiteRNNCell(
self.num_units, name="rnn2")
])
def buildModel(self, rnn_layer, is_dynamic_rnn):
"""Build Mnist recognition model.
Args:
      rnn_layer: The rnn layer, either a single rnn cell or a multi rnn cell.
is_dynamic_rnn: Use dynamic_rnn or not.
Returns:
A tuple containing:
- Input tensor of the model.
- Prediction tensor of the model.
- Output class tensor of the model.
"""
# Weights and biases for output softmax layer.
out_weights = tf.Variable(
tf.random.normal([self.num_units, self.n_classes]))
out_bias = tf.Variable(tf.random.normal([self.n_classes]))
# input image placeholder
x = tf.compat.v1.placeholder(
"float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")
# x is shaped [batch_size,time_steps,num_inputs]
if is_dynamic_rnn:
rnn_input = tf.transpose(x, perm=[1, 0, 2])
outputs, _ = tf.compat.v1.lite.experimental.nn.dynamic_rnn(
rnn_layer, rnn_input, dtype="float32")
outputs = tf.unstack(outputs, axis=0)
else:
rnn_input = tf.unstack(x, self.time_steps, 1)
outputs, _ = tf.compat.v1.nn.static_rnn(
rnn_layer, rnn_input, dtype="float32")
# Compute logits by multiplying outputs[-1] of shape [batch_size,num_units]
# by the softmax layer's out_weight of shape [num_units,n_classes]
# plus out_bias
prediction = tf.matmul(outputs[-1], out_weights) + out_bias
output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")
return x, prediction, output_class
def trainModel(self, x, prediction, output_class, sess):
"""Train the model.
Args:
x: The input tensor.
prediction: The prediction class tensor.
output_class: The output tensor.
sess: The graph session.
"""
# input label placeholder
y = tf.compat.v1.placeholder("float", [None, self.n_classes])
# Loss function
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
# Optimization
opt = tf.compat.v1.train.AdamOptimizer(
learning_rate=self.learning_rate).minimize(loss)
# Initialize variables
sess.run(tf.compat.v1.global_variables_initializer())
for _ in range(TRAIN_STEPS):
batch_x, batch_y = self.mnist.train.next_batch(
batch_size=self.batch_size, fake_data=True)
batch_x = np.array(batch_x)
batch_y = np.array(batch_y)
batch_x = batch_x.reshape((self.batch_size, self.time_steps,
self.n_input))
sess.run(opt, feed_dict={x: batch_x, y: batch_y})
def saveAndRestoreModel(self, rnn_layer, sess, saver, is_dynamic_rnn):
"""Saves and restores the model to mimic the most common use case.
Args:
      rnn_layer: The rnn layer, either a single rnn cell or a multi rnn cell.
sess: Old session.
saver: saver created by tf.compat.v1.train.Saver()
is_dynamic_rnn: use dynamic_rnn or not.
Returns:
A tuple containing:
- Input tensor of the restored model.
- Prediction tensor of the restored model.
        - Output tensor, which is the softmax result of the prediction tensor.
- new session of the restored model.
"""
model_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
saver.save(sess, model_dir)
# Reset the graph.
tf.reset_default_graph()
x, prediction, output_class = self.buildModel(rnn_layer, is_dynamic_rnn)
new_sess = tf.compat.v1.Session()
saver = tf.compat.v1.train.Saver()
saver.restore(new_sess, model_dir)
return x, prediction, output_class, new_sess
def getInferenceResult(self, x, output_class, sess):
"""Get inference result given input tensor and output tensor.
Args:
x: The input tensor.
output_class: The output tensor.
sess: Current session.
Returns:
A tuple containing:
- Input of the next batch, batch size is 1.
- Expected output.
"""
b1, _ = self.mnist.train.next_batch(batch_size=1, fake_data=True)
b1 = np.array(b1, dtype=np.dtype("float32"))
sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
expected_output = sess.run(output_class, feed_dict={x: sample_input})
return sample_input, expected_output
def tfliteInvoke(self,
sess,
test_inputs,
input_tensor,
output_tensor,
use_mlir_converter=False):
"""Get tflite inference result.
    This method converts the TensorFlow session to a TFLite model, then runs
    TFLite inference on the given inputs and returns the results.
Args:
sess: Current tensorflow session.
test_inputs: The test inputs for tflite.
input_tensor: The input tensor of tensorflow graph.
output_tensor: The output tensor of tensorflow graph.
use_mlir_converter: Whether or not to use MLIRConverter to convert the
model.
Returns:
The tflite inference result.
"""
converter = tf.lite.TFLiteConverter.from_session(sess, [input_tensor],
[output_tensor])
converter.experimental_new_converter = use_mlir_converter
tflite = converter.convert()
interpreter = tf.lite.Interpreter(model_content=tflite)
interpreter.allocate_tensors()
input_index = interpreter.get_input_details()[0]["index"]
interpreter.set_tensor(input_index, test_inputs)
interpreter.invoke()
output_index = interpreter.get_output_details()[0]["index"]
result = interpreter.get_tensor(output_index)
# Reset all variables so it will not pollute other inferences.
interpreter.reset_all_variables()
return result
def testStaticRnnMultiRnnCell(self):
sess = tf.compat.v1.Session()
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(), is_dynamic_rnn=False)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(), sess, saver, is_dynamic_rnn=False)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
# Test Toco-converted model.
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class, False)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
# Test MLIR-converted model.
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class, True)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
@test_util.enable_control_flow_v2
def testDynamicRnnMultiRnnCell(self):
sess = tf.compat.v1.Session()
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(), is_dynamic_rnn=True)
self.trainModel(x, prediction, output_class, sess)
saver = tf.compat.v1.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(), sess, saver, is_dynamic_rnn=True)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
# Test Toco-converted model.
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class, False)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
# Test MLIR-converted model.
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class, True)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
if __name__ == "__main__":
tf.disable_v2_behavior()
test.main()
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Markus Hutzler
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import argparse
__version__ = '0.1'
class Command(object):
"""Argument Parser Command class."""
__desc__ = ""
__help__ = ""
__cmd__ = None
def __init__(self):
self._parser = None
@classmethod
def _add_arguments_cls(cls, parser):
self = cls.__new__(cls)
self.add_arguments(parser)
def add_arguments(self, parser):
"""Add extra command specific arguments.
Args:
parser (`argparse.ArgumentParser`): Parser to manipulate"""
pass
def run(self, args):
"""Run the actual command.
Args:
args (`Namespace`): Result of parse_args()"""
pass
class Command2Arg(object):
"""Wrapper class to allow commands to be displayed as arguments."""
def __init__(self, cmd):
self.dest = ""
self.option_strings = []
self.metavar = cmd.__cmd__
self.help = cmd.__help__
class ArgumentParser(argparse.ArgumentParser):
"""Argument parser subclass."""
def __init__(self, *a, **k):
argparse.ArgumentParser.__init__(self, *a, **k)
self.commands = []
self.selected_command = None
self.selected_command_key = None
def format_usage(self):
"""Overwrites original format_usage and injects the command."""
if not self.commands:
return super(ArgumentParser, self).format_usage()
prog = self.prog
if self.selected_command_key:
self.prog = prog + " " + self.selected_command_key
else:
self.prog = prog + " command"
usage = super(ArgumentParser, self).format_usage()
self.prog = prog
return usage
def format_help(self, show_cmd=False):
"""Overwrites format_help. (Adds selected command info)
Args:
show_cmd (`bool`): Add list of commands to help message."""
if not self.commands:
return super(ArgumentParser, self).format_help()
description = self.description
self.description = "{% usage-replace %}"
prog = self.prog
if self.selected_command_key:
self.prog = prog + " " + self.selected_command_key
else:
self.prog = prog + " command"
text = super(ArgumentParser, self).format_help()
self.prog = prog
self.description = description
formatter = self._get_formatter()
formatter.add_text(self.description)
if self.selected_command:
formatter.add_text(self.selected_command.__help__)
formatter.add_text(self.selected_command.__desc__)
insertion = formatter.format_help().strip()
text = text.replace("{% usage-replace %}", insertion)
if show_cmd:
formatter = self._get_formatter()
formatter.start_section("commands")
for command in self.commands:
formatter.add_argument(Command2Arg(command))
formatter.end_section()
text += "\n" + formatter.format_help()
return text
def error_command(self, message):
"""Prints error message for command related errors and exits.
Args:
message (`string`): Error message."""
usage = self.format_usage()
usage += "%s: error: %s\n" % (self.prog, message)
usage += "See '%s -h\n" % (self.prog)
sys.stderr.write(usage)
sys.exit(2)
def add_argument(self, *args, **kwargs):
"""Check for command as parameter or destination."""
dest = kwargs.get("dest", None)
if dest == "command":
raise ValueError("command can't be used as destination")
if not dest and "--command" in args:
raise ValueError("--command can not be used as parameter")
return argparse.ArgumentParser.add_argument(self, *args, **kwargs)
def parse_args(self, args=None, namespace=None):
"""Overwrites original args. (Adds the command to the namespace)"""
if not self.commands:
return argparse.ArgumentParser.parse_args(self, args=args,
namespace=namespace)
cmd = None
if args is None:
args = sys.argv[1:]
try:
cmd_key = args.pop(0)
except IndexError:
self.error_command("command missing")
# The only option without command is -h
if cmd_key == "-h":
usage = self.format_help(show_cmd=True)
self._print_message(usage)
            sys.exit(0)
for command in self.commands:
if command.__cmd__ == cmd_key:
cmd = command
break
if not cmd:
self.error_command("command '%s': unknown" % cmd_key)
self.selected_command_key = cmd.__cmd__
self.selected_command = cmd
# Add command specific arguments
cmd._add_arguments_cls(self)
args = argparse.ArgumentParser.parse_args(self, args=args,
namespace=namespace)
setattr(args, "command", cmd)
return args
def add_command(self, command_class):
"""Add command class to parser.
Args:
command_class (`Command`): Command class."""
if not command_class.__cmd__:
raise ValueError("Command does not specify command name.")
if not command_class.__help__:
raise ValueError("Command does not specify command help.")
for cmd in self.commands:
if cmd.__cmd__ == command_class.__cmd__:
raise ValueError("Duplicate command name added.")
self.commands.append(command_class)
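# Illustrative usage sketch (the command class and its options are assumptions
# for this example, not part of the module):
#
#   class BuildCommand(Command):
#       __cmd__ = "build"
#       __help__ = "Build the project."
#
#       def add_arguments(self, parser):
#           parser.add_argument("--target", help="build target")
#
#       def run(self, args):
#           print("building", args.target)
#
#   parser = ArgumentParser(description="example tool")
#   parser.add_command(BuildCommand)
#   args = parser.parse_args(["build", "--target", "all"])
#   args.command().run(args)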
|
|
#
#
# Copyright (C) 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module implementing the iallocator code."""
from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import outils
from ganeti import opcodes
from ganeti import rpc
from ganeti import serializer
from ganeti import utils
import ganeti.masterd.instance as gmi
_STRING_LIST = ht.TListOf(ht.TString)
_JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
# pylint: disable=E1101
# Class '...' has no 'OP_ID' member
"OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
opcodes.OpInstanceMigrate.OP_ID,
opcodes.OpInstanceReplaceDisks.OP_ID]),
})))
_NEVAC_MOVED = \
ht.TListOf(ht.TAnd(ht.TIsLength(3),
ht.TItems([ht.TNonEmptyString,
ht.TNonEmptyString,
ht.TListOf(ht.TNonEmptyString),
])))
_NEVAC_FAILED = \
ht.TListOf(ht.TAnd(ht.TIsLength(2),
ht.TItems([ht.TNonEmptyString,
ht.TMaybeString,
])))
_NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))
_INST_NAME = ("name", ht.TNonEmptyString)
_INST_UUID = ("inst_uuid", ht.TNonEmptyString)
class _AutoReqParam(outils.AutoSlots):
"""Meta class for request definitions.
"""
@classmethod
def _GetSlots(mcs, attrs):
"""Extract the slots out of REQ_PARAMS.
"""
params = attrs.setdefault("REQ_PARAMS", [])
return [slot for (slot, _) in params]
class IARequestBase(outils.ValidatedSlots):
"""A generic IAllocator request object.
"""
__metaclass__ = _AutoReqParam
MODE = NotImplemented
REQ_PARAMS = []
REQ_RESULT = NotImplemented
def __init__(self, **kwargs):
"""Constructor for IARequestBase.
The constructor takes only keyword arguments and will set
attributes on this object based on the passed arguments. As such,
it means that you should not pass arguments which are not in the
REQ_PARAMS attribute for this class.
"""
outils.ValidatedSlots.__init__(self, **kwargs)
self.Validate()
def Validate(self):
"""Validates all parameters of the request.
"""
assert self.MODE in constants.VALID_IALLOCATOR_MODES
for (param, validator) in self.REQ_PARAMS:
if not hasattr(self, param):
raise errors.OpPrereqError("Request is missing '%s' parameter" % param,
errors.ECODE_INVAL)
value = getattr(self, param)
if not validator(value):
raise errors.OpPrereqError(("Request parameter '%s' has invalid"
" type %s/value %s") %
(param, type(value), value),
errors.ECODE_INVAL)
def GetRequest(self, cfg):
"""Gets the request data dict.
@param cfg: The configuration instance
"""
raise NotImplementedError
def ValidateResult(self, ia, result):
"""Validates the result of an request.
@param ia: The IAllocator instance
@param result: The IAllocator run result
@raises ResultValidationError: If validation fails
"""
if ia.success and not self.REQ_RESULT(result):
raise errors.ResultValidationError("iallocator returned invalid result,"
" expected %s, got %s" %
(self.REQ_RESULT, result))
class IAReqInstanceAlloc(IARequestBase):
"""An instance allocation request.
"""
# pylint: disable=E1101
MODE = constants.IALLOCATOR_MODE_ALLOC
REQ_PARAMS = [
_INST_NAME,
("memory", ht.TNonNegativeInt),
("spindle_use", ht.TNonNegativeInt),
("disks", ht.TListOf(ht.TDict)),
("disk_template", ht.TString),
("os", ht.TString),
("tags", _STRING_LIST),
("nics", ht.TListOf(ht.TDict)),
("vcpus", ht.TInt),
("hypervisor", ht.TString),
("node_whitelist", ht.TMaybeListOf(ht.TNonEmptyString)),
]
REQ_RESULT = ht.TList
def RequiredNodes(self):
"""Calculates the required nodes based on the disk_template.
"""
if self.disk_template in constants.DTS_INT_MIRROR:
return 2
else:
return 1
def GetRequest(self, cfg):
"""Requests a new instance.
The checks for the completeness of the opcode must have already been
done.
"""
disk_space = gmi.ComputeDiskSize(self.disk_template, self.disks)
return {
"name": self.name,
"disk_template": self.disk_template,
"tags": self.tags,
"os": self.os,
"vcpus": self.vcpus,
"memory": self.memory,
"spindle_use": self.spindle_use,
"disks": self.disks,
"disk_space_total": disk_space,
"nics": self.nics,
"required_nodes": self.RequiredNodes(),
"hypervisor": self.hypervisor,
}
def ValidateResult(self, ia, result):
"""Validates an single instance allocation request.
"""
IARequestBase.ValidateResult(self, ia, result)
if ia.success and len(result) != self.RequiredNodes():
raise errors.ResultValidationError("iallocator returned invalid number"
" of nodes (%s), required %s" %
(len(result), self.RequiredNodes()))
class IAReqMultiInstanceAlloc(IARequestBase):
"""An multi instance allocation request.
"""
# pylint: disable=E1101
MODE = constants.IALLOCATOR_MODE_MULTI_ALLOC
REQ_PARAMS = [
("instances", ht.TListOf(ht.TInstanceOf(IAReqInstanceAlloc))),
]
_MASUCCESS = \
ht.TListOf(ht.TAnd(ht.TIsLength(2),
ht.TItems([ht.TNonEmptyString,
ht.TListOf(ht.TNonEmptyString),
])))
_MAFAILED = ht.TListOf(ht.TNonEmptyString)
REQ_RESULT = ht.TAnd(ht.TList, ht.TIsLength(2),
ht.TItems([_MASUCCESS, _MAFAILED]))
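  # The result is a pair: a list of (instance name, [allocated node names])
  # entries for placed instances, and a flat list of instance names that
  # could not be allocated.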
def GetRequest(self, cfg):
return {
"instances": [iareq.GetRequest(cfg) for iareq in self.instances],
}
class IAReqRelocate(IARequestBase):
"""A relocation request.
"""
# pylint: disable=E1101
MODE = constants.IALLOCATOR_MODE_RELOC
REQ_PARAMS = [
_INST_UUID,
("relocate_from_node_uuids", _STRING_LIST),
]
REQ_RESULT = ht.TList
def GetRequest(self, cfg):
"""Request an relocation of an instance
The checks for the completeness of the opcode must have already been
done.
"""
instance = cfg.GetInstanceInfo(self.inst_uuid)
if instance is None:
raise errors.ProgrammerError("Unknown instance '%s' passed to"
" IAllocator" % self.inst_uuid)
if instance.disk_template not in constants.DTS_MIRRORED:
raise errors.OpPrereqError("Can't relocate non-mirrored instances",
errors.ECODE_INVAL)
if (instance.disk_template in constants.DTS_INT_MIRROR and
len(instance.secondary_nodes) != 1):
raise errors.OpPrereqError("Instance has not exactly one secondary node",
errors.ECODE_STATE)
disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in instance.disks]
disk_space = gmi.ComputeDiskSize(instance.disk_template, disk_sizes)
return {
"name": instance.name,
"disk_space_total": disk_space,
"required_nodes": 1,
"relocate_from": cfg.GetNodeNames(self.relocate_from_node_uuids),
}
def ValidateResult(self, ia, result):
"""Validates the result of an relocation request.
"""
IARequestBase.ValidateResult(self, ia, result)
node2group = dict((name, ndata["group"])
for (name, ndata) in ia.in_data["nodes"].items())
fn = compat.partial(self._NodesToGroups, node2group,
ia.in_data["nodegroups"])
instance = ia.cfg.GetInstanceInfo(self.inst_uuid)
request_groups = fn(ia.cfg.GetNodeNames(self.relocate_from_node_uuids) +
ia.cfg.GetNodeNames([instance.primary_node]))
result_groups = fn(result + ia.cfg.GetNodeNames([instance.primary_node]))
if ia.success and not set(result_groups).issubset(request_groups):
raise errors.ResultValidationError("Groups of nodes returned by"
" iallocator (%s) differ from original"
" groups (%s)" %
(utils.CommaJoin(result_groups),
utils.CommaJoin(request_groups)))
@staticmethod
def _NodesToGroups(node2group, groups, nodes):
"""Returns a list of unique group names for a list of nodes.
@type node2group: dict
@param node2group: Map from node name to group UUID
@type groups: dict
@param groups: Group information
@type nodes: list
@param nodes: Node names
"""
result = set()
for node in nodes:
try:
group_uuid = node2group[node]
except KeyError:
# Ignore unknown node
pass
else:
try:
group = groups[group_uuid]
except KeyError:
# Can't find group, let's use UUID
group_name = group_uuid
else:
group_name = group["name"]
result.add(group_name)
return sorted(result)
class IAReqNodeEvac(IARequestBase):
"""A node evacuation request.
"""
# pylint: disable=E1101
MODE = constants.IALLOCATOR_MODE_NODE_EVAC
REQ_PARAMS = [
("instances", _STRING_LIST),
("evac_mode", ht.TEvacMode),
]
REQ_RESULT = _NEVAC_RESULT
def GetRequest(self, cfg):
"""Get data for node-evacuate requests.
"""
return {
"instances": self.instances,
"evac_mode": self.evac_mode,
}
class IAReqGroupChange(IARequestBase):
"""A group change request.
"""
# pylint: disable=E1101
MODE = constants.IALLOCATOR_MODE_CHG_GROUP
REQ_PARAMS = [
("instances", _STRING_LIST),
("target_groups", _STRING_LIST),
]
REQ_RESULT = _NEVAC_RESULT
def GetRequest(self, cfg):
"""Get data for node-evacuate requests.
"""
return {
"instances": self.instances,
"target_groups": self.target_groups,
}
class IAllocator(object):
"""IAllocator framework.
  An IAllocator instance has four sets of attributes:
- cfg that is needed to query the cluster
- input data (all members of the _KEYS class attribute are required)
- four buffer attributes (in|out_data|text), that represent the
input (to the external script) in text and data structure format,
and the output from it, again in two formats
- the result variables from the script (success, info, nodes) for
easy usage
"""
# pylint: disable=R0902
# lots of instance attributes
def __init__(self, cfg, rpc_runner, req):
self.cfg = cfg
self.rpc = rpc_runner
self.req = req
# init buffer variables
self.in_text = self.out_text = self.in_data = self.out_data = None
# init result fields
self.success = self.info = self.result = None
self._BuildInputData(req)
def _ComputeClusterDataNodeInfo(self, disk_templates, node_list,
cluster_info, hypervisor_name):
"""Prepare and execute node info call.
@type disk_templates: list of string
@param disk_templates: the disk templates of the instances to be allocated
@type node_list: list of strings
@param node_list: list of nodes' UUIDs
@type cluster_info: L{objects.Cluster}
@param cluster_info: the cluster's information from the config
@type hypervisor_name: string
@param hypervisor_name: the hypervisor name
@rtype: same as the result of the node info RPC call
@return: the result of the node info RPC call
"""
storage_units_raw = utils.storage.GetStorageUnits(self.cfg, disk_templates)
storage_units = rpc.PrepareStorageUnitsForNodes(self.cfg, storage_units_raw,
node_list)
hvspecs = [(hypervisor_name, cluster_info.hvparams[hypervisor_name])]
return self.rpc.call_node_info(node_list, storage_units, hvspecs)
def _ComputeClusterData(self, disk_template=None):
"""Compute the generic allocator input data.
    @type disk_template: string
    @param disk_template: the disk template of the instances to be allocated
"""
cluster_info = self.cfg.GetClusterInfo()
# cluster data
data = {
"version": constants.IALLOCATOR_VERSION,
"cluster_name": self.cfg.GetClusterName(),
"cluster_tags": list(cluster_info.GetTags()),
"enabled_hypervisors": list(cluster_info.enabled_hypervisors),
"ipolicy": cluster_info.ipolicy,
}
ninfo = self.cfg.GetAllNodesInfo()
iinfo = self.cfg.GetAllInstancesInfo().values()
i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
# node data
node_list = [n.uuid for n in ninfo.values() if n.vm_capable]
if isinstance(self.req, IAReqInstanceAlloc):
hypervisor_name = self.req.hypervisor
node_whitelist = self.req.node_whitelist
elif isinstance(self.req, IAReqRelocate):
hypervisor_name = self.cfg.GetInstanceInfo(self.req.inst_uuid).hypervisor
node_whitelist = None
else:
hypervisor_name = cluster_info.primary_hypervisor
node_whitelist = None
if not disk_template:
disk_template = cluster_info.enabled_disk_templates[0]
node_data = self._ComputeClusterDataNodeInfo([disk_template], node_list,
cluster_info, hypervisor_name)
node_iinfo = \
self.rpc.call_all_instances_info(node_list,
cluster_info.enabled_hypervisors,
cluster_info.hvparams)
data["nodegroups"] = self._ComputeNodeGroupData(self.cfg)
config_ndata = self._ComputeBasicNodeData(self.cfg, ninfo, node_whitelist)
data["nodes"] = self._ComputeDynamicNodeData(
ninfo, node_data, node_iinfo, i_list, config_ndata, disk_template)
assert len(data["nodes"]) == len(ninfo), \
"Incomplete node data computed"
data["instances"] = self._ComputeInstanceData(self.cfg, cluster_info,
i_list)
self.in_data = data
@staticmethod
def _ComputeNodeGroupData(cfg):
"""Compute node groups data.
"""
cluster = cfg.GetClusterInfo()
ng = dict((guuid, {
"name": gdata.name,
"alloc_policy": gdata.alloc_policy,
"networks": [net_uuid for net_uuid, _ in gdata.networks.items()],
"ipolicy": gmi.CalculateGroupIPolicy(cluster, gdata),
"tags": list(gdata.GetTags()),
})
for guuid, gdata in cfg.GetAllNodeGroupsInfo().items())
return ng
@staticmethod
def _ComputeBasicNodeData(cfg, node_cfg, node_whitelist):
"""Compute global node data.
@rtype: dict
    @returns: a dict mapping node name to a dict of static node data
"""
# fill in static (config-based) values
node_results = dict((ninfo.name, {
"tags": list(ninfo.GetTags()),
"primary_ip": ninfo.primary_ip,
"secondary_ip": ninfo.secondary_ip,
"offline": (ninfo.offline or
not (node_whitelist is None or
ninfo.name in node_whitelist)),
"drained": ninfo.drained,
"master_candidate": ninfo.master_candidate,
"group": ninfo.group,
"master_capable": ninfo.master_capable,
"vm_capable": ninfo.vm_capable,
"ndparams": cfg.GetNdParams(ninfo),
})
for ninfo in node_cfg.values())
return node_results
@staticmethod
def _GetAttributeFromHypervisorNodeData(hv_info, node_name, attr):
"""Extract an attribute from the hypervisor's node information.
This is a helper function to extract data from the hypervisor's information
about the node, as part of the result of a node_info query.
@type hv_info: dict of strings
@param hv_info: dictionary of node information from the hypervisor
@type node_name: string
@param node_name: name of the node
@type attr: string
@param attr: key of the attribute in the hv_info dictionary
@rtype: integer
@return: the value of the attribute
@raises errors.OpExecError: if key not in dictionary or value not
integer
"""
if attr not in hv_info:
raise errors.OpExecError("Node '%s' didn't return attribute"
" '%s'" % (node_name, attr))
value = hv_info[attr]
if not isinstance(value, int):
raise errors.OpExecError("Node '%s' returned invalid value"
" for '%s': %s" %
(node_name, attr, value))
return value
@staticmethod
def _ComputeStorageDataFromSpaceInfoByTemplate(
space_info, node_name, disk_template):
"""Extract storage data from node info.
@type space_info: see result of the RPC call node info
@param space_info: the storage reporting part of the result of the RPC call
node info
@type node_name: string
@param node_name: the node's name
@type disk_template: string
@param disk_template: the disk template to report space for
@rtype: 4-tuple of integers
@return: tuple of storage info (total_disk, free_disk, total_spindles,
free_spindles)
"""
storage_type = constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[disk_template]
if storage_type not in constants.STS_REPORT:
total_disk = total_spindles = 0
free_disk = free_spindles = 0
else:
template_space_info = utils.storage.LookupSpaceInfoByDiskTemplate(
space_info, disk_template)
if not template_space_info:
raise errors.OpExecError("Node '%s' didn't return space info for disk"
"template '%s'" % (node_name, disk_template))
total_disk = template_space_info["storage_size"]
free_disk = template_space_info["storage_free"]
total_spindles = 0
free_spindles = 0
if disk_template in constants.DTS_LVM:
lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType(
space_info, constants.ST_LVM_PV)
if lvm_pv_info:
total_spindles = lvm_pv_info["storage_size"]
free_spindles = lvm_pv_info["storage_free"]
return (total_disk, free_disk, total_spindles, free_spindles)
@staticmethod
def _ComputeStorageDataFromSpaceInfo(space_info, node_name, has_lvm):
"""Extract storage data from node info.
@type space_info: see result of the RPC call node info
@param space_info: the storage reporting part of the result of the RPC call
node info
@type node_name: string
@param node_name: the node's name
@type has_lvm: boolean
@param has_lvm: whether or not LVM storage information is requested
@rtype: 4-tuple of integers
@return: tuple of storage info (total_disk, free_disk, total_spindles,
free_spindles)
"""
# TODO: replace this with proper storage reporting
if has_lvm:
lvm_vg_info = utils.storage.LookupSpaceInfoByStorageType(
space_info, constants.ST_LVM_VG)
if not lvm_vg_info:
raise errors.OpExecError("Node '%s' didn't return LVM vg space info."
% (node_name))
total_disk = lvm_vg_info["storage_size"]
free_disk = lvm_vg_info["storage_free"]
lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType(
space_info, constants.ST_LVM_PV)
if not lvm_pv_info:
raise errors.OpExecError("Node '%s' didn't return LVM pv space info."
% (node_name))
total_spindles = lvm_pv_info["storage_size"]
free_spindles = lvm_pv_info["storage_free"]
else:
# we didn't even ask the node for VG status, so use zeros
total_disk = free_disk = 0
total_spindles = free_spindles = 0
return (total_disk, free_disk, total_spindles, free_spindles)
@staticmethod
def _ComputeInstanceMemory(instance_list, node_instances_info, node_uuid,
input_mem_free):
"""Compute memory used by primary instances.
@rtype: tuple (int, int, int)
@returns: A tuple of three integers: 1. the sum of memory used by primary
instances on the node (including the ones that are currently down), 2.
the sum of memory used by primary instances of the node that are up, 3.
the amount of memory that is free on the node considering the current
usage of the instances.
"""
i_p_mem = i_p_up_mem = 0
mem_free = input_mem_free
for iinfo, beinfo in instance_list:
if iinfo.primary_node == node_uuid:
i_p_mem += beinfo[constants.BE_MAXMEM]
if iinfo.name not in node_instances_info[node_uuid].payload:
i_used_mem = 0
else:
i_used_mem = int(node_instances_info[node_uuid]
.payload[iinfo.name]["memory"])
i_mem_diff = beinfo[constants.BE_MAXMEM] - i_used_mem
mem_free -= max(0, i_mem_diff)
if iinfo.admin_state == constants.ADMINST_UP:
i_p_up_mem += beinfo[constants.BE_MAXMEM]
return (i_p_mem, i_p_up_mem, mem_free)
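  # Worked example with illustrative numbers only: a node reporting
  # memory_free=4096 that hosts one primary instance with BE_MAXMEM=1024
  # currently using 512 ends up with i_p_mem=1024 and
  # mem_free = 4096 - max(0, 1024 - 512) = 3584; if that instance is
  # administratively up, i_p_up_mem is 1024 as well.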
def _ComputeDynamicNodeData(self, node_cfg, node_data, node_iinfo, i_list,
node_results, disk_template):
"""Compute global node data.
@param node_results: the basic node structures as filled from the config
"""
#TODO(dynmem): compute the right data on MAX and MIN memory
# make a copy of the current dict
node_results = dict(node_results)
for nuuid, nresult in node_data.items():
ninfo = node_cfg[nuuid]
assert ninfo.name in node_results, "Missing basic data for node %s" % \
ninfo.name
if not ninfo.offline:
nresult.Raise("Can't get data for node %s" % ninfo.name)
node_iinfo[nuuid].Raise("Can't get node instance info from node %s" %
ninfo.name)
(_, space_info, (hv_info, )) = nresult.payload
mem_free = self._GetAttributeFromHypervisorNodeData(hv_info, ninfo.name,
"memory_free")
(i_p_mem, i_p_up_mem, mem_free) = self._ComputeInstanceMemory(
i_list, node_iinfo, nuuid, mem_free)
(total_disk, free_disk, total_spindles, free_spindles) = \
self._ComputeStorageDataFromSpaceInfoByTemplate(
space_info, ninfo.name, disk_template)
# compute memory used by instances
pnr_dyn = {
"total_memory": self._GetAttributeFromHypervisorNodeData(
hv_info, ninfo.name, "memory_total"),
"reserved_memory": self._GetAttributeFromHypervisorNodeData(
hv_info, ninfo.name, "memory_dom0"),
"free_memory": mem_free,
"total_disk": total_disk,
"free_disk": free_disk,
"total_spindles": total_spindles,
"free_spindles": free_spindles,
"total_cpus": self._GetAttributeFromHypervisorNodeData(
hv_info, ninfo.name, "cpu_total"),
"reserved_cpus": self._GetAttributeFromHypervisorNodeData(
hv_info, ninfo.name, "cpu_dom0"),
"i_pri_memory": i_p_mem,
"i_pri_up_memory": i_p_up_mem,
}
pnr_dyn.update(node_results[ninfo.name])
node_results[ninfo.name] = pnr_dyn
return node_results
@staticmethod
def _ComputeInstanceData(cfg, cluster_info, i_list):
"""Compute global instance data.
"""
instance_data = {}
for iinfo, beinfo in i_list:
nic_data = []
for nic in iinfo.nics:
filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
nic_dict = {
"mac": nic.mac,
"ip": nic.ip,
"mode": filled_params[constants.NIC_MODE],
"link": filled_params[constants.NIC_LINK],
}
if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
nic_dict["bridge"] = filled_params[constants.NIC_LINK]
nic_data.append(nic_dict)
pir = {
"tags": list(iinfo.GetTags()),
"admin_state": iinfo.admin_state,
"vcpus": beinfo[constants.BE_VCPUS],
"memory": beinfo[constants.BE_MAXMEM],
"spindle_use": beinfo[constants.BE_SPINDLE_USE],
"os": iinfo.os,
"nodes": [cfg.GetNodeName(iinfo.primary_node)] +
cfg.GetNodeNames(iinfo.secondary_nodes),
"nics": nic_data,
"disks": [{constants.IDISK_SIZE: dsk.size,
constants.IDISK_MODE: dsk.mode,
constants.IDISK_SPINDLES: dsk.spindles}
for dsk in iinfo.disks],
"disk_template": iinfo.disk_template,
"disks_active": iinfo.disks_active,
"hypervisor": iinfo.hypervisor,
}
pir["disk_space_total"] = gmi.ComputeDiskSize(iinfo.disk_template,
pir["disks"])
instance_data[iinfo.name] = pir
return instance_data
def _BuildInputData(self, req):
"""Build input data structures.
"""
request = req.GetRequest(self.cfg)
disk_template = None
if "disk_template" in request:
disk_template = request["disk_template"]
self._ComputeClusterData(disk_template=disk_template)
request["type"] = req.MODE
self.in_data["request"] = request
self.in_text = serializer.Dump(self.in_data)
def Run(self, name, validate=True, call_fn=None):
"""Run an instance allocator and return the results.
"""
if call_fn is None:
call_fn = self.rpc.call_iallocator_runner
result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
result.Raise("Failure while running the iallocator script")
self.out_text = result.payload
if validate:
self._ValidateResult()
def _ValidateResult(self):
"""Process the allocator results.
    This will process and, if successful, save the result in
    self.out_data and the other parameters.
"""
try:
rdict = serializer.Load(self.out_text)
    except Exception as err:
raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
if not isinstance(rdict, dict):
raise errors.OpExecError("Can't parse iallocator results: not a dict")
    # TODO: remove backwards compatibility in later versions
if "nodes" in rdict and "result" not in rdict:
rdict["result"] = rdict["nodes"]
del rdict["nodes"]
for key in "success", "info", "result":
if key not in rdict:
raise errors.OpExecError("Can't parse iallocator results:"
" missing key '%s'" % key)
setattr(self, key, rdict[key])
self.req.ValidateResult(self, self.result)
self.out_data = rdict
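# Illustrative sketch only (not part of Ganeti): the shape of an iallocator
# reply and the legacy "nodes" -> "result" rename done by _ValidateResult
# above; json is used here as a stand-in for the Ganeti serializer module.
def _ExampleParseIallocatorReply():
  import json
  out_text = '{"success": true, "info": "ok", "nodes": ["node1.example.com"]}'
  rdict = json.loads(out_text)
  if "nodes" in rdict and "result" not in rdict:
    rdict["result"] = rdict.pop("nodes")
  # rdict now carries the three mandatory keys: success, info and result
  return rdict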
|
|
"""Implementation of the musiccast media player."""
from __future__ import annotations
import logging
from aiomusiccast import MusicCastGroupException, MusicCastMediaContent
from aiomusiccast.features import ZoneFeature
import voluptuous as vol
from homeassistant.components.media_player import (
PLATFORM_SCHEMA,
BrowseMedia,
MediaPlayerEntity,
)
from homeassistant.components.media_player.const import (
MEDIA_CLASS_DIRECTORY,
MEDIA_CLASS_TRACK,
MEDIA_TYPE_MUSIC,
REPEAT_MODE_OFF,
SUPPORT_BROWSE_MEDIA,
SUPPORT_GROUPING,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_REPEAT_SET,
SUPPORT_SELECT_SOUND_MODE,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_PORT,
STATE_IDLE,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import DiscoveryInfoType
from homeassistant.util import uuid
from . import MusicCastDataUpdateCoordinator, MusicCastDeviceEntity
from .const import (
ATTR_MAIN_SYNC,
ATTR_MC_LINK,
DEFAULT_ZONE,
DOMAIN,
HA_REPEAT_MODE_TO_MC_MAPPING,
INTERVAL_SECONDS,
MC_REPEAT_MODE_TO_HA_MAPPING,
MEDIA_CLASS_MAPPING,
NULL_GROUP,
)
_LOGGER = logging.getLogger(__name__)
MUSIC_PLAYER_BASE_SUPPORT = (
SUPPORT_SHUFFLE_SET
| SUPPORT_REPEAT_SET
| SUPPORT_SELECT_SOUND_MODE
| SUPPORT_SELECT_SOURCE
| SUPPORT_GROUPING
| SUPPORT_PLAY_MEDIA
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=5000): cv.port,
vol.Optional(INTERVAL_SECONDS, default=0): cv.positive_int,
}
)
async def async_setup_platform(
hass: HomeAssistant,
config,
async_add_devices: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Import legacy configurations."""
if hass.config_entries.async_entries(DOMAIN) and config[CONF_HOST] not in [
entry.data[CONF_HOST] for entry in hass.config_entries.async_entries(DOMAIN)
]:
_LOGGER.error(
"Configuration in configuration.yaml is not supported anymore. "
"Please add this device using the config flow: %s",
config[CONF_HOST],
)
else:
_LOGGER.warning(
"Configuration in configuration.yaml is deprecated. Use the config flow instead"
)
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=config
)
)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up MusicCast sensor based on a config entry."""
coordinator: MusicCastDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
name = coordinator.data.network_name
media_players: list[Entity] = []
for zone in coordinator.data.zones:
zone_name = name if zone == DEFAULT_ZONE else f"{name} {zone}"
media_players.append(
MusicCastMediaPlayer(zone, zone_name, entry.entry_id, coordinator)
)
async_add_entities(media_players)
class MusicCastMediaPlayer(MusicCastDeviceEntity, MediaPlayerEntity):
"""The musiccast media player."""
def __init__(self, zone_id, name, entry_id, coordinator):
"""Initialize the musiccast device."""
self._player_state = STATE_PLAYING
self._volume_muted = False
self._shuffle = False
self._zone_id = zone_id
super().__init__(
name=name,
icon="mdi:speaker",
coordinator=coordinator,
)
self._volume_min = self.coordinator.data.zones[self._zone_id].min_volume
self._volume_max = self.coordinator.data.zones[self._zone_id].max_volume
self._cur_track = 0
self._repeat = REPEAT_MODE_OFF
async def async_added_to_hass(self):
"""Run when this Entity has been added to HA."""
await super().async_added_to_hass()
self.coordinator.entities.append(self)
# Sensors should also register callbacks to HA when their state changes
self.coordinator.musiccast.register_callback(self.async_write_ha_state)
self.coordinator.musiccast.register_group_update_callback(
self.update_all_mc_entities
)
self.coordinator.async_add_listener(self.async_schedule_check_client_list)
async def async_will_remove_from_hass(self):
"""Entity being removed from hass."""
await super().async_will_remove_from_hass()
self.coordinator.entities.remove(self)
# The opposite of async_added_to_hass. Remove any registered call backs here.
self.coordinator.musiccast.remove_callback(self.async_write_ha_state)
self.coordinator.musiccast.remove_group_update_callback(
self.update_all_mc_entities
)
self.coordinator.async_remove_listener(self.async_schedule_check_client_list)
@property
def should_poll(self):
"""Push an update after each command."""
return False
@property
def ip_address(self):
"""Return the ip address of the musiccast device."""
return self.coordinator.musiccast.ip
@property
def zone_id(self):
"""Return the zone id of the musiccast device."""
return self._zone_id
@property
def _is_netusb(self):
return (
self.coordinator.data.netusb_input
== self.coordinator.data.zones[self._zone_id].input
)
@property
def _is_tuner(self):
return self.coordinator.data.zones[self._zone_id].input == "tuner"
@property
def media_content_id(self):
"""Return the content ID of current playing media."""
return None
@property
def media_content_type(self):
"""Return the content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def state(self):
"""Return the state of the player."""
if self.coordinator.data.zones[self._zone_id].power == "on":
if self._is_netusb and self.coordinator.data.netusb_playback == "pause":
return STATE_PAUSED
if self._is_netusb and self.coordinator.data.netusb_playback == "stop":
return STATE_IDLE
return STATE_PLAYING
return STATE_OFF
@property
def volume_level(self):
"""Return the volume level of the media player (0..1)."""
if ZoneFeature.VOLUME in self.coordinator.data.zones[self._zone_id].features:
volume = self.coordinator.data.zones[self._zone_id].current_volume
return (volume - self._volume_min) / (self._volume_max - self._volume_min)
return None
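        # Illustrative numbers only: with min_volume=-80, max_volume=0 and a
        # current_volume of -40 the formula above yields
        # (-40 - -80) / (0 - -80) = 0.5, i.e. the device range is mapped
        # linearly onto 0..1.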
@property
def is_volume_muted(self):
"""Return boolean if volume is currently muted."""
if ZoneFeature.VOLUME in self.coordinator.data.zones[self._zone_id].features:
return self.coordinator.data.zones[self._zone_id].mute
return None
@property
def shuffle(self):
"""Boolean if shuffling is enabled."""
return (
self.coordinator.data.netusb_shuffle == "on" if self._is_netusb else False
)
@property
def sound_mode(self):
"""Return the current sound mode."""
return self.coordinator.data.zones[self._zone_id].sound_program
@property
def sound_mode_list(self):
"""Return a list of available sound modes."""
return self.coordinator.data.zones[self._zone_id].sound_program_list
@property
def zone(self):
"""Return the zone of the media player."""
return self._zone_id
@property
def unique_id(self) -> str:
"""Return the unique ID for this media_player."""
return f"{self.coordinator.data.device_id}_{self._zone_id}"
async def async_turn_on(self):
"""Turn the media player on."""
await self.coordinator.musiccast.turn_on(self._zone_id)
self.async_write_ha_state()
async def async_turn_off(self):
"""Turn the media player off."""
await self.coordinator.musiccast.turn_off(self._zone_id)
self.async_write_ha_state()
async def async_mute_volume(self, mute):
"""Mute the volume."""
await self.coordinator.musiccast.mute_volume(self._zone_id, mute)
self.async_write_ha_state()
async def async_set_volume_level(self, volume):
"""Set the volume level, range 0..1."""
await self.coordinator.musiccast.set_volume_level(self._zone_id, volume)
self.async_write_ha_state()
async def async_media_play(self):
"""Send play command."""
if self._is_netusb:
await self.coordinator.musiccast.netusb_play()
else:
raise HomeAssistantError(
"Service play is not supported for non NetUSB sources."
)
async def async_media_pause(self):
"""Send pause command."""
if self._is_netusb:
await self.coordinator.musiccast.netusb_pause()
else:
raise HomeAssistantError(
"Service pause is not supported for non NetUSB sources."
)
async def async_media_stop(self):
"""Send stop command."""
if self._is_netusb:
await self.coordinator.musiccast.netusb_pause()
else:
raise HomeAssistantError(
"Service stop is not supported for non NetUSB sources."
)
async def async_set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
if self._is_netusb:
await self.coordinator.musiccast.netusb_shuffle(shuffle)
else:
raise HomeAssistantError(
"Service shuffle is not supported for non NetUSB sources."
)
async def async_play_media(self, media_type: str, media_id: str, **kwargs) -> None:
"""Play media."""
if self.state == STATE_OFF:
await self.async_turn_on()
if media_id:
parts = media_id.split(":")
if parts[0] == "list":
index = parts[3]
if index == "-1":
index = "0"
await self.coordinator.musiccast.play_list_media(index, self._zone_id)
return
if parts[0] == "presets":
index = parts[1]
await self.coordinator.musiccast.recall_netusb_preset(
self._zone_id, index
)
return
if parts[0] == "http":
await self.coordinator.musiccast.play_url_media(
self._zone_id, media_id, "HomeAssistant"
)
return
raise HomeAssistantError(
"Only presets, media from media browser and http URLs are supported"
)
async def async_browse_media(self, media_content_type=None, media_content_id=None):
"""Implement the websocket media browsing helper."""
if self.state == STATE_OFF:
raise HomeAssistantError(
"The device has to be turned on to be able to browse media."
)
if media_content_id:
media_content_path = media_content_id.split(":")
media_content_provider = await MusicCastMediaContent.browse_media(
self.coordinator.musiccast, self._zone_id, media_content_path, 24
)
else:
media_content_provider = MusicCastMediaContent.categories(
self.coordinator.musiccast, self._zone_id
)
def get_content_type(item):
if item.can_play:
return MEDIA_CLASS_TRACK
return MEDIA_CLASS_DIRECTORY
children = [
BrowseMedia(
title=child.title,
media_class=MEDIA_CLASS_MAPPING.get(child.content_type),
media_content_id=child.content_id,
media_content_type=get_content_type(child),
can_play=child.can_play,
can_expand=child.can_browse,
thumbnail=child.thumbnail,
)
for child in media_content_provider.children
]
overview = BrowseMedia(
title=media_content_provider.title,
media_class=MEDIA_CLASS_MAPPING.get(media_content_provider.content_type),
media_content_id=media_content_provider.content_id,
media_content_type=get_content_type(media_content_provider),
can_play=False,
can_expand=media_content_provider.can_browse,
children=children,
)
return overview
async def async_select_sound_mode(self, sound_mode):
"""Select sound mode."""
await self.coordinator.musiccast.select_sound_mode(self._zone_id, sound_mode)
@property
def media_image_url(self):
"""Return the image url of current playing media."""
if self.is_client and self.group_server != self:
return self.group_server.coordinator.musiccast.media_image_url
return self.coordinator.musiccast.media_image_url if self._is_netusb else None
@property
def media_title(self):
"""Return the title of current playing media."""
if self._is_netusb:
return self.coordinator.data.netusb_track
if self._is_tuner:
return self.coordinator.musiccast.tuner_media_title
return None
@property
def media_artist(self):
"""Return the artist of current playing media (Music track only)."""
if self._is_netusb:
return self.coordinator.data.netusb_artist
if self._is_tuner:
return self.coordinator.musiccast.tuner_media_artist
return None
@property
def media_album_name(self):
"""Return the album of current playing media (Music track only)."""
return self.coordinator.data.netusb_album if self._is_netusb else None
@property
def repeat(self):
"""Return current repeat mode."""
return (
MC_REPEAT_MODE_TO_HA_MAPPING.get(self.coordinator.data.netusb_repeat)
if self._is_netusb
else REPEAT_MODE_OFF
)
@property
def supported_features(self):
"""Flag media player features that are supported."""
supported_features = MUSIC_PLAYER_BASE_SUPPORT
zone = self.coordinator.data.zones[self._zone_id]
if ZoneFeature.POWER in zone.features:
supported_features |= SUPPORT_TURN_ON | SUPPORT_TURN_OFF
if ZoneFeature.VOLUME in zone.features:
supported_features |= SUPPORT_VOLUME_SET
if ZoneFeature.MUTE in zone.features:
supported_features |= SUPPORT_VOLUME_MUTE
if self._is_netusb or self._is_tuner:
supported_features |= SUPPORT_PREVIOUS_TRACK
supported_features |= SUPPORT_NEXT_TRACK
if self._is_netusb:
supported_features |= SUPPORT_PAUSE
supported_features |= SUPPORT_PLAY
supported_features |= SUPPORT_STOP
if self.state != STATE_OFF:
supported_features |= SUPPORT_BROWSE_MEDIA
return supported_features
async def async_media_previous_track(self):
"""Send previous track command."""
if self._is_netusb:
await self.coordinator.musiccast.netusb_previous_track()
elif self._is_tuner:
await self.coordinator.musiccast.tuner_previous_station()
else:
raise HomeAssistantError(
"Service previous track is not supported for non NetUSB or Tuner sources."
)
async def async_media_next_track(self):
"""Send next track command."""
if self._is_netusb:
await self.coordinator.musiccast.netusb_next_track()
elif self._is_tuner:
await self.coordinator.musiccast.tuner_next_station()
else:
raise HomeAssistantError(
"Service next track is not supported for non NetUSB or Tuner sources."
)
async def async_set_repeat(self, repeat):
"""Enable/disable repeat mode."""
if self._is_netusb:
await self.coordinator.musiccast.netusb_repeat(
HA_REPEAT_MODE_TO_MC_MAPPING.get(repeat, "off")
)
else:
raise HomeAssistantError(
"Service set repeat is not supported for non NetUSB sources."
)
async def async_select_source(self, source):
"""Select input source."""
await self.coordinator.musiccast.select_source(self._zone_id, source)
@property
def source(self):
"""Name of the current input source."""
return self.coordinator.data.zones[self._zone_id].input
@property
def source_list(self):
"""List of available input sources."""
return self.coordinator.data.zones[self._zone_id].input_list
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if self._is_netusb:
return self.coordinator.data.netusb_total_time
return None
@property
def media_position(self):
"""Position of current playing media in seconds."""
if self._is_netusb:
return self.coordinator.data.netusb_play_time
return None
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid.
Returns value from homeassistant.util.dt.utcnow().
"""
if self._is_netusb:
return self.coordinator.data.netusb_play_time_updated
return None
# Group and MusicCast System specific functions/properties
@property
def is_network_server(self) -> bool:
"""Return only true if the current entity is a network server and not a main zone with an attached zone2."""
return (
self.coordinator.data.group_role == "server"
and self.coordinator.data.group_id != NULL_GROUP
and self._zone_id == self.coordinator.data.group_server_zone
)
@property
def other_zones(self) -> list[MusicCastMediaPlayer]:
"""Return media player entities of the other zones of this device."""
return [
entity
for entity in self.coordinator.entities
if entity != self and isinstance(entity, MusicCastMediaPlayer)
]
@property
def is_server(self) -> bool:
"""Return whether the media player is the server/host of the group.
If the media player is not part of a group, False is returned.
"""
return self.is_network_server or (
self._zone_id == DEFAULT_ZONE
and len(
[
entity
for entity in self.other_zones
if entity.source == ATTR_MAIN_SYNC
]
)
> 0
)
@property
def is_network_client(self) -> bool:
"""Return True if the current entity is a network client and not just a main syncing entity."""
return (
self.coordinator.data.group_role == "client"
and self.coordinator.data.group_id != NULL_GROUP
and self.source == ATTR_MC_LINK
)
@property
def is_client(self) -> bool:
"""Return whether the media player is the client of a group.
If the media player is not part of a group, False is returned.
"""
return self.is_network_client or self.source == ATTR_MAIN_SYNC
def get_all_mc_entities(self) -> list[MusicCastMediaPlayer]:
"""Return all media player entities of the musiccast system."""
entities = []
for coordinator in self.hass.data[DOMAIN].values():
entities += [
entity
for entity in coordinator.entities
if isinstance(entity, MusicCastMediaPlayer)
]
return entities
def get_all_server_entities(self) -> list[MusicCastMediaPlayer]:
"""Return all media player entities in the musiccast system, which are in server mode."""
entities = self.get_all_mc_entities()
return [entity for entity in entities if entity.is_server]
def get_distribution_num(self) -> int:
"""Return the distribution_num (number of clients in the whole musiccast system)."""
return sum(
len(server.coordinator.data.group_client_list)
for server in self.get_all_server_entities()
)
def is_part_of_group(self, group_server) -> bool:
"""Return True if the given server is the server of self's group."""
return group_server != self and (
(
self.ip_address in group_server.coordinator.data.group_client_list
and self.coordinator.data.group_id
== group_server.coordinator.data.group_id
and self.ip_address != group_server.ip_address
and self.source == ATTR_MC_LINK
)
or (
self.ip_address == group_server.ip_address
and self.source == ATTR_MAIN_SYNC
)
)
@property
def group_server(self):
"""Return the server of the own group if present, self else."""
for entity in self.get_all_server_entities():
if self.is_part_of_group(entity):
return entity
return self
@property
def group_members(self) -> list[str] | None:
"""Return a list of entity_ids, which belong to the group of self."""
return [entity.entity_id for entity in self.musiccast_group]
@property
def musiccast_group(self) -> list[MusicCastMediaPlayer]:
"""Return all media players of the current group, if the media player is server."""
if self.is_client:
# If we are a client we can still share group information, but we will take them from the server.
server = self.group_server
if server != self:
return server.musiccast_group
return [self]
if not self.is_server:
return [self]
entities = self.get_all_mc_entities()
clients = [entity for entity in entities if entity.is_part_of_group(self)]
return [self] + clients
@property
def musiccast_zone_entity(self) -> MusicCastMediaPlayer:
"""Return the the entity of the zone, which is using MusicCast at the moment, if there is one, self else.
It is possible that multiple zones use MusicCast as client at the same time. In this case the first one is
returned.
"""
for entity in self.other_zones:
if entity.is_network_server or entity.is_network_client:
return entity
return self
async def update_all_mc_entities(self, check_clients=False):
"""Update the whole musiccast system when group data change."""
# First update all servers as they provide the group information for their clients
for entity in self.get_all_server_entities():
if check_clients or self.coordinator.musiccast.group_reduce_by_source:
await entity.async_check_client_list()
else:
entity.async_write_ha_state()
# Then update all other entities
for entity in self.get_all_mc_entities():
if not entity.is_server:
entity.async_write_ha_state()
# Services
async def async_join_players(self, group_members):
"""Add all clients given in entities to the group of the server.
Creates a new group if necessary. Used for join service.
"""
_LOGGER.debug(
"%s wants to add the following entities %s",
self.entity_id,
str(group_members),
)
entities = [
entity
for entity in self.get_all_mc_entities()
if entity.entity_id in group_members
]
if self.state == STATE_OFF:
await self.async_turn_on()
if not self.is_server and self.musiccast_zone_entity.is_server:
# The MusicCast Distribution Module of this device is already in use. To use it as a server, we first
# have to unjoin and wait until the servers are updated.
await self.musiccast_zone_entity.async_server_close_group()
elif self.musiccast_zone_entity.is_client:
await self.async_client_leave_group(True)
        # Use the existing group id if we are the server, otherwise generate a new one.
group = (
self.coordinator.data.group_id
if self.is_server
else uuid.random_uuid_hex().upper()
)
ip_addresses = set()
# First let the clients join
for client in entities:
if client != self:
try:
network_join = await client.async_client_join(group, self)
except MusicCastGroupException:
_LOGGER.warning(
"%s is struggling to update its group data. Will retry perform the update",
client.entity_id,
)
network_join = await client.async_client_join(group, self)
if network_join:
ip_addresses.add(client.ip_address)
if ip_addresses:
await self.coordinator.musiccast.mc_server_group_extend(
self._zone_id,
list(ip_addresses),
group,
self.get_distribution_num(),
)
_LOGGER.debug(
"%s added the following entities %s", self.entity_id, str(entities)
)
_LOGGER.debug(
"%s has now the following musiccast group %s",
self.entity_id,
str(self.musiccast_group),
)
await self.update_all_mc_entities(True)
async def async_unjoin_player(self):
"""Leave the group.
Stops the distribution if device is server. Used for unjoin service.
"""
_LOGGER.debug("%s called service unjoin", self.entity_id)
if self.is_server:
await self.async_server_close_group()
else:
await self.async_client_leave_group()
await self.update_all_mc_entities(True)
# Internal client functions
async def async_client_join(self, group_id, server) -> bool:
"""Let the client join a group.
If this client is a server, the server will stop distributing. If the client is part of a different group,
        it will leave that group first. Returns True if the server has to add the client on its side.
"""
# If we should join the group, which is served by the main zone, we can simply select main_sync as input.
_LOGGER.debug("%s called service client join", self.entity_id)
if self.state == STATE_OFF:
await self.async_turn_on()
if self.ip_address == server.ip_address:
if server.zone == DEFAULT_ZONE:
await self.async_select_source(ATTR_MAIN_SYNC)
server.async_write_ha_state()
return False
# It is not possible to join a group hosted by zone2 from main zone.
raise HomeAssistantError(
"Can not join a zone other than main of the same device."
)
if self.musiccast_zone_entity.is_server:
# If one of the zones of the device is a server, we need to unjoin first.
_LOGGER.debug(
"%s is a server of a group and has to stop distribution "
"to use MusicCast for %s",
self.musiccast_zone_entity.entity_id,
self.entity_id,
)
await self.musiccast_zone_entity.async_server_close_group()
elif self.is_client:
if self.is_part_of_group(server):
_LOGGER.warning("%s is already part of the group", self.entity_id)
return False
_LOGGER.debug(
"%s is client in a different group, will unjoin first",
self.entity_id,
)
await self.async_client_leave_group()
elif (
self.ip_address in server.coordinator.data.group_client_list
and self.coordinator.data.group_id == server.coordinator.data.group_id
and self.coordinator.data.group_role == "client"
):
# The device is already part of this group (e.g. main zone is also a client of this group).
# Just select mc_link as source
await self.coordinator.musiccast.zone_join(self._zone_id)
return False
_LOGGER.debug("%s will now join as a client", self.entity_id)
await self.coordinator.musiccast.mc_client_join(
server.ip_address, group_id, self._zone_id
)
return True
async def async_client_leave_group(self, force=False):
"""Make self leave the group.
Should only be called for clients.
"""
_LOGGER.debug("%s client leave called", self.entity_id)
if not force and (
self.source == ATTR_MAIN_SYNC
or [entity for entity in self.other_zones if entity.source == ATTR_MC_LINK]
):
await self.coordinator.musiccast.zone_unjoin(self._zone_id)
else:
servers = [
server
for server in self.get_all_server_entities()
if server.coordinator.data.group_id == self.coordinator.data.group_id
]
await self.coordinator.musiccast.mc_client_unjoin()
if servers:
await servers[0].coordinator.musiccast.mc_server_group_reduce(
servers[0].zone_id, [self.ip_address], self.get_distribution_num()
)
# Internal server functions
async def async_server_close_group(self):
"""Close group of self.
Should only be called for servers.
"""
_LOGGER.debug("%s closes his group", self.entity_id)
for client in self.musiccast_group:
if client != self:
await client.async_client_leave_group()
await self.coordinator.musiccast.mc_server_group_close()
async def async_check_client_list(self):
"""Let the server check if all its clients are still part of his group."""
if not self.is_server or self.coordinator.data.group_update_lock.locked():
return
_LOGGER.debug("%s updates his group members", self.entity_id)
client_ips_for_removal = []
for expected_client_ip in self.coordinator.data.group_client_list:
if expected_client_ip not in [
entity.ip_address for entity in self.musiccast_group
]:
# The client is no longer part of the group. Prepare removal.
client_ips_for_removal.append(expected_client_ip)
if client_ips_for_removal:
_LOGGER.debug(
"%s says good bye to the following members %s",
self.entity_id,
str(client_ips_for_removal),
)
await self.coordinator.musiccast.mc_server_group_reduce(
self._zone_id, client_ips_for_removal, self.get_distribution_num()
)
if len(self.musiccast_group) < 2:
# The group is empty, stop distribution.
await self.async_server_close_group()
self.async_write_ha_state()
@callback
def async_schedule_check_client_list(self):
"""Schedule async_check_client_list."""
self.hass.create_task(self.async_check_client_list())
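# Illustrative sketch only (not used by the platform): how the
# supported_features bitmask above is composed. The SUPPORT_* constants are
# integer flags, so capabilities are added with bitwise OR and tested with
# bitwise AND.
def _example_feature_mask() -> bool:
    features = MUSIC_PLAYER_BASE_SUPPORT | SUPPORT_TURN_ON | SUPPORT_VOLUME_SET
    # True: volume control was OR-ed in, pause was not.
    return bool(features & SUPPORT_VOLUME_SET) and not features & SUPPORT_PAUSE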
|
|
"""
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
This module contains the top-down erro debaising.
The top_down_error module can give user result, according to which
column group of column top-down error exist for some perticular
slicing. It will check top-down error for all related groups and
suggest the user to do operation on those groups.
"""
import pandas
from util import aspects
from util.enums import SummaryOperators, Filters, Oversights
from util import constants
def top_down_error(table, metric, all_dimensions, slice_compare_column,
slice1, slice2, summary_operator, **kwargs):
"""This function will implement the top down error debaising
Args:
table: Type-pandas.dataframe
It has the contents of the csv file
metric: Type-string
It is the name of the column according to which we will do
grouping, summary operator is applied on metric. Metric
could a column containing strings, if we are applying count
operator on it.
dimensions: Type-list of str or None
It is the name of column we want.
In query:'compare batsman A and B according to total_runs',
dimension is 'batsman'. we group by dimensions.
all_dimension: Type-list of str
It contains list of all dimensions
slice_compare_column: Type-string
name of the slice-compare column.
slice1: Type-string
the first value of comparision
slice2: Type-string
the second value of comparision
date_range: Type-tuple
Tuple of start_date and end_date
date_column_name: Type-str
It is the name of column which contains date
day_first: Type-str
It is required by datetime.strp_time to parse the date in
the format Format Codes
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
slices: Type-List of tuples
Tuple represents the conditon to keep the row.
(column_name, filter, value)
column_name - is the value of the column that the
condition is applied upon.
filter - Filters enum members, ex. Filters.IN
summary_operator: Type-summary_operators enum members
It denotes the summary operator, after grouping by dimensions.
ex. SummaryOperators.MAX, SummaryOperators.SUM
Note-summary_operator is always applied on metric column passed,
and only when grouping is done
Returns:
return a list of dictionary where each dictionary represent
a debiasing suggestion according to the new column.
"""
date_column_name = kwargs.get('date_column_name', 'date')
date_range = kwargs.get('date_range', None)
day_first = kwargs.get('day_first', '%Y-%m-%d')
slices = kwargs.get('slices', None)
dimensions = kwargs.get('dimensions', None)
table = aspects.apply_date_range(table, date_range,
date_column_name, day_first)
slice_list = []
if slices is not None:
slice_list = slices.copy()
slice_list.append((slice_compare_column, Filters.IN, [slice1, slice2]))
table = aspects.slice_table(table, slice_list)
    # removing all metric columns except the one on which we apply
    # the summary operator
required_columns = all_dimensions.copy()
required_columns.append(metric)
table = aspects.crop_other_columns(table, required_columns)
    # operational_dimensions contains the list of all dimensions except
    # slice_compare_column
operational_dimensions = all_dimensions.copy()
operational_dimensions.remove(slice_compare_column)
required_columns = []
if dimensions is not None:
required_columns = dimensions.copy()
required_columns.append(slice_compare_column)
required_columns.append(metric)
query_table = aspects.crop_other_columns(table, required_columns)
grouping_columns = []
if dimensions is not None:
grouping_columns = dimensions.copy()
grouping_columns.append(slice_compare_column)
# result_table is the result table requested by user.
result_table = aspects.group_by(query_table,
grouping_columns,
summary_operator)['table']
    # suggestions stores the list of debiasing suggestions for this oversight.
suggestions = []
dimension_list = []
if dimensions is not None:
dimension_list = dimensions.copy()
dimensions_len = len(dimension_list)
for column in operational_dimensions:
# we try to find the debiasing for every column which
# is not in the grouping list initially
if column not in dimension_list:
new_grouping_columns = dimension_list.copy()
new_grouping_columns.append(column)
new_grouping_columns.append(slice_compare_column)
new_required_columns = new_grouping_columns.copy()
new_required_columns.append(metric)
new_cropped_table = aspects.crop_other_columns(table,
new_required_columns)
# result table after adding the new column in the grouping list.
new_result_table = aspects.group_by(new_cropped_table,
new_grouping_columns,
summary_operator)['table']
# it will return the debiasing suggestion after comparing the
# initial result table and new result table.
new_suggestion = _check_top_down_error(result_table,
new_result_table,
column, slice1,
dimensions_len)
            if new_suggestion is not None:
suggestions.append(new_suggestion)
return suggestions
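# Illustrative sketch only (made-up data, not called by this module): the
# situation the oversight above looks for. With no extra dimensions the two
# slices 'A' and 'B' total the same, but adding 'year' to the grouping shows
# that they differ, which is exactly what top_down_error is meant to flag.
def _example_regrouping_effect():
    table = pandas.DataFrame({
        'batsman': ['A', 'A', 'B', 'B'],
        'year': [2019, 2020, 2019, 2020],
        'total_runs': [50, 50, 90, 10]})
    coarse = table.groupby('batsman')['total_runs'].sum()  # A: 100, B: 100
    fine = table.groupby(['batsman', 'year'])['total_runs'].sum()
    return coarse, fine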
def _check_top_down_error(result_table, new_result_table, new_added_column,
slice1, dimensions_len):
"""
Args:
result_table: Type-pandas.dataframe
It has the contents of the csv file
new_result_table: Type-pandas.dataframe
It has the contents of the csv file
new_added_column: Type-string
The new column added in the initial grouping column.
slice1: Type-string
the first slice by which we do comparision.
dimensions_len: Type-integer
size of the initial gfrouping dimensions.
Returns:
return a dictionary where each dictionary represent a debiasing
suggestion according to the new column.
"""
num_rows = result_table.shape[0]
new_num_rows = new_result_table.shape[0]
    # changing the dataframes to lists of lists for the next set of operations
table_matrix = result_table.values.tolist()
new_table_matrix = new_result_table.values.tolist()
row_i = 0
new_row_i = 0
suggestion_row_list = []
    # walk the original result table group by group and compare the
    # pairwise dissimilarity before and after adding the new column
while row_i < num_rows:
if row_i == num_rows - 1 or table_matrix[row_i][:dimensions_len] != \
table_matrix[row_i + 1][:dimensions_len]:
while new_row_i < new_num_rows and \
table_matrix[row_i][:dimensions_len] == \
new_table_matrix[new_row_i][:dimensions_len]:
new_row_i = new_row_i + 1
else:
new_max_correlation = 0
while new_row_i < new_num_rows and \
table_matrix[row_i][:dimensions_len] == \
new_table_matrix[new_row_i][:dimensions_len]:
if new_row_i == new_num_rows - 1 or \
new_table_matrix[new_row_i][:dimensions_len + 1] != \
new_table_matrix[new_row_i + 1][:dimensions_len + 1]:
max_disimilarity = constants.TOP_DOWN_ERROR_DISSIMILARITY_THRESHOLD
else:
correlation = _calculate_relation(
new_table_matrix[new_row_i][dimensions_len + 2],
new_table_matrix[new_row_i + 1][dimensions_len + 2])
new_max_correlation = max(correlation, new_max_correlation)
new_row_i = new_row_i + 1
new_row_i = new_row_i + 1
correlation = _calculate_relation(
table_matrix[row_i][dimensions_len + 1],
table_matrix[row_i + 1][dimensions_len + 1])
if new_max_correlation >= \
constants.TOP_DOWN_ERROR_DISSIMILARITY_THRESHOLD and \
correlation <= constants.TOP_DOWN_ERROR_SIMILARITY_THRESHOLD:
suggestion_row_list.append({'row': row_i + 1, 'confidence_score':100})
suggestion_row_list.append({'row': row_i + 2, 'confidence_score':100})
row_i = row_i + 1
row_i = row_i + 1
if len(suggestion_row_list) == 0:
return
else:
new_suggestion = {}
new_suggestion['suggestion'] = 'Some values are similar here but will vary if we add '\
+ new_added_column + ' for grouping '
new_suggestion['oversight'] = Oversights.TOP_DOWN_ERROR
new_suggestion['is_row_level_suggestion'] = True
new_suggestion['row_list'] = suggestion_row_list
return new_suggestion
def _calculate_relation(val1, val2):
"""
This function can find the similarity between two values
Arg:
val1: the first value for which we have to compute the similarity
val2: the second value for which we have to compute the similarity
Returns:
return the similarity between both the arguments calculated by the
formula
similarity = |val1 - val2| / (|val1| + |val2|)
"""
if abs(val1) + abs(val2) == 0:
return 0
result = abs(val1 - val2) / (abs(val1) + abs(val2))
return result
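def _example_calculate_relation():
    # Illustrative check only (not called by this module):
    # |30 - 10| / (|30| + |10|) = 20 / 40 = 0.5, equal values give 0.0 and
    # values of equal magnitude but opposite sign give 1.0.
    assert _calculate_relation(30, 10) == 0.5
    assert _calculate_relation(7, 7) == 0.0
    assert _calculate_relation(5, -5) == 1.0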
|
|
#!/usr/bin/env python3
import os
import sys
import subprocess
import struct
import optparse
import binascii
from io import BytesIO
import array
crctab = array.array('I', [
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d])
class GitWrapper:
@classmethod
def command(cls, txt):
cmd = "git " + txt
        pr = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
        (out, error) = pr.communicate()
        if len(error):
            raise Exception(cmd + " failed with [" +
                            error.decode(errors="replace").strip() + "]")
return out
class AppDescriptor(object):
"""
UAVCAN firmware image descriptor format:
    uint64_t signature (bytes [7:0] set to '{ 0x40, 0xa2, 0xe4, 0xf1, 0x64, 0x68, 0x91, 0x06 }' by the build)
    uint32_t crc32_block1 From offset 0 to offsetof(crc32_block1) (non inclusive) (set by this tool)
    uint32_t crc32_block2 From offsetof(version_major) to end (set by this tool)
uint32_t image_size (set to 0 by linker script)
uint32_t vcs_commit (set in source or by this tool)
uint8_t version_major (set in source)
uint8_t version_minor (set in source)
uint16_t board_id (set in source)
    uint8_t reserved[8] (set to 0xFF by linker script)
"""
LENGTH = 8 + 4 + 4 + 4 + 4 + 1 + 1 + 2 + 8
DESLENGTH = 4 + 4 + 4 + 4
SIGNATURE = b"\x40\xa2\xe4\xf1\x64\x68\x91\x06"
RESERVED = b"\xFF" * 8
def __init__(self, bytes=None):
self.signature = AppDescriptor.SIGNATURE
self.crc32_block1 = 0
self.crc32_block2 = 0
self.image_size = 0
self.vcs_commit = 0
self.version_major = 0
self.version_minor = 0
self.board_id = 0
self.reserved = AppDescriptor.RESERVED
if bytes:
try:
self.unpack(bytes)
except Exception:
raise ValueError("Invalid AppDescriptor: {0}".format(
binascii.b2a_hex(bytes)))
def pack(self):
return struct.pack("<8sLLLLBBH8s", self.signature, self.crc32_block1,
self.crc32_block2, self.image_size, self.vcs_commit,
self.version_major, self.version_minor,
self.board_id,
self.reserved)
def unpack(self, bytes):
(self.signature, self.crc32_block1, self.crc32_block2, self.image_size, self.vcs_commit,
self.version_major, self.version_minor, self.board_id, self.reserved) = \
struct.unpack("<8sLLLLBBH8s", bytes)
if not self.empty and not self.valid:
raise ValueError()
@property
def empty(self):
return (self.signature == AppDescriptor.SIGNATURE and
self.crc32_block1 == 0 and self.crc32_block2 == 0 and
self.image_size == 0 and self.reserved == AppDescriptor.RESERVED)
@property
def valid(self):
return (self.signature == AppDescriptor.SIGNATURE and
self.crc32_block1 != 0 and self.crc32_block2 != 0 and
self.image_size > 0 and self.board_id != 0 and
self.reserved == AppDescriptor.RESERVED)
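# Illustrative sketch only (made-up field values, not called by this tool):
# packing an AppDescriptor produces exactly AppDescriptor.LENGTH bytes and a
# descriptor with non-zero CRCs, image size and board id unpacks as valid.
def _example_descriptor_roundtrip():
    desc = AppDescriptor()
    desc.crc32_block1 = 0x12345678
    desc.crc32_block2 = 0x9ABCDEF0
    desc.image_size = 1024
    desc.vcs_commit = 0xDEADBEEF
    desc.version_major = 1
    desc.version_minor = 2
    desc.board_id = 42
    packed = desc.pack()
    assert len(packed) == AppDescriptor.LENGTH
    return AppDescriptor(packed).valid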
class FirmwareImage(object):
def __init__(self, path_or_file, mode="r"):
if getattr(path_or_file, "read", None):
self._file = path_or_file
self._do_close = False
self._padding = 0
else:
if "b" not in mode:
self._file = open(path_or_file, mode + "b")
else:
self._file = open(path_or_file, mode)
self._do_close = True
self._padding = 4
if "r" in mode:
self._contents = BytesIO(self._file.read())
else:
self._contents = BytesIO()
self._do_write = False
self._length = None
self._descriptor_offset = None
self._descriptor_bytes = None
self._descriptor = None
def __enter__(self):
return self
def __getattr__(self, attr):
if attr == "write":
self._do_write = True
return getattr(self._contents, attr)
def __iter__(self):
return iter(self._contents)
def __exit__(self, *args):
if self._do_write:
if getattr(self._file, "seek", None):
self._file.seek(0)
self._file.write(self._contents.getvalue())
if self._padding:
self._file.write(b'\xff' * self._padding)
if self._do_close:
self._file.close()
def _write_descriptor_raw(self):
# Seek to the appropriate location, write the serialized
# descriptor, and seek back.
prev_offset = self._contents.tell()
self._contents.seek(self._descriptor_offset)
self._contents.write(self._descriptor.pack())
self._contents.seek(prev_offset)
def write_descriptor(self):
# Set the descriptor's length and CRC to the values required for
# CRC computation
self.app_descriptor.image_size = self.length
self.app_descriptor.crc32_block1 = 0
self.app_descriptor.crc32_block2 = 0
self._write_descriptor_raw()
content = bytearray(self._contents.getvalue())
if self._padding:
content += bytearray.fromhex("ff" * self._padding)
# Update the descriptor's CRC based on the computed value and write
# it out again
self.app_descriptor.crc32_block1 = self.crc32(content[:self.app_descriptor_offset + len(AppDescriptor.SIGNATURE)])
b2 = self.app_descriptor_offset + len(AppDescriptor.SIGNATURE) + AppDescriptor.DESLENGTH
self.app_descriptor.crc32_block2 = self.crc32(content[b2:])
self._write_descriptor_raw()
def crc32(self, bytes, crc = 0):
for byte in bytes:
index = (crc ^ byte) & 0xff
crc = crctab[index] ^ (crc >> 8)
return crc
@property
def padding(self):
return self._padding
@property
def length(self):
if not self._length:
# Find the length of the file by seeking to the end and getting
# the offset
prev_offset = self._contents.tell()
self._contents.seek(0, os.SEEK_END)
self._length = self._contents.tell()
if self._padding:
fill = self._padding - (self._length % self._padding)
if fill:
self._length += fill
self._padding = fill
self._contents.seek(prev_offset)
return self._length
@property
def app_descriptor_offset(self):
if not self._descriptor_offset:
# Save the current position
prev_offset = self._contents.tell()
# Check each byte in the file to see if a valid descriptor starts
# at that location. Slow, but not slow enough to matter.
offset = 0
while offset < self.length - AppDescriptor.LENGTH:
self._contents.seek(offset)
try:
# If this throws an exception, there isn't a valid
# descriptor at this offset
AppDescriptor(self._contents.read(AppDescriptor.LENGTH))
except Exception:
offset += 1
else:
self._descriptor_offset = offset
break
# Go back to the previous position
self._contents.seek(prev_offset)
if not self._descriptor_offset:
raise Exception('AppDescriptor not found')
return self._descriptor_offset
@property
def app_descriptor(self):
if not self._descriptor:
# Save the current position
prev_offset = self._contents.tell()
            # Jump to the descriptor and parse it
self._contents.seek(self.app_descriptor_offset)
self._descriptor_bytes = self._contents.read(AppDescriptor.LENGTH)
self._descriptor = AppDescriptor(self._descriptor_bytes)
# Go back to the previous offset
self._contents.seek(prev_offset)
return self._descriptor
@app_descriptor.setter
def app_descriptor(self, value):
self._descriptor = value
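# Note on the CRC coverage in write_descriptor above: both CRC fields are
# zeroed and image_size is filled in first, then crc32_block1 is computed
# over the image from offset 0 up to and including the descriptor signature,
# and crc32_block2 over everything from DESLENGTH (16) bytes past the
# signature (the version_major field onwards) to the end of the padded image.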
if __name__ == "__main__":
parser = optparse.OptionParser(usage="usage: %prog [options] [IN OUT]")
parser.add_option("--vcs-commit", dest="vcs_commit", default=None,
help="set the descriptor's VCS commit value to COMMIT",
metavar="COMMIT")
parser.add_option("-g", "--use-git-hash", dest="use_git_hash", action="store_true",
help="set the descriptor's VCS commit value to the current git hash",
metavar="GIT")
parser.add_option("--bootloader-size", dest="bootloader_size", default=0,
help="don't write the first SIZE bytes of the image",
metavar="SIZE")
parser.add_option("--bootloader-image", dest="bootloader_image", default=0,
help="prepend a bootloader image to the output file",
metavar="IMAGE")
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
help="show additional firmware information on stdout")
options, args = parser.parse_args()
if len(args) not in (0, 2):
parser.error("specify both IN or OUT for file operation, or " +
"neither for stdin/stdout operation")
if options.vcs_commit and options.use_git_hash:
parser.error("options --vcs-commit and --use-git-commit are mutually exclusive")
if options.use_git_hash:
try:
options.vcs_commit = int(GitWrapper.command("rev-list HEAD --max-count=1 --abbrev=8 --abbrev-commit"),16)
except Exception as e:
print("Git Command failed "+ str(e) +"- Exiting!")
quit()
if args:
in_file = args[0]
out_file = args[1]
else:
in_file = sys.stdin
out_file = sys.stdout
bootloader_image = b""
if options.bootloader_image:
with open(options.bootloader_image, "rb") as bootloader:
bootloader_image = bootloader.read()
bootloader_size = int(options.bootloader_size)
with FirmwareImage(in_file, "rb") as in_image:
with FirmwareImage(out_file, "wb") as out_image:
image = in_image.read()
out_image.write(bootloader_image)
out_image.write(image[bootloader_size:])
if options.vcs_commit:
out_image.app_descriptor.vcs_commit = options.vcs_commit
out_image.write_descriptor()
if options.verbose:
sys.stderr.write(
"""
Application descriptor located at offset 0x{0.app_descriptor_offset:08X}
""".format(in_image, in_image.app_descriptor, out_image.app_descriptor,
bootloader_size, len(bootloader_image)))
if bootloader_size:
sys.stderr.write(
"""Ignored the first {3:d} bytes of the input image. Prepended {4:d} bytes of
bootloader image to the output image.
""".format(in_image, in_image.app_descriptor, out_image.app_descriptor,
bootloader_size, len(bootloader_image)))
sys.stderr.write(
"""READ VALUES
------------------------------------------------------------------------------
Field Type Value
signature uint64 {1.signature!r}
crc32_block1 uint32 0x{1.crc32_block1:08X}
crc32_block2 uint32 0x{1.crc32_block2:08X}
image_size uint32 0x{1.image_size:X} ({1.image_size:d} B)
vcs_commit uint32 {1.vcs_commit:08X}
version_major uint8 {1.version_major:d}
version_minor uint8 {1.version_minor:d}
board_id uint32 0x{1.board_id:X}
reserved uint8[8] {1.reserved!r}
WRITTEN VALUES
------------------------------------------------------------------------------
Field Type Value
signature uint64 {2.signature!r}
crc32_block1 uint32 0x{2.crc32_block1:08X}
crc32_block2 uint32 0x{2.crc32_block2:08X}
image_size uint32 0x{2.image_size:X} ({2.image_size:d} B)
vcs_commit uint32 {2.vcs_commit:08X}
version_major uint8 {2.version_major:d}
version_minor uint8 {2.version_minor:d}
board_id uint32 0x{2.board_id:X}
reserved uint8[8] {2.reserved!r}
""".format(in_image, in_image.app_descriptor, out_image.app_descriptor,
bootloader_size, len(bootloader_image)))
if out_image.padding:
sys.stderr.write(
"""
padding added {}
""".format(out_image.padding))
|
|
import unittest
from test import test_support
import operator
from sys import maxint
maxsize = test_support.MAX_Py_ssize_t
minsize = -maxsize-1
class oldstyle:
def __index__(self):
return self.ind
class newstyle(object):
def __index__(self):
return self.ind
class TrapInt(int):
def __index__(self):
return self
class TrapLong(long):
def __index__(self):
return self
class BaseTestCase(unittest.TestCase):
def setUp(self):
self.o = oldstyle()
self.n = newstyle()
def test_basic(self):
self.o.ind = -2
self.n.ind = 2
self.assertEqual(operator.index(self.o), -2)
self.assertEqual(operator.index(self.n), 2)
def test_slice(self):
self.o.ind = 1
self.n.ind = 2
slc = slice(self.o, self.o, self.o)
check_slc = slice(1, 1, 1)
self.assertEqual(slc.indices(self.o), check_slc.indices(1))
slc = slice(self.n, self.n, self.n)
check_slc = slice(2, 2, 2)
self.assertEqual(slc.indices(self.n), check_slc.indices(2))
def test_wrappers(self):
self.o.ind = 4
self.n.ind = 5
self.assertEqual(6 .__index__(), 6)
self.assertEqual(-7L.__index__(), -7)
self.assertEqual(self.o.__index__(), 4)
self.assertEqual(self.n.__index__(), 5)
def test_subclasses(self):
r = range(10)
self.assertEqual(r[TrapInt(5):TrapInt(10)], r[5:10])
self.assertEqual(r[TrapLong(5):TrapLong(10)], r[5:10])
self.assertEqual(slice(TrapInt()).indices(0), (0,0,1))
self.assertEqual(slice(TrapLong(0)).indices(0), (0,0,1))
def test_error(self):
self.o.ind = 'dumb'
self.n.ind = 'bad'
self.failUnlessRaises(TypeError, operator.index, self.o)
self.failUnlessRaises(TypeError, operator.index, self.n)
self.failUnlessRaises(TypeError, slice(self.o).indices, 0)
self.failUnlessRaises(TypeError, slice(self.n).indices, 0)
class SeqTestCase(unittest.TestCase):
# This test case isn't run directly. It just defines common tests
# for the different sequence types below.
def setUp(self):
self.o = oldstyle()
self.n = newstyle()
self.o2 = oldstyle()
self.n2 = newstyle()
def test_index(self):
self.o.ind = -2
self.n.ind = 2
self.assertEqual(self.seq[self.n], self.seq[2])
self.assertEqual(self.seq[self.o], self.seq[-2])
def test_slice(self):
self.o.ind = 1
self.o2.ind = 3
self.n.ind = 2
self.n2.ind = 4
self.assertEqual(self.seq[self.o:self.o2], self.seq[1:3])
self.assertEqual(self.seq[self.n:self.n2], self.seq[2:4])
def test_repeat(self):
self.o.ind = 3
self.n.ind = 2
self.assertEqual(self.seq * self.o, self.seq * 3)
self.assertEqual(self.seq * self.n, self.seq * 2)
self.assertEqual(self.o * self.seq, self.seq * 3)
self.assertEqual(self.n * self.seq, self.seq * 2)
def test_wrappers(self):
self.o.ind = 4
self.n.ind = 5
self.assertEqual(self.seq.__getitem__(self.o), self.seq[4])
self.assertEqual(self.seq.__mul__(self.o), self.seq * 4)
self.assertEqual(self.seq.__rmul__(self.o), self.seq * 4)
self.assertEqual(self.seq.__getitem__(self.n), self.seq[5])
self.assertEqual(self.seq.__mul__(self.n), self.seq * 5)
self.assertEqual(self.seq.__rmul__(self.n), self.seq * 5)
def test_subclasses(self):
self.assertEqual(self.seq[TrapInt()], self.seq[0])
self.assertEqual(self.seq[TrapLong()], self.seq[0])
def test_error(self):
self.o.ind = 'dumb'
self.n.ind = 'bad'
indexobj = lambda x, obj: obj.seq[x]
self.failUnlessRaises(TypeError, indexobj, self.o, self)
self.failUnlessRaises(TypeError, indexobj, self.n, self)
sliceobj = lambda x, obj: obj.seq[x:]
self.failUnlessRaises(TypeError, sliceobj, self.o, self)
self.failUnlessRaises(TypeError, sliceobj, self.n, self)
class ListTestCase(SeqTestCase):
seq = [0,10,20,30,40,50]
def test_setdelitem(self):
self.o.ind = -2
self.n.ind = 2
lst = list('ab!cdefghi!j')
del lst[self.o]
del lst[self.n]
lst[self.o] = 'X'
lst[self.n] = 'Y'
self.assertEqual(lst, list('abYdefghXj'))
lst = [5, 6, 7, 8, 9, 10, 11]
lst.__setitem__(self.n, "here")
self.assertEqual(lst, [5, 6, "here", 8, 9, 10, 11])
lst.__delitem__(self.n)
self.assertEqual(lst, [5, 6, 8, 9, 10, 11])
def test_inplace_repeat(self):
self.o.ind = 2
self.n.ind = 3
lst = [6, 4]
lst *= self.o
self.assertEqual(lst, [6, 4, 6, 4])
lst *= self.n
self.assertEqual(lst, [6, 4, 6, 4] * 3)
lst = [5, 6, 7, 8, 9, 11]
l2 = lst.__imul__(self.n)
self.assert_(l2 is lst)
self.assertEqual(lst, [5, 6, 7, 8, 9, 11] * 3)
class TupleTestCase(SeqTestCase):
seq = (0,10,20,30,40,50)
class StringTestCase(SeqTestCase):
seq = "this is a test"
class UnicodeTestCase(SeqTestCase):
seq = u"this is a test"
class XRangeTestCase(unittest.TestCase):
def test_xrange(self):
n = newstyle()
n.ind = 5
self.assertEqual(xrange(1, 20)[n], 6)
self.assertEqual(xrange(1, 20).__getitem__(n), 6)
class OverflowTestCase(unittest.TestCase):
def setUp(self):
self.pos = 2**100
self.neg = -self.pos
def test_large_longs(self):
self.assertEqual(self.pos.__index__(), self.pos)
self.assertEqual(self.neg.__index__(), self.neg)
def _getitem_helper(self, base):
class GetItem(base):
def __len__(self):
return maxint #cannot return long here
def __getitem__(self, key):
return key
def __getslice__(self, i, j):
return i, j
x = GetItem()
self.assertEqual(x[self.pos], self.pos)
self.assertEqual(x[self.neg], self.neg)
self.assertEqual(x[self.neg:self.pos], (maxint+minsize, maxsize))
self.assertEqual(x[self.neg:self.pos:1].indices(maxsize), (0, maxsize, 1))
def test_getitem(self):
self._getitem_helper(object)
def test_getitem_classic(self):
class Empty: pass
self._getitem_helper(Empty)
def test_sequence_repeat(self):
self.failUnlessRaises(OverflowError, lambda: "a" * self.pos)
self.failUnlessRaises(OverflowError, lambda: "a" * self.neg)
def test_main():
test_support.run_unittest(
BaseTestCase,
ListTestCase,
TupleTestCase,
StringTestCase,
UnicodeTestCase,
XRangeTestCase,
OverflowTestCase,
)
if __name__ == "__main__":
test_main()
|
|
from __future__ import unicode_literals
from unittest import skipIf
from django.db import connection, connections
from django.db.migrations.exceptions import (
AmbiguityError, InconsistentMigrationHistory, NodeNotFoundError,
)
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.recorder import MigrationRecorder
from django.test import TestCase, modify_settings, override_settings
from django.utils import six
class RecorderTests(TestCase):
"""
Tests recording migrations as applied or not.
"""
def test_apply(self):
"""
Tests marking migrations as applied/unapplied.
"""
recorder = MigrationRecorder(connection)
self.assertEqual(
set((x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"),
set(),
)
recorder.record_applied("myapp", "0432_ponies")
self.assertEqual(
set((x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"),
{("myapp", "0432_ponies")},
)
# That should not affect records of another database
recorder_other = MigrationRecorder(connections['other'])
self.assertEqual(
set((x, y) for (x, y) in recorder_other.applied_migrations() if x == "myapp"),
set(),
)
recorder.record_unapplied("myapp", "0432_ponies")
self.assertEqual(
set((x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"),
set(),
)
class LoaderTests(TestCase):
"""
Tests the disk and database loader, and running through migrations
in memory.
"""
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
@modify_settings(INSTALLED_APPS={'append': 'basic'})
def test_load(self):
"""
Makes sure the loader can load the migrations for the test apps,
and then render them out to a new Apps.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0002_second")),
[
("migrations", "0001_initial"),
("migrations", "0002_second"),
],
)
# Now render it out!
project_state = migration_loader.project_state(("migrations", "0002_second"))
self.assertEqual(len(project_state.models), 2)
author_state = project_state.models["migrations", "author"]
self.assertEqual(
[x for x, y in author_state.fields],
["id", "name", "slug", "age", "rating"]
)
book_state = project_state.models["migrations", "book"]
self.assertEqual(
[x for x, y in book_state.fields],
["id", "author"]
)
# Ensure we've included unmigrated apps in there too
self.assertIn("basic", project_state.real_apps)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_unmigdep"})
def test_load_unmigrated_dependency(self):
"""
Makes sure the loader can load migrations with a dependency on an unmigrated app.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0001_initial")),
[
('contenttypes', '0001_initial'),
('auth', '0001_initial'),
("migrations", "0001_initial"),
],
)
# Now render it out!
project_state = migration_loader.project_state(("migrations", "0001_initial"))
self.assertEqual(len([m for a, m in project_state.models if a == "migrations"]), 1)
book_state = project_state.models["migrations", "book"]
self.assertEqual(
[x for x, y in book_state.fields],
["id", "user"]
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_run_before"})
def test_run_before(self):
"""
Makes sure the loader uses Migration.run_before.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0002_second")),
[
("migrations", "0001_initial"),
("migrations", "0003_third"),
("migrations", "0002_second"),
],
)
@override_settings(MIGRATION_MODULES={
"migrations": "migrations.test_migrations_first",
"migrations2": "migrations2.test_migrations_2_first",
})
@modify_settings(INSTALLED_APPS={'append': 'migrations2'})
def test_first(self):
"""
Makes sure the '__first__' migrations build correctly.
"""
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "second")),
[
("migrations", "thefirst"),
("migrations2", "0001_initial"),
("migrations2", "0002_second"),
("migrations", "second"),
],
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_name_match(self):
"Tests prefix name matching"
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.get_migration_by_prefix("migrations", "0001").name,
"0001_initial",
)
with self.assertRaises(AmbiguityError):
migration_loader.get_migration_by_prefix("migrations", "0")
with self.assertRaises(KeyError):
migration_loader.get_migration_by_prefix("migrations", "blarg")
def test_load_import_error(self):
with override_settings(MIGRATION_MODULES={"migrations": "import_error_package"}):
with self.assertRaises(ImportError):
MigrationLoader(connection)
def test_load_module_file(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.file"}):
loader = MigrationLoader(connection)
self.assertIn(
"migrations", loader.unmigrated_apps,
"App with migrations module file not in unmigrated apps."
)
@skipIf(six.PY2, "PY2 doesn't load empty dirs.")
def test_load_empty_dir(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.namespace"}):
loader = MigrationLoader(connection)
self.assertIn(
"migrations", loader.unmigrated_apps,
"App missing __init__.py in migrations module not in unmigrated apps."
)
@override_settings(
INSTALLED_APPS=['migrations.migrations_test_apps.migrated_app'],
)
def test_marked_as_migrated(self):
"""
Undefined MIGRATION_MODULES implies default migration module.
"""
migration_loader = MigrationLoader(connection)
self.assertEqual(migration_loader.migrated_apps, {'migrated_app'})
self.assertEqual(migration_loader.unmigrated_apps, set())
@override_settings(
INSTALLED_APPS=['migrations.migrations_test_apps.migrated_app'],
MIGRATION_MODULES={"migrated_app": None},
)
def test_marked_as_unmigrated(self):
"""
MIGRATION_MODULES allows disabling of migrations for a particular app.
"""
migration_loader = MigrationLoader(connection)
self.assertEqual(migration_loader.migrated_apps, set())
self.assertEqual(migration_loader.unmigrated_apps, {'migrated_app'})
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_loading_squashed(self):
"Tests loading a squashed migration"
migration_loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
self.addCleanup(recorder.flush)
# Loading with nothing applied should just give us the one node
self.assertEqual(
len([x for x in migration_loader.graph.nodes if x[0] == "migrations"]),
1,
)
# However, fake-apply one migration and it should now use the old two
recorder.record_applied("migrations", "0001_initial")
migration_loader.build_graph()
self.assertEqual(
len([x for x in migration_loader.graph.nodes if x[0] == "migrations"]),
2,
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_complex"})
def test_loading_squashed_complex(self):
"Tests loading a complex set of squashed migrations"
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
self.addCleanup(recorder.flush)
def num_nodes():
plan = set(loader.graph.forwards_plan(('migrations', '7_auto')))
return len(plan - loader.applied_migrations)
# Empty database: use squashed migration
loader.build_graph()
self.assertEqual(num_nodes(), 5)
# Starting at 1 or 2 should use the squashed migration too
recorder.record_applied("migrations", "1_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 4)
recorder.record_applied("migrations", "2_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 3)
# However, starting at 3 to 5 cannot use the squashed migration
recorder.record_applied("migrations", "3_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 4)
recorder.record_applied("migrations", "4_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 3)
# Starting at 5 to 7 we are past the squashed migrations
recorder.record_applied("migrations", "5_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 2)
recorder.record_applied("migrations", "6_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 1)
recorder.record_applied("migrations", "7_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 0)
@override_settings(MIGRATION_MODULES={
"app1": "migrations.test_migrations_squashed_complex_multi_apps.app1",
"app2": "migrations.test_migrations_squashed_complex_multi_apps.app2",
})
@modify_settings(INSTALLED_APPS={'append': [
"migrations.test_migrations_squashed_complex_multi_apps.app1",
"migrations.test_migrations_squashed_complex_multi_apps.app2",
]})
def test_loading_squashed_complex_multi_apps(self):
loader = MigrationLoader(connection)
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
expected_plan = {
('app1', '1_auto'),
('app2', '1_squashed_2'),
('app1', '2_squashed_3'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
@override_settings(MIGRATION_MODULES={
"app1": "migrations.test_migrations_squashed_complex_multi_apps.app1",
"app2": "migrations.test_migrations_squashed_complex_multi_apps.app2",
})
@modify_settings(INSTALLED_APPS={'append': [
"migrations.test_migrations_squashed_complex_multi_apps.app1",
"migrations.test_migrations_squashed_complex_multi_apps.app2",
]})
def test_loading_squashed_complex_multi_apps_partially_applied(self):
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
recorder.record_applied('app1', '1_auto')
recorder.record_applied('app1', '2_auto')
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
plan = plan - loader.applied_migrations
expected_plan = {
('app2', '1_squashed_2'),
('app1', '3_auto'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_erroneous"})
def test_loading_squashed_erroneous(self):
"Tests loading a complex but erroneous set of squashed migrations"
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
self.addCleanup(recorder.flush)
def num_nodes():
plan = set(loader.graph.forwards_plan(('migrations', '7_auto')))
return len(plan - loader.applied_migrations)
# Empty database: use squashed migration
loader.build_graph()
self.assertEqual(num_nodes(), 5)
# Starting at 1 or 2 should use the squashed migration too
recorder.record_applied("migrations", "1_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 4)
recorder.record_applied("migrations", "2_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 3)
# However, starting at 3 or 4 we'd need to use non-existing migrations
msg = ("Migration migrations.6_auto depends on nonexistent node ('migrations', '5_auto'). "
"Django tried to replace migration migrations.5_auto with any of "
"[migrations.3_squashed_5] but wasn't able to because some of the replaced "
"migrations are already applied.")
recorder.record_applied("migrations", "3_auto")
with self.assertRaisesMessage(NodeNotFoundError, msg):
loader.build_graph()
recorder.record_applied("migrations", "4_auto")
with self.assertRaisesMessage(NodeNotFoundError, msg):
loader.build_graph()
# Starting at 5 to 7 we are past the squashed migrations
recorder.record_applied("migrations", "5_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 2)
recorder.record_applied("migrations", "6_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 1)
recorder.record_applied("migrations", "7_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 0)
@override_settings(
MIGRATION_MODULES={'migrations': 'migrations.test_migrations'},
INSTALLED_APPS=['migrations'],
)
def test_check_consistent_history(self):
loader = MigrationLoader(connection=None)
loader.check_consistent_history(connection)
recorder = MigrationRecorder(connection)
recorder.record_applied('migrations', '0002_second')
msg = "Migration migrations.0002_second is applied before its dependency migrations.0001_initial"
with self.assertRaisesMessage(InconsistentMigrationHistory, msg):
loader.check_consistent_history(connection)
@override_settings(
MIGRATION_MODULES={'migrations': 'migrations.test_migrations_squashed_extra'},
INSTALLED_APPS=['migrations'],
)
def test_check_consistent_history_squashed(self):
"""
MigrationLoader.check_consistent_history() should ignore unapplied
squashed migrations that have all of their `replaces` applied.
"""
loader = MigrationLoader(connection=None)
recorder = MigrationRecorder(connection)
recorder.record_applied('migrations', '0001_initial')
recorder.record_applied('migrations', '0002_second')
loader.check_consistent_history(connection)
recorder.record_applied('migrations', '0003_third')
loader.check_consistent_history(connection)
@override_settings(MIGRATION_MODULES={
"app1": "migrations.test_migrations_squashed_ref_squashed.app1",
"app2": "migrations.test_migrations_squashed_ref_squashed.app2",
})
@modify_settings(INSTALLED_APPS={'append': [
"migrations.test_migrations_squashed_ref_squashed.app1",
"migrations.test_migrations_squashed_ref_squashed.app2",
]})
def test_loading_squashed_ref_squashed(self):
"Tests loading a squashed migration with a new migration referencing it"
"""
The sample migrations are structred like this:
app_1 1 --> 2 ---------------------*--> 3 *--> 4
\ / /
*-------------------*----/--> 2_sq_3 --*
\ / /
=============== \ ============= / == / ======================
app_2 *--> 1_sq_2 --* /
\ /
*--> 1 --> 2 --*
Where 2_sq_3 is a replacing migration for 2 and 3 in app_1,
as 1_sq_2 is a replacing migration for 1 and 2 in app_2.
"""
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
self.addCleanup(recorder.flush)
# Load with nothing applied: both migrations squashed.
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
plan = plan - loader.applied_migrations
expected_plan = {
('app1', '1_auto'),
('app2', '1_squashed_2'),
('app1', '2_squashed_3'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
# Fake-apply a few from app1: unsquashes migration in app1.
recorder.record_applied('app1', '1_auto')
recorder.record_applied('app1', '2_auto')
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
plan = plan - loader.applied_migrations
expected_plan = {
('app2', '1_squashed_2'),
('app1', '3_auto'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
# Fake-apply one from app2: unsquashes migration in app2 too.
recorder.record_applied('app2', '1_auto')
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
plan = plan - loader.applied_migrations
expected_plan = {
('app2', '2_auto'),
('app1', '3_auto'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
|
|
# -*- coding: utf-8 -*-
import dedupe
import unittest
import numpy
import random
import itertools
import collections
from collections import OrderedDict
DATA = { 100 : {"name": "Bob", "age": "50"},
105 : {"name": "Charlie", "age": "75"},
110 : {"name": "Meredith", "age": "40"},
115 : {"name": "Sue", "age": "10"},
120 : {"name": "Jimmy", "age": "20"},
125 : {"name": "Jimbo", "age": "21"},
130 : {"name": "Willy", "age": "35"},
135 : {"name": "William", "age": "35"},
140 : {"name": "Martha", "age": "19"},
145 : {"name": "Kyle", "age": "27"}
}
DATA_SAMPLE = ((dedupe.core.frozendict({'age': '27', 'name': 'Kyle'}),
dedupe.core.frozendict({'age': '50', 'name': 'Bob'})),
(dedupe.core.frozendict({'age': '27', 'name': 'Kyle'}),
dedupe.core.frozendict({'age': '35', 'name': 'William'})),
(dedupe.core.frozendict({'age': '10', 'name': 'Sue'}),
dedupe.core.frozendict({'age': '35', 'name': 'William'})),
(dedupe.core.frozendict({'age': '27', 'name': 'Kyle'}),
dedupe.core.frozendict({'age': '20', 'name': 'Jimmy'})),
(dedupe.core.frozendict({'age': '75', 'name': 'Charlie'}),
dedupe.core.frozendict({'age': '21', 'name': 'Jimbo'})))
class DataModelTest(unittest.TestCase) :
def test_data_model(self) :
DataModel = dedupe.datamodel.DataModel
self.assertRaises(TypeError, DataModel)
assert DataModel({}) == {'fields': [], 'bias': 0}
data_model = DataModel([{'field' : 'a',
'variable name' : 'a',
'type' : 'String'},
{'field' : 'b',
'variable name' : 'b',
'type' : 'String'},
{'type' : 'Interaction',
'interaction variables' : ['a', 'b']}])
assert data_model['fields'][2].interaction_fields == ['a', 'b']
data_model = DataModel([{'field' : 'a',
'variable name' : 'a',
'type' : 'String',
'has missing' : True},
{'field' : 'b',
'variable name' : 'b',
'type' : 'String'},
{'type' : 'Interaction',
'interaction variables' : ['a', 'b']}])
#print data_model['fields']
assert data_model['fields'][2].has_missing == True
data_model = DataModel([{'field' : 'a',
'variable name' : 'a',
'type' : 'String',
'has missing' : False},
{'field' : 'b',
'variable name' : 'b',
'type' : 'String'},
{'type' : 'Interaction',
'interaction variables' : ['a', 'b']}])
assert data_model['fields'][2].has_missing == False
class ConnectedComponentsTest(unittest.TestCase) :
def test_components(self) :
G = numpy.array([((1, 2), .1),
((2, 3), .2),
((4, 5), .2),
((4, 6), .2),
((7, 9), .2),
((8, 9), .2),
((10, 11), .2),
((12, 13), .2),
((12, 14), .5),
((11, 12), .2)],
dtype = [('pairs', 'i4', 2), ('score', 'f4', 1)])
components = dedupe.clustering.connected_components
numpy.testing.assert_equal(list(components(G, 30000)), \
[numpy.array([([1, 2], 0.10000000149011612),
([2, 3], 0.20000000298023224)],
dtype=[('pairs', 'i4', (2,)),
('score', '<f4')]),
numpy.array([([4, 5], 0.20000000298023224),
([4, 6], 0.20000000298023224)],
dtype=[('pairs', 'i4', (2,)),
('score', '<f4')]),
numpy.array([([12, 13], 0.20000000298023224),
([12, 14], 0.5),
([10, 11], 0.20000000298023224),
([11, 12], 0.20000000298023224)],
dtype=[('pairs', 'i4', (2,)),
('score', '<f4')]),
numpy.array([([7, 9], 0.20000000298023224),
([8, 9], 0.20000000298023224)],
dtype=[('pairs', 'i4', (2,)),
('score', '<f4')])])
class ClusteringTest(unittest.TestCase):
def setUp(self):
# Fully connected star network
self.dupes = numpy.array([((1,2), .86),
((1,3), .72),
((1,4), .2),
((1,5), .6),
((2,3), .86),
((2,4), .2),
((2,5), .72),
((3,4), .3),
((3,5), .5),
((4,5), .72),
((10,11), .9)],
dtype = [('pairs', 'i4', 2),
('score', 'f4', 1)])
#Dupes with Ids as String
self.str_dupes = numpy.array([(('1', '2'), .86),
(('1', '3'), .72),
(('1', '4'), .2),
(('1', '5'), .6),
(('2', '3'), .86),
(('2', '4'), .2),
(('2', '5'), .72),
(('3', '4'), .3),
(('3', '5'), .5),
(('4', '5'), .72)],
dtype = [('pairs', 'S4', 2), ('score', 'f4', 1)])
self.bipartite_dupes = (((1,5), .1),
((1,6), .72),
((1,7), .2),
((1,8), .6),
((2,5), .2),
((2,6), .2),
((2,7), .72),
((2,8), .3),
((3,5), .24),
((3,6), .72),
((3,7), .24),
((3,8), .65),
((4,5), .63),
((4,6), .96),
((4,7), .23),
((5,8), .24))
def clusterEquals(self, x, y) :
for cluster_a, cluster_b in zip(x, y) :
if cluster_a[0] != cluster_b[0] :
return False
for score_a, score_b in zip(cluster_a[1], cluster_b[1]) :
if abs(score_a - score_b) > 0.001 :
return False
else :
return True
def test_hierarchical(self):
hierarchical = dedupe.clustering.cluster
assert self.clusterEquals(list(hierarchical(self.dupes, 1)),
[((10, 11),
(0.89999,
0.89999))])
assert self.clusterEquals(hierarchical(self.dupes, 0.5),
[((1, 2, 3),
(0.79,
0.860,
0.79)),
((4, 5),
(0.720,
0.720)),
((10, 11),
(0.899,
0.899))])
print(hierarchical(self.dupes, 0.0))
assert self.clusterEquals(hierarchical(self.dupes, 0),
[((1, 2, 3, 4, 5),
(0.595,
0.660,
0.595,
0.355,
0.635)),
((10, 11),
(0.899,
0.899))])
assert list(hierarchical(self.str_dupes, 1)) == []
assert list(zip(*hierarchical(self.str_dupes, 0.5)))[0] == ((b'1', b'2', b'3'), (b'4', b'5'))
assert list(zip(*hierarchical(self.str_dupes, 0)))[0] == ((b'1', b'2', b'3', b'4', b'5'),)
def test_greedy_matching(self):
greedyMatch = dedupe.clustering.greedyMatching
assert greedyMatch(self.bipartite_dupes,
threshold=0.5) == [((4, 6), 0.96),
((2, 7), 0.72),
((3, 8), 0.65)]
assert greedyMatch(self.bipartite_dupes,
threshold=0) == [((4, 6), 0.96),
((2, 7), 0.72),
((3, 8), 0.65),
((1, 5), 0.1)]
assert greedyMatch(self.bipartite_dupes,
threshold=0.8) == [((4, 6), 0.96)]
assert greedyMatch(self.bipartite_dupes,
threshold=1) == []
def test_gazette_matching(self):
gazetteMatch = dedupe.clustering.gazetteMatching
assert set(gazetteMatch(self.bipartite_dupes,
threshold=0.5)) == set([(((4, 6), 0.96),),
(((1, 6), 0.72),),
(((2, 7), 0.72),),
(((3, 6), 0.72),)])
assert set(gazetteMatch(self.bipartite_dupes,
threshold=0, n_matches=2)) == set([(((1, 6), 0.72),
((1, 8), 0.6)),
(((2, 7), 0.72),
((2, 8), 0.3)),
(((3, 6), 0.72),
((3, 8), 0.65)),
(((4, 6), 0.96),
((4, 5), 0.63)),
(((5, 8), 0.24),)])
assert set(gazetteMatch(self.bipartite_dupes,
threshold=0)) == set([(((4, 6), 0.96),),
(((1, 6), 0.72),),
(((2, 7), 0.72),),
(((3, 6), 0.72),),
(((5, 8), 0.24),)])
assert gazetteMatch(self.bipartite_dupes,
threshold=0.8) == [(((4,6), 0.96),)]
assert gazetteMatch(self.bipartite_dupes,
threshold=1) == []
class PredicatesTest(unittest.TestCase):
def test_predicates_correctness(self):
field = '123 16th st'
assert dedupe.predicates.existsPredicate(field) == ('1',)
assert dedupe.predicates.existsPredicate('') == ('0',)
assert dedupe.predicates.existsPredicate(1) == ('1',)
assert dedupe.predicates.existsPredicate(0) == ('0',)
assert dedupe.predicates.sortedAcronym(field) == ('11s',)
assert dedupe.predicates.wholeFieldPredicate(field) == ('123 16th st',)
assert dedupe.predicates.firstTokenPredicate(field) == ('123',)
assert dedupe.predicates.firstTokenPredicate('') == ()
assert dedupe.predicates.firstTokenPredicate('123/') == ('123',)
assert dedupe.predicates.tokenFieldPredicate(' ') == set([])
assert dedupe.predicates.tokenFieldPredicate(field) == set(['123', '16th', 'st'])
assert dedupe.predicates.commonIntegerPredicate(field) == set(['123', '16'])
assert dedupe.predicates.commonIntegerPredicate('foo') == set([])
assert dedupe.predicates.firstIntegerPredicate('foo') == ()
assert dedupe.predicates.firstIntegerPredicate('1foo') == ('1',)
assert dedupe.predicates.firstIntegerPredicate('f1oo') == ()
assert dedupe.predicates.sameThreeCharStartPredicate(field) == ('123',)
assert dedupe.predicates.sameThreeCharStartPredicate('12') == ()
assert dedupe.predicates.commonFourGram('12') == set([])
assert dedupe.predicates.sameFiveCharStartPredicate(field) == ('12316',)
assert dedupe.predicates.sameSevenCharStartPredicate(field) == ('12316th',)
assert dedupe.predicates.nearIntegersPredicate(field) == set(['15', '17', '16', '122', '123', '124'])
assert dedupe.predicates.commonFourGram(field) == set(['1231', '2316', '316t', '16th', '6ths', 'thst'])
assert dedupe.predicates.commonSixGram(field) == set(['12316t', '2316th', '316ths', '16thst'])
assert dedupe.predicates.initials(field,12) == ()
assert dedupe.predicates.initials(field,7) == ('123 16t',)
assert dedupe.predicates.ngrams(field,3) == ['123','23 ','3 1',' 16','16t','6th','th ','h s', ' st']
assert dedupe.predicates.commonTwoElementsPredicate((1,2,3)) == set(('1 2','2 3'))
assert dedupe.predicates.commonTwoElementsPredicate((1,)) == set([])
assert dedupe.predicates.commonThreeElementsPredicate((1,2,3)) == set(('1 2 3',))
assert dedupe.predicates.commonThreeElementsPredicate((1,)) == set([])
assert dedupe.predicates.fingerprint('time sandwich') == (u'sandwichtime',)
assert dedupe.predicates.oneGramFingerprint('sandwich time') == (u'acdehimnstw',)
assert dedupe.predicates.twoGramFingerprint('sandwich time') == (u'anchdwhticimmendsatiwi',)
assert dedupe.predicates.twoGramFingerprint('1') == ()
assert dedupe.predicates.commonTwoTokens('foo bar') == set([u'foo bar'])
assert dedupe.predicates.commonTwoTokens('foo') == set([])
if __name__ == "__main__":
unittest.main()
|
|
# encoding: latin2
"""Max-P-regions
"""
__author__ = "Juan C. Duque"
__credits__ = "Copyright (c) 2009-11 Juan C. Duque"
__license__ = "New BSD License"
__version__ = "1.0.0"
__maintainer__ = "RiSE Group"
__email__ = "contacto@rise-group.org"
import copy
import numpy
import time as tm
from componentsAlg import AreaManager
from componentsAlg import BasicMemory
from componentsAlg import RegionMaker
__all__ = ['execMaxpTabu']
def execMaxpTabu(y, w, threshold=100.0, maxit=2, tabuLength=5, typeTabu="exact"):
"""Max-p-regions model (Tabu)
The max-p-regions model, devised by [Duque_Anselin_Rey2010]_ ,
clusters a set of geographic areas into the maximum number of homogeneous
regions such that the value of a spatially extensive regional attribute is
above a predefined threshold value. In clusterPy we measure heterogeneity as
the within-cluster sum of squares from each area to the attribute centroid
of its cluster.
The max-p-regions algorithm is composed of two main blocks:
- construction of an initial feasible solution.
- local improvement.
There are three methods for local improvement: Greedy (execMaxpGreedy),
Tabu (execMaxpTabu), and Simulated Annealing (execMaxpSa). A detailed
explanation of each method can be found in Duque, Anselin and Rey (2010) [Duque_Anselin_Rey2010]_.
For this version, the tabu search algorithm will stop after
max(10,N/maxP) nonimproving moves. ::
layer.cluster('maxpTabu',vars,<threshold>,<wType>,<std>,<maxit>,<tabuLength>,<typeTabu>,<dissolve>,<dataOperations>)
:keyword vars: Area attribute(s). Important: the last variable in vars correspond to the spatially extensive attribute that will be constrained to be above the predefined threshold value (e.g. ['SAR1','SAR2','POP'])
:type vars: list
:keyword threshold: Minimum value of the constrained variable at regional level. Default value threshold = 100.
:type threshold: integer
:keyword wType: Type of first-order contiguity-based spatial matrix: 'rook' or 'queen'. Default value wType = 'rook'.
:type wType: string
:keyword std: If = 1, then the variables will be standardized.
:type std: binary
:keyword maxit: Number of times that the construction phase is repeated. The larger the value the higher the possibility of getting a large number of regions. Default value maxit = 2.
:type maxit: integer
:keyword tabuLength: Number of times a reverse move is prohibited. Default value tabuLength = 85.
:type tabuLength: integer
:keyword typeTabu: Type of tabu search: (a) "exact": chooses the best neighbouring solution for evaluation (it implies the enumeration of all the neighbouring solutions at each iteration); (b) "random": evaluates a neighbouring solution selected at random (see Ricca, F. and Simeone (2008) for more on the difference between exact and random tabu). Default value typeTabu = "exact".
:type typeTabu: string
:keyword dissolve: If = 1, then you will get a "child" instance of the layer that contains the new regions. Default value = 0. Note: Each child layer is saved in the attribute layer.results. The first algorithm that you run with dissolve=1 will have a child layer in layer.results[0]; the second algorithm that you run with dissolve=1 will be in layer.results[1], and so on. You can export a child as a shapefile with layer.result[<1,2,3..>].exportArcData('filename')
:type dissolve: binary
:keyword dataOperations: Dictionary which maps a variable to a list of operations to run on it. The dissolved layer will contains in it's data all the variables specified in this dictionary. Be sure to check the input layer's fieldNames before use this utility.
:type dataOperations: dictionary
The dictionary structure must be as shown below.
>>> X = {}
>>> X[variableName1] = [function1, function2,....]
>>> X[variableName2] = [function1, function2,....]
Where functions are strings representing the names of the
functions to be applied to the given variableName. Functions
can be 'sum', 'mean', 'min', 'max', 'meanDesv', 'stdDesv', 'med',
'mode', 'range', 'first', 'last', 'numberOfAreas'. By default only
the ID variable is added to the dissolved map.
"""
print "Running max-p-regions model (Duque, Anselin and Rey, 2010)"
print "Local search method: Tabu Search"
print "Number of areas: ", len(y)
print "threshold value: ", threshold
distanceType = "EuclideanSquared"
distanceStat = "Centroid"
objectiveFunctionType = "SS"
selectionType = "Minimum"
numRegionsType = "EndogenousThreshold"
# CONSTRUCTION PHASE 1: GROWING FEASIBLE REGIONS
start = tm.time()
# print w
# print y
am = AreaManager(w, y, distanceType)
maxP = 0
bestCandidates = {}
for i in range(maxit):
# print "**** Iteration %d of %d ..."%(i+1,maxit)
rm = RegionMaker(am,
distanceType = distanceType,
distanceStat = distanceStat,
selectionType = selectionType,
objectiveFunctionType = objectiveFunctionType,
numRegionsType = numRegionsType,
threshold = threshold)
numRegions = len(rm.feasibleRegions)
rm.getObj()
# print "rm.feasibleRegions",rm.feasibleRegions
# print "obj",rm.getObj()
if numRegions > maxP:
bestCandidates = {}
maxP = numRegions
obj = rm.objInfo
bestCandidates[obj] = rm.feasibleRegions
if numRegions == maxP:
obj = rm.objInfo
if obj in bestCandidates:
pass
else:
bestCandidates[obj] = rm.feasibleRegions
else:
pass
# print "bestCandidates", bestCandidates
ofValues = bestCandidates.keys()
basicMemory = BasicMemory()
while len(ofValues) >= 1:
# RECREATE SOLUTION
rm.resetNow()
minOfValue = min(ofValues)
ofValues.remove(minOfValue)
partialSolution = bestCandidates[minOfValue]
# print "ASSIGNING ENCLAVES"
# print partialSolution
regionId = 0
for growReg in partialSolution:
seedGrowReg = partialSolution[growReg][0]
rm.assignSeeds(seedGrowReg, regionId)
partialSolution[growReg].remove(seedGrowReg)
if len(partialSolution[growReg]) >= 1:
for areaInGrow in partialSolution[growReg]:
rm.assignArea(areaInGrow, regionId)
regionId += 1
# CONSTRUCTION PHASE 2: ENCLAVES ASSIGNATION
rm.feasibleRegions = copy.deepcopy(rm.region2Area)
rm.getIntraBorderingAreas()
rm.newExternal = set(rm.unassignedAreas)
if len(rm.unassignedAreas) != 0:
rm.constructionStage = "enclaves"
while len(rm.unassignedAreas) != 0:
rm.constructRegions()
rm.objInfo = rm.getObjective(rm.region2Area)
rm.feasibleRegions = copy.deepcopy(rm.region2Area)
rm.getIntraBorderingAreas()
# print "ASSIGNED SOLUTION"
# print "OBJ: ", rm.getObjective(rm.region2Area), rm.returnRegions()
rm.calculateRegionValueThreshold()
# LOCAL SEARCH
rm.calcObj()
convTabu = min(10,len(y)/maxP) # convTabu=230*numpy.sqrt(maxP)
# print "###ENTERING TABU",rm.objInfo,rm.returnRegions()
rm.tabuMove(tabuLength, convTabu = convTabu, typeTabu=typeTabu)
rm.calcObj()
# print "***** AFTER TABU",rm.objInfo,rm.returnRegions()
# EVALUATE SOLUTION
if rm.objInfo < basicMemory.objInfo:
basicMemory.updateBasicMemory(rm)
time = tm.time() - start
Sol = basicMemory.regions
Of = basicMemory.objInfo
print "FINAL SOLUTION: ", Sol
print "FINAL OF: ", Of
output = { "objectiveFunction": Of,
"runningTime": time,
"algorithm": "maxpTabu",
"regions": len(Sol),
"r2a": Sol,
"distanceType": distanceType,
"distanceStat": distanceStat,
"selectionType": selectionType,
"ObjectiveFuncionType": objectiveFunctionType}
print "Done"
return output
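# Illustrative sketch (not part of clusterPy): calling execMaxpTabu directly.
# The shapes of y and w below are assumptions based on the docstring: y maps
# each area id to its attribute values, with the last value being the spatially
# extensive attribute compared against `threshold`; w maps each area id to the
# list of its contiguous neighbours. All values are made up.
#
# y = {0: [3.2, 120.0], 1: [2.9, 80.0], 2: [4.1, 60.0], 3: [3.7, 150.0]}
# w = {0: [1, 2], 1: [0, 3], 2: [0, 3], 3: [1, 2]}
# result = execMaxpTabu(y, w, threshold=100.0, maxit=2, tabuLength=5)
# print result["regions"], result["objectiveFunction"]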
|
|
import clr
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
clr.AddReference('OSIsoft.AFSDK')
clr.AddReference('System.Net')
import OSIsoft.AF as AF
from PI import config
from System.Net import NetworkCredential
from tqdm import tqdm
__all__ = [
'AF',
'PIDataFrame',
'get_server',
'get_tag',
'interpolated_values',
'search_tag_mask',
'search_tag',
'sample_data',
'sample_big_data',
'save_df',
'save_to_pandas',
'load_from_pickle'
]
class PIDataFrame(pd.DataFrame):
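# DataFrame subclass that carries PI point metadata alongside the data:
# registering 'PIAttributes' in _metadata and overriding _constructor follows
# the pandas subclassing pattern for propagating custom attributes through
# operations that return new DataFrames.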
_metadata = ['PIAttributes']
@property
def _constructor(self):
return PIDataFrame
def get_server(server_name, login=None):
"""Connect to server"""
PI_server = AF.PI.PIServers()[server_name]
if login is not None:
PI_server.Connect(
NetworkCredential(*login),
AF.PI.PIAuthenticationMode.PIUserAuthentication
)
return PI_server
def get_tag(tag_name, server=None):
"""Get a tag.
Parameters
----------
tag_name : str
server : PI.PIServer, optional
PI server, if None the config.current server is used.
"""
if server is None and config.CURRENT_SERVER is None:
raise ValueError('Pass a server or set "PI.config.current_server"')
elif server is None:
server = config.CURRENT_SERVER
return AF.PI.PIPoint.FindPIPoint(server, tag_name)
def interpolated_values(tag, time_range, time_span):
"""Return an object with interpolated values
Parameters
----------
tag : TAG object or str
time_range : tuple
Tuple with start time and end time as str.
time_span : str
Time span (e.g.: '1s', '1d'...)
Returns
-------
interpolated_values
"""
if not isinstance(tag, AF.PI.PIPoint):
tag = get_tag(tag)
time_range_pi = AF.Time.AFTimeRange(*time_range)
time_span_pi = AF.Time.AFTimeSpan.Parse(time_span)
values = None
n_of_tries = 0
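# Retry the PI call up to three times on timeouts; if it still fails,
# fall back to a NaN-filled list of the expected length further below.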
while values is None and n_of_tries < 3:
try:
values_pi = tag.InterpolatedValues(time_range_pi, time_span_pi, '', '')
values = [v.Value for v in values_pi]
except AF.PI.PITimeoutException:
n_of_tries += 1
print(f'PITimeout -> Number of tries: {n_of_tries}')
pass
except AF.PI.PIException:
print(f'Error when trying to get interpolated values for '
f'{tag}, {time_range}, {time_span}')
break
if values is None:
f = config.FREQUENCY[time_span]
number_of_samples = len(pd.date_range(
*pd.to_datetime(time_range, dayfirst=True), freq=f))
values = [np.nan for _ in range(number_of_samples)]
return values
def search_tag_mask(tag_mask, server=None):
"""Search by tag mask.
Parameters
----------
tag_mask : str
Tag mask (e.g.: *FI*290.033*)
server : PI.PIServer, optional
PI server, if None the config.current server is used.
Returns
-------
tags list: list
List with tags (as str) that match the search.
"""
if server is None and config.CURRENT_SERVER is None:
raise ValueError('Pass a server or set "PI.config.current_server"')
elif server is None:
server = config.CURRENT_SERVER
tags = AF.PI.PIPoint.FindPIPoints(server, tag_mask)
return [tag.Name for tag in tags]
def search_tag(tag, server=None):
"""Search by tag mask.
Parameters
----------
tag : str
Tag name or mask (e.g.: *FI*290.033*)
server : PI.PIServer, optional
PI server, if None the config.current server is used.
Returns
-------
tags list: list
List with tags (as str) that match the search.
tags descriptors : list
List with the descriptor (as str) of each matched tag.
"""
if server is None and config.CURRENT_SERVER is None:
raise ValueError('Pass a server or set "PI.config.current_server"')
elif server is None:
server = config.CURRENT_SERVER
tags = AF.PI.PIPoint.FindPIPoints(server, tag, True)
tag_names = [tag.Name for tag in tags]
tag_descr = [
tag.GetAttributes('').__getitem__('descriptor')
for tag in tags
]
return tag_names, tag_descr
def save_df(df, filename=None):
if filename is None:
start = df.index[0]
end = df.index[-1]
filename = (
f'{start.day}-{start.month}-{start.year}'
+ f'--{end.day}-{end.month}-{end.year}'
+ f'{end.freq.name}'
+ f'.df'
)
for ch in [':', '/', ' ']:
if ch in filename:
filename = filename.replace(ch, '_')
with open(filename, 'wb') as f:
pickle.dump([df, df.PIAttributes], f)
print(f'Saved as {filename}')
def save_to_pandas(file):
"""Saves to pandas DataFrame.
It will save PIAttributes separate from the pandas DataFrame.
Parameters
----------
file : file saved with save_df
"""
# load df
df = load_from_pickle(file)
# separate
PIAttributes = df.PIAttributes
df = pd.DataFrame(df)
# save
print(f'Saved as {file + "pd"}')
with open((file + 'pd'), 'wb') as f:
pickle.dump([df, PIAttributes], f)
def load_from_pickle(filename):
"""Load df from pickle.
This function will load the dataframe and the metadata
associated with PIAttributes.
Parameters
----------
filename : str
File name.
Returns
-------
df : DataFrame
A pandas DataFrame with the sample data.
"""
with open(filename, 'rb') as f:
df, PIAttributes = pickle.load(f)
df.PIAttributes = PIAttributes
return df
def sample_data(tags, time_range, time_span, save_data=False, server=None):
"""Get sample data.
Parameters
----------
tags : list
List with tags as str.
time_range : tuple
Tuple with start time and end time as str.
time_span : str
Time span (e.g.: '1s', '1d'...)
server : PI.PIServer, optional
PI server, if None the config.current server is used.
Returns
-------
sample_data : DataFrame
A pandas DataFrame with the sample data.
"""
if server is None and config.CURRENT_SERVER is None:
raise ValueError('Pass a server or set "PI.config.current_server"')
elif server is None:
server = config.CURRENT_SERVER
d = {}
PIAttributes = {}
for t in tags:
tag0 = get_tag(t, server=server)
d[t] = interpolated_values(tag0, time_range, time_span)
# create dictionary with descriptors
tagAttributes = {}
for descr in tag0.GetAttributes(''):
tagAttributes[str(descr.Key)] = str(descr.get_Value())
PIAttributes[str(tag0.Name)] = tagAttributes
# set date_range index
f = config.FREQUENCY[time_span]
p = len(d[tags[0]])
index = pd.date_range(
start=pd.to_datetime(time_range[0], dayfirst=True), periods=p, freq=f
)
try:
df = PIDataFrame(d, index=index)
except ValueError as exc:
df = PIDataFrame(d)
print('Index was not applied: ', exc)
# remove . and - so that tags are accessible as attributes, e.g. 'df.<tag>'
df.columns = [
i.replace('.', '') for i in
[j.replace('-', '') for j in df.columns]
]
old_keys = list(PIAttributes.keys())
for k in old_keys:
new_key = k.replace('.', '').replace('-', '')
PIAttributes[new_key] = PIAttributes.pop(k)
df.PIAttributes = PIAttributes
# eliminate errors such as 'comm fail' before resampling
for col in df.columns:
df[col] = pd.to_numeric(df[col], errors='coerce')
if save_data is True:
save_df(df)
return df
def sample_big_data(tags, time_range, time_span, save_data=False, server=None):
"""Get sample data.
Parameters
----------
tags : list
List with tags as str.
time_range : tuple
Tuple with start time and end time as str.
time_span : str
Time span (e.g.: '1s', '1d'...)
server : PI.PIServer, optional
PI server, if None the config.current server is used.
Returns
-------
sample_data : DataFrame
A pandas DataFrame with the sample data.
"""
# change to pandas datetime to split the calls to PI
start = pd.to_datetime(time_range[0], dayfirst=True)
end = pd.to_datetime(time_range[1], dayfirst=True)
f = config.FREQUENCY[time_span]
date_range = pd.date_range(start, end, freq=f)
chunks = {'S': 1000, 'H': 100, 'D': 10}
ch = chunks[f]  # chunk size: number of date_range points covered by each PI call
if divmod(len(date_range), ch)[1] < 2: # avoid final step with 1 (st=end)
rng = len(date_range) // (ch + 1)
else:
rng = len(date_range) // ch
if rng == 0:
return sample_data(tags=tags, time_range=time_range, time_span=time_span, save_data=save_data)
for i in tqdm(range(rng), desc='Getting Data'):
start = date_range[ch * i]
end = date_range[(ch * (i + 1) - 1)]
# go back to PI string format before getting the data
st = start.strftime('%d/%m/%Y %H:%M:%S')
en = end.strftime('%d/%m/%Y %H:%M:%S')
time_range_pi = (st, en)
if i == 0:
df0 = sample_data(tags, time_range_pi, time_span, server=server)
# store PIAttributes to avoid losing them after append
PIAttributes = df0.PIAttributes
else:
df1 = sample_data(tags, time_range_pi, time_span, server=server)
df0 = df0.append(df1)
# last step
start = date_range[ch * (i + 1)]
end = date_range[-1]
# go back to PI string format before getting the data
st = start.strftime('%d/%m/%Y %H:%M:%S')
en = end.strftime('%d/%m/%Y %H:%M:%S')
time_range_pi = (st, en)
df1 = sample_data(tags, time_range_pi, time_span, server=server)
df0 = df0.append(df1) # we lose the frequency with append
df0 = df0.resample(f).mean() # get the frequency back with resample
df0.PIAttributes = PIAttributes
if save_data is True:
save_df(df0)
return df0
def PI_plot(tags, df, PIAttributes, ax=None):
"""Plot PI values.
Parameters
----------
df : pd.DataFrame
tags : str
String with the tags (e.g.: 'VI290003X VI290003Y')
ax : matplotlib.axes, optional
Matplotlib axes where data will be plotted.
If None creates a new.
Returns
-------
ax : matplotlib.axes
Matplotlib axes with plotted data.
"""
if ax is None:
fig, ax = plt.subplots(figsize=(16, 8))
tags = tags.split(' ')
tagunits = {}
# check how many units/axes
for tag in tags:
# check if unit is already in tagunits
if not PIAttributes[tag]['engunits'] in tagunits.values():
# create unit
tagunits[tag] = PIAttributes[tag]['engunits']
n_units = len(tagunits)
units = [i for i in tagunits.values()]
if n_units > 3:
raise Exception('Cannot plot more than 3 units')
if n_units == 1:
for tag in tags:
series = getattr(df, tag)
ax.plot(series, label=tag)
ax.legend()
else:
axes = [ax.twinx() for i in range(len(tagunits) - 1)]
axes.insert(0, ax)
if n_units == 2:
# set the labels
for _ax, unit in zip(axes, tagunits.values()):
_ax.set_ylabel(unit)
# check unit for each tag and plot to the correct axes
lines = []
labels = []
for tag in tags:
unit = PIAttributes[tag]['engunits']
idx = units.index(unit)
series = getattr(df, tag)
# Make solid lines for lines in ax0
if idx == 0:
line, = axes[idx].plot(series, label=tag)
else:
line, = axes[idx].plot(series, linestyle='--', alpha=0.3, label=tag)
lines.append(line)
labels.append(line.get_label())
ax = axes[0]
box = ax.get_position()
# ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(lines, labels, loc='center left', bbox_to_anchor=(1.05, 0.5))
# ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
if n_units == 3:
# set the labels
for _ax, unit in zip(axes, tagunits.values()):
_ax.set_ylabel(unit)
# check unit for each tag and plot to the correct axes
lines = []
labels = []
for tag in tags:
unit = PIAttributes[tag]['engunits']
idx = units.index(unit)
series = getattr(df, tag)
# Make solid lines for lines in ax0 and keep one color cycle
next_color = axes[0]._get_lines.get_next_color()
if idx == 0:
line, = axes[idx].plot(series, label=tag, color=next_color)
else:
line, = axes[idx].plot(series, linestyle='--', color=next_color, alpha=0.5, label=tag)
lines.append(line)
labels.append(line.get_label())
ax = axes[0]
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(lines, labels, loc='center left', bbox_to_anchor=(1.2, 0.5))
# Make some space on the right side for the extra y-axis.
fig.subplots_adjust(right=0.75)
# Move the last y-axis spine over to the right by 20% of the width of the axes
axes[-1].spines['right'].set_position(('axes', 1.1))
# To make the border of the right-most axis visible, we need to turn the frame
# on. This hides the other plots, however, so we need to turn its fill off.
axes[-1].set_frame_on(True)
axes[-1].patch.set_visible(False)
return ax
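# Illustrative usage sketch (not part of this module); the server name, login,
# tag mask and time range below are placeholders, and assigning
# config.CURRENT_SERVER directly is an assumption about how the current
# server is registered:
#
# server = get_server('MY_PI_SERVER', login=('user', 'password'))
# config.CURRENT_SERVER = server
# tags = search_tag_mask('*FI*290.033*')
# df = sample_data(tags, ('01/01/2020 00:00:00', '02/01/2020 00:00:00'), '1d',
#                  save_data=True)
# PI_plot(' '.join(df.columns), df, df.PIAttributes)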
|
|
# ===============================================================================
# Copyright 2017 dgketchum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
import unittest
import numpy as np
import rasterio
from rasterio.transform import Affine
from datetime import date
from sat_image.image import LandsatImage, Landsat5, Landsat7, Landsat8
class LandsatImageTestCase(unittest.TestCase):
def setUp(self):
self.dir_name_LT5 = 'data/image_test/lt5_image'
def test_earth_sun(self):
landsat = LandsatImage(self.dir_name_LT5)
dist_au = landsat.earth_sun_dist
self.assertAlmostEqual(dist_au, 1.01387, delta=0.01)
def test_mask_poly(self):
landsat = LandsatImage(self.dir_name_LT5)
shape = landsat.get_tile_geometry()
self.assertEqual(shape[0]['coordinates'][0],
[(367035.0, 5082585.0),
(388845.0, 5082585.0), (388845.0, 5060775.0),
(367035.0, 5060775.0), (367035.0, 5082585.0)])
def test_date(self):
landsat = LandsatImage(self.dir_name_LT5)
self.assertEqual(date(2006, 7, 6), landsat.date_acquired)
class Landsat5TestCase(unittest.TestCase):
def setUp(self):
self.dir_name_LT5 = 'data/image_test/lt5_image'
# results from fmask.exe
# bitbucket.org/chchrsc/python-fmask/
self.exp_reflect = 'data/image_test/lt5_image/LT5_reflct_10000x_b1.tif'
self.l5 = Landsat5(self.dir_name_LT5)
self.cell = 150, 150
def test_instantiate_scene(self):
self.assertTrue(self.l5.isdir)
self.assertEqual(self.l5.mtl['L1_METADATA_FILE']['PRODUCT_METADATA']['FILE_NAME_BAND_1'],
'LT05_L1TP_040028_20060706_20160909_01_T1_B1.TIF')
self.assertEqual((self.l5.rasterio_geometry['height'], self.l5.rasterio_geometry['width']),
(727, 727))
self.assertEqual(self.l5.utm_zone, 12)
self.assertEqual(self.l5.ex_atm_irrad, (1958.0, 1827.0, 1551.0,
1036.0, 214.9, np.nan, 80.65))
self.assertEqual(self.l5.rasterio_geometry['height'], 727)
self.assertEqual(self.l5.rasterio_geometry['driver'], 'GTiff')
self.assertEqual(self.l5.rasterio_geometry['dtype'], 'uint16')
self.assertEqual(self.l5.rasterio_geometry['transform'], Affine(30.0, 0.0, 367035.0, 0.0, -30.0, 5082585.0))
def test_reflectance(self):
toa_reflect = self.l5.reflectance(1)[self.cell]
qcal = self.l5._get_band('b1')[self.cell]
qcal_min = self.l5.quantize_cal_min_band_1
qcal_max = self.l5.quantize_cal_max_band_1
l_min = self.l5.radiance_minimum_band_1
l_max = self.l5.radiance_maximum_band_1
radiance = ((l_max - l_min) / (qcal_max - qcal_min)) * (qcal - qcal_min) + l_min
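# Convert DN to at-sensor radiance (above), then to top-of-atmosphere
# reflectance: rho = pi * L * d^2 / (ESUN * cos(solar zenith)).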
toa_reflect_test = (np.pi * radiance) / ((1 / (self.l5.earth_sun_dist ** 2)) * self.l5.ex_atm_irrad[0] * np.cos(
self.l5.solar_zenith_rad))
self.assertAlmostEqual(toa_reflect_test, toa_reflect, delta=0.001)
self.assertAlmostEqual(toa_reflect, 0.1105287, delta=0.001)
with rasterio.open(self.exp_reflect, 'r') as src:
reflct = src.read(1)
reflct = np.array(reflct, dtype=np.float32)
reflct[reflct == 32767.] = np.nan
reflct *= 1 / 10000.
self.assertAlmostEqual(reflct[self.cell], toa_reflect, delta=0.01)
def test_brightness(self):
bright = self.l5.brightness_temp(6)
self.assertAlmostEqual(bright[self.cell], 298.55, delta=0.01)
def test_albedo(self):
albedo = self.l5.albedo()[self.cell]
# inputs for self.cell toa reflect b 1, 3, 4, 5, 7
l = [0.11047232299890863, 0.094736151248181175, 0.22708428311416637, 0.23499215186750311, 0.13805073521100206]
exp_alb = (0.356 * l[0] + 0.130 * l[1] + 0.373 * l[2] + 0.085 * l[3] + 0.072 * l[4] - 0.0018) / 1.014
self.assertAlmostEqual(exp_alb, albedo, delta=0.001)
def test_saturation_mask(self):
green_mask = self.l5.saturation_mask(2)
red_mask = self.l5.saturation_mask(3)
green_sat_cell = 175, 381
red_sat_cell = 96, 305
self.assertTrue(green_mask[green_sat_cell])
self.assertTrue(red_mask[red_sat_cell])
def test_ndvi(self):
ndvi = self.l5.ndvi()[self.cell]
b4, b3 = self.l5.reflectance(4)[self.cell], self.l5.reflectance(3)[self.cell]
ndvi_exp = (b4 - b3) / (b4 + b3)
self.assertEqual(ndvi, ndvi_exp)
def test_ndsi(self):
ndsi = self.l5.ndsi()[self.cell]
b2, b5 = self.l5.reflectance(2)[self.cell], self.l5.reflectance(5)[self.cell]
ndsi_exp = (b2 - b5) / (b2 + b5)
self.assertEqual(ndsi, ndsi_exp)
class Landsat7TestCase(unittest.TestCase):
def setUp(self):
# results from fmask.exe
# bitbucket.org/chchrsc/python-fmask/
self.dir_name_LT7 = 'data/image_test/le7_image'
self.exp_reflect = 'data/image_test/le7_image/LE7_reflct_10000x_b1.tif'
self.l7 = Landsat7(self.dir_name_LT7)
self.cell = 300, 300
def test_instantiate_scene(self):
self.assertEqual(self.l7.mtl['L1_METADATA_FILE']['PRODUCT_METADATA']['FILE_NAME_BAND_1'],
'LE07_L1TP_039028_20100702_20160915_01_T1_B1.TIF')
self.assertEqual(self.l7.utm_zone, 12)
self.assertEqual(self.l7.ex_atm_irrad, (1970.0, 1842.0, 1547.0, 1044.0,
255.700, np.nan, 82.06, 1369.00))
self.assertEqual(self.l7.rasterio_geometry['height'], 727)
self.assertEqual(self.l7.rasterio_geometry['driver'], 'GTiff')
self.assertEqual(self.l7.rasterio_geometry['dtype'], 'uint8')
self.assertEqual(self.l7.rasterio_geometry['transform'], Affine(30.0, 0.0, 367035.0,
0.0, -30.0, 5082585.0))
def test_reflectance(self):
toa_reflect = self.l7.reflectance(1)
with rasterio.open(self.exp_reflect, 'r') as src:
reflct = src.read(1)
reflct = np.array(reflct, dtype=np.float32)
reflct[reflct == 32767.] = np.nan
reflct *= 1 / 10000.
toa_reflect = np.where(np.isnan(reflct), reflct, toa_reflect)
self.assertAlmostEqual(reflct[self.cell], toa_reflect[self.cell], delta=0.01)
def test_brightness(self):
bright = self.l7.brightness_temp(6)
self.assertAlmostEqual(bright[self.cell], 259.98, delta=0.01)
def test_albedo(self):
albedo = self.l7.albedo()[self.cell]
# inputs for self.cell toa reflect b 1, 3, 4, 5, 7
l = [0.30141704688299908, 0.26113788900694823, 0.37401738034983784, 0.15728264090788563, 0.11929144012910768]
exp_alb = (0.356 * l[0] + 0.130 * l[1] + 0.373 * l[2] + 0.085 * l[3] + 0.072 * l[4] - 0.0018) / 1.014
self.assertAlmostEqual(exp_alb, albedo, delta=0.001)
def test_saturation_mask(self):
green_mask = self.l7.saturation_mask(2)
red_mask = self.l7.saturation_mask(3)
green_sat_cell = 65, 398
red_sat_cell = 4, 52
self.assertTrue(green_mask[green_sat_cell])
self.assertTrue(red_mask[red_sat_cell])
def test_ndvi(self):
ndvi = self.l7.ndvi()
ndvi_cell = ndvi[self.cell]
b4, b3 = self.l7.reflectance(4)[self.cell], self.l7.reflectance(3)[self.cell]
ndvi_exp = (b4 - b3) / (b4 + b3)
self.assertEqual(ndvi_cell, ndvi_exp)
# home = os.path.expanduser('~')
# outdir = os.path.join(home, 'images', 'sandbox')
# self.l7.save_array(ndvi, os.path.join(outdir, 'ndvi.tif'))
def test_ndsi(self):
ndsi = self.l7.ndsi()[self.cell]
b2, b5 = self.l7.reflectance(2)[self.cell], self.l7.reflectance(5)[self.cell]
ndsi_exp = (b2 - b5) / (b2 + b5)
self.assertEqual(ndsi, ndsi_exp)
class Landsat8TestCase(unittest.TestCase):
def setUp(self):
self.dirname_cloud = 'data/image_test/lc8_image'
# results from rio-toa
self.ex_bright = os.path.join(self.dirname_cloud, 'LC8_brightemp_B10.TIF')
self.ex_reflect = os.path.join(self.dirname_cloud, 'LC8_reflct_B1.TIF')
self.cell = 300, 300
def test_instantiate_scene(self):
l8 = Landsat8(self.dirname_cloud)
self.assertEqual(l8.mtl['L1_METADATA_FILE']['PRODUCT_METADATA']['FILE_NAME_BAND_1'],
'LC80400282014193LGN00_B1.TIF')
self.assertEqual(l8.utm_zone, 12)
self.assertEqual(l8.rasterio_geometry['height'], 727)
self.assertEqual(l8.rasterio_geometry['driver'], 'GTiff')
self.assertEqual(l8.rasterio_geometry['dtype'], 'uint16')
self.assertEqual(l8.rasterio_geometry['transform'], Affine(30.0, 0.0, 367035.0,
0.0, -30.0, 5082585.0))
def test_toa_brightness(self):
l8 = Landsat8(self.dirname_cloud)
with rasterio.open(self.ex_bright, 'r') as src:
ex_br = src.read(1)
bright = l8.brightness_temp(10)
self.assertEqual(bright.shape, ex_br.shape)
self.assertAlmostEqual(ex_br[self.cell],
bright[self.cell],
delta=0.001)
def test_toa_reflectance(self):
l8 = Landsat8(self.dirname_cloud)
with rasterio.open(self.ex_reflect, 'r') as src:
expected_reflectance = src.read(1)
reflectance = l8.reflectance(1)
# toa has a problem
#
# self.assertAlmostEqual(expected_reflectance[self.cell],
# reflectance[self.cell],
# delta=0.001)
def test_albedo(self):
l8 = Landsat8(self.dirname_cloud)
albedo = l8.albedo()[self.cell]
# inputs for self.cell toa reflect b 2, 4, 5, 6, 7
l = [0.021763351, 0.065929502, 0.33231941, 0.2018306, 0.10294776]
exp_alb = (0.356 * l[0] + 0.130 * l[1] + 0.373 * l[2] + 0.085 * l[3] + 0.072 * l[4] - 0.0018) / 1.014
self.assertAlmostEqual(exp_alb, albedo, delta=0.001)
def test_ndvi(self):
l8 = Landsat8(self.dirname_cloud)
ndvi = l8.ndvi()[self.cell]
b5, b4 = l8.reflectance(5)[self.cell], l8.reflectance(4)[self.cell]
ndvi_exp = (b5 - b4) / (b5 + b4)
self.assertEqual(ndvi, ndvi_exp)
def test_ndsi(self):
l8 = Landsat8(self.dirname_cloud)
ndsi = l8.ndsi()[self.cell]
b3, b6 = l8.reflectance(3)[self.cell], l8.reflectance(6)[self.cell]
ndsi_exp = (b3 - b6) / (b3 + b6)
self.assertEqual(ndsi, ndsi_exp)
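# A hedged consolidation sketch of the expected-albedo arithmetic asserted in the
# albedo tests above. The weights, offset and divisor are copied verbatim from the
# assertions; the per-sensor band ordering is an assumption documented in each
# test's inline comment. The tests themselves do not use this helper.
def expected_albedo(reflectances):
    """Weighted broadband albedo from five TOA reflectance values, given in the
    band order listed in the corresponding test's comment."""
    weights = (0.356, 0.130, 0.373, 0.085, 0.072)
    return (sum(w * r for w, r in zip(weights, reflectances)) - 0.0018) / 1.014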
if __name__ == '__main__':
unittest.main()
# ===============================================================================
|
|
"""
This module implements the main Evennia server process, the core of
the game engine.
This module should be started with the 'twistd' executable since it
sets up all the networking features. (this is done automatically
by game/evennia.py).
"""
import time
import sys
import os
if os.name == 'nt':
# For Windows batchfile we need an extra path insertion here.
sys.path.insert(0, os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))))
from twisted.web import server, static
from twisted.application import internet, service
from twisted.internet import reactor, defer
import django
django.setup()
from django.db import connection
from django.conf import settings
from src.players.models import PlayerDB
from src.scripts.models import ScriptDB
from src.server.models import ServerConfig
from src.server import initial_setup
from src.utils.utils import get_evennia_version, mod_import, make_iter
from src.comms import channelhandler
from src.server.sessionhandler import SESSIONS
# setting up server-side field cache
from django.db.models.signals import post_save
from src.server.caches import field_post_save
#pre_save.connect(field_pre_save, dispatch_uid="fieldcache")
post_save.connect(field_post_save, dispatch_uid="fieldcache")
#from src.server.caches import post_attr_update
#from django.db.models.signals import m2m_changed
# connect to attribute cache signal
#m2m_changed.connect(post_attr_update, sender=TypedObject.db_attributes.through)
_SA = object.__setattr__
if os.name == 'nt':
# For Windows we need to handle pid files manually.
SERVER_PIDFILE = os.path.join(settings.GAME_DIR, 'server.pid')
# a file with a flag telling the server to restart after shutdown or not.
SERVER_RESTART = os.path.join(settings.GAME_DIR, 'server.restart')
# module containing hook methods called during start_stop
SERVER_STARTSTOP_MODULE = mod_import(settings.AT_SERVER_STARTSTOP_MODULE)
# module containing plugin services
SERVER_SERVICES_PLUGIN_MODULES = [mod_import(module) for module in make_iter(settings.SERVER_SERVICES_PLUGIN_MODULES)]
#------------------------------------------------------------
# Evennia Server settings
#------------------------------------------------------------
SERVERNAME = settings.SERVERNAME
VERSION = get_evennia_version()
AMP_ENABLED = True
AMP_HOST = settings.AMP_HOST
AMP_PORT = settings.AMP_PORT
AMP_INTERFACE = settings.AMP_INTERFACE
WEBSERVER_PORTS = settings.WEBSERVER_PORTS
WEBSERVER_INTERFACES = settings.WEBSERVER_INTERFACES
GUEST_ENABLED = settings.GUEST_ENABLED
# server-channel mappings
WEBSERVER_ENABLED = settings.WEBSERVER_ENABLED and WEBSERVER_PORTS and WEBSERVER_INTERFACES
IMC2_ENABLED = settings.IMC2_ENABLED
IRC_ENABLED = settings.IRC_ENABLED
RSS_ENABLED = settings.RSS_ENABLED
WEBCLIENT_ENABLED = settings.WEBCLIENT_ENABLED
#------------------------------------------------------------
# Evennia Main Server object
#------------------------------------------------------------
class Evennia(object):
"""
The main Evennia server handler. This object sets up the database and
tracks and interlinks all the twisted network services that make up
evennia.
"""
def __init__(self, application):
"""
        Set up the server.
application - an instantiated Twisted application
"""
sys.path.append('.')
# create a store of services
self.services = service.IServiceCollection(application)
self.amp_protocol = None # set by amp factory
self.sessions = SESSIONS
self.sessions.server = self
# Database-specific startup optimizations.
self.sqlite3_prep()
# Run the initial setup if needed
self.run_initial_setup()
self.start_time = time.time()
# initialize channelhandler
channelhandler.CHANNELHANDLER.update()
# set a callback if the server is killed abruptly,
# by Ctrl-C, reboot etc.
reactor.addSystemEventTrigger('before', 'shutdown',
self.shutdown, _reactor_stopping=True)
self.game_running = True
self.run_init_hooks()
# Server startup methods
def sqlite3_prep(self):
"""
        Optimize some SQLite settings at startup since they
        cannot be saved persistently in the database.
"""
if ((".".join(str(i) for i in django.VERSION) < "1.2" and settings.DATABASE_ENGINE == "sqlite3")
or (hasattr(settings, 'DATABASES')
and settings.DATABASES.get("default", {}).get('ENGINE', None)
== 'django.db.backends.sqlite3')):
cursor = connection.cursor()
cursor.execute("PRAGMA cache_size=10000")
cursor.execute("PRAGMA synchronous=OFF")
cursor.execute("PRAGMA count_changes=OFF")
cursor.execute("PRAGMA temp_store=2")
def update_defaults(self):
"""
        We make sure to store the most important object defaults here, so
        we can catch if they change and update them on existing objects
        automatically. This allows changing the default cmdset locations and
        default typeclasses in the settings file and having them auto-update
        all already existing objects.
"""
# setting names
settings_names = ("CMDSET_CHARACTER", "CMDSET_PLAYER",
"BASE_PLAYER_TYPECLASS", "BASE_OBJECT_TYPECLASS",
"BASE_CHARACTER_TYPECLASS", "BASE_ROOM_TYPECLASS",
"BASE_EXIT_TYPECLASS", "BASE_SCRIPT_TYPECLASS",
"BASE_CHANNEL_TYPECLASS")
# get previous and current settings so they can be compared
settings_compare = zip([ServerConfig.objects.conf(name) for name in settings_names],
[settings.__getattr__(name) for name in settings_names])
mismatches = [i for i, tup in enumerate(settings_compare) if tup[0] and tup[1] and tup[0] != tup[1]]
if len(mismatches): # can't use any() since mismatches may be [0] which reads as False for any()
# we have a changed default. Import relevant objects and
# run the update
from src.objects.models import ObjectDB
from src.comms.models import ChannelDB
#from src.players.models import PlayerDB
for i, prev, curr in ((i, tup[0], tup[1]) for i, tup in enumerate(settings_compare) if i in mismatches):
# update the database
print " %s:\n '%s' changed to '%s'. Updating unchanged entries in database ..." % (settings_names[i], prev, curr)
if i == 0:
[obj.__setattr__("cmdset_storage", curr) for obj in ObjectDB.objects.filter(db_cmdset_storage__exact=prev)]
if i == 1:
[ply.__setattr__("cmdset_storage", curr) for ply in PlayerDB.objects.filter(db_cmdset_storage__exact=prev)]
if i == 2:
[ply.__setattr__("typeclass_path", curr) for ply in PlayerDB.objects.filter(db_typeclass_path__exact=prev)]
if i in (3, 4, 5, 6):
[obj.__setattr__("typeclass_path", curr) for obj in ObjectDB.objects.filter(db_typeclass_path__exact=prev)]
if i == 7:
[scr.__setattr__("typeclass_path", curr) for scr in ScriptDB.objects.filter(db_typeclass_path__exact=prev)]
if i == 8:
[scr.__setattr__("typeclass_path", curr) for scr in ChannelDB.objects.filter(db_typeclass_path__exact=prev)]
# store the new default and clean caches
ServerConfig.objects.conf(settings_names[i], curr)
ObjectDB.flush_instance_cache()
PlayerDB.flush_instance_cache()
ScriptDB.flush_instance_cache()
ChannelDB.flush_instance_cache()
# if this is the first start we might not have a "previous"
# setup saved. Store it now.
[ServerConfig.objects.conf(settings_names[i], tup[1])
for i, tup in enumerate(settings_compare) if not tup[0]]
def run_initial_setup(self):
"""
        This attempts to run the initial_setup script of the server.
        It returns without doing anything if this is not the first time
        the server starts. Once finished, last_initial_setup_step is set to -1.
"""
last_initial_setup_step = ServerConfig.objects.conf('last_initial_setup_step')
if not last_initial_setup_step:
# None is only returned if the config does not exist,
# i.e. this is an empty DB that needs populating.
print ' Server started for the first time. Setting defaults.'
initial_setup.handle_setup(0)
print '-' * 50
elif int(last_initial_setup_step) >= 0:
# a positive value means the setup crashed on one of its
# modules and setup will resume from this step, retrying
# the last failed module. When all are finished, the step
# is set to -1 to show it does not need to be run again.
print ' Resuming initial setup from step %(last)s.' % \
{'last': last_initial_setup_step}
initial_setup.handle_setup(int(last_initial_setup_step))
print '-' * 50
def run_init_hooks(self):
"""
Called every server start
"""
from src.objects.models import ObjectDB
#from src.players.models import PlayerDB
#update eventual changed defaults
self.update_defaults()
#print "run_init_hooks:", ObjectDB.get_all_cached_instances()
[(o.typeclass, o.at_init()) for o in ObjectDB.get_all_cached_instances()]
[(p.typeclass, p.at_init()) for p in PlayerDB.get_all_cached_instances()]
with open(SERVER_RESTART, 'r') as f:
mode = f.read()
if mode in ('True', 'reload'):
from src.server.oobhandler import OOB_HANDLER
OOB_HANDLER.restore()
from src.scripts.tickerhandler import TICKER_HANDLER
TICKER_HANDLER.restore()
# call correct server hook based on start file value
if mode in ('True', 'reload'):
            # True was the old reload flag, kept for compatibility
self.at_server_reload_start()
elif mode in ('reset', 'shutdown'):
self.at_server_cold_start()
# clear eventual lingering session storages
ObjectDB.objects.clear_all_sessids()
# always call this regardless of start type
self.at_server_start()
def set_restart_mode(self, mode=None):
"""
This manages the flag file that tells the runner if the server is
reloading, resetting or shutting down. Valid modes are
'reload', 'reset', 'shutdown' and None.
        If mode is None, no change will be made to the flag file.
        Either way, the active restart setting (Restart=True/False) is
        returned so the server knows which mode it's in.
"""
if mode is None:
with open(SERVER_RESTART, 'r') as f:
# mode is either shutdown, reset or reload
mode = f.read()
else:
with open(SERVER_RESTART, 'w') as f:
f.write(str(mode))
return mode
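    # Usage sketch (hypothetical call sites, for illustration only): the restart
    # flag is written before a restart and read back on the next start, e.g.
    #   EVENNIA.set_restart_mode('reload')   # persist 'reload' to the flag file
    #   mode = EVENNIA.set_restart_mode()    # read back the currently stored flag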
@defer.inlineCallbacks
def shutdown(self, mode=None, _reactor_stopping=False):
"""
Shuts down the server from inside it.
mode - sets the server restart mode.
'reload' - server restarts, no "persistent" scripts
are stopped, at_reload hooks called.
'reset' - server restarts, non-persistent scripts stopped,
at_shutdown hooks called.
'shutdown' - like reset, but server will not auto-restart.
None - keep currently set flag from flag file.
_reactor_stopping - this is set if server is stopped by a kill
command OR this method was already called
once - in both cases the reactor is
dead/stopping already.
"""
if _reactor_stopping and hasattr(self, "shutdown_complete"):
# this means we have already passed through this method
# once; we don't need to run the shutdown procedure again.
defer.returnValue(None)
mode = self.set_restart_mode(mode)
# call shutdown hooks on all cached objects
from src.objects.models import ObjectDB
#from src.players.models import PlayerDB
from src.server.models import ServerConfig
if mode == 'reload':
# call restart hooks
yield [(o.typeclass, o.at_server_reload())
for o in ObjectDB.get_all_cached_instances()]
yield [(p.typeclass, p.at_server_reload())
for p in PlayerDB.get_all_cached_instances()]
yield [(s.typeclass, s.pause(), s.at_server_reload())
for s in ScriptDB.get_all_cached_instances()]
yield self.sessions.all_sessions_portal_sync()
ServerConfig.objects.conf("server_restart_mode", "reload")
from src.server.oobhandler import OOB_HANDLER
OOB_HANDLER.save()
from src.scripts.tickerhandler import TICKER_HANDLER
TICKER_HANDLER.save()
self.at_server_reload_stop()
else:
if mode == 'reset':
# don't unset the is_connected flag on reset, otherwise
# same as shutdown
yield [(o.typeclass, o.at_server_shutdown())
for o in ObjectDB.get_all_cached_instances()]
yield [(p.typeclass, p.at_server_shutdown())
for p in PlayerDB.get_all_cached_instances()]
else: # shutdown
yield [_SA(p, "is_connected", False)
for p in PlayerDB.get_all_cached_instances()]
yield [(o.typeclass, o.at_server_shutdown())
for o in ObjectDB.get_all_cached_instances()]
yield [(p.typeclass, p.unpuppet_all(), p.at_server_shutdown())
for p in PlayerDB.get_all_cached_instances()]
yield [(s.typeclass, s.at_server_shutdown())
for s in ScriptDB.get_all_cached_instances()]
yield ObjectDB.objects.clear_all_sessids()
ServerConfig.objects.conf("server_restart_mode", "reset")
self.at_server_cold_stop()
# stopping time
from src.utils import gametime
gametime.save()
self.at_server_stop()
# if _reactor_stopping is true, reactor does not need to
# be stopped again.
if os.name == 'nt' and os.path.exists(SERVER_PIDFILE):
# for Windows we need to remove pid files manually
os.remove(SERVER_PIDFILE)
if not _reactor_stopping:
# this will also send a reactor.stop signal, so we set a
# flag to avoid loops.
self.shutdown_complete = True
reactor.callLater(0, reactor.stop)
# server start/stop hooks
def at_server_start(self):
"""
This is called every time the server starts up, regardless of
how it was shut down.
"""
if SERVER_STARTSTOP_MODULE:
SERVER_STARTSTOP_MODULE.at_server_start()
def at_server_stop(self):
"""
This is called just before a server is shut down, regardless
        of whether it is for a reload, reset or shutdown.
"""
if SERVER_STARTSTOP_MODULE:
SERVER_STARTSTOP_MODULE.at_server_stop()
def at_server_reload_start(self):
"""
This is called only when server starts back up after a reload.
"""
if SERVER_STARTSTOP_MODULE:
SERVER_STARTSTOP_MODULE.at_server_reload_start()
def at_server_reload_stop(self):
"""
        This is called only when the server stops before a reload.
"""
if SERVER_STARTSTOP_MODULE:
SERVER_STARTSTOP_MODULE.at_server_reload_stop()
def at_server_cold_start(self):
"""
This is called only when the server starts "cold", i.e. after a
shutdown or a reset.
"""
if GUEST_ENABLED:
for guest in PlayerDB.objects.all().filter(db_typeclass_path=settings.BASE_GUEST_TYPECLASS):
for character in filter(None, guest.db._playable_characters):
character.delete()
guest.delete()
if SERVER_STARTSTOP_MODULE:
SERVER_STARTSTOP_MODULE.at_server_cold_start()
def at_server_cold_stop(self):
"""
This is called only when the server goes down due to a shutdown or reset.
"""
if SERVER_STARTSTOP_MODULE:
SERVER_STARTSTOP_MODULE.at_server_cold_stop()
#------------------------------------------------------------
#
# Start the Evennia game server and add all active services
#
#------------------------------------------------------------
# Tell the system the server is starting up; some things are not available yet
ServerConfig.objects.conf("server_starting_mode", True)
# twistd requires us to define the variable 'application' so it knows
# what to execute from.
application = service.Application('Evennia')
# The main evennia server program. This sets up the database
# and is where we store all the other services.
EVENNIA = Evennia(application)
print '-' * 50
print ' %(servername)s Server (%(version)s) started.' % {'servername': SERVERNAME, 'version': VERSION}
if AMP_ENABLED:
# The AMP protocol handles the communication between
    # the portal and the mud server. The only reason to ever deactivate
    # it would be for testing and debugging.
ifacestr = ""
if AMP_INTERFACE != '127.0.0.1':
ifacestr = "-%s" % AMP_INTERFACE
print ' amp (to Portal)%s: %s' % (ifacestr, AMP_PORT)
from src.server import amp
factory = amp.AmpServerFactory(EVENNIA)
amp_service = internet.TCPServer(AMP_PORT, factory, interface=AMP_INTERFACE)
amp_service.setName("EvenniaPortal")
EVENNIA.services.addService(amp_service)
if WEBSERVER_ENABLED:
# Start a django-compatible webserver.
from twisted.python import threadpool
from src.server.webserver import DjangoWebRoot, WSGIWebServer
# start a thread pool and define the root url (/) as a wsgi resource
# recognized by Django
threads = threadpool.ThreadPool(minthreads=max(1, settings.WEBSERVER_THREADPOOL_LIMITS[0]),
maxthreads=max(1, settings.WEBSERVER_THREADPOOL_LIMITS[1]))
web_root = DjangoWebRoot(threads)
# point our media resources to url /media
web_root.putChild("media", static.File(settings.MEDIA_ROOT))
# point our static resources to url /static
web_root.putChild("static", static.File(settings.STATIC_ROOT))
web_site = server.Site(web_root, logPath=settings.HTTP_LOG_FILE)
for proxyport, serverport in WEBSERVER_PORTS:
# create the webserver (we only need the port for this)
webserver = WSGIWebServer(threads, serverport, web_site, interface='127.0.0.1')
webserver.setName('EvenniaWebServer%s' % serverport)
EVENNIA.services.addService(webserver)
print " webserver: %s" % serverport
ENABLED = []
if IRC_ENABLED:
# IRC channel connections
ENABLED.append('irc')
if IMC2_ENABLED:
# IMC2 channel connections
ENABLED.append('imc2')
if RSS_ENABLED:
# RSS feed channel connections
ENABLED.append('rss')
if ENABLED:
print " " + ", ".join(ENABLED) + " enabled."
for plugin_module in SERVER_SERVICES_PLUGIN_MODULES:
# external plugin protocols
plugin_module.start_plugin_services(EVENNIA)
print '-' * 50 # end of terminal output
# clear server startup mode
ServerConfig.objects.conf("server_starting_mode", delete=True)
if os.name == 'nt':
# Windows only: Set PID file manually
f = open(os.path.join(settings.GAME_DIR, 'server.pid'), 'w')
f.write(str(os.getpid()))
f.close()
|
|
# Copyright 2013-2017 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import sys
import warnings
if __name__ == '__main__' and sys.argv[1] == "gevent_nosetests":
print("Running gevent tests")
from gevent.monkey import patch_all
patch_all()
if __name__ == '__main__' and sys.argv[1] == "eventlet_nosetests":
print("Running eventlet tests")
from eventlet import monkey_patch
monkey_patch()
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup
from distutils.command.build_ext import build_ext
from distutils.core import Extension
from distutils.errors import (CCompilerError, DistutilsPlatformError,
DistutilsExecError)
from distutils.cmd import Command
PY3 = sys.version_info[0] == 3
try:
import subprocess
has_subprocess = True
except ImportError:
has_subprocess = False
from cassandra import __version__
long_description = ""
with open("README.rst") as f:
long_description = f.read()
try:
from nose.commands import nosetests
except ImportError:
gevent_nosetests = None
eventlet_nosetests = None
else:
class gevent_nosetests(nosetests):
description = "run nosetests with gevent monkey patching"
class eventlet_nosetests(nosetests):
description = "run nosetests with eventlet monkey patching"
has_cqlengine = False
if __name__ == '__main__' and sys.argv[1] == "install":
try:
import cqlengine
has_cqlengine = True
except ImportError:
pass
PROFILING = False
class DocCommand(Command):
description = "generate or test documentation"
user_options = [("test", "t",
"run doctests instead of generating documentation")]
boolean_options = ["test"]
def initialize_options(self):
self.test = False
def finalize_options(self):
pass
def run(self):
if self.test:
path = "docs/_build/doctest"
mode = "doctest"
else:
path = "docs/_build/%s" % __version__
mode = "html"
try:
os.makedirs(path)
except:
pass
if has_subprocess:
# Prevent run with in-place extensions because cython-generated objects do not carry docstrings
# http://docs.cython.org/src/userguide/special_methods.html#docstrings
import glob
for f in glob.glob("cassandra/*.so"):
print("Removing '%s' to allow docs to run on pure python modules." %(f,))
os.unlink(f)
# Build io extension to make import and docstrings work
try:
output = subprocess.check_output(
["python", "setup.py", "build_ext", "--inplace", "--force", "--no-murmur3", "--no-cython"],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
raise RuntimeError("Documentation step '%s' failed: %s: %s" % ("build_ext", exc, exc.output))
else:
print(output)
try:
output = subprocess.check_output(
["sphinx-build", "-b", mode, "docs", path],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
raise RuntimeError("Documentation step '%s' failed: %s: %s" % (mode, exc, exc.output))
else:
print(output)
print("")
print("Documentation step '%s' performed, results here:" % mode)
print(" file://%s/%s/index.html" % (os.path.dirname(os.path.realpath(__file__)), path))
class BuildFailed(Exception):
def __init__(self, ext):
self.ext = ext
murmur3_ext = Extension('cassandra.cmurmur3',
sources=['cassandra/cmurmur3.c'])
libev_ext = Extension('cassandra.io.libevwrapper',
sources=['cassandra/io/libevwrapper.c'],
include_dirs=['/usr/include/libev', '/usr/local/include', '/opt/local/include'],
libraries=['ev'],
library_dirs=['/usr/local/lib', '/opt/local/lib'])
platform_unsupported_msg = \
"""
===============================================================================
The optional C extensions are not supported on this platform.
===============================================================================
"""
arch_unsupported_msg = \
"""
===============================================================================
The optional C extensions are not supported on big-endian systems.
===============================================================================
"""
pypy_unsupported_msg = \
"""
=================================================================================
Some optional C extensions are not supported in PyPy. Only murmur3 will be built.
=================================================================================
"""
is_windows = os.name == 'nt'
is_pypy = "PyPy" in sys.version
if is_pypy:
sys.stderr.write(pypy_unsupported_msg)
is_supported_platform = sys.platform != "cli" and not sys.platform.startswith("java")
is_supported_arch = sys.byteorder != "big"
if not is_supported_platform:
sys.stderr.write(platform_unsupported_msg)
elif not is_supported_arch:
sys.stderr.write(arch_unsupported_msg)
try_extensions = "--no-extensions" not in sys.argv and is_supported_platform and is_supported_arch and not os.environ.get('CASS_DRIVER_NO_EXTENSIONS')
try_murmur3 = try_extensions and "--no-murmur3" not in sys.argv
try_libev = try_extensions and "--no-libev" not in sys.argv and not is_pypy and not is_windows
try_cython = try_extensions and "--no-cython" not in sys.argv and not is_pypy and not os.environ.get('CASS_DRIVER_NO_CYTHON')
try_cython &= 'egg_info' not in sys.argv  # bypass setup_requires for pip egg_info calls, which will never have --install-option="--no-cython" coming from pip
sys.argv = [a for a in sys.argv if a not in ("--no-murmur3", "--no-libev", "--no-cython", "--no-extensions")]
build_concurrency = int(os.environ.get('CASS_DRIVER_BUILD_CONCURRENCY', '0'))
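# Hedged invocation sketch (assumed command lines, not part of this file):
#   python setup.py install --no-extensions     # skip every optional C extension
#   python setup.py install --no-cython         # attempt only the murmur3/libev extensions
#   CASS_DRIVER_NO_EXTENSIONS=1 pip install .   # same effect via the environment
# The custom flags are consumed and stripped from sys.argv above, so setuptools
# itself never sees them.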
class NoPatchExtension(Extension):
    # Older versions of setuptools.extension have a static flag which is set False before our
# setup_requires lands Cython. It causes our *.pyx sources to be renamed to *.c in
# the initializer.
# The other workaround would be to manually generate sources, but that bypasses a lot
# of the niceness cythonize embodies (setup build dir, conditional build, etc).
# Newer setuptools does not have this problem because it checks for cython dynamically.
# https://bitbucket.org/pypa/setuptools/commits/714c3144e08fd01a9f61d1c88411e76d2538b2e4
def __init__(self, *args, **kwargs):
# bypass the patched init if possible
if Extension.__bases__:
base, = Extension.__bases__
base.__init__(self, *args, **kwargs)
else:
Extension.__init__(self, *args, **kwargs)
class build_extensions(build_ext):
error_message = """
===============================================================================
WARNING: could not compile %s.
The C extensions are not required for the driver to run, but they add support
for token-aware routing with the Murmur3Partitioner.
On Windows, make sure Visual Studio or an SDK is installed, and your environment
is configured to build for the appropriate architecture (matching your Python runtime).
This is often a matter of using vcvarsall.bat from your install directory, or running
from a command prompt in the Visual Studio Tools Start Menu.
===============================================================================
""" if is_windows else """
===============================================================================
WARNING: could not compile %s.
The C extensions are not required for the driver to run, but they add support
for libev and token-aware routing with the Murmur3Partitioner.
Linux users should ensure that GCC and the Python headers are available.
On Ubuntu and Debian, this can be accomplished by running:
$ sudo apt-get install build-essential python-dev
On RedHat and RedHat-based systems like CentOS and Fedora:
$ sudo yum install gcc python-devel
On OSX, homebrew installations of Python should provide the necessary headers.
libev Support
-------------
For libev support, you will also need to install libev and its headers.
On Debian/Ubuntu:
$ sudo apt-get install libev4 libev-dev
On RHEL/CentOS/Fedora:
$ sudo yum install libev libev-devel
On OSX, via homebrew:
$ brew install libev
===============================================================================
"""
def run(self):
try:
self._setup_extensions()
build_ext.run(self)
except DistutilsPlatformError as exc:
sys.stderr.write('%s\n' % str(exc))
warnings.warn(self.error_message % "C extensions.")
def build_extensions(self):
if build_concurrency > 1:
self.check_extensions_list(self.extensions)
import multiprocessing.pool
multiprocessing.pool.ThreadPool(processes=build_concurrency).map(self.build_extension, self.extensions)
else:
build_ext.build_extensions(self)
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except (CCompilerError, DistutilsExecError,
DistutilsPlatformError, IOError) as exc:
sys.stderr.write('%s\n' % str(exc))
name = "The %s extension" % (ext.name,)
warnings.warn(self.error_message % (name,))
def _setup_extensions(self):
        # We defer extension setup until this command to leverage 'setup_requires' pulling in Cython before we
# attempt to import anything
self.extensions = []
if try_murmur3:
self.extensions.append(murmur3_ext)
if try_libev:
self.extensions.append(libev_ext)
if try_cython:
try:
from Cython.Build import cythonize
cython_candidates = ['cluster', 'concurrent', 'connection', 'cqltypes', 'metadata',
'pool', 'protocol', 'query', 'util']
compile_args = [] if is_windows else ['-Wno-unused-function']
self.extensions.extend(cythonize(
[Extension('cassandra.%s' % m, ['cassandra/%s.py' % m],
extra_compile_args=compile_args)
for m in cython_candidates],
nthreads=build_concurrency,
exclude_failures=True))
self.extensions.extend(cythonize(NoPatchExtension("*", ["cassandra/*.pyx"], extra_compile_args=compile_args),
nthreads=build_concurrency))
except Exception:
sys.stderr.write("Failed to cythonize one or more modules. These will not be compiled as extensions (optional).\n")
def pre_build_check():
"""
Try to verify build tools
"""
if os.environ.get('CASS_DRIVER_NO_PRE_BUILD_CHECK'):
return True
try:
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler
from distutils.dist import Distribution
# base build_ext just to emulate compiler option setup
be = build_ext(Distribution())
be.initialize_options()
be.finalize_options()
# First, make sure we have a Python include directory
have_python_include = any(os.path.isfile(os.path.join(p, 'Python.h')) for p in be.include_dirs)
if not have_python_include:
sys.stderr.write("Did not find 'Python.h' in %s.\n" % (be.include_dirs,))
return False
compiler = new_compiler(compiler=be.compiler)
customize_compiler(compiler)
executables = []
if compiler.compiler_type in ('unix', 'cygwin'):
executables = [compiler.executables[exe][0] for exe in ('compiler_so', 'linker_so')]
elif compiler.compiler_type == 'nt':
executables = [getattr(compiler, exe) for exe in ('cc', 'linker')]
if executables:
from distutils.spawn import find_executable
for exe in executables:
if not find_executable(exe):
sys.stderr.write("Failed to find %s for compiler type %s.\n" % (exe, compiler.compiler_type))
return False
except Exception as exc:
sys.stderr.write('%s\n' % str(exc))
sys.stderr.write("Failed pre-build check. Attempting anyway.\n")
# if we are unable to positively id the compiler type, or one of these assumptions fails,
# just proceed as we would have without the check
return True
def run_setup(extensions):
kw = {'cmdclass': {'doc': DocCommand}}
if gevent_nosetests is not None:
kw['cmdclass']['gevent_nosetests'] = gevent_nosetests
if eventlet_nosetests is not None:
kw['cmdclass']['eventlet_nosetests'] = eventlet_nosetests
kw['cmdclass']['build_ext'] = build_extensions
kw['ext_modules'] = [Extension('DUMMY', [])] # dummy extension makes sure build_ext is called for install
if try_cython:
# precheck compiler before adding to setup_requires
# we don't actually negate try_cython because:
# 1.) build_ext eats errors at compile time, letting the install complete while producing useful feedback
# 2.) there could be a case where the python environment has cython installed but the system doesn't have build tools
if pre_build_check():
cython_dep = 'Cython>=0.20,<0.25'
user_specified_cython_version = os.environ.get('CASS_DRIVER_ALLOWED_CYTHON_VERSION')
if user_specified_cython_version is not None:
cython_dep = 'Cython==%s' % (user_specified_cython_version,)
kw['setup_requires'] = [cython_dep]
else:
sys.stderr.write("Bypassing Cython setup requirement\n")
dependencies = ['six >=1.9']
if not PY3:
dependencies.append('futures')
setup(
name='cassandra-driver',
version=__version__,
description='Python driver for Cassandra',
long_description=long_description,
url='http://github.com/datastax/python-driver',
author='Tyler Hobbs',
author_email='tyler@datastax.com',
packages=['cassandra', 'cassandra.io', 'cassandra.cqlengine'],
keywords='cassandra,cql,orm',
include_package_data=True,
install_requires=dependencies,
tests_require=['nose', 'mock<=1.0.1', 'PyYAML', 'pytz', 'sure'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries :: Python Modules'
],
**kw)
run_setup(None)
if has_cqlengine:
warnings.warn("\n#######\n'cqlengine' package is present on path: %s\n"
"cqlengine is now an integrated sub-package of this driver.\n"
"It is recommended to remove this package to reduce the chance for conflicting usage" % cqlengine.__file__)
|
|
#!/usr/bin/env python
#
#CustomScript extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+
#
import array
import base64
import os
import os.path
import re
import string
import subprocess
import sys
import imp
import shlex
import traceback
import urllib2
import urlparse
import time
import shutil
import json
from codecs import *
from azure.storage import BlobService
from Utils.WAAgentUtil import waagent
import Utils.HandlerUtil as Util
# Global Variables
mfile = os.path.join(os.getcwd(), 'HandlerManifest.json')
with open(mfile,'r') as f:
manifest = json.loads(f.read())[0]
ExtensionShortName = manifest['name']
Version = manifest['version']
DownloadDirectory = 'download'
StdoutFile = "stdout"
ErroutFile = "errout"
OutputSize = 4 * 1024
# CustomScript-specific Operation
DownloadOp = "Download"
RunScriptOp = "RunScript"
# Change permission of log path
ext_log_path = '/var/log/azure/'
if os.path.exists(ext_log_path):
os.chmod('/var/log/azure/', 0700)
# The main function is the only entry point of this extension handler
def main():
#Global Variables definition
waagent.LoggerInit('/var/log/waagent.log','/dev/stdout')
waagent.Log("%s started to handle." %(ExtensionShortName))
try:
for a in sys.argv[1:]:
if re.match("^([-/]*)(disable)", a):
dummy_command("Disable", "success", "Disable succeeded")
elif re.match("^([-/]*)(uninstall)", a):
dummy_command("Uninstall", "success", "Uninstall succeeded")
elif re.match("^([-/]*)(install)", a):
dummy_command("Install", "success", "Install succeeded")
elif re.match("^([-/]*)(enable)", a):
hutil = parse_context("Enable")
enable(hutil)
elif re.match("^([-/]*)(daemon)", a):
hutil = parse_context("Executing")
daemon(hutil)
elif re.match("^([-/]*)(update)", a):
dummy_command("Update", "success", "Update succeeded")
except Exception, e:
err_msg = ("Failed with error: {0}, "
"{1}").format(e, traceback.format_exc())
waagent.Error(err_msg)
hutil.error(err_msg)
hutil.do_exit(1, 'Enable','failed','0',
'Enable failed: {0}'.format(err_msg))
def dummy_command(operation, status, msg):
hutil = parse_context(operation)
hutil.do_exit(0, operation, status, '0', msg)
def parse_context(operation):
hutil = Util.HandlerUtility(waagent.Log, waagent.Error, ExtensionShortName)
hutil.do_parse_context(operation)
return hutil
def enable(hutil):
"""
Ensure the same configuration is executed only once
If the previous enable failed, we do not have retry logic here,
since the custom script may not work in an intermediate state.
"""
hutil.exit_if_enabled()
start_daemon(hutil)
def download_files_with_retry(hutil, retry_count, wait):
hutil.log(("Will try to download files, "
"number of retries = {0}, "
"wait SECONDS between retrievals = {1}s").format(retry_count, wait))
for download_retry_count in range(0, retry_count + 1):
try:
download_files(hutil)
break
except Exception, e:
error_msg = ("Failed to download files, "
"retry = {0}, maxRetry = {1}.").format(download_retry_count, retry_count)
hutil.error(error_msg)
if download_retry_count < retry_count:
hutil.log("Sleep {0} seconds".format(wait))
time.sleep(wait)
else:
waagent.AddExtensionEvent(name=ExtensionShortName,
op=DownloadOp,
isSuccess=False,
version=Version,
message="(01100)"+error_msg)
raise
    msg = ("Successfully downloaded files, "
"retry count = {0}").format(download_retry_count)
hutil.log(msg)
waagent.AddExtensionEvent(name=ExtensionShortName,
op=DownloadOp,
isSuccess=True,
version=Version,
message="(01303)"+msg)
return retry_count - download_retry_count
def check_idns_with_retry(hutil, retry_count, wait):
is_idns_ready = False
for check_idns_retry_count in range(0, retry_count + 1):
is_idns_ready = check_idns()
if is_idns_ready:
break
else:
if check_idns_retry_count < retry_count:
hutil.error("Internal DNS is not ready, retry to check.")
hutil.log("Sleep {0} seconds".format(wait))
time.sleep(wait)
if is_idns_ready:
msg = ("Internal DNS is ready, "
"retry count = {0}").format(check_idns_retry_count)
hutil.log(msg)
waagent.AddExtensionEvent(name=ExtensionShortName,
op="CheckIDNS",
isSuccess=True,
version=Version,
message="(01306)"+msg)
else:
error_msg = ("Internal DNS is not ready, "
"retry count = {0}, ignore it.").format(check_idns_retry_count)
hutil.error(error_msg)
waagent.AddExtensionEvent(name=ExtensionShortName,
op="CheckIDNS",
isSuccess=False,
version=Version,
message="(01306)"+error_msg)
def check_idns():
ret = waagent.Run("host $(hostname)")
return (not ret)
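# Note: waagent.Run is assumed to return the shell exit status here, so a zero
# return code (the hostname resolved) is reported as "internal DNS ready".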
def download_files(hutil):
public_settings = hutil.get_public_settings()
if public_settings is None:
        raise ValueError("Public configuration cannot be None.")
cmd = get_command_to_execute(hutil)
blob_uris = public_settings.get('fileUris')
protected_settings = hutil.get_protected_settings()
storage_account_name = None
storage_account_key = None
if protected_settings:
storage_account_name = protected_settings.get("storageAccountName")
storage_account_key = protected_settings.get("storageAccountKey")
if storage_account_name is not None:
storage_account_name = storage_account_name.strip()
if storage_account_key is not None:
storage_account_key = storage_account_key.strip()
if (not blob_uris or not isinstance(blob_uris, list) or len(blob_uris) == 0):
error_msg = "fileUris value provided is empty or invalid."
hutil.log(error_msg + " Continue with executing command...")
waagent.AddExtensionEvent(name=ExtensionShortName,
op=DownloadOp,
isSuccess=False,
version=Version,
message="(01001)"+error_msg)
return
hutil.do_status_report('Downloading','transitioning', '0',
'Downloading files...')
if storage_account_name and storage_account_key:
hutil.log("Downloading scripts from azure storage...")
download_blobs(storage_account_name,
storage_account_key,
blob_uris,
cmd,
hutil)
elif not(storage_account_name or storage_account_key):
hutil.log("No azure storage account and key specified in protected "
"settings. Downloading scripts from external links...")
download_external_files(blob_uris, cmd, hutil)
else:
#Storage account and key should appear in pairs
error_msg = "Azure storage account and key should appear in pairs."
hutil.error(error_msg)
waagent.AddExtensionEvent(name=ExtensionShortName,
op=DownloadOp,
isSuccess=False,
version=Version,
message="(01000)"+error_msg)
raise ValueError(error_msg)
def start_daemon(hutil):
cmd = get_command_to_execute(hutil)
if cmd:
hutil.log("Command to execute:" + cmd)
args = [os.path.join(os.getcwd(), __file__), "-daemon"]
# This process will start a new background process by calling
# customscript.py -daemon
        # to run the script, and will exit itself immediately.
        # Redirect stdout and stderr to /dev/null. Otherwise the daemon process
        # will throw a "Broken pipe" exception when the parent process exits.
devnull = open(os.devnull, 'w')
child = subprocess.Popen(args, stdout=devnull, stderr=devnull)
hutil.do_exit(0, 'Enable', 'transitioning', '0',
'Launching the script...')
else:
error_msg = "CommandToExecute is empty or invalid"
hutil.error(error_msg)
waagent.AddExtensionEvent(name=ExtensionShortName,
op=RunScriptOp,
isSuccess=False,
version=Version,
message="(01002)"+error_msg)
raise ValueError(error_msg)
def daemon(hutil):
retry_count = 10
wait = 20
enable_idns_check = True
public_settings = hutil.get_public_settings()
if public_settings:
if 'retrycount' in public_settings:
retry_count = public_settings.get('retrycount')
if 'wait' in public_settings:
wait = public_settings.get('wait')
if 'enableInternalDNSCheck' in public_settings:
enable_idns_check = public_settings.get('enableInternalDNSCheck')
prepare_download_dir(hutil.get_seq_no())
retry_count = download_files_with_retry(hutil, retry_count, wait)
# The internal DNS needs some time to be ready.
# Wait and retry to check if there is time in retry window.
# The check may be removed safely if iDNS is always ready.
if enable_idns_check:
check_idns_with_retry(hutil, retry_count, wait)
cmd = get_command_to_execute(hutil)
args = parse_args(cmd)
if args:
run_script(hutil, args)
else:
error_msg = "CommandToExecute is empty or invalid."
hutil.error(error_msg)
waagent.AddExtensionEvent(name=ExtensionShortName,
op=RunScriptOp,
isSuccess=False,
version=Version,
message="(01002)"+error_msg)
raise ValueError(error_msg)
def run_script(hutil, args, interval = 30):
download_dir = prepare_download_dir(hutil.get_seq_no())
std_out_file = os.path.join(download_dir, StdoutFile)
err_out_file = os.path.join(download_dir, ErroutFile)
std_out = None
err_out = None
try:
std_out = open(std_out_file, "w")
err_out = open(err_out_file, "w")
start_time = time.time()
child = subprocess.Popen(args,
cwd = download_dir,
stdout=std_out,
stderr=err_out)
time.sleep(1)
        while child.poll() is None:
msg = get_formatted_log("Script is running...",
tail(std_out_file), tail(err_out_file))
hutil.log(msg)
hutil.do_status_report('Enable', 'transitioning', '0', msg)
time.sleep(interval)
if child.returncode and child.returncode != 0:
msg = get_formatted_log("Script returned an error.",
tail(std_out_file), tail(err_out_file))
hutil.error(msg)
waagent.AddExtensionEvent(name=ExtensionShortName,
op=RunScriptOp,
isSuccess=False,
version=Version,
message="(01302)"+msg)
hutil.do_exit(1, 'Enable', 'failed', '1', msg)
else:
msg = get_formatted_log("Script is finished.",
tail(std_out_file), tail(err_out_file))
hutil.log(msg)
waagent.AddExtensionEvent(name=ExtensionShortName,
op=RunScriptOp,
isSuccess=True,
version=Version,
message="(01302)"+msg)
end_time = time.time()
waagent.AddExtensionEvent(name=ExtensionShortName,
op=RunScriptOp,
isSuccess=True,
version=Version,
message=("(01304)Script executing time: "
"{0}s").format(str(end_time-start_time)))
hutil.do_exit(0, 'Enable', 'success','0', msg)
except Exception, e:
error_msg = ("Failed to launch script with error: {0},"
"stacktrace: {1}").format(e, traceback.format_exc())
hutil.error(error_msg)
waagent.AddExtensionEvent(name=ExtensionShortName,
op=RunScriptOp,
isSuccess=False,
version=Version,
message="(01101)"+error_msg)
hutil.do_exit(1, 'Enable', 'failed', '1',
                      'Launch script failed: {0}'.format(e))
finally:
if std_out:
std_out.close()
if err_out:
err_out.close()
def download_blobs(storage_account_name, storage_account_key,
blob_uris, command, hutil):
for blob_uri in blob_uris:
if blob_uri:
download_blob(storage_account_name,
storage_account_key,
blob_uri,
command,
hutil)
def download_blob(storage_account_name, storage_account_key,
blob_uri, command, hutil):
try:
seqNo = hutil.get_seq_no()
download_dir = prepare_download_dir(seqNo)
result = download_and_save_blob(storage_account_name,
storage_account_key,
blob_uri,
download_dir)
blob_name, _, _, download_path = result
preprocess_files(download_path, hutil)
if command and blob_name in command:
os.chmod(download_path, 0100)
except Exception, e:
hutil.error(("Failed to download blob with uri: {0} "
"with error {1}").format(blob_uri,e))
raise
def download_and_save_blob(storage_account_name,
storage_account_key,
blob_uri,
download_dir):
container_name = get_container_name_from_uri(blob_uri)
blob_name = get_blob_name_from_uri(blob_uri)
host_base = get_host_base_from_uri(blob_uri)
# If blob_name is a path, extract the file_name
last_sep = blob_name.rfind('/')
if last_sep != -1:
file_name = blob_name[last_sep+1:]
else:
file_name = blob_name
download_path = os.path.join(download_dir, file_name)
    # The guest agent already ensures plugins are enabled one after another.
# The blob download will not conflict.
blob_service = BlobService(storage_account_name,
storage_account_key,
host_base=host_base)
blob_service.get_blob_to_path(container_name, blob_name, download_path)
return (blob_name, container_name, host_base, download_path)
def download_external_files(uris, command, hutil):
for uri in uris:
if uri:
download_external_file(uri, command, hutil)
def download_external_file(uri, command, hutil):
seqNo = hutil.get_seq_no()
download_dir = prepare_download_dir(seqNo)
path = get_path_from_uri(uri)
file_name = path.split('/')[-1]
file_path = os.path.join(download_dir, file_name)
try:
download_and_save_file(uri, file_path)
preprocess_files(file_path, hutil)
if command and file_name in command:
os.chmod(file_path, 0100)
except Exception, e:
hutil.error(("Failed to download external file with uri: {0} "
"with error {1}").format(uri, e))
raise
def download_and_save_file(uri, file_path, timeout=30, buf_size=1024):
src = urllib2.urlopen(uri, timeout=timeout)
with open(file_path, 'wb') as dest:
buf = src.read(buf_size)
while(buf):
dest.write(buf)
buf = src.read(buf_size)
def preprocess_files(file_path, hutil):
"""
The file is preprocessed if it satisfies any of the following
    conditions:
        the file's extension is '.sh' or '.py'
        the first 64 bytes of the file contain '#!'
"""
ret = to_process(file_path)
if ret:
dos2unix(file_path)
hutil.log("Converting {0} from DOS to Unix formats: Done".format(file_path))
remove_bom(file_path)
hutil.log("Removing BOM of {0}: Done".format(file_path))
def to_process(file_path, extensions=['.sh', ".py"]):
for extension in extensions:
if file_path.endswith(extension):
return True
with open(file_path, 'rb') as f:
contents = f.read(64)
if '#!' in contents:
return True
return False
def dos2unix(file_path):
with open(file_path, 'rU') as f:
contents = f.read()
temp_file_path = file_path + ".tmp"
with open(temp_file_path, 'wb') as f_temp:
f_temp.write(contents)
shutil.move(temp_file_path, file_path)
def remove_bom(file_path):
with open(file_path, 'rb') as f:
contents = f.read()
bom_list = [BOM, BOM_BE, BOM_LE, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF8]
for bom in bom_list:
if contents.startswith(bom):
break
else:
return
new_contents = None
for encoding in ["utf-8-sig", "utf-16"]:
try:
new_contents = contents.decode(encoding).encode('utf-8')
break
except UnicodeDecodeError:
continue
if new_contents is not None:
temp_file_path = file_path + ".tmp"
with open(temp_file_path, 'wb') as f_temp:
f_temp.write(new_contents)
shutil.move(temp_file_path, file_path)
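# Why the BOM removal matters (illustrative): a script that begins with a UTF-8
# BOM followed by '#!/bin/sh' is not recognized by the kernel as having a shebang.
# remove_bom re-encodes such files as plain UTF-8 so '#!' is the very first byte
# when the downloaded script is executed.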
def get_blob_name_from_uri(uri):
return get_properties_from_uri(uri)['blob_name']
def get_container_name_from_uri(uri):
return get_properties_from_uri(uri)['container_name']
def get_host_base_from_uri(blob_uri):
uri = urlparse.urlparse(blob_uri)
netloc = uri.netloc
if netloc is None:
return None
return netloc[netloc.find('.'):]
def get_properties_from_uri(uri):
path = get_path_from_uri(uri)
if path.endswith('/'):
path = path[:-1]
if path[0] == '/':
path = path[1:]
first_sep = path.find('/')
if first_sep == -1:
        # 'hutil' is not available in this module-level helper; log via waagent instead
        waagent.Error("Failed to extract container and blob names from {0}".format(path))
blob_name = path[first_sep+1:]
container_name = path[:first_sep]
return {'blob_name': blob_name, 'container_name': container_name}
def get_path_from_uri(uriStr):
uri = urlparse.urlparse(uriStr)
return uri.path
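# Worked example for the URI helpers above (hypothetical blob URI, not taken from
# any real configuration):
#   blob_uri = "https://myaccount.blob.core.windows.net/mycontainer/scripts/run.sh"
#   get_path_from_uri(blob_uri)           -> '/mycontainer/scripts/run.sh'
#   get_container_name_from_uri(blob_uri) -> 'mycontainer'
#   get_blob_name_from_uri(blob_uri)      -> 'scripts/run.sh'
#   get_host_base_from_uri(blob_uri)      -> '.blob.core.windows.net'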
def prepare_download_dir(seqNo):
download_dir_main = os.path.join(os.getcwd(), DownloadDirectory)
create_directory_if_not_exists(download_dir_main)
download_dir = os.path.join(download_dir_main, seqNo)
create_directory_if_not_exists(download_dir)
return download_dir
def create_directory_if_not_exists(directory):
"""create directory if no exists"""
if not os.path.exists(directory):
os.makedirs(directory)
def parse_args(cmd):
cmd = filter(lambda x : x in string.printable, cmd)
cmd = cmd.decode("ascii", "ignore")
args = shlex.split(cmd)
    # From Python 2.6 to Python 2.7.2, shlex.split outputs UCS-4 results like
    # '\x00\x00a'. The temporary workaround is to strip the \x00 characters.
for idx, val in enumerate(args):
if '\x00' in args[idx]:
args[idx] = args[idx].replace('\x00', '')
return args
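# Example (hypothetical command string): parse_args('sh run.sh "first arg"')
# yields ['sh', 'run.sh', 'first arg'] after the NUL-stripping workaround above.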
def tail(log_file, output_size = OutputSize):
pos = min(output_size, os.path.getsize(log_file))
with open(log_file, "r") as log:
log.seek(-pos, 2)
buf = log.read(output_size)
buf = filter(lambda x: x in string.printable, buf)
return buf.decode("ascii", "ignore")
def get_formatted_log(summary, stdout, stderr):
msg_format = ("{0}\n"
"---stdout---\n"
"{1}\n"
"---errout---\n"
"{2}\n")
return msg_format.format(summary, stdout, stderr)
def get_command_to_execute(hutil):
public_settings = hutil.get_public_settings()
protected_settings = hutil.get_protected_settings()
cmd_public = public_settings.get('commandToExecute')
cmd_protected = None
if protected_settings is not None:
cmd_protected = protected_settings.get('commandToExecute')
if cmd_public and cmd_protected:
err_msg = ("commandToExecute was specified both in public settings "
"and protected settings. It can only be specified in one of them.")
hutil.error(err_msg)
hutil.do_exit(1, 'Enable','failed','0',
'Enable failed: {0}'.format(err_msg))
return cmd_public if cmd_public else cmd_protected
if __name__ == '__main__' :
main()
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
from frappe.utils.minify import JavascriptMinify
import subprocess
import warnings
from six import iteritems, text_type
"""
Build the `public` folders and setup languages
"""
import os, frappe, json, shutil, re
app_paths = None
def setup():
global app_paths
pymodules = []
for app in frappe.get_all_apps(True):
try:
pymodules.append(frappe.get_module(app))
except ImportError: pass
app_paths = [os.path.dirname(pymodule.__file__) for pymodule in pymodules]
def bundle(no_compress, make_copy=False, restore=False, verbose=False):
"""concat / minify js files"""
# build js files
setup()
make_asset_dirs(make_copy=make_copy, restore=restore)
# new nodejs build system
command = 'node --use_strict ../apps/frappe/frappe/build.js --build'
if not no_compress:
command += ' --minify'
subprocess.call(command.split(' '))
# build(no_compress, verbose)
def watch(no_compress):
"""watch and rebuild if necessary"""
# new nodejs file watcher
command = 'node --use_strict ../apps/frappe/frappe/build.js --watch'
subprocess.call(command.split(' '))
# setup()
# import time
# compile_less()
# build(no_compress=True)
# while True:
# compile_less()
# if files_dirty():
# build(no_compress=True)
# time.sleep(3)
def make_asset_dirs(make_copy=False, restore=False):
# don't even think of making assets_path absolute - rm -rf ahead.
assets_path = os.path.join(frappe.local.sites_path, "assets")
for dir_path in [
os.path.join(assets_path, 'js'),
os.path.join(assets_path, 'css')]:
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# symlink app/public > assets/app
for app_name in frappe.get_all_apps(True):
pymodule = frappe.get_module(app_name)
app_base_path = os.path.abspath(os.path.dirname(pymodule.__file__))
symlinks = []
symlinks.append([os.path.join(app_base_path, 'public'), os.path.join(assets_path, app_name)])
symlinks.append([os.path.join(app_base_path, 'docs'), os.path.join(assets_path, app_name + '_docs')])
for source, target in symlinks:
source = os.path.abspath(source)
if os.path.exists(source):
if restore:
if os.path.exists(target):
if os.path.islink(target):
os.unlink(target)
else:
shutil.rmtree(target)
shutil.copytree(source, target)
elif make_copy:
if os.path.exists(target):
warnings.warn('Target {target} already exists.'.format(target = target))
else:
shutil.copytree(source, target)
else:
if os.path.exists(target):
if os.path.islink(target):
os.unlink(target)
else:
shutil.rmtree(target)
os.symlink(source, target)
else:
warnings.warn('Source {source} does not exist.'.format(source = source))
def build(no_compress=False, verbose=False):
assets_path = os.path.join(frappe.local.sites_path, "assets")
for target, sources in iteritems(get_build_maps()):
pack(os.path.join(assets_path, target), sources, no_compress, verbose)
def get_build_maps():
"""get all build.jsons with absolute paths"""
# framework js and css files
build_maps = {}
for app_path in app_paths:
path = os.path.join(app_path, 'public', 'build.json')
if os.path.exists(path):
with open(path) as f:
try:
for target, sources in iteritems(json.loads(f.read())):
# update app path
source_paths = []
for source in sources:
if isinstance(source, list):
s = frappe.get_pymodule_path(source[0], *source[1].split("/"))
else:
s = os.path.join(app_path, source)
source_paths.append(s)
build_maps[target] = source_paths
except ValueError as e:
print(path)
print('JSON syntax error {0}'.format(str(e)))
return build_maps
timestamps = {}
def pack(target, sources, no_compress, verbose):
from six import StringIO
outtype, outtxt = target.split(".")[-1], ''
jsm = JavascriptMinify()
for f in sources:
suffix = None
if ':' in f: f, suffix = f.split(':')
if not os.path.exists(f) or os.path.isdir(f):
print("did not find " + f)
continue
timestamps[f] = os.path.getmtime(f)
try:
with open(f, 'r') as sourcefile:
data = text_type(sourcefile.read(), 'utf-8', errors='ignore')
extn = f.rsplit(".", 1)[1]
if outtype=="js" and extn=="js" and (not no_compress) and suffix!="concat" and (".min." not in f):
tmpin, tmpout = StringIO(data.encode('utf-8')), StringIO()
jsm.minify(tmpin, tmpout)
minified = tmpout.getvalue()
if minified:
outtxt += text_type(minified or '', 'utf-8').strip('\n') + ';'
if verbose:
print("{0}: {1}k".format(f, int(len(minified) / 1024)))
elif outtype=="js" and extn=="html":
# add to frappe.templates
outtxt += html_to_js_template(f, data)
else:
outtxt += ('\n/*\n *\t%s\n */' % f)
outtxt += '\n' + data + '\n'
except Exception:
print("--Error in:" + f + "--")
print(frappe.get_traceback())
with open(target, 'w') as f:
f.write(outtxt.encode("utf-8"))
print("Wrote %s - %sk" % (target, str(int(os.path.getsize(target)/1024))))
def html_to_js_template(path, content):
'''returns HTML template content as Javascript code, adding it to `frappe.templates`'''
return """frappe.templates["{key}"] = '{content}';\n""".format(\
key=path.rsplit("/", 1)[-1][:-5], content=scrub_html_template(content))
def scrub_html_template(content):
'''Returns HTML content with removed whitespace and comments'''
# remove whitespace to a single space
content = re.sub("\s+", " ", content)
# strip comments
content = re.sub("(<!--.*?-->)", "", content)
return content.replace("'", "\'")
def files_dirty():
for target, sources in iteritems(get_build_maps()):
for f in sources:
if ':' in f: f, suffix = f.split(':')
if not os.path.exists(f) or os.path.isdir(f): continue
if os.path.getmtime(f) != timestamps.get(f):
print(f + ' dirty')
return True
else:
return False
def compile_less():
from distutils.spawn import find_executable
if not find_executable("lessc"):
return
for path in app_paths:
less_path = os.path.join(path, "public", "less")
if os.path.exists(less_path):
for fname in os.listdir(less_path):
if fname.endswith(".less") and fname != "variables.less":
fpath = os.path.join(less_path, fname)
mtime = os.path.getmtime(fpath)
if fpath in timestamps and mtime == timestamps[fpath]:
continue
timestamps[fpath] = mtime
print("compiling {0}".format(fpath))
css_path = os.path.join(path, "public", "css", fname.rsplit(".", 1)[0] + ".css")
os.system("lessc {0} > {1}".format(fpath, css_path))
|
|
import os
import time
import commands
from dircache import listdir
from glob import glob
from pUtil import tolog
from JobState import JobState
class Cleaner:
"""
This class is used to clean up lingering old/lost jobs.
The clean-up criteria is that for a found Panda job directory,
if the pilotlog.txt has not been updated for at least <limit>
hours, and if the job state is 'running' then the assumption is
that the job was unexpectedly terminated and should be erased from disk.
The class defines the clean-up limit, but overrides this value if set
in schedconfig.
The cleanup() method should be executed after queuedata has been downloaded
and after job recovery (which might or might not be turned on).
Usage:
from Cleaner import Cleaner
cleaner = Cleaner(limit=<limit>, path=<path>, uflag=<uflag>)
ec = cleaner.cleanup()
cleanup() will return True for a successful/performed cleanup, False otherwise.
<path> should normally be thisSite.wntmpdir
<limit> should be an integer > 0 [hours]
<uflag> user flag needed to distinguish job type (an analysis pilot is not allowed
to touch production job directories on some sites)
"""
def __init__(self, limit=12, path="/tmp", uflag=None):
""" Default init with verification """
self.clean = True
self.uflag = None
# verify the clean-up limit
_type = str(limit.__class__)
if limit and _type.find('int') == -1:
tolog("Trying to convert limit from type %s to int" % (_type))
try:
limit = int(limit)
except:
tolog("Failed to convert, reset to default")
limit = 12
if limit == 0:
tolog("Clean-up limit set to zero (no clean-up will be done)")
self.clean = False
elif limit < 0 or not limit:
limit = 12
tolog("!!WARNING!!5500!! Clean-up limit out of bounds, reset to default: %d" % (limit))
self.limit = limit
tolog("Cleaner initialized with clean-up limit: %d hours" % (self.limit))
# verify the clean-up path and set the uflag if necessary
if self.clean:
if not path:
path = "/tmp"
tolog("Requested path reset to default: %s" % (path))
if os.path.exists(path):
self.path = path
tolog("Cleaner will scan for lost directories in verified path: %s" % (self.path))
if uflag:
self.uflag = uflag
else:
tolog("!!WARNING!!5500!! No such directory: %s (clean-up not possible)" % (path))
self.path = None
self.clean = False
def cleanup(self):
""" execute the clean-up """
status = True
number_of_cleanups = 0
if self.clean:
tolog("Executing empty dirs clean-up, stage 1/5")
Cleaner.purgeEmptyDirs(self.path)
tolog("Executing work dir clean-up, stage 2/5")
Cleaner.purgeWorkDirs(self.path)
tolog("Executing maxed-out dirs clean-up, stage 3/5")
Cleaner.purgeMaxedoutDirs(self.path)
tolog("Executing AthenaMP clean-up, stage 4/5 <SKIPPED>")
#files = ['AthenaMP_*', 'fifo_*', 'TokenExtractorChannel*', 'zmq_EventService*', 'asetup*', 'tmp*.pkl']
#for f in files:
# Cleaner.purgeFiles(self.path, f, limit=48*3600)
tolog("Executing PanDA Pilot dir clean-up, stage 5/5")
JS = JobState()
# grab all job state files in all work directories
job_state_files = glob(self.path + "/Panda_Pilot_*/jobState-*.pickle")
number_of_files = len(job_state_files)
file_number = 0
max_cleanups = 30
tolog("Number of found job state files: %d" % (number_of_files))
if job_state_files:
# loop over all found job state files
for file_path in job_state_files:
file_number += 1
if file_number > max_cleanups:
tolog("Maximum number of job recoveries exceeded for this pilot: %d" % (max_cleanups))
break
tolog("Processing job state file %d/%d: %s" % (file_number, number_of_files, file_path))
current_time = int(time.time())
# when was file last modified?
try:
file_modification_time = os.path.getmtime(file_path)
except:
# skip this file since it was not possible to read the modification time
pass
else:
# was the job state file updated longer than the time limit? (convert to seconds)
mod_time = current_time - file_modification_time
if mod_time > self.limit*3600:
tolog("File was last modified %d seconds ago (proceed)" % (mod_time))
cmd = "whoami; ls -lF %s; ls -lF %s" % (file_path, os.path.dirname(file_path))
tolog("Executing command: %s" % (cmd))
ec, rs = commands.getstatusoutput(cmd)
if ec == 0:
tolog("%s" % (rs))
else:
tolog("!!WARNING!!2999!! %d, %s" % (ec, rs))
# open the job state file
if JS.get(file_path):
# decode the job state info
_job, _site, _node, _recoveryAttempt = JS.decode()
# add member if it doesn't exist (new Job version)
try:
_tmp = _job.prodSourceLabel
except:
_job.prodSourceLabel = ''
if _job and _site and _node:
# query the job state file for job information
if _job.result[0] == 'running' or _job.result[0] == 'starting' or (_job.result[0] == 'holding' and mod_time > 7*24*3600):
if _job.result[0] == 'holding':
tolog("Job %s was found in %s state but has not been modified for a long time - will be cleaned up" % (_job.jobId, _job.result[0]))
else:
tolog("Job %s was found in %s state - will be cleaned up" % (_job.jobId, _job.result[0]))
tolog("Erasing directory: %s" % (_site.workdir))
cmd = "rm -rf %s" % (_site.workdir)
try:
ec, rs = commands.getstatusoutput(cmd)
except:
tolog("!!WARNING!!5500!! Could not erase lost job workdir: %d, %s" % (ec, rs))
status = False
break
else:
tolog("Lost job workdir removed")
else:
tolog("Job found in state: %s" % (_job.result[0]))
else:
tolog("File was last modified %d seconds ago (skip)" % (mod_time))
else:
tolog("No job state files were found, aborting clean-up")
else:
tolog("Clean-up turned off")
status = False
return status
def purgeEmptyDirs(path):
""" locate and remove empty lingering dirs """
all_dirs = glob("%s/Panda_Pilot_*" % (path))
max_dirs = 50
purged_nr = 0
dir_nr = 0
for _dir in all_dirs:
if dir_nr >= max_dirs:
break
# when was the dir last modified?
current_time = int(time.time())
try:
file_modification_time = os.path.getmtime(_dir)
except:
# skip this dir since it was not possible to read the modification time
pass
else:
mod_time = current_time - file_modification_time
if mod_time > 12*3600:
try:
ls = listdir(_dir)
except Exception, e:
tolog("!!WARNING!!2999!! Exception caught: %s" % str(e))
else:
if len(ls) == 0 or len(ls) == 1:
if len(ls) == 0:
tolog("Found empty dir: %s (last modified %d s ago, will now purge it)" % (_dir, mod_time))
else:
tolog("Found empty dir: %s (last modified %d s ago, will now purge it, 1 sub dir: %s)" % (_dir, mod_time, ls[0]))
ec, rs = commands.getstatusoutput("rm -rf %s" % (_dir))
if ec != 0:
tolog("Failed to remove dir: %d, %s (belonging to user %d, pilot is run by user %d)" %\
(ec, rs, os.stat(_dir)[4], os.getuid()))
else:
purged_nr += 1
dir_nr += 1
tolog("Purged %d empty directories" % (purged_nr))
purgeEmptyDirs = staticmethod(purgeEmptyDirs)
def purgeWorkDirs(path):
""" locate and remove lingering athena workDirs """
all_dirs = glob("%s/Panda_Pilot_*/PandaJob*" % (path))
max_dirs = 50
purged_nr = 0
dir_nr = 0
for _dir in all_dirs:
if dir_nr >= max_dirs:
break
# when was the dir last modified?
current_time = int(time.time())
try:
file_modification_time = os.path.getmtime(_dir)
except:
# skip this dir since it was not possible to read the modification time
pass
else:
mod_time = current_time - file_modification_time
if mod_time > 12*3600:
try:
ls = listdir(_dir)
except Exception, e:
tolog("!!WARNING!!2999!! Exception caught: %s" % str(e))
else:
if len(ls) == 1:
if "workDir" in ls:
ec, rs = commands.getstatusoutput("ls -lF %s" % (_dir))
tolog("ls: %s" % str(rs))
tolog("Found single workDir: %s (will now purge it)" % (_dir))
ec, rs = commands.getstatusoutput("rm -rf %s" % (_dir))
if ec != 0:
tolog("Failed to remove dir: %s" % (rs))
else:
purged_nr += 1
dir_nr += 1
tolog("Purged %d single workDirs directories" % (purged_nr))
purgeWorkDirs = staticmethod(purgeWorkDirs)
def purgeFiles(path, filename, limit=12*3600):
""" locate and remove lingering directories/files """
all_files = glob("%s/%s" % (path, filename))
max_files = 50
file_nr = 0
for _file in all_files:
if file_nr >= max_files:
break
# when was the dir last modified?
current_time = int(time.time())
try:
file_modification_time = os.path.getmtime(_file)
except:
# skip this dir since it was not possible to read the modification time
pass
else:
mod_time = current_time - file_modification_time
if mod_time > limit:
tolog("Found file %s last modified %d s ago (will now try to purge it)" % (_file, mod_time))
ec, rs = commands.getstatusoutput("rm -f %s" % (_file))
if ec != 0:
tolog("Failed to remove dir: %s" % (rs))
file_nr += 1
purgeFiles = staticmethod(purgeFiles)
def purgeMaxedoutDirs(path):
""" locate and remove maxedout lingering dirs """
all_dirs = glob("%s/Panda_Pilot_*" % (path))
max_dirs = 50
purged_nr = 0
dir_nr = 0
for _dir in all_dirs:
if dir_nr >= max_dirs:
break
# when was the dir last modified?
current_time = int(time.time())
try:
file_modification_time = os.path.getmtime(_dir)
except:
# skip this dir since it was not possible to read the modification time
pass
else:
mod_time = current_time - file_modification_time
if mod_time > 12*3600:
try:
ls = listdir(_dir)
except Exception, e:
tolog("!!WARNING!!2999!! Exception caught: %s" % str(e))
else:
if len(ls) > 0:
purge = False
for f in ls:
if ".MAXEDOUT" in f:
tolog("Found MAXEDOUT job state file: %s (will now purge the work dir: %s)" % (f, _dir))
purge = True
break
if purge:
ec, rs = commands.getstatusoutput("rm -rf %s" % (_dir))
if ec != 0:
tolog("Failed to remove dir: %d, (belonging to user %d, pilot is run by user %d)" %\
(ec, os.stat(_dir)[4], os.getuid()))
else:
purged_nr += 1
dir_nr += 1
tolog("Purged %d empty directories" % (purged_nr))
purgeMaxedoutDirs = staticmethod(purgeMaxedoutDirs)
|
|
# -*- coding: utf-8 -*-
"""This is rule-based deduction system for SymPy
The whole thing is split into two parts
- rules compilation and preparation of tables
- runtime inference
For rule-based inference engines, the classical work is RETE algorithm [1],
[2] Although we are not implementing it in full (or even significantly)
it's still worth a read to understand the underlying ideas.
In short, every rule in a system of rules is one of two forms:
- atom -> ... (alpha rule)
- And(atom1, atom2, ...) -> ... (beta rule)
The major complexity is in efficient beta-rules processing and usually for an
expert system a lot of effort goes into code that operates on beta-rules.
Here we take a minimalistic approach to get something usable first.
- (preparation) of alpha- and beta- networks, everything except
- (runtime) FactRules.deduce_all_facts
_____________________________________
( Kirr: I've never thought that doing )
( logic stuff is that difficult... )
-------------------------------------
o ^__^
o (oo)\_______
(__)\ )\/\
||----w |
|| ||
Some references on the topic
----------------------------
[1] http://en.wikipedia.org/wiki/Rete_algorithm
[2] http://reports-archive.adm.cs.cmu.edu/anon/1995/CMU-CS-95-113.pdf
http://en.wikipedia.org/wiki/Propositional_formula
http://en.wikipedia.org/wiki/Inference_rule
http://en.wikipedia.org/wiki/List_of_rules_of_inference
"""
from __future__ import print_function, division
from collections import defaultdict
from .logic import Logic, And, Or, Not
from sympy.core.compatibility import string_types
def _base_fact(atom):
"""Return the literal fact of an atom.
Effectively, this merely strips the Not around a fact.
"""
if isinstance(atom, Not):
return atom.arg
else:
return atom
def _as_pair(atom):
if isinstance(atom, Not):
return (atom.arg, False)
else:
return (atom, True)
# XXX this prepares forward-chaining rules for alpha-network
def transitive_closure(implications):
"""
Computes the transitive closure of a list of implications
Uses Warshall's algorithm, as described at
http://chuck.ferzle.com/Notes/Notes/DiscreteMath/Warshall.pdf.
"""
full_implications = set(implications)
literals = set.union(*map(set, implications))
for k in literals:
for i in literals:
if (i, k) in full_implications:
for j in literals:
if (k, j) in full_implications:
full_implications.add((i, j))
return full_implications
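# A small illustrative check (not part of the original module): with a -> b and
# b -> c, the closure also contains a -> c.
def _transitive_closure_example():
    closure = transitive_closure([('a', 'b'), ('b', 'c')])
    assert ('a', 'c') in closure
    return closure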
def deduce_alpha_implications(implications):
"""deduce all implications
Description by example
----------------------
given set of logic rules:
a -> b
b -> c
we deduce all possible rules:
a -> b, c
b -> c
implications: [] of (a,b)
return: {} of a -> set([b, c, ...])
"""
implications = implications + [(Not(j), Not(i)) for (i, j) in implications]
res = defaultdict(set)
full_implications = transitive_closure(implications)
for a, b in full_implications:
if a == b:
continue # skip a->a cyclic input
res[a].add(b)
# Clean up tautologies and check consistency
for a, impl in res.items():
impl.discard(a)
na = Not(a)
if na in impl:
raise ValueError(
'implications are inconsistent: %s -> %s %s' % (a, na, impl))
return res
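# Illustrative sketch (not part of the original module): alpha deduction also
# yields the contrapositives of the given implications.
def _deduce_alpha_example():
    impl = deduce_alpha_implications([('a', 'b'), ('b', 'c')])
    assert impl['a'] == set(['b', 'c'])
    assert impl[Not('c')] == set([Not('a'), Not('b')])
    return impl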
def apply_beta_to_alpha_route(alpha_implications, beta_rules):
"""apply additional beta-rules (And conditions) to already-built alpha implication tables
TODO: write about
- static extension of alpha-chains
- attaching refs to beta-nodes to alpha chains
e.g.
alpha_implications:
a -> [b, !c, d]
b -> [d]
...
beta_rules:
&(b,d) -> e
then we'll extend a's rule to the following
a -> [b, !c, d, e]
"""
x_impl = {}
for x in alpha_implications.keys():
x_impl[x] = (set(alpha_implications[x]), [])
for bcond, bimpl in beta_rules:
for bk in bcond.args:
if bk in x_impl:
continue
x_impl[bk] = (set(), [])
# static extensions to alpha rules:
# A: x -> a,b B: &(a,b) -> c ==> A: x -> a,b,c
seen_static_extension = True
while seen_static_extension:
seen_static_extension = False
for bcond, bimpl in beta_rules:
if not isinstance(bcond, And):
raise TypeError("Cond is not And")
bargs = set(bcond.args)
for x, (ximpls, bb) in x_impl.items():
x_all = ximpls | set([x])
# A: ... -> a B: &(...) -> a is non-informative
if bimpl not in x_all and bargs.issubset(x_all):
ximpls.add(bimpl)
# we introduced new implication - now we have to restore
# completeness of the whole set.
bimpl_impl = x_impl.get(bimpl)
if bimpl_impl is not None:
ximpls |= bimpl_impl[0]
seen_static_extension = True
# attach beta-nodes which can be possibly triggered by an alpha-chain
for bidx, (bcond, bimpl) in enumerate(beta_rules):
bargs = set(bcond.args)
for x, (ximpls, bb) in x_impl.items():
x_all = ximpls | set([x])
# A: ... -> a B: &(...) -> a (non-informative)
if bimpl in x_all:
continue
# A: x -> a... B: &(!a,...) -> ... (will never trigger)
# A: x -> a... B: &(...) -> !a (will never trigger)
if any(Not(xi) in bargs or Not(xi) == bimpl for xi in x_all):
continue
if bargs & x_all:
bb.append(bidx)
return x_impl
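# Illustrative sketch (not part of the original module) of the static extension
# step: with a -> [b, d] and the beta rule &(b, d) -> e, the alpha chain for
# `a` is extended to include e.
def _apply_beta_example():
    x_impl = apply_beta_to_alpha_route({'a': set(['b', 'd'])},
                                       [(And('b', 'd'), 'e')])
    ximpls, beta_idxs = x_impl['a']
    assert 'e' in ximpls
    return x_impl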
def rules_2prereq(rules):
"""build prerequisites table from rules
Description by example
----------------------
given set of logic rules:
a -> b, c
b -> c
we build prerequisites (from what points something can be deduced):
b <- a
c <- a, b
rules: {} of a -> [b, c, ...]
return: {} of c <- [a, b, ...]
Note, however, that these prerequisites may *not* be enough to prove a
fact. An example is 'a -> b' rule, where prereq(a) is b, and prereq(b)
is a. That's because a=T -> b=T, and b=F -> a=F, but a=F -> b=?
"""
prereq = defaultdict(set)
for (a, _), impl in rules.items():
if isinstance(a, Not):
a = a.args[0]
for (i, _) in impl:
if isinstance(i, Not):
i = i.args[0]
prereq[i].add(a)
return prereq
################
# RULES PROVER #
################
class TautologyDetected(Exception):
"""(internal) Prover uses it for reporting detected tautology"""
pass
class Prover(object):
"""ai - prover of logic rules
given a set of initial rules, Prover tries to prove all possible rules
which follow from given premises.
As a result proved_rules are always either in one of two forms: alpha or
beta:
Alpha rules
-----------
These are rules of the form::
a -> b & c & d & ...
Beta rules
----------
These are rules of the form::
&(a,b,...) -> c & d & ...
i.e. beta rules are join conditions that say that something follows when
*several* facts are true at the same time.
"""
def __init__(self):
self.proved_rules = []
self._rules_seen = set()
def split_alpha_beta(self):
"""split proved rules into alpha and beta chains"""
rules_alpha = [] # a -> b
rules_beta = [] # &(...) -> b
for a, b in self.proved_rules:
if isinstance(a, And):
rules_beta.append((a, b))
else:
rules_alpha.append((a, b) )
return rules_alpha, rules_beta
@property
def rules_alpha(self):
return self.split_alpha_beta()[0]
@property
def rules_beta(self):
return self.split_alpha_beta()[1]
def process_rule(self, a, b):
"""process a -> b rule""" # TODO write more?
if (not a) or isinstance(b, bool):
return
if isinstance(a, bool):
return
if (a, b) in self._rules_seen:
return
else:
self._rules_seen.add((a, b))
# this is the core of processing
try:
self._process_rule(a, b)
except TautologyDetected:
pass
def _process_rule(self, a, b):
# right part first
# a -> b & c --> a -> b ; a -> c
# (?) FIXME this is only correct when b & c != null !
if isinstance(b, And):
for barg in b.args:
self.process_rule(a, barg)
# a -> b | c --> !b & !c -> !a
# --> a & !b -> c
# --> a & !c -> b
elif isinstance(b, Or):
# detect tautology first
if not isinstance(a, Logic): # Atom
# tautology: a -> a|c|...
if a in b.args:
raise TautologyDetected(a, b, 'a -> a|c|...')
self.process_rule(And(*[Not(barg) for barg in b.args]), Not(a))
for bidx in range(len(b.args)):
barg = b.args[bidx]
brest = b.args[:bidx] + b.args[bidx + 1:]
self.process_rule(And(a, Not(barg)), Or(*brest))
# left part
# a & b -> c --> IRREDUCIBLE CASE -- WE STORE IT AS IS
# (this will be the basis of beta-network)
elif isinstance(a, And):
if b in a.args:
raise TautologyDetected(a, b, 'a & b -> a')
self.proved_rules.append((a, b))
# XXX NOTE at present we ignore !c -> !a | !b
elif isinstance(a, Or):
if b in a.args:
raise TautologyDetected(a, b, 'a | b -> a')
for aarg in a.args:
self.process_rule(aarg, b)
else:
# both `a` and `b` are atoms
self.proved_rules.append((a, b)) # a -> b
self.proved_rules.append((Not(b), Not(a))) # !b -> !a
########################################
class FactRules(object):
"""Rules that describe how to deduce facts in logic space
When defined, these rules allow implications to be quickly determined for a
set of facts. For this, precomputed deduction tables are used; see
`deduce_all_facts` (forward-chaining).
It is also possible to gather the prerequisites of a fact, i.e. the facts
from which it may be deduced (backward-chaining).
Definition Syntax
-----------------
a -> b -- a=T -> b=T (and automatically b=F -> a=F)
a -> !b -- a=T -> b=F
a == b -- a -> b & b -> a
a -> b & c -- a=T -> b=T & c=T
# TODO b | c
Internals
---------
.full_implications[k, v]: all the implications of fact k=v
.beta_triggers[k, v]: beta rules that might be triggered when k=v
.prereq -- {} k <- [] of k's prerequisites
.defined_facts -- set of defined fact names
"""
def __init__(self, rules):
"""Compile rules into internal lookup tables"""
if isinstance(rules, string_types):
rules = rules.splitlines()
# --- parse and process rules ---
P = Prover()
for rule in rules:
# XXX `a` is hardcoded to be always atom
a, op, b = rule.split(None, 2)
a = Logic.fromstring(a)
b = Logic.fromstring(b)
if op == '->':
P.process_rule(a, b)
elif op == '==':
P.process_rule(a, b)
P.process_rule(b, a)
else:
raise ValueError('unknown op %r' % op)
# --- build deduction networks ---
self.beta_rules = []
for bcond, bimpl in P.rules_beta:
self.beta_rules.append(
(set(_as_pair(a) for a in bcond.args), _as_pair(bimpl)))
# deduce alpha implications
impl_a = deduce_alpha_implications(P.rules_alpha)
# now:
# - apply beta rules to alpha chains (static extension), and
# - further associate beta rules to alpha chain (for inference at runtime)
impl_ab = apply_beta_to_alpha_route(impl_a, P.rules_beta)
# extract defined fact names
self.defined_facts = set(_base_fact(k) for k in impl_ab.keys())
# build rels (forward chains)
full_implications = defaultdict(set)
beta_triggers = defaultdict(set)
for k, (impl, betaidxs) in impl_ab.items():
full_implications[_as_pair(k)] = set(_as_pair(i) for i in impl)
beta_triggers[_as_pair(k)] = betaidxs
self.full_implications = full_implications
self.beta_triggers = beta_triggers
# build prereq (backward chains)
prereq = defaultdict(set)
rel_prereq = rules_2prereq(full_implications)
for k, pitems in rel_prereq.items():
prereq[k] |= pitems
self.prereq = prereq
class InconsistentAssumptions(ValueError):
def __str__(self):
kb, fact, value = self.args
return "%s, %s=%s" % (kb, fact, value)
class FactKB(dict):
"""
A simple propositional knowledge base relying on compiled inference rules.
"""
def __str__(self):
return '{\n%s}' % ',\n'.join(
["\t%s: %s" % i for i in sorted(self.items())])
def __init__(self, rules):
self.rules = rules
def _tell(self, k, v):
"""Add fact k=v to the knowledge base.
Returns True if the KB has actually been updated, False otherwise.
"""
if k in self and self[k] is not None:
if self[k] == v:
return False
else:
raise InconsistentAssumptions(self, k, v)
else:
self[k] = v
return True
# *********************************************
# * This is the workhorse, so keep it *fast*. *
# *********************************************
def deduce_all_facts(self, facts):
"""
Update the KB with all the implications of a list of facts.
Facts can be specified as a dictionary or as a list of (key, value)
pairs.
"""
# keep frequently used attributes locally, so we'll avoid extra
# attribute access overhead
full_implications = self.rules.full_implications
beta_triggers = self.rules.beta_triggers
beta_rules = self.rules.beta_rules
if isinstance(facts, dict):
facts = facts.items()
while facts:
beta_maytrigger = set()
# --- alpha chains ---
for k, v in facts:
if not self._tell(k, v) or v is None:
continue
# lookup routing tables
for key, value in full_implications[k, v]:
self._tell(key, value)
beta_maytrigger.update(beta_triggers[k, v])
# --- beta chains ---
facts = []
for bidx in beta_maytrigger:
bcond, bimpl = beta_rules[bidx]
if all(self.get(k) is v for k, v in bcond):
facts.append(bimpl)
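# Minimal usage sketch (illustrative, not part of the original module): compile
# a tiny rule set and run forward chaining over a knowledge base.
def _factkb_example():
    rules = FactRules([
        'integer  ->  rational',
        'rational ->  real',
    ])
    kb = FactKB(rules)
    kb.deduce_all_facts({'integer': True})
    assert kb['real'] is True  # follows from integer -> rational -> real
    return kb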
|
|
from __future__ import unicode_literals
import requests
import time
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
from decimal import Decimal
from .exceptions import (
PageError, DisambiguationError, RedirectError, HTTPTimeoutError,
WikipediaException, ODD_ERROR_MESSAGE)
from .util import cache, stdout_encode, debug
API_URL = 'http://en.wikipedia.org/w/api.php'
RATE_LIMIT = False
RATE_LIMIT_MIN_WAIT = None
RATE_LIMIT_LAST_CALL = None
USER_AGENT = 'wikipedia (https://github.com/goldsmith/Wikipedia/)'
def set_lang(prefix):
'''
Change the language of the API being requested.
Set `prefix` to one of the two letter prefixes found on the `list of all Wikipedias <http://meta.wikimedia.org/wiki/List_of_Wikipedias>`_.
After setting the language, the cache for ``search``, ``suggest``, and ``summary`` will be cleared.
.. note:: Make sure you search for page titles in the language that you have set.
'''
global API_URL
API_URL = 'http://' + prefix.lower() + '.wikipedia.org/w/api.php'
for cached_func in (search, suggest, summary):
cached_func.clear_cache()
def set_user_agent(user_agent_string):
'''
Set the User-Agent string to be used for all requests.
Arguments:
* user_agent_string - (string) a string specifying the User-Agent header
'''
global USER_AGENT
USER_AGENT = user_agent_string
def set_rate_limiting(rate_limit, min_wait=timedelta(milliseconds=50)):
'''
Enable or disable rate limiting on requests to the Mediawiki servers.
If rate limiting is not enabled, under some circumstances (depending on
load on Wikipedia, the number of requests you and other `wikipedia` users
are making, and other factors), Wikipedia may return an HTTP timeout error.
Enabling rate limiting generally prevents that issue, but please note that
HTTPTimeoutError still might be raised.
Arguments:
* rate_limit - (Boolean) whether to enable rate limiting or not
Keyword arguments:
* min_wait - if rate limiting is enabled, `min_wait` is a timedelta describing the minimum time to wait before requests.
Defaults to timedelta(milliseconds=50)
'''
global RATE_LIMIT
global RATE_LIMIT_MIN_WAIT
global RATE_LIMIT_LAST_CALL
RATE_LIMIT = rate_limit
if not rate_limit:
RATE_LIMIT_MIN_WAIT = None
else:
RATE_LIMIT_MIN_WAIT = min_wait
RATE_LIMIT_LAST_CALL = None
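# Illustrative usage sketch (not part of the original module): enable rate
# limiting with a 100 ms minimum wait between API calls.
def _rate_limiting_example():
  set_rate_limiting(True, min_wait=timedelta(milliseconds=100))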
@cache
def search(query, results=10, suggestion=False):
'''
Do a Wikipedia search for `query`.
Keyword arguments:
* results - the maximum number of results returned
* suggestion - if True, return results and suggestion (if any) in a tuple
'''
search_params = {
'list': 'search',
'srprop': '',
'srlimit': results,
'limit': results,
'srsearch': query
}
if suggestion:
search_params['srinfo'] = 'suggestion'
raw_results = _wiki_request(search_params)
if 'error' in raw_results:
if raw_results['error']['info'] in ('HTTP request timed out.', 'Pool queue is full'):
raise HTTPTimeoutError(query)
else:
raise WikipediaException(raw_results['error']['info'])
search_results = (d['title'] for d in raw_results['query']['search'])
if suggestion:
if raw_results['query'].get('searchinfo'):
return list(search_results), raw_results['query']['searchinfo']['suggestion']
else:
return list(search_results), None
return list(search_results)
@cache
def geosearch(latitude, longitude, title=None, results=10, radius=1000):
'''
Do a wikipedia geo search for `latitude` and `longitude`
using HTTP API described in http://www.mediawiki.org/wiki/Extension:GeoData
Arguments:
* latitude (float or decimal.Decimal)
* longitude (float or decimal.Decimal)
Keyword arguments:
* title - The title of an article to search for
* results - the maximum number of results returned
* radius - Search radius in meters. The value must be between 10 and 10000
'''
search_params = {
'list': 'geosearch',
'gsradius': radius,
'gscoord': '{0}|{1}'.format(latitude, longitude),
'gslimit': results
}
if title:
search_params['titles'] = title
raw_results = _wiki_request(search_params)
if 'error' in raw_results:
if raw_results['error']['info'] in ('HTTP request timed out.', 'Pool queue is full'):
raise HTTPTimeoutError('{0}|{1}'.format(latitude, longitude))
else:
raise WikipediaException(raw_results['error']['info'])
search_pages = raw_results['query'].get('pages', None)
if search_pages:
search_results = (v['title'] for k, v in search_pages.items() if k != '-1')
else:
search_results = (d['title'] for d in raw_results['query']['geosearch'])
return list(search_results)
@cache
def suggest(query):
'''
Get a Wikipedia search suggestion for `query`.
Returns a string or None if no suggestion was found.
'''
search_params = {
'list': 'search',
'srinfo': 'suggestion',
'srprop': '',
}
search_params['srsearch'] = query
raw_result = _wiki_request(search_params)
if raw_result['query'].get('searchinfo'):
return raw_result['query']['searchinfo']['suggestion']
return None
def random(pages=1):
'''
Get a list of random Wikipedia article titles.
.. note:: Random only gets articles from namespace 0, meaning no Category, User talk, or other meta-Wikipedia pages.
Keyword arguments:
* pages - the number of random pages returned (max of 10)
'''
#http://en.wikipedia.org/w/api.php?action=query&list=random&rnlimit=5000&format=jsonfm
query_params = {
'list': 'random',
'rnnamespace': 0,
'rnlimit': pages,
}
request = _wiki_request(query_params)
titles = [page['title'] for page in request['query']['random']]
if len(titles) == 1:
return titles[0]
return titles
@cache
def summary(title, sentences=0, chars=0, auto_suggest=True, redirect=True):
'''
Plain text summary of the page.
.. note:: This is a convenience wrapper - auto_suggest and redirect are enabled by default
Keyword arguments:
* sentences - if set, return the first `sentences` sentences (can be no greater than 10).
* chars - if set, return only the first `chars` characters (actual text returned may be slightly longer).
* auto_suggest - let Wikipedia find a valid page title for the query
* redirect - allow redirection without raising RedirectError
'''
# use auto_suggest and redirect to get the correct article
# also, use page's error checking to raise DisambiguationError if necessary
page_info = page(title, auto_suggest=auto_suggest, redirect=redirect)
title = page_info.title
pageid = page_info.pageid
query_params = {
'prop': 'extracts',
'explaintext': '',
'titles': title
}
if sentences:
query_params['exsentences'] = sentences
elif chars:
query_params['exchars'] = chars
else:
query_params['exintro'] = ''
request = _wiki_request(query_params)
summary = request['query']['pages'][pageid]['extract']
return summary
def page(title=None, pageid=None, auto_suggest=True, redirect=True, preload=False):
'''
Get a WikipediaPage object for the page with title `title` or the pageid
`pageid` (mutually exclusive).
Keyword arguments:
* title - the title of the page to load
* pageid - the numeric pageid of the page to load
* auto_suggest - let Wikipedia find a valid page title for the query
* redirect - allow redirection without raising RedirectError
* preload - load content, summary, images, references, and links during initialization
'''
if title is not None:
if auto_suggest:
results, suggestion = search(title, results=1, suggestion=True)
try:
title = suggestion or results[0]
except IndexError:
# if there is no suggestion or search results, the page doesn't exist
raise PageError(title)
return WikipediaPage(title, redirect=redirect, preload=preload)
elif pageid is not None:
return WikipediaPage(pageid=pageid, preload=preload)
else:
raise ValueError("Either a title or a pageid must be specified")
class WikipediaPage(object):
'''
Contains data from a Wikipedia page.
Uses property methods to filter data from the raw HTML.
'''
def __init__(self, title=None, pageid=None, redirect=True, preload=False, original_title=''):
if title is not None:
self.title = title
self.original_title = original_title or title
elif pageid is not None:
self.pageid = pageid
else:
raise ValueError("Either a title or a pageid must be specified")
self.__load(redirect=redirect, preload=preload)
if preload:
for prop in ('content', 'summary', 'images', 'references', 'links', 'sections'):
getattr(self, prop)
def __repr__(self):
return stdout_encode(u'<WikipediaPage \'{}\'>'.format(self.title))
def __eq__(self, other):
try:
return (
self.pageid == other.pageid
and self.title == other.title
and self.url == other.url
)
except:
return False
def __load(self, redirect=True, preload=False):
'''
Load basic information from Wikipedia.
Confirm that page exists and is not a disambiguation/redirect.
Does not need to be called manually, should be called automatically during __init__.
'''
query_params = {
'prop': 'info|pageprops',
'inprop': 'url',
'ppprop': 'disambiguation',
'redirects': '',
}
if not getattr(self, 'pageid', None):
query_params['titles'] = self.title
else:
query_params['pageids'] = self.pageid
request = _wiki_request(query_params)
query = request['query']
pageid = list(query['pages'].keys())[0]
page = query['pages'][pageid]
# missing is present if the page is missing
if 'missing' in page:
if hasattr(self, 'title'):
raise PageError(self.title)
else:
raise PageError(pageid=self.pageid)
# same thing for redirect, except it shows up in query instead of page for
# whatever silly reason
elif 'redirects' in query:
if redirect:
redirects = query['redirects'][0]
if 'normalized' in query:
normalized = query['normalized'][0]
assert normalized['from'] == self.title, ODD_ERROR_MESSAGE
from_title = normalized['to']
else:
from_title = self.title
assert redirects['from'] == from_title, ODD_ERROR_MESSAGE
# change the title and reload the whole object
self.__init__(redirects['to'], redirect=redirect, preload=preload)
else:
raise RedirectError(getattr(self, 'title', page['title']))
# since we only asked for disambiguation in ppprop,
# if a pageprop is returned,
# then the page must be a disambiguation page
elif 'pageprops' in page:
query_params = {
'prop': 'revisions',
'rvprop': 'content',
'rvparse': '',
'rvlimit': 1
}
if hasattr(self, 'pageid'):
query_params['pageids'] = self.pageid
else:
query_params['titles'] = self.title
request = _wiki_request(query_params)
html = request['query']['pages'][pageid]['revisions'][0]['*']
lis = BeautifulSoup(html).find_all('li')
filtered_lis = [li for li in lis if not 'tocsection' in ''.join(li.get('class', []))]
may_refer_to = [li.a.get_text() for li in filtered_lis if li.a]
raise DisambiguationError(getattr(self, 'title', page['title']), may_refer_to)
else:
self.pageid = pageid
self.title = page['title']
self.url = page['fullurl']
def __continued_query(self, query_params):
'''
Based on https://www.mediawiki.org/wiki/API:Query#Continuing_queries
'''
query_params.update(self.__title_query_param)
last_continue = {}
prop = query_params.get('prop', None)
while True:
params = query_params.copy()
params.update(last_continue)
request = _wiki_request(params)
if 'query' not in request:
break
pages = request['query']['pages']
if 'generator' in query_params:
for datum in pages.values(): # in python 3.3+: "yield from pages.values()"
yield datum
else:
for datum in pages[self.pageid][prop]:
yield datum
if 'continue' not in request:
break
last_continue = request['continue']
@property
def __title_query_param(self):
if getattr(self, 'title', None) is not None:
return {'titles': self.title}
else:
return {'pageids': self.pageid}
def html(self):
'''
Get full page HTML.
.. warning:: This can get pretty slow on long pages.
'''
if not getattr(self, '_html', False):
query_params = {
'prop': 'revisions',
'rvprop': 'content',
'rvlimit': 1,
'rvparse': '',
'titles': self.title
}
request = _wiki_request(query_params)
self._html = request['query']['pages'][self.pageid]['revisions'][0]['*']
return self._html
@property
def content(self):
'''
Plain text content of the page, excluding images, tables, and other data.
'''
if not getattr(self, '_content', False):
query_params = {
'prop': 'extracts|revisions',
'explaintext': '',
'rvprop': 'ids'
}
if getattr(self, 'title', None) is not None:
query_params['titles'] = self.title
else:
query_params['pageids'] = self.pageid
request = _wiki_request(query_params)
self._content = request['query']['pages'][self.pageid]['extract']
self._revision_id = request['query']['pages'][self.pageid]['revisions'][0]['revid']
self._parent_id = request['query']['pages'][self.pageid]['revisions'][0]['parentid']
return self._content
@property
def revision_id(self):
'''
Revision ID of the page.
The revision ID is a number that uniquely identifies the current
version of the page. It can be used to create the permalink or for
other direct API calls. See `Help:Page history
<http://en.wikipedia.org/wiki/Wikipedia:Revision>`_ for more
information.
'''
if not getattr(self, '_revision_id', False):
# fetch the content (side effect is loading the revid)
self.content
return self._revision_id
@property
def parent_id(self):
'''
Revision ID of the parent version of the current revision of this
page. See ``revision_id`` for more information.
'''
if not getattr(self, '_parent_id', False):
# fetch the content (side effect is loading the parent id)
self.content
return self._parent_id
@property
def summary(self):
'''
Plain text summary of the page.
'''
if not getattr(self, '_summary', False):
query_params = {
'prop': 'extracts',
'explaintext': '',
'exintro': '',
}
if getattr(self, 'title', None) is not None:
query_params['titles'] = self.title
else:
query_params['pageids'] = self.pageid
request = _wiki_request(query_params)
self._summary = request['query']['pages'][self.pageid]['extract']
return self._summary
@property
def images(self):
'''
List of URLs of images on the page.
'''
if not getattr(self, '_images', False):
self._images = [
page['imageinfo'][0]['url']
for page in self.__continued_query({
'generator': 'images',
'gimlimit': 'max',
'prop': 'imageinfo',
'iiprop': 'url',
})
if 'imageinfo' in page
]
return self._images
@property
def coordinates(self):
'''
Tuple of Decimals in the form of (lat, lon) or None
'''
if not getattr(self, '_coordinates', False):
query_params = {
'prop': 'coordinates',
'colimit': 'max',
'titles': self.title,
}
request = _wiki_request(query_params)
if 'query' in request:
coordinates = request['query']['pages'][self.pageid]['coordinates']
self._coordinates = (Decimal(coordinates[0]['lat']), Decimal(coordinates[0]['lon']))
else:
self._coordinates = None
return self._coordinates
@property
def references(self):
'''
List of URLs of external links on a page.
May include external links within page that aren't technically cited anywhere.
'''
if not getattr(self, '_references', False):
def add_protocol(url):
return url if url.startswith('http') else 'http:' + url
self._references = [
add_protocol(link['*'])
for link in self.__continued_query({
'prop': 'extlinks',
'ellimit': 'max'
})
]
return self._references
@property
def links(self):
'''
List of titles of Wikipedia page links on a page.
.. note:: Only includes articles from namespace 0, meaning no Category, User talk, or other meta-Wikipedia pages.
'''
if not getattr(self, '_links', False):
self._links = [
link['title']
for link in self.__continued_query({
'prop': 'links',
'plnamespace': 0,
'pllimit': 'max'
})
]
return self._links
@property
def sections(self):
'''
List of section titles from the table of contents on the page.
'''
if not getattr(self, '_sections', False):
query_params = {
'action': 'parse',
'prop': 'sections',
}
query_params.update(self.__title_query_param)
request = _wiki_request(query_params)
self._sections = [section['line'] for section in request['parse']['sections']]
return self._sections
def section(self, section_title):
'''
Get the plain text content of a section from `self.sections`.
Returns None if `section_title` isn't found, otherwise returns a whitespace stripped string.
This is a convenience method that wraps self.content.
.. warning:: Calling `section` on a section that has subheadings will NOT return
the full text of all of the subsections. It only gets the text between
`section_title` and the next subheading, which is often empty.
'''
section = u"== {} ==".format(section_title)
try:
index = self.content.index(section) + len(section)
except ValueError:
return None
try:
next_index = self.content.index("==", index)
except ValueError:
next_index = len(self.content)
return self.content[index:next_index].lstrip("=").strip()
@cache
def languages():
'''
List all the currently supported language prefixes (usually ISO language code).
Can be inputted to `set_lang` to change the Mediawiki that `wikipedia` requests
results from.
Returns: dict of <prefix>: <local_lang_name> pairs. To get just a list of prefixes,
use `wikipedia.languages().keys()`.
'''
response = _wiki_request({
'meta': 'siteinfo',
'siprop': 'languages'
})
languages = response['query']['languages']
return {
lang['code']: lang['*']
for lang in languages
}
def donate():
'''
Open up the Wikimedia donate page in your favorite browser.
'''
import webbrowser
webbrowser.open('https://donate.wikimedia.org/w/index.php?title=Special:FundraiserLandingPage', new=2)
def _wiki_request(params):
'''
Make a request to the Wikipedia API using the given search parameters.
Returns a parsed dict of the JSON response.
'''
global RATE_LIMIT_LAST_CALL
global USER_AGENT
params['format'] = 'json'
if 'action' not in params:
params['action'] = 'query'
headers = {
'User-Agent': USER_AGENT
}
if RATE_LIMIT and RATE_LIMIT_LAST_CALL and \
RATE_LIMIT_LAST_CALL + RATE_LIMIT_MIN_WAIT > datetime.now():
# it hasn't been long enough since the last API call
# so wait until we're in the clear to make the request
wait_time = (RATE_LIMIT_LAST_CALL + RATE_LIMIT_MIN_WAIT) - datetime.now()
time.sleep(int(wait_time.total_seconds()))
r = requests.get(API_URL, params=params, headers=headers)
if RATE_LIMIT:
RATE_LIMIT_LAST_CALL = datetime.now()
return r.json()
|
|
# Copyright 2020 Google LLC
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test gradual quantization noise injection with quantizers of quantizers.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import logging
from numpy.testing import assert_allclose
from numpy.testing import assert_equal
import pytest
from tensorflow.keras import backend as K
from qkeras.quantizers import quantized_bits
from qkeras.quantizers import quantized_relu
def test_qnoise_quantized_bits():
# 1 sign bit, 1 integer bit, and 2 fractional bits.
bits = 4
integer = 1
symmetric = True
keep_negative = True
alpha = 1
use_stochastic_rounding = False
qb = quantized_bits(
bits=bits,
integer=integer,
symmetric=symmetric,
keep_negative=keep_negative,
alpha=alpha,
use_stochastic_rounding=use_stochastic_rounding,
use_variables=True)
inputs = np.array([0.0, 0.5, -0.5, 0.6, -0.6, 2.0, -2.0], dtype=np.float32)
x = np.array([0.0, 0.5, -0.5, 0.6, -0.6, 2.0, -2.0], dtype=np.float32)
xq = np.array([0.0, 0.5, -0.5, 0.5, -0.5, 1.75, -1.75], dtype=np.float32)
x_xq = 0.5 * (x + xq)
# no quantization
qb.update_qnoise_factor(qnoise_factor=0.0)
x_q_0 = qb(inputs)
assert_equal(x_q_0, x)
# full quantization
qb.update_qnoise_factor(qnoise_factor=1.0)
x_q_1 = qb(inputs)
assert_equal(x_q_1, xq)
# mixing half and half of x and xq
qb.update_qnoise_factor(qnoise_factor=0.5)
x_q_05 = qb(inputs)
assert_equal(x_q_05, x_xq)
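# Illustrative helper (not part of the original tests): a qnoise_factor f
# blends the float value x with its quantized value xq as x + f * (xq - x),
# which reduces to x for f = 0 and to xq for f = 1; the assertions above cover
# f = 0, 1 and 0.5.
def _blend(x, xq, qnoise_factor):
  return x + qnoise_factor * (xq - x)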
def test_qnoise_quantized_relu():
# 0 sign bit, 1 integer bit, and 3 fractional bits.
bits = 4
integer = 1
use_sigmoid = False
negative_slope = 0
use_stochastic_rounding = False
# input to quantized relu
inputs = np.array([0.0, 0.5, -0.5, 0.6, 2.0, 3.0], dtype=np.float32)
# float relu
x = np.array([0.0, 0.5, 0.0, 0.6, 2.0, 3.0], dtype=np.float32)
# float relu with upper bound 1.5
x_ub = np.array([0.0, 0.5, 0.0, 0.6, 1.5, 1.5], dtype=np.float32)
# float relu with quantized clipping
x_clipped = np.array([0.0, 0.5, 0.0, 0.6, 1.875, 1.875], dtype=np.float32)
# quantized relu
xq = np.array([0.0, 0.5, 0.0, 0.625, 1.875, 1.875], dtype=np.float32)
# mixing half and half
x_xq = 0.5 * (x + xq)
x_clipped_xq = 0.5 * (x_clipped + xq)
x_ub_xq = 0.5 * (x_ub + xq)
#########################################
# No relu upper bound
# No quantized clip for float relu
#########################################
qr_qc_false = quantized_relu(
bits=bits,
integer=integer,
use_sigmoid=use_sigmoid,
negative_slope=negative_slope,
use_stochastic_rounding=use_stochastic_rounding,
relu_upper_bound=None,
is_quantized_clip=False,
use_variables=True)
# no quantization
qr_qc_false.update_qnoise_factor(qnoise_factor=0.0)
x_q_0 = qr_qc_false(inputs)
assert_equal(x_q_0, x)
# full quantization
qr_qc_false.update_qnoise_factor(qnoise_factor=1.0)
x_q_1 = qr_qc_false(inputs)
assert_equal(x_q_1, xq)
# mixing half and half
qr_qc_false.update_qnoise_factor(qnoise_factor=0.5)
x_q_05 = qr_qc_false(inputs)
assert_equal(x_q_05, x_xq)
#########################################
# No relu upper bound
# Quantized clip for float relu
#########################################
qr_qc_true = quantized_relu(
bits=bits,
integer=integer,
use_sigmoid=use_sigmoid,
negative_slope=negative_slope,
use_stochastic_rounding=use_stochastic_rounding,
relu_upper_bound=None,
is_quantized_clip=True,
use_variables=True)
# no quantization
qr_qc_true.update_qnoise_factor(qnoise_factor=0.0)
x_q_0 = qr_qc_true(inputs)
assert_equal(x_q_0, x_clipped)
# full quantization
qr_qc_true.update_qnoise_factor(qnoise_factor=1.0)
x_q_1 = qr_qc_true(inputs)
assert_equal(x_q_1, xq)
# mixing half and half
qr_qc_true.update_qnoise_factor(qnoise_factor=0.5)
x_q_05 = qr_qc_true(inputs)
assert_equal(x_q_05, x_clipped_xq)
#########################################
# Relu upper bound
# No quantized clip for float relu
#########################################
qr_ub_qc_false = quantized_relu(
bits=bits,
integer=integer,
use_sigmoid=use_sigmoid,
negative_slope=negative_slope,
use_stochastic_rounding=use_stochastic_rounding,
relu_upper_bound=1.5,
is_quantized_clip=False,
use_variables=True)
# no quantization
qr_ub_qc_false.update_qnoise_factor(qnoise_factor=0.0)
x_q_0 = qr_ub_qc_false(inputs)
assert_equal(x_q_0, np.clip(x_ub, a_min=None, a_max=1.5))
# full quantization
qr_ub_qc_false.update_qnoise_factor(qnoise_factor=1.0)
x_q_1 = qr_ub_qc_false(inputs)
assert_equal(x_q_1, np.clip(xq, a_min=None, a_max=1.5))
# mixing half and half
qr_ub_qc_false.update_qnoise_factor(qnoise_factor=0.5)
x_q_05 = qr_ub_qc_false(inputs)
assert_equal(x_q_05, np.clip(x_ub_xq, a_min=None, a_max=1.5))
#########################################
# Relu upper bound
# Quantized clip for float relu
# (The quantized clip has precedence over the relu upper bound.)
#########################################
qr_ub_qc_true = quantized_relu(
bits=bits,
integer=integer,
use_sigmoid=use_sigmoid,
negative_slope=negative_slope,
use_stochastic_rounding=use_stochastic_rounding,
relu_upper_bound=1.5,
is_quantized_clip=True,
use_variables=True)
# no quantization
qr_ub_qc_true.update_qnoise_factor(qnoise_factor=0.0)
x_q_0 = qr_ub_qc_true(inputs)
assert_equal(x_q_0, x_clipped)
# full quantization
qr_ub_qc_true.update_qnoise_factor(qnoise_factor=1.0)
x_q_1 = qr_ub_qc_true(inputs)
assert_equal(x_q_1, xq)
# mixing half and half
qr_ub_qc_true.update_qnoise_factor(qnoise_factor=0.5)
x_q_05 = qr_ub_qc_true(inputs)
assert_equal(x_q_05, x_clipped_xq)
if __name__ == "__main__":
pytest.main([__file__])
|
|
import gc
import sys
import time
import logging
from ctypes import *
import numpy as np
from numpy.ctypeslib import ndpointer
from .backend import Backend
log = logging.getLogger(__name__)
cudart = cdll.LoadLibrary("libcudart.so")
cusparse = cdll.LoadLibrary("libcusparse.so")
cufft = cdll.LoadLibrary("libcufft.so")
cublas = cdll.LoadLibrary("libcublas.so")
nvtx = cdll.LoadLibrary("libnvToolsExt.so")
class c_complex(c_float * 2):
def __init__(self, a):
super().__init__()
self[0] = a.real
self[1] = a.imag
class CudaBackend(Backend):
def __init__(self, device_id=0):
super(CudaBackend, self).__init__()
self._cublas_handle = self.cublasHandle_t(self)
self._cusparse_handle = self.cusparseHandle_t(self)
self._mat_descr = self.cusparseMatDescr_t(self)
self.cudaSetDevice( device_id )
cu_device = c_int()
self.cudaGetDevice( byref(cu_device) )
log.info("using CUDA device #%d", cu_device.value)
self._plans = dict()
class cudaError_t(c_long):
def _check(self, backend, fn):
if self.value != 0:
name = backend.cudaGetErrorName( self ).decode('ascii')
desc = backend.cudaGetErrorString( self ).decode('ascii')
log.critical("%s returned exit code %d: %s (%s)",
fn.__name__, self.value, name, desc)
raise RuntimeError
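# Build a ctypes binding from an annotated stub: the decorator looks up the
# function of the same name in `lib`, sets its argtypes/restype from the
# stub's annotations, and checks the returned status via its `_check` method
# (if the result type defines one).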
def wrap(lib):
def wrapper(fn):
libfn = getattr(lib, fn.__name__)
libfn.argtypes = [fn.__annotations__[arg] for arg in fn.__code__.co_varnames]
libfn.restype = fn.__annotations__['return']
def wrapped(backend, *args, **kwargs):
res = libfn(*args, **kwargs)
if hasattr(res, '_check'):
res._check(backend, fn)
return res
return wrapped
return wrapper
@wrap(cudart)
def cudaSetDevice(device : c_int) -> cudaError_t:
pass
@wrap(cudart)
def cudaGetDevice(device : POINTER(c_int)) -> cudaError_t:
pass
@wrap(cudart)
def cudaGetErrorName( err : cudaError_t ) -> c_char_p:
pass
@wrap(cudart)
def cudaGetErrorString( err : cudaError_t ) -> c_char_p:
pass
@wrap(cudart)
def cudaGetDeviceCount(count: POINTER(c_int)) -> cudaError_t:
pass
@wrap(cudart)
def cudaDeviceSynchronize() -> cudaError_t:
pass
@wrap(cudart)
def cudaMalloc(devPtr : POINTER(c_ulong), size : c_size_t) -> cudaError_t:
pass
@wrap(cudart)
def cudaFree(devPtr : c_ulong) -> cudaError_t:
pass
@wrap(cudart)
def cudaMemcpy( dst:c_ulong, src:c_ulong, count:c_size_t, kind:c_int) -> cudaError_t:
pass
@wrap(cudart)
def cudaMemcpy2D( dst:c_ulong, dpitch:c_size_t,
src:c_ulong, spitch:c_size_t,
width:c_size_t, height:c_size_t, kind:c_int) -> cudaError_t:
pass
@wrap(cudart)
def cudaMemset(
devPtr : c_ulong,
size : c_int,
count : c_size_t
) -> cudaError_t:
pass
cudaMemcpy.HostToHost = 0
cudaMemcpy.HostToDevice = 1
cudaMemcpy.DeviceToHost = 2
cudaMemcpy.DeviceToDevice = 3
cudaMemcpy.Default = 4
def barrier(self):
self.cudaDeviceSynchronize()
# -----------------------------------------------------------------------
# Arrays
# -----------------------------------------------------------------------
class dndarray(Backend.dndarray):
def _copy_from(self, arr):
assert arr.flags['F_CONTIGUOUS']
src, dst = arr.ctypes.data, self._arr
if self.ndim == 2:
spitch = arr.shape[0] * arr.dtype.itemsize
dpitch = self._leading_dim * self.itemsize
width, height = self.shape[0] * self.dtype.itemsize, self.shape[1]
self._backend.cudaMemcpy2D(dst, dpitch, src, spitch, width, height,
CudaBackend.cudaMemcpy.HostToDevice)
else:
assert self.contiguous
size, kind = arr.nbytes, CudaBackend.cudaMemcpy.HostToDevice
self._backend.cudaMemcpy(dst, src, size, kind)
def _copy_to(self, arr):
assert arr.flags['F_CONTIGUOUS']
src, dst = self._arr, arr.ctypes.data
if self.ndim == 2:
spitch = self._leading_dim * self.itemsize
dpitch = arr.shape[0] * arr.dtype.itemsize
width, height = self.shape[0] * self.dtype.itemsize, self.shape[1]
self._backend.cudaMemcpy2D(dst, dpitch, src, spitch, width, height,
CudaBackend.cudaMemcpy.DeviceToHost)
else:
assert self.contiguous
size, kind = arr.nbytes, CudaBackend.cudaMemcpy.DeviceToHost
self._backend.cudaMemcpy(dst, src, size, kind)
def _copy(self, d_arr):
src, dst = d_arr._arr, self._arr
if self.ndim == 2:
spitch = d_arr._leading_dim * d_arr.itemsize
dpitch = self._leading_dim * self.itemsize
width, height = self.shape[0] * self.dtype.itemsize, self.shape[1]
self._backend.cudaMemcpy2D(dst, dpitch, src, spitch, width, height,
CudaBackend.cudaMemcpy.DeviceToDevice)
else:
assert self.contiguous
size, kind = self.nbytes, CudaBackend.cudaMemcpy.DeviceToDevice
self._backend.cudaMemcpy(dst, src, size, kind)
def _malloc(self, shape, dtype):
align = 256
self._fullarr = c_ulong()
self._backend.cudaMalloc( byref(self._fullarr), self.nbytes + align)
_arr = self._fullarr.value
while _arr % align != 0:
_arr += 1
return c_ulong(_arr)
def _free(self):
self._backend.cudaFree( self._fullarr )
def _zero(self):
self._backend.cudaMemset( self._arr, 0, self.nbytes )
def __getitem__(self, slc):
if isinstance(slc, slice):
slc = [slc]
start, shape = [], []
for s, n in zip(slc, self.shape):
if isinstance(s, int):
s = slice(s, s+1)
b, e = s.start or 0, s.stop or n
if b < 0: b = n+b # remove negative beginning indices
if e < 0: e = n+e # remove neg ending indices
if e < b: e = b # disallow negative sizes
if e > n: e = n # fix over-slices
start.append(b)
shape.append(e-b)
idx = np.ravel_multi_index(start, self.shape, order='F')
ptr = self._arr.value + idx * np.dtype(self.dtype).itemsize
ptr = c_ulong(ptr)
ld = self._leading_dim
return self._backend.dndarray(self._backend, tuple(shape),
self.dtype, ld=ld, own=False, data=ptr)
@staticmethod
def from_param(obj):
if not isinstance(obj, CudaBackend.dndarray):
raise ArgumentError('{} is not a dndarray'.format( type(obj) ))
return obj._arr
# -----------------------------------------------------------------------
# BLAS Routines
# -----------------------------------------------------------------------
class cublasHandle_t(c_void_p):
def __init__(self, backend):
super().__init__()
self._backend = backend
self._backend.cublasCreate_v2( byref(self) )
def __del__(self):
self._backend.cublasDestroy_v2( self )
class cublasStatus_t(c_int):
def _check(self, backend, fn):
if self.value != 0:
log.critical("%s returned exit code %d", fn.__name__, self.value)
raise RuntimeError
@wrap(cublas)
def cublasCreate_v2( handle : POINTER(cublasHandle_t) ) -> cublasStatus_t:
pass
@wrap(cublas)
def cublasDestroy_v2( handle : cublasHandle_t ) -> cublasStatus_t:
pass
def axpby(self, beta, y, alpha, x):
""" y += alpha * x """
assert isinstance(x, self.dndarray)
assert isinstance(y, self.dndarray)
alpha = np.array(alpha, dtype=np.complex64)
if beta != 1: # y *= beta
beta = np.array( beta, dtype=np.complex64)
self.cublasCscal_v2( self._cublas_handle, y.size, beta, y._arr, 1 )
if alpha != 0:
self.cublasCaxpy_v2( self._cublas_handle, y.size, alpha, x._arr, 1, y._arr, 1 )
@wrap(cublas)
def cublasCaxpy_v2(
handle : cublasHandle_t,
n : c_int,
alpha : ndpointer(dtype=np.complex64, ndim=0),
x : c_ulong, incx : c_int,
y : c_ulong, incy : c_int,
) -> cublasStatus_t:
pass
def dot(self, x, y):
""" returns x^T * y """
assert isinstance(x, self.dndarray)
assert isinstance(y, self.dndarray)
dotc = np.array(0, dtype=np.complex64)
self.cublasCdotc_v2( self._cublas_handle, x.size,
x._arr, 1, y._arr, 1, dotc)
return dotc.real
@wrap(cublas)
def cublasCdotc_v2(
handle : cublasHandle_t,
n : c_int,
x : c_ulong,
incx : c_int,
y : c_ulong,
incy : c_int,
result : ndpointer(dtype=np.complex64, ndim=0),
) -> cublasStatus_t:
pass
def norm2(self, x):
""" returns ||x||_2"""
assert isinstance(x, self.dndarray)
result = np.array(0, dtype=np.complex64)
self.cublasScnrm2_v2( self._cublas_handle, x.size, x._arr, 1, result )
return result**2
@wrap(cublas)
def cublasScnrm2_v2(
handle : cublasHandle_t,
n : c_int,
x : c_ulong,
incx : c_int,
result : ndpointer(dtype=np.complex64, ndim=0),
) -> cublasStatus_t:
pass
def scale(self, x, alpha):
""" x *= alpha """
assert isinstance(x, self.dndarray)
alpha = np.array(alpha, dtype=np.complex64)
self.cublasCscal_v2( self._cublas_handle, x.size, alpha, x._arr, 1 )
@wrap(cublas)
def cublasCscal_v2(
handle : cublasHandle_t,
n : c_int,
alpha : ndpointer(dtype=np.complex64, ndim=0),
x : c_ulong,
incx : c_int,
) -> cublasStatus_t:
pass
def cgemm(self, y, M, x, alpha, beta, forward):
""" y = alpha * M * X + beta * Y """
assert isinstance(x, self.dndarray)
alpha = np.array(alpha, dtype=np.complex64)
beta = np.array( beta, dtype=np.complex64)
(m, n), k = y.shape, x.shape[0]
lda = M.shape[0]
ldb = x.shape[0]
ldc = y.shape[0]
if forward:
transa = CudaBackend.cublasOperator_t.CUBLAS_OP_N
else:
transa = CudaBackend.cublasOperator_t.CUBLAS_OP_C
transb = CudaBackend.cublasOperator_t.CUBLAS_OP_N
self.cublasCgemm_v2( self._cublas_handle, transa, transb,
m, n, k, alpha, M, lda, x, ldb, beta, y, ldc )
class cublasOperator_t(c_uint):
CUBLAS_OP_N = 0
CUBLAS_OP_T = 1
CUBLAS_OP_C = 2
@wrap(cublas)
def cublasCgemm_v2(
handle : cublasHandle_t,
transa : cublasOperator_t,
transb : cublasOperator_t,
m : c_int,
n : c_int,
k : c_int,
alpha : ndpointer(dtype=np.complex64, ndim=0),
M : dndarray,
lda : c_int,
x : dndarray,
ldb : c_int,
beta : ndpointer(dtype=np.complex64, ndim=0),
y : dndarray,
ldc : c_int,
) -> cublasStatus_t:
pass
def csymm(self, y, M, x, alpha, beta, left=True):
assert isinstance(x, self.dndarray)
alpha = np.array(alpha, dtype=np.complex64)
beta = np.array( beta, dtype=np.complex64)
(m, n), k = y.shape, x.shape[0]
lda = M._leading_dim
ldb = x._leading_dim
ldc = y._leading_dim
uplo = CudaBackend.cublasFillMode_t.UPPER
side = getattr(CudaBackend.cublasSideMode_t, 'LEFT' if left else 'RIGHT')
self.cublasCsymm_v2( self._cublas_handle, side, uplo,
m, n, alpha, M, lda, x, ldb, beta, y, ldc )
class cublasSideMode_t(c_uint):
LEFT = 0
RIGHT = 1
class cublasFillMode_t(c_uint):
LOWER = 0
UPPER = 1
@wrap(cublas)
def cublasCsymm_v2(
handle : cublasHandle_t,
size : cublasSideMode_t,
uplo : cublasFillMode_t,
m : c_int,
n : c_int,
alpha : ndpointer(dtype=np.complex64, ndim=0),
M : dndarray,
lda : c_int,
x : dndarray,
ldb : c_int,
beta : ndpointer(dtype=np.complex64, ndim=0),
y : dndarray,
ldc : c_int,
) -> cublasStatus_t:
pass
# -----------------------------------------------------------------------
# FFT Routines
# -----------------------------------------------------------------------
class cufftHandle_t(c_int):
def __init__(self, backend):
super(CudaBackend.cufftHandle_t, self).__init__()
self._backend = backend
self._backend.cufftCreate( byref(self) )
def __del__(self):
self._backend.cufftDestroy(self)
class cufftType_t(c_int):
pass
class cufftResult_t(c_int):
def _check(self, backend, fn):
if self.value != 0:
log.critical("cufft function %s returned error code %d",
fn.__name__, self.value)
raise RuntimeError
CUFFT_C2C = 0x29
CUFFT_FORWARD = -1
CUFFT_INVERSE = 1
@wrap(cufft)
def cufftSetAutoAllocation(
plan : cufftHandle_t,
auto : c_int,
) -> cufftResult_t:
pass
@wrap(cufft)
def cufftSetWorkArea(
plan : cufftHandle_t,
area : dndarray,
) -> cufftResult_t:
pass
@wrap(cufft)
def cufftMakePlanMany(
plan : cufftHandle_t,
rank : c_int,
n : POINTER(c_int),
inembed : POINTER(c_int), istride: c_int, idist: c_int,
onembed : POINTER(c_int), ostride: c_int, odist: c_int,
typ : cufftType_t,
batch : c_int,
workSize: POINTER(c_size_t),
) -> cufftResult_t:
pass
@wrap(cufft)
def cufftExecC2C(
plan : cufftHandle_t,
idata : dndarray,
odata : dndarray,
direction : c_int,
) -> cufftResult_t:
pass
@wrap(cufft)
def cufftCreate(
plan : POINTER(cufftHandle_t),
) -> cufftResult_t:
pass
@wrap(cufft)
def cufftDestroy(
plan : cufftHandle_t,
) -> cufftResult_t:
pass
def _get_or_create_plan(self, x_shape):
if x_shape not in self._plans:
N = x_shape[:-1][::-1]
batch = x_shape[-1]
dims = (c_int*len(N))(*N)
plan = CudaBackend.cufftHandle_t(self)
ws = c_size_t()
self.cufftSetAutoAllocation(plan, 0)
self.cufftMakePlanMany(plan, len(dims), dims,
None, 0, 0, None, 0, 0, CudaBackend.CUFFT_C2C,
batch, byref(ws))
self._plans[x_shape] = (plan, ws.value)
return self._plans[x_shape]
def _fft_workspace_size(self, x_shape):
plan, workSize = self._get_or_create_plan(x_shape)
return workSize
def fftn(self, y, x):
plan, workSize = self._get_or_create_plan(x.shape)
with self.scratch(nbytes=workSize) as tmp:
self.cufftSetWorkArea(plan, tmp)
self.cufftExecC2C(plan, x, y, CudaBackend.CUFFT_FORWARD)
def ifftn(self, y, x):
plan, workSize = self._get_or_create_plan(x.shape)
with self.scratch(nbytes=workSize) as tmp:
self.cufftSetWorkArea(plan, tmp)
self.cufftExecC2C(plan, x, y, CudaBackend.CUFFT_INVERSE)
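# Illustrative use of the cached-plan/scratch-workspace pattern above (a
# sketch; `b` is assumed to be a CudaBackend and x, y device arrays of the
# same shape):
#
#   b.fftn(y, x)    # forward C2C FFT into y
#   b.ifftn(x, y)   # inverse C2C FFT (cuFFT leaves the result unnormalized)
#
# The last axis of x.shape is the batch dimension; the remaining axes
# (reversed) define the transform size, and plans are cached per shape.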
# -----------------------------------------------------------------------
# Cusparse
# -----------------------------------------------------------------------
class cusparseHandle_t(c_void_p):
def __init__(self, backend):
super().__init__()
self._backend = backend
self._backend.cusparseCreate( byref(self) )
def __del__(self):
self._backend.cusparseDestroy(self)
class cusparseOperation_t(c_int): pass
class cusparseMatDescr_t(c_void_p):
def __init__(self, backend):
super().__init__()
self._backend = backend
self._backend.cusparseCreateMatDescr( byref(self) )
def __del__(self):
self._backend.cusparseDestroyMatDescr(self)
class cusparseStatus_t(c_int):
def _check(self, backend, fn):
if self.value != 0:
log.critical("cusparse function %s returned error code %d",
fn.__name__, self.value)
raise RuntimeError
CUSPARSE_OPERATION_NON_TRANSPOSE = 0
CUSPARSE_OPERATION_TRANSPOSE = 1
CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE = 2
@wrap(cusparse)
def cusparseCreate(
handle : POINTER(cusparseHandle_t)
) -> cusparseStatus_t:
pass
@wrap(cusparse)
def cusparseDestroy(
handle : cusparseHandle_t
) -> cusparseStatus_t:
pass
@wrap(cusparse)
def cusparseCreateMatDescr(
descr : POINTER(cusparseMatDescr_t)
) -> cusparseStatus_t:
pass
@wrap(cusparse)
def cusparseDestroyMatDescr(
descr : cusparseMatDescr_t
) -> cusparseStatus_t:
pass
@wrap(cusparse)
def cusparseCcsrmm(
handle : cusparseHandle_t,
transA : cusparseOperation_t,
m : c_int,
n : c_int,
k : c_int,
nnz : c_int,
alpha : c_complex,
descrA : cusparseMatDescr_t,
csrValA : dndarray,
csrRowPtrA : dndarray,
csrColIndA : dndarray,
B : dndarray,
ldb : c_int,
beta : c_complex,
C : dndarray,
ldc : c_int,
) -> cusparseStatus_t:
pass
def ccsrmm(self, y, A_shape, A_indx, A_ptr, A_vals, x, alpha, beta, adjoint=False, exwrite=False):
m, k = A_shape
n = x.shape[1]
ldx = x._leading_dim
ldy = y._leading_dim
if adjoint:
trans = self.CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE
else:
trans = self.CUSPARSE_OPERATION_NON_TRANSPOSE
alpha = c_complex(alpha)
beta = c_complex(beta)
self.cusparseCcsrmm( self._cusparse_handle, trans, m, n, k,
A_vals.size, (alpha), self._mat_descr,
A_vals, A_ptr, A_indx, x, ldx, (beta), y, ldy
)
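# Roughly equivalent host-side computation with scipy.sparse (an illustrative
# sketch; A is the m-by-k CSR matrix described by A_vals/A_ptr/A_indx):
#
#   import scipy.sparse as sp
#   A = sp.csr_matrix((A_vals, A_indx, A_ptr), shape=(m, k))
#   op = A.conj().T if adjoint else A
#   y[:] = alpha * (op @ x) + beta * y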
# -----------------------------------------------------------------------
# Misc Routines
# -----------------------------------------------------------------------
@staticmethod
def max(val, arr):
from indigo.backends._customgpu import max as fastmax
fastmax(arr.size*2, val, arr._arr.value)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Scheduler Service
"""
from oslo.config import cfg
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import api as conductor_api
from nova.conductor.tasks import live_migrate
import nova.context
from nova import exception
from nova import manager
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import periodic_task
from nova.openstack.common.rpc import common as rpc_common
from nova import quota
from nova.scheduler import utils as scheduler_utils
LOG = logging.getLogger(__name__)
scheduler_driver_opt = cfg.StrOpt('scheduler_driver',
default='nova.scheduler.filter_scheduler.FilterScheduler',
help='Default driver to use for the scheduler')
CONF = cfg.CONF
CONF.register_opt(scheduler_driver_opt)
QUOTAS = quota.QUOTAS
class SchedulerManager(manager.Manager):
"""Chooses a host to run instances on."""
RPC_API_VERSION = '2.7'
def __init__(self, scheduler_driver=None, *args, **kwargs):
if not scheduler_driver:
scheduler_driver = CONF.scheduler_driver
self.driver = importutils.import_object(scheduler_driver)
super(SchedulerManager, self).__init__(service_name='scheduler',
*args, **kwargs)
def post_start_hook(self):
"""After we start up and can receive messages via RPC, tell all
compute nodes to send us their capabilities.
"""
ctxt = nova.context.get_admin_context()
compute_rpcapi.ComputeAPI().publish_service_capabilities(ctxt)
def update_service_capabilities(self, context, service_name,
host, capabilities):
"""Process a capability update from a service node."""
if not isinstance(capabilities, list):
capabilities = [capabilities]
for capability in capabilities:
if capability is None:
capability = {}
self.driver.update_service_capabilities(service_name, host,
capability)
def create_volume(self, context, volume_id, snapshot_id,
reservations=None, image_id=None):
# NOTE: this method was removed in RPC API 2.3; kept as a no-op for
# backwards compatibility with older clients.
pass
@rpc_common.client_exceptions(exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.MigrationPreCheckError)
def live_migration(self, context, instance, dest,
block_migration, disk_over_commit):
try:
self._schedule_live_migration(context, instance, dest,
block_migration, disk_over_commit)
except (exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.MigrationPreCheckError) as ex:
request_spec = {'instance_properties': {
'uuid': instance['uuid'], },
}
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify('live_migration',
dict(vm_state=instance['vm_state'],
task_state=None,
expected_task_state=task_states.MIGRATING,),
context, ex, request_spec)
except Exception as ex:
request_spec = {'instance_properties': {
'uuid': instance['uuid'], },
}
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify('live_migration',
{'vm_state': vm_states.ERROR},
context, ex, request_spec)
def _schedule_live_migration(self, context, instance, dest,
block_migration, disk_over_commit):
task = live_migrate.LiveMigrationTask(context, instance,
dest, block_migration, disk_over_commit,
self.driver.select_hosts)
return task.execute()
def run_instance(self, context, request_spec, admin_password,
injected_files, requested_networks, is_first_time,
filter_properties):
"""Tries to call schedule_run_instance on the driver.
Sets instance vm_state to ERROR on exceptions
"""
instance_uuids = request_spec['instance_uuids']
with compute_utils.EventReporter(context, conductor_api.LocalAPI(),
'schedule', *instance_uuids):
try:
return self.driver.schedule_run_instance(context,
request_spec, admin_password, injected_files,
requested_networks, is_first_time, filter_properties)
except exception.NoValidHost as ex:
# don't re-raise
self._set_vm_state_and_notify('run_instance',
{'vm_state': vm_states.ERROR,
'task_state': None},
context, ex, request_spec)
except Exception as ex:
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify('run_instance',
{'vm_state': vm_states.ERROR,
'task_state': None},
context, ex, request_spec)
def prep_resize(self, context, image, request_spec, filter_properties,
instance, instance_type, reservations):
"""Tries to call schedule_prep_resize on the driver.
Sets instance vm_state to ACTIVE on NoValidHost
Sets vm_state to ERROR on other exceptions
"""
instance_uuid = instance['uuid']
with compute_utils.EventReporter(context, conductor_api.LocalAPI(),
'schedule', instance_uuid):
try:
kwargs = {
'context': context,
'image': image,
'request_spec': request_spec,
'filter_properties': filter_properties,
'instance': instance,
'instance_type': instance_type,
'reservations': reservations,
}
return self.driver.schedule_prep_resize(**kwargs)
except exception.NoValidHost as ex:
self._set_vm_state_and_notify('prep_resize',
{'vm_state': vm_states.ACTIVE,
'task_state': None},
context, ex, request_spec)
if reservations:
QUOTAS.rollback(context, reservations)
except Exception as ex:
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify('prep_resize',
{'vm_state': vm_states.ERROR,
'task_state': None},
context, ex, request_spec)
if reservations:
QUOTAS.rollback(context, reservations)
def _set_vm_state_and_notify(self, method, updates, context, ex,
request_spec):
scheduler_utils.set_vm_state_and_notify(
context, 'scheduler', method, updates, ex, request_spec, self.db)
# NOTE(hanlind): This method can be removed in v3.0 of the RPC API.
def show_host_resources(self, context, host):
"""Shows the physical/usage resource given by hosts.
:param context: security context
:param host: hostname
:returns:
example format is below::
{'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048,
'vcpus_used': 12, 'memory_mb_used': 10240,
'local_gb_used': 64}
"""
# Getting compute node info and related instances info
service_ref = self.db.service_get_by_compute_host(context, host)
instance_refs = self.db.instance_get_all_by_host(context,
service_ref['host'])
# Getting total available/used resource
compute_ref = service_ref['compute_node'][0]
resource = {'vcpus': compute_ref['vcpus'],
'memory_mb': compute_ref['memory_mb'],
'local_gb': compute_ref['local_gb'],
'vcpus_used': compute_ref['vcpus_used'],
'memory_mb_used': compute_ref['memory_mb_used'],
'local_gb_used': compute_ref['local_gb_used']}
usage = dict()
if not instance_refs:
return {'resource': resource, 'usage': usage}
# Getting usage resource per project
project_ids = [i['project_id'] for i in instance_refs]
project_ids = list(set(project_ids))
for project_id in project_ids:
vcpus = [i['vcpus'] for i in instance_refs
if i['project_id'] == project_id]
mem = [i['memory_mb'] for i in instance_refs
if i['project_id'] == project_id]
root = [i['root_gb'] for i in instance_refs
if i['project_id'] == project_id]
ephemeral = [i['ephemeral_gb'] for i in instance_refs
if i['project_id'] == project_id]
usage[project_id] = {'vcpus': sum(vcpus),
'memory_mb': sum(mem),
'root_gb': sum(root),
'ephemeral_gb': sum(ephemeral)}
return {'resource': resource, 'usage': usage}
@periodic_task.periodic_task
def _expire_reservations(self, context):
QUOTAS.expire(context)
# NOTE(russellb) This method can be removed in 3.0 of this API. It is
# deprecated in favor of the method in the base API.
def get_backdoor_port(self, context):
return self.backdoor_port
@rpc_common.client_exceptions(exception.NoValidHost)
def select_hosts(self, context, request_spec, filter_properties):
"""Returns host(s) best suited for this request_spec
and filter_properties.
"""
hosts = self.driver.select_hosts(context, request_spec,
filter_properties)
return jsonutils.to_primitive(hosts)
@rpc_common.client_exceptions(exception.NoValidHost)
def select_destinations(self, context, request_spec, filter_properties):
"""Returns destinations(s) best suited for this request_spec and
filter_properties.
The result should be a list of dicts with 'host', 'nodename' and
'limits' as keys.
"""
dests = self.driver.select_destinations(context, request_spec,
filter_properties)
return jsonutils.to_primitive(dests)
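# Illustrative shape of the return value described above, e.g.:
#   [{'host': 'compute-1', 'nodename': 'compute-1', 'limits': {}}]
# (host names here are placeholders, not values from any real deployment).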
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/mpls/global/interface-attributes/interface/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters related to MPLS interfaces:
"""
__slots__ = ("_path_helper", "_extmethods", "__interface_id", "__mpls_enabled")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__interface_id = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-if:interface-id",
is_config=True,
)
self.__mpls_enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="mpls-enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"mpls",
"global",
"interface-attributes",
"interface",
"config",
]
def _get_interface_id(self):
"""
Getter method for interface_id, mapped from YANG variable /network_instances/network_instance/mpls/global/interface_attributes/interface/config/interface_id (oc-if:interface-id)
YANG Description: Identifier for the MPLS interface
"""
return self.__interface_id
def _set_interface_id(self, v, load=False):
"""
Setter method for interface_id, mapped from YANG variable /network_instances/network_instance/mpls/global/interface_attributes/interface/config/interface_id (oc-if:interface-id)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_id() directly.
YANG Description: Identifier for the MPLS interface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="interface-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-if:interface-id",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """interface_id must be of a type compatible with oc-if:interface-id""",
"defined-type": "oc-if:interface-id",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-if:interface-id', is_config=True)""",
}
)
self.__interface_id = t
if hasattr(self, "_set"):
self._set()
def _unset_interface_id(self):
self.__interface_id = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-if:interface-id",
is_config=True,
)
def _get_mpls_enabled(self):
"""
Getter method for mpls_enabled, mapped from YANG variable /network_instances/network_instance/mpls/global/interface_attributes/interface/config/mpls_enabled (boolean)
YANG Description: Enable MPLS forwarding on this interface
"""
return self.__mpls_enabled
def _set_mpls_enabled(self, v, load=False):
"""
Setter method for mpls_enabled, mapped from YANG variable /network_instances/network_instance/mpls/global/interface_attributes/interface/config/mpls_enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_mpls_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mpls_enabled() directly.
YANG Description: Enable MPLS forwarding on this interface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="mpls-enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """mpls_enabled must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="mpls-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
}
)
self.__mpls_enabled = t
if hasattr(self, "_set"):
self._set()
def _unset_mpls_enabled(self):
self.__mpls_enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="mpls-enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
interface_id = __builtin__.property(_get_interface_id, _set_interface_id)
mpls_enabled = __builtin__.property(_get_mpls_enabled, _set_mpls_enabled)
_pyangbind_elements = OrderedDict(
[("interface_id", interface_id), ("mpls_enabled", mpls_enabled)]
)
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/mpls/global/interface-attributes/interface/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters related to MPLS interfaces:
"""
__slots__ = ("_path_helper", "_extmethods", "__interface_id", "__mpls_enabled")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__interface_id = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-if:interface-id",
is_config=True,
)
self.__mpls_enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="mpls-enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"mpls",
"global",
"interface-attributes",
"interface",
"config",
]
def _get_interface_id(self):
"""
Getter method for interface_id, mapped from YANG variable /network_instances/network_instance/mpls/global/interface_attributes/interface/config/interface_id (oc-if:interface-id)
YANG Description: Identifier for the MPLS interface
"""
return self.__interface_id
def _set_interface_id(self, v, load=False):
"""
Setter method for interface_id, mapped from YANG variable /network_instances/network_instance/mpls/global/interface_attributes/interface/config/interface_id (oc-if:interface-id)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_id() directly.
YANG Description: Identifier for the MPLS interface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="interface-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-if:interface-id",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """interface_id must be of a type compatible with oc-if:interface-id""",
"defined-type": "oc-if:interface-id",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-if:interface-id', is_config=True)""",
}
)
self.__interface_id = t
if hasattr(self, "_set"):
self._set()
def _unset_interface_id(self):
self.__interface_id = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-if:interface-id",
is_config=True,
)
def _get_mpls_enabled(self):
"""
Getter method for mpls_enabled, mapped from YANG variable /network_instances/network_instance/mpls/global/interface_attributes/interface/config/mpls_enabled (boolean)
YANG Description: Enable MPLS forwarding on this interface
"""
return self.__mpls_enabled
def _set_mpls_enabled(self, v, load=False):
"""
Setter method for mpls_enabled, mapped from YANG variable /network_instances/network_instance/mpls/global/interface_attributes/interface/config/mpls_enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_mpls_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mpls_enabled() directly.
YANG Description: Enable MPLS forwarding on this interface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="mpls-enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """mpls_enabled must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="mpls-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
}
)
self.__mpls_enabled = t
if hasattr(self, "_set"):
self._set()
def _unset_mpls_enabled(self):
self.__mpls_enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="mpls-enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
interface_id = __builtin__.property(_get_interface_id, _set_interface_id)
mpls_enabled = __builtin__.property(_get_mpls_enabled, _set_mpls_enabled)
_pyangbind_elements = OrderedDict(
[("interface_id", interface_id), ("mpls_enabled", mpls_enabled)]
)
|
|
from __future__ import unicode_literals
from calendar import monthrange
import datetime
try:
from django.core.urlresolvers import reverse
except ImportError:
from django.urls import reverse
from django.utils.six.moves import xrange
from .event_factory import create_event, SetMeUp
class BiweeklyRepeatingChunkEventListViewTest(SetMeUp):
"""Test repeating 'chunk' events"""
def check_dates(self, event, valid_dates):
"""A DRY helper function."""
for year, dates in valid_dates.items():
for month, days in dates.items():
response = self.client.get(reverse(
'calendar:list', kwargs={'year': year, 'month': month}
))
self.clean_whitespace(response)
if days:
self.assertContains(response, event.title)
else:
self.assertNotContains(response, event.title)
for day in days:
self.assertContains(response, self.cal_str(day))
[self.assertNotContains(response, self.cal_str(day)) for
day in xrange(1, monthrange(int(year), int(month))[1] + 1)
if day not in days]
def test_biweekly_repeating_chunk_two_days_span_two_months(self):
"""
Test a biweekly repeating chunk that lasts two days and
spans 2 different months when it starts.
"""
event = create_event(
start_date=(2014, 3, 31),
end_date=(2014, 4, 1),
created_by=self.user,
title="Ruby",
description="'chunk' event that lasts 2 days and repeats weekly.",
repeat="BIWEEKLY"
)
valid_dates = {
'2014': {
'03': [31],
'04': [1, 14, 15, 28, 29],
'05': [12, 13, 26, 27],
'06': [9, 10, 23, 24],
'07': [7, 8, 21, 22],
'08': [4, 5, 18, 19],
'09': [1, 2, 15, 16, 29, 30],
'10': [13, 14, 27, 28],
'11': [10, 11, 24, 25],
'12': [8, 9, 22, 23]
},
'2015': {
'01': [5, 6, 19, 20],
'02': [2, 3, 16, 17],
'03': [2, 3, 16, 17, 30, 31],
'04': [13, 14, 27, 28],
}
}
self.check_dates(event, valid_dates)
def test_biweekly_repeating_chunk_six_days_span_two_months(self):
"""
Test a biweekly repeating chunk that lasts six days and
spans 2 different months when it starts.
"""
event = create_event(
start_date=(2014, 3, 28),
end_date=(2014, 4, 2),
created_by=self.user,
title="Fred",
description="'chunk' event that lasts 6 days and repeats weekly.",
repeat="BIWEEKLY"
)
valid_dates = {
'2014': {
'03': [28, 29, 30, 31],
'04': [1, 2, 11, 12, 13, 14, 15, 16, 25, 26, 27, 28, 29, 30],
'05': [9, 10, 11, 12, 13, 14, 23, 24, 25, 26, 27, 28],
'06': [6, 7, 8, 9, 10, 11, 20, 21, 22, 23, 24, 25],
'07': [4, 5, 6, 7, 8, 9, 18, 19, 20, 21, 22, 23],
'08': [1, 2, 3, 4, 5, 6, 15, 16, 17, 18, 19, 20, 29, 30, 31],
'09': [1, 2, 3, 12, 13, 14, 15, 16, 17, 26, 27, 28, 29, 30],
'10': [1, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29],
'11': [7, 8, 9, 10, 11, 12, 21, 22, 23, 24, 25, 26],
'12': [5, 6, 7, 8, 9, 10, 19, 20, 21, 22, 23, 24],
},
'2015': {
'01': [2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 30, 31],
'02': [1, 2, 3, 4, 13, 14, 15, 16, 17, 18, 27, 28],
'03': [1, 2, 3, 4, 13, 14, 15, 16, 17, 18, 27, 28, 29, 30, 31],
'04': [1, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29],
}
}
self.check_dates(event, valid_dates)
def test_biweekly_repeating_chunk_three_days_span_one_month(self):
"""
Test a biweekly repeating chunk that lasts three days with
start_date and end_date in the same month, and the event scheduled
early in the month.
"""
event = create_event(
start_date=(2014, 2, 3),
end_date=(2014, 2, 5),
created_by=self.user,
title="Maria",
description="chunk event that lasts 3 days and repeats biweekly.",
repeat="BIWEEKLY"
)
valid_dates = {
'2014': {
'02': [3, 4, 5, 17, 18, 19],
'03': [3, 4, 5, 17, 18, 19, 31],
'04': [1, 2, 14, 15, 16, 28, 29, 30],
'05': [12, 13, 14, 26, 27, 28],
'06': [9, 10, 11, 23, 24, 25],
'07': [7, 8, 9, 21, 22, 23],
'08': [4, 5, 6, 18, 19, 20],
'09': [1, 2, 3, 15, 16, 17, 29, 30],
'10': [1, 13, 14, 15, 27, 28, 29],
'11': [10, 11, 12, 24, 25, 26],
'12': [8, 9, 10, 22, 23, 24],
},
'2015': {
'01': [5, 6, 7, 19, 20, 21],
'02': [2, 3, 4, 16, 17, 18],
'03': [2, 3, 4, 16, 17, 18, 30, 31],
}
}
self.check_dates(event, valid_dates)
def test_biweekly_repeating_chunk_three_days_span_one_month2(self):
"""
Test a biweekly repeating chunk that lasts three days with
start_date and end_date in the same month, and the event scheduled
in the middle of the month.
"""
event = create_event(
start_date=(2014, 3, 14),
end_date=(2014, 3, 16),
created_by=self.user,
title="Scar",
description="chunk event that lasts 3 days and repeats biweekly.",
repeat="BIWEEKLY"
)
valid_dates = {
'2014': {
'03': [14, 15, 16, 28, 29, 30],
'04': [11, 12, 13, 25, 26, 27],
'05': [9, 10, 11, 23, 24, 25],
'06': [6, 7, 8, 20, 21, 22],
'07': [4, 5, 6, 18, 19, 20],
'08': [1, 2, 3, 15, 16, 17, 29, 30, 31],
'09': [12, 13, 14, 26, 27, 28],
'10': [10, 11, 12, 24, 25, 26],
'11': [7, 8, 9, 21, 22, 23],
'12': [5, 6, 7, 19, 20, 21],
},
'2015': {
'01': [2, 3, 4, 16, 17, 18, 30, 31],
'02': [1, 13, 14, 15, 27, 28],
'03': [1, 13, 14, 15, 27, 28, 29],
}
}
self.check_dates(event, valid_dates)
def test_biweekly_repeating_chunk_with_end_repeat(self):
"""
Test that a biweekly repeating chunk honors end_repeat.
"""
event = create_event(
start_date=(2014, 3, 31),
end_date=(2014, 4, 1),
created_by=self.user,
title="Chelsea",
description="I should end on end_repeat.",
repeat="BIWEEKLY",
end_repeat=datetime.date(2014, 6, 9)
)
valid_dates = {
'2014': {
'03': [31],
'04': [1, 14, 15, 28, 29],
'05': [12, 13, 26, 27],
'06': [9],
'07': [],
},
'2015': {'01': [], '03': [], '04': []}
}
self.check_dates(event, valid_dates)
|
|
import functools
from binascii import hexlify
from typing import List, Tuple, Sequence
import ledger.merkle_tree as merkle_tree
from ledger.hash_stores.hash_store import HashStore
from ledger.hash_stores.memory_hash_store import MemoryHashStore
from ledger.tree_hasher import TreeHasher
from ledger.util import ConsistencyVerificationFailed
from ledger.util import count_bits_set, lowest_bit_set
class CompactMerkleTree(merkle_tree.MerkleTree):
"""Compact representation of a Merkle Tree that permits only extension.
Attributes:
tree_size: Number of leaves in this tree.
hashes: That of the full (i.e. size 2^k) subtrees that form this tree,
sorted in descending order of size.
"""
def __init__(self, hasher=TreeHasher(), tree_size=0, hashes=(),
hashStore=None):
# These two queues should be written to two simple position-accessible
# arrays (files, database tables, etc.)
self.__hashStore = hashStore or MemoryHashStore() # type: HashStore
self.__hasher = hasher
self._update(tree_size, hashes)
@property
def hashStore(self):
return self.__hashStore
def _update(self, tree_size: int, hashes: Sequence[bytes]):
bits_set = count_bits_set(tree_size)
num_hashes = len(hashes)
if num_hashes != bits_set:
msgfmt = "number of hashes != bits set in tree_size: %s vs %s"
raise ValueError(msgfmt % (num_hashes, bits_set))
self.__tree_size = tree_size
self.__hashes = tuple(hashes)
# height of the smallest subtree, or 0 if none exists (empty tree)
self.__mintree_height = lowest_bit_set(tree_size)
self.__root_hash = None
def load(self, other: merkle_tree.MerkleTree):
"""Load this tree from a dumb data object for serialisation.
The object must have attributes tree_size:int and hashes:list.
"""
self._update(other.tree_size, other.hashes)
def save(self, other: merkle_tree.MerkleTree):
"""Save this tree into a dumb data object for serialisation.
The object must have attributes tree_size:int and hashes:list.
"""
other.__tree_size = self.__tree_size
other.__hashes = self.__hashes
def __copy__(self):
return self.__class__(self.__hasher, self.__tree_size, self.__hashes)
def __repr__(self):
return "%s(%r, %r, %r)" % (
self.__class__.__name__,
self.__hasher, self.__tree_size, self.__hashes)
def __len__(self):
return self.__tree_size
@property
def tree_size(self) -> int:
return self.__tree_size
@property
def hashes(self) -> Tuple[bytes]:
return self.__hashes
@property
def root_hash(self):
"""Returns the root hash of this tree. (Only re-computed on change.)"""
if self.__root_hash is None:
self.__root_hash = (
self.__hasher._hash_fold(self.__hashes)
if self.__hashes else self.__hasher.hash_empty())
return self.__root_hash
@property
def root_hash_hex(self):
"""Returns the root hash of this tree. (Only re-computed on change.)"""
return hexlify(self.root_hash)
def _push_subtree(self, leaves: List[bytes]):
"""Extend with a full subtree <= the current minimum subtree.
The leaves must form a full subtree, i.e. of size 2^k for some k. If
there is a minimum subtree (i.e. __mintree_height > 0), then the input
subtree must be smaller or of equal size to the minimum subtree.
If the subtree is smaller (or no such minimum exists, in an empty
tree), we can simply append its hash to self.hashes, since this
maintains the invariant property of being sorted in descending
size order.
If the subtree is of equal size, we are in a similar situation to an
addition carry. We handle it by combining the two subtrees into a
larger subtree (of size 2^(k+1)), then recursively trying to add
this new subtree back into the tree.
Any collection of leaves larger than the minimum subtree must undergo
additional partition to conform with the structure of a merkle tree,
which is a more complex operation, performed by extend().
"""
size = len(leaves)
if count_bits_set(size) != 1:
raise ValueError("invalid subtree with size != 2^k: %s" % size)
# in general we want the highest bit, but here it's also the lowest bit
# so just reuse that code instead of writing a new highest_bit_set()
subtree_h, mintree_h = lowest_bit_set(size), self.__mintree_height
if mintree_h > 0 and subtree_h > mintree_h:
raise ValueError("subtree %s > current smallest subtree %s" % (
subtree_h, mintree_h))
root_hash, hashes = self.__hasher._hash_full(leaves, 0, size)
if self.hashStore:
for h in hashes:
self.hashStore.writeLeaf(h)
new_node_hashes = self.__push_subtree_hash(subtree_h, root_hash)
nodes = [(self.tree_size, height, h) for h, height in new_node_hashes]
if self.hashStore:
for node in nodes:
self.hashStore.writeNode(node)
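# Worked example of the carry behaviour (illustrative): with tree_size == 6
# the stored hashes cover full subtrees of sizes (4, 2). Pushing another full
# subtree of size 2 collides with the existing size-2 subtree, so the two are
# folded into a size-4 hash, which collides again with the size-4 subtree and
# folds once more, leaving a single size-8 subtree (tree_size == 8, one hash).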
def __push_subtree_hash(self, subtree_h: int, sub_hash: bytes):
size, mintree_h = 1 << (subtree_h - 1), self.__mintree_height
if subtree_h < mintree_h or mintree_h == 0:
self._update(self.tree_size + size, self.hashes + (sub_hash,))
return []
else:
assert subtree_h == mintree_h
# addition carry - rewind the tree and re-try with bigger subtree
prev_hash = self.hashes[-1]
self._update(self.tree_size - size, self.hashes[:-1])
new_mintree_h = self.__mintree_height
assert mintree_h < new_mintree_h or new_mintree_h == 0
next_hash = self.__hasher.hash_children(prev_hash, sub_hash)
return [(next_hash, subtree_h)] + self.__push_subtree_hash(
subtree_h + 1, next_hash)
def append(self, new_leaf: bytes) -> List[bytes]:
"""Append a new leaf onto the end of this tree and return the
audit path"""
auditPath = list(reversed(self.__hashes))
self._push_subtree([new_leaf])
return auditPath
def extend(self, new_leaves: List[bytes]):
"""Extend this tree with new_leaves on the end.
The algorithm works by using _push_subtree() as a primitive, calling
it with the maximum number of allowed leaves until we can add the
remaining leaves as a valid entire (non-full) subtree in one go.
"""
size = len(new_leaves)
final_size = self.tree_size + size
idx = 0
while True:
# keep pushing subtrees until mintree_size > remaining
max_h = self.__mintree_height
max_size = 1 << (max_h - 1) if max_h > 0 else 0
if max_h > 0 and size - idx >= max_size:
self._push_subtree(new_leaves[idx:idx + max_size])
idx += max_size
else:
break
# fill in rest of tree in one go, now that we can
if idx < size:
root_hash, hashes = self.__hasher._hash_full(new_leaves, idx, size)
self._update(final_size, self.hashes + hashes)
assert self.tree_size == final_size
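# Illustrative walk-through (a sketch): extending a tree of size 6 (full
# subtrees of 4 + 2) with 5 new leaves pushes one full subtree of 2 leaves
# (tree grows to 8), after which the 3 remaining leaves are smaller than the
# new minimum subtree and are hashed in one go by _hash_full, giving
# final_size == 11.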
def extended(self, new_leaves: List[bytes]):
"""Returns a new tree equal to this tree extended with new_leaves."""
new_tree = self.__copy__()
new_tree.extend(new_leaves)
return new_tree
def merkle_tree_hash_hex(self, start: int, end: int):
mth = self.merkle_tree_hash(start, end)
return hexlify(mth)
@functools.lru_cache(maxsize=256)
def merkle_tree_hash(self, start: int, end: int):
if not end > start:
raise ValueError("end must be greater than start")
if (end - start) == 1:
return self.hashStore.readLeaf(end)
leafs, nodes = self.hashStore.getPath(end, start)
leafHash = self.hashStore.readLeaf(end)
hashes = [leafHash, ]
for h in leafs:
hashes.append(self.hashStore.readLeaf(h))
for h in nodes:
hashes.append(self.hashStore.readNode(h))
foldedHash = self.__hasher._hash_fold(hashes[::-1])
return foldedHash
def consistency_proof(self, first: int, second: int):
return [self.merkle_tree_hash(a, b) for a, b in
self._subproof(first, 0, second, True)]
def inclusion_proof(self, start, end):
return [self.merkle_tree_hash(a, b)
for a, b in self._path(start, 0, end)]
def _subproof(self, m, start_n: int, end_n: int, b: int):
n = end_n - start_n
if m == n:
if b:
return []
else:
return [(start_n, end_n)]
else:
k = 1 << (len(bin(n - 1)) - 3)
if m <= k:
return self._subproof(m, start_n, start_n + k, b) + [
(start_n + k, end_n)]
else:
return self._subproof(m - k, start_n + k, end_n, False) + [
(start_n, start_n + k)]
def _path(self, m, start_n: int, end_n: int):
n = end_n - start_n
if n == 1:
return []
else:
# `k` is the largest power of 2 less than `n`
k = 1 << (len(bin(n - 1)) - 3)
if m < k:
return self._path(m, start_n, start_n + k) + [
(start_n + k, end_n)]
else:
return self._path(m - k, start_n + k, end_n) + [
(start_n, start_n + k)]
def get_tree_head(self, seq: int = None):
if seq is None:
seq = self.tree_size
if seq > self.tree_size:
raise IndexError
return {
'tree_size': seq,
'sha256_root_hash': self.merkle_tree_hash(0, seq) if seq else None,
}
@property
def leafCount(self) -> int:
return self.hashStore.leafCount
@property
def nodeCount(self) -> int:
return self.hashStore.nodeCount
@staticmethod
def get_expected_node_count(leaf_count):
"""
The number of stored internal nodes is sum(leaf_count // 2**i for i >= 1),
i.e. the count of node hashes written while the leaves were appended.
"""
count = 0
while leaf_count > 1:
leaf_count //= 2
count += leaf_count
return count
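# Illustrative check: for leaf_count == 6 the loop yields 6 // 2 + 3 // 2
# == 3 + 1 == 4, matching the four internal nodes written while appending
# six leaves one by one (one at leaf 2, two at leaf 4, one at leaf 6).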
def verify_consistency(self, expected_leaf_count) -> bool:
"""
Check that the tree has same leaf count as expected and the
number of nodes are also as expected
"""
if expected_leaf_count != self.leafCount:
raise ConsistencyVerificationFailed()
if self.get_expected_node_count(self.leafCount) != self.nodeCount:
raise ConsistencyVerificationFailed()
return True
def reset(self):
self.hashStore.reset()
self._update(tree_size=0,
hashes=())
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Deprecated! Use WebHdfs instead.
Only some utils and Hdfs are still used.
Interfaces for Hadoop filesystem access via the HADOOP-4707 Thrift APIs.
"""
import errno
import logging
import os
import posixpath
import random
import stat as statconsts
import subprocess
import urlparse
import threading
from thrift.transport import TTransport
from django.utils.encoding import smart_str, force_unicode
from django.utils.translation import ugettext as _
from desktop.lib import thrift_util, i18n
from desktop.lib.conf import validate_port
from hadoop.api.hdfs import Namenode, Datanode
from hadoop.api.hdfs.constants import QUOTA_DONT_SET, QUOTA_RESET
from hadoop.api.common.ttypes import RequestContext, IOException
import hadoop.conf
from hadoop.fs import normpath, SEEK_SET, SEEK_CUR, SEEK_END
from hadoop.fs.exceptions import PermissionDeniedException
from useradmin.conf import HOME_DIR_PERMISSIONS
LOG = logging.getLogger(__name__)
DEFAULT_USER = "webui"
# The number of bytes to read if not specified
DEFAULT_READ_SIZE = 1024*1024 # 1MB
# The buffer size of the pipe to hdfs -put during upload
WRITE_BUFFER_SIZE = 128*1024 # 128K
# Class that we translate into PermissionDeniedException
HADOOP_ACCESSCONTROLEXCEPTION = "org.apache.hadoop.security.AccessControlException"
# Timeout for thrift calls to NameNode
NN_THRIFT_TIMEOUT = 15
DN_THRIFT_TIMEOUT = 3
# Encoding used by HDFS namespace
HDFS_ENCODING = 'utf-8'
def encode_fs_path(path):
"""encode_fs_path(path) -> byte string in utf8"""
return smart_str(path, HDFS_ENCODING, errors='strict')
def decode_fs_path(path):
"""decode_fs_path(bytestring) -> unicode path"""
return force_unicode(path, HDFS_ENCODING, errors='strict')
def test_fs_configuration(fs_config, hadoop_bin_conf):
"""Test FS configuration. Returns list of (confvar, error)."""
TEST_FILE = '/tmp/.hue_config_test.%s' % (random.randint(0, 9999999999))
res = [ ]
res.extend(validate_port(fs_config.NN_THRIFT_PORT))
res.extend(validate_port(fs_config.NN_HDFS_PORT))
if res:
return res
# Check thrift plugin
try:
fs = HadoopFileSystem.from_config(
fs_config, hadoop_bin_path=hadoop_bin_conf.get())
fs.setuser(fs.superuser)
ls = fs.listdir('/')
except TTransport.TTransportException:
msg = 'Failed to contact Namenode plugin at %s:%s.' % \
(fs_config.NN_HOST.get(), fs_config.NN_THRIFT_PORT.get())
LOG.exception(msg)
res.append((fs_config, msg))
return res
except (IOError, IOException):
msg = 'Failed to see HDFS root directory at %s. Please check HDFS configuration.' % (fs.uri,)
LOG.exception(msg)
res.append((fs_config, msg))
return res
if 'tmp' not in ls:
return res
# Check nn port (via upload)
try:
w_file = fs.open(TEST_FILE, 'w')
except OSError, ex:
msg = 'Failed to execute Hadoop (%s)' % (hadoop_bin_conf.get(),)
LOG.exception(msg)
res.append((hadoop_bin_conf, msg))
return res
try:
try:
w_file.write('hello world')
w_file.close()
except IOError:
msg = 'Failed to upload files using %s' % (fs.uri,)
LOG.exception(msg)
res.append((fs_config.NN_HDFS_PORT, msg))
return res
# Check dn plugin (via read)
try:
r_file = fs.open(TEST_FILE, 'r')
r_file.read()
except Exception:
msg = 'Failed to read file. Are all datanodes configured with the HUE plugin?'
LOG.exception(msg)
res.append((fs_config, msg))
finally:
# Cleanup. Ignore if file not found.
try:
if fs.exists(TEST_FILE):
fs.remove(TEST_FILE)
except Exception, ex:
LOG.error('Failed to cleanup test file "%s:%s": %s' % (fs.uri, TEST_FILE, ex))
return res
def _coerce_exceptions(function):
"""
Decorator that causes exceptions thrown by the decorated function
to be coerced into generic exceptions from the hadoop.fs.exceptions
module.
"""
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except IOException, e:
e.msg = force_unicode(e.msg, errors='replace')
e.stack = force_unicode(e.stack, errors='replace')
LOG.exception("Exception in Hadoop FS call " + function.__name__)
if e.clazz == HADOOP_ACCESSCONTROLEXCEPTION:
raise PermissionDeniedException(e.msg, e)
else:
raise
return wrapper
class Hdfs(object):
"""
An abstract HDFS proxy
"""
@staticmethod
def basename(path):
return posixpath.basename(path)
@staticmethod
def dirname(path):
return posixpath.dirname(path)
@staticmethod
def split(path):
return posixpath.split(path)
@staticmethod
def join(first, *comp_list):
return posixpath.join(first, *comp_list)
@staticmethod
def abspath(path):
return posixpath.abspath(path)
@staticmethod
def normpath(path):
res = posixpath.normpath(path)
# Python normpath() doesn't eliminate leading double slashes
if res.startswith('//'):
return res[1:]
return res
@staticmethod
def urlsplit(url):
"""
Take an HDFS path (hdfs://nn:port/foo) or just (/foo) and split it into
the standard urlsplit's 5-tuple.
"""
i = url.find('://')
if i == -1:
# Not found. Treat the entire argument as an HDFS path
return ('hdfs', '', normpath(url), '', '')
schema = url[:i]
if schema not in ('hdfs', 'viewfs'):
# Default to standard for non-hdfs
return urlparse.urlsplit(url)
url = url[i+3:]
i = url.find('/')
if i == -1:
# Everything is netloc. Assume path is root.
return (schema, url, '/', '', '')
netloc = url[:i]
path = url[i:]
return (schema, netloc, normpath(path), '', '')
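# Illustrative results (a sketch):
#   Hdfs.urlsplit('hdfs://nn:8020/foo/bar') -> ('hdfs', 'nn:8020', '/foo/bar', '', '')
#   Hdfs.urlsplit('/foo/bar')               -> ('hdfs', '', '/foo/bar', '', '')
#   Hdfs.urlsplit('hdfs://nn')              -> ('hdfs', 'nn', '/', '', '')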
def listdir_recursive(self, path, glob=None):
"""
listdir_recursive(path, glob=None) -> [ entry names ]
Get directory entry names without stats, recursively.
"""
paths = [path]
while paths:
path = paths.pop()
if self.isdir(path):
hdfs_paths = self.listdir_stats(path, glob)
paths[:0] = [x.path for x in hdfs_paths]
yield path
def create_home_dir(self, home_path=None):
if home_path is None:
home_path = self.get_home_dir()
mode = int(HOME_DIR_PERMISSIONS.get(), 8)
if not self.exists(home_path):
user = self.user
try:
try:
self.setuser(self.superuser)
self.mkdir(home_path)
self.chmod(home_path, mode)
self.chown(home_path, user, user)
except IOError:
msg = 'Failed to create home dir ("%s") as superuser %s' % (home_path, self.superuser)
LOG.exception(msg)
raise
finally:
self.setuser(user)
def copyFromLocal(self, local_src, remote_dst, mode=0755):
remote_dst = remote_dst.endswith(posixpath.sep) and remote_dst[:-1] or remote_dst
local_src = local_src.endswith(posixpath.sep) and local_src[:-1] or local_src
if os.path.isdir(local_src):
self._copy_dir(local_src, remote_dst, mode)
else:
(basename, filename) = os.path.split(local_src)
self._copy_file(local_src, self.isdir(remote_dst) and self.join(remote_dst, filename) or remote_dst)
def _copy_dir(self, local_dir, remote_dir, mode=0755):
self.mkdir(remote_dir, mode=mode)
for f in os.listdir(local_dir):
local_src = os.path.join(local_dir, f)
remote_dst = self.join(remote_dir, f)
if os.path.isdir(local_src):
self._copy_dir(local_src, remote_dst, mode)
else:
self._copy_file(local_src, remote_dst)
def _copy_file(self, local_src, remote_dst, chunk_size=1024 * 1024 * 64):
if os.path.isfile(local_src):
if self.exists(remote_dst):
LOG.info(_('%(remote_dst)s already exists. Skipping.') % {'remote_dst': remote_dst})
return
else:
LOG.info(_('%(remote_dst)s does not exist. Trying to copy.') % {'remote_dst': remote_dst})
src = file(local_src)
try:
try:
self.create(remote_dst, permission=0755)
chunk = src.read(chunk_size)
while chunk:
self.append(remote_dst, chunk)
chunk = src.read(chunk_size)
LOG.info(_('Copied %s -> %s.') % (local_src, remote_dst))
except:
LOG.exception(_('Copying %s -> %s failed.') % (local_src, remote_dst))
raise
finally:
src.close()
else:
LOG.info(_('Skipping %s (not a file).') % local_src)
@_coerce_exceptions
def mktemp(self, subdir='', prefix='tmp', basedir=None):
"""
mktemp(prefix) -> <temp_dir or basedir>/<subdir>/prefix.<rand>
Return a unique temporary filename with prefix in the cluster's temp dir.
"""
RANDOM_BITS = 64
base = self.join(basedir or self._temp_dir, subdir)
if not self.isdir(base):
self.mkdir(base)
while True:
name = prefix + '.' + str(random.getrandbits(RANDOM_BITS))
candidate = self.join(base, name)
if not self.exists(candidate):
return candidate
def mkswap(self, filename, subdir='', suffix='swp', basedir=None):
"""
mkswap(filename, suffix) -> <temp_dir or basedir>/<subdir>/filename.<suffix>
Return the swap-file path for `filename` with the given suffix in the cluster's temp dir.
"""
RANDOM_BITS = 64
base = self.join(basedir or self._temp_dir, subdir)
if not self.isdir(base):
self.mkdir(base)
candidate = self.join(base, "%s.%s" % (filename, suffix))
return candidate
def exists(self):
raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'exists'})
def do_as_user(self):
raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'do_as_user'})
def create(self):
raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'create'})
def append(self):
raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'append'})
def mkdir(self):
raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'mkdir'})
def isdir(self):
raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'isdir'})
def listdir_stats(self):
raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'listdir_stats'})
"""
Deprecated! Use WebHdfs instead
"""
class HadoopFileSystem(Hdfs):
"""
Implementation of Filesystem APIs through Thrift to a Hadoop cluster.
"""
def __init__(self, host, thrift_port, hdfs_port=8020,
nn_kerberos_principal="hdfs",
dn_kerberos_principal="hdfs",
security_enabled=False,
hadoop_bin_path="hadoop",
temp_dir='/tmp'):
"""
@param host hostname or IP of the namenode
@param thrift_port port on which the Thrift plugin is listening
@param hdfs_port port on which NameNode IPC is listening
@param hadoop_bin_path path to find the hadoop wrapper script on the
installed system - default is fine if it is in
the user's PATH env
@param temp_dir Temporary directory, for mktemp()
"""
self.host = host
self.thrift_port = thrift_port
self.hdfs_port = hdfs_port
self.security_enabled = security_enabled
self.nn_kerberos_principal = nn_kerberos_principal
self.dn_kerberos_principal = dn_kerberos_principal
self.hadoop_bin_path = hadoop_bin_path
self._resolve_hadoop_path()
self.security_enabled = security_enabled
self._temp_dir = temp_dir
self.nn_client = thrift_util.get_client(
Namenode.Client, host, thrift_port,
service_name="HDFS Namenode HUE Plugin",
use_sasl=security_enabled,
kerberos_principal=nn_kerberos_principal,
timeout_seconds=NN_THRIFT_TIMEOUT)
# The file systems are cached globally. We store
# user information in a thread-local variable so that
# thread-safety is preserved.
self.thread_local = threading.local()
self.setuser(DEFAULT_USER)
LOG.debug("Initialized HadoopFS: %s:%d (%s)", host, thrift_port, hadoop_bin_path)
@classmethod
def from_config(cls, fs_config, hadoop_bin_path="hadoop"):
return cls(host=fs_config.NN_HOST.get(),
thrift_port=fs_config.NN_THRIFT_PORT.get(),
hdfs_port=fs_config.NN_HDFS_PORT.get(),
security_enabled=fs_config.SECURITY_ENABLED.get(),
nn_kerberos_principal=fs_config.NN_KERBEROS_PRINCIPAL.get(),
dn_kerberos_principal=fs_config.DN_KERBEROS_PRINCIPAL.get(),
hadoop_bin_path=hadoop_bin_path)
def _get_hdfs_base(self):
return "hdfs://%s:%d" % (self.host, self.hdfs_port) # TODO(todd) fetch the port from the NN thrift
def _resolve_hadoop_path(self):
"""The hadoop_bin_path configuration may be a non-absolute path, in which case
it's checked against $PATH.
If the hadoop binary can't be found anywhere, raises an Exception.
"""
for path_dir in os.getenv("PATH", "").split(os.pathsep):
path = os.path.join(path_dir, self.hadoop_bin_path)
if os.path.exists(path):
self.hadoop_bin_path = os.path.abspath(path)
return
raise OSError(errno.ENOENT, "Hadoop binary (%s) does not exist." % (self.hadoop_bin_path,))
@property
def uri(self):
return self._get_hdfs_base()
@property
def superuser(self):
"""
Retrieves the user that Hadoop considers as
"superuser" by looking at ownership of /.
This is slightly inaccurate.
"""
return self.stats("/")["user"]
def setuser(self, user):
# Hadoop determines the groups the user belongs to on the server side.
self.thread_local.request_context = RequestContext()
if not self.request_context.confOptions:
self.request_context.confOptions = {}
self.thread_local.request_context.confOptions['effective_user'] = user
self.thread_local.user = user
@property
def user(self):
return self.thread_local.user
@property
def groups(self):
return self.thread_local.groups
@property
def request_context(self):
return self.thread_local.request_context
@_coerce_exceptions
def open(self, path, mode="r", *args, **kwargs):
if mode == "w":
return FileUpload(self, path, mode, *args, **kwargs)
return File(self, path, mode, *args, **kwargs)
@_coerce_exceptions
def remove(self, path):
path = encode_fs_path(path)
stat = self._hadoop_stat(path)
if not stat:
raise IOError(errno.ENOENT, "File not found: %s" % path)
if stat.isDir:
raise IOError(errno.EISDIR, "Is a directory: %s" % path)
success = self.nn_client.unlink(
self.request_context, normpath(path), recursive=False)
if not success:
raise IOError("Unlink failed")
@_coerce_exceptions
def mkdir(self, path, mode=0755):
# TODO(todd) there should be a mkdir that isn't mkdirHIER
# (this is mkdir -p I think)
path = encode_fs_path(path)
success = self.nn_client.mkdirhier(self.request_context, normpath(path), mode)
if not success:
raise IOError("mkdir failed")
def _rmdir(self, path, recursive=False):
path = encode_fs_path(path)
stat = self._hadoop_stat(path)
if not stat:
raise IOError(errno.ENOENT, "Directory not found: %s" % (path,))
if not stat.isDir:
raise IOError(errno.ENOTDIR, "Is not a directory: %s" % (path,))
success = self.nn_client.unlink(
self.request_context, normpath(path), recursive=recursive)
if not success:
raise IOError("Unlink failed")
@_coerce_exceptions
def rmdir(self, path):
return self._rmdir(path)
@_coerce_exceptions
def rmtree(self, path):
return self._rmdir(path, True)
@_coerce_exceptions
def listdir(self, path):
path = encode_fs_path(path)
stats = self.nn_client.ls(self.request_context, normpath(path))
return [self.basename(decode_fs_path(stat.path)) for stat in stats]
@_coerce_exceptions
def listdir_stats(self, path):
path = encode_fs_path(path)
stats = self.nn_client.ls(self.request_context, normpath(path))
return [self._unpack_stat(s) for s in stats]
@_coerce_exceptions
def get_content_summaries(self, paths):
paths = [ normpath(encode_fs_path(path)) for path in paths ]
summaries = self.nn_client.multiGetContentSummary(self.request_context, paths)
def _fix_summary(summary):
summary.path = decode_fs_path(summary.path)
return summary
return [_fix_summary(s) for s in summaries]
@_coerce_exceptions
def rename(self, old, new):
old = encode_fs_path(old)
new = encode_fs_path(new)
success = self.nn_client.rename(
self.request_context, normpath(old), normpath(new))
if not success: #TODO(todd) these functions should just throw if failed
raise IOError("Rename failed")
@_coerce_exceptions
def rename_star(self, old_dir, new_dir):
"""Equivalent to `mv old_dir/* new"""
if not self.isdir(old_dir):
raise IOError(errno.ENOTDIR, "'%s' is not a directory" % (old_dir,))
if not self.exists(new_dir):
self.mkdir(new_dir)
elif not self.isdir(new_dir):
raise IOError(errno.ENOTDIR, "'%s' is not a directory" % (new_dir,))
ls = self.listdir(old_dir)
for dirent in ls:
self.rename(HadoopFileSystem.join(old_dir, dirent),
HadoopFileSystem.join(new_dir, dirent))
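# Illustrative example (hypothetical paths): rename_star("/user/todd/staging",
# "/user/todd/final") moves every direct child of /user/todd/staging into
# /user/todd/final, creating the destination directory first if it does not
# already exist, and raising ENOTDIR if either argument is not a directory.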
@_coerce_exceptions
def exists(self, path):
stat = self._hadoop_stat(path)
return stat is not None
@_coerce_exceptions
def isfile(self, path):
stat = self._hadoop_stat(path)
if stat is None:
return False
return not stat.isDir
@_coerce_exceptions
def isdir(self, path):
stat = self._hadoop_stat(path)
if stat is None:
return False
return stat.isDir
@_coerce_exceptions
def stats(self, path, raise_on_fnf=True):
stat = self._hadoop_stat(path)
if not stat:
if raise_on_fnf:
raise IOError(errno.ENOENT, "File %s not found" % (path,))
else:
return None
ret = self._unpack_stat(stat)
return ret
@_coerce_exceptions
def chmod(self, path, mode):
path = encode_fs_path(path)
self.nn_client.chmod(self.request_context, normpath(path), mode)
@_coerce_exceptions
def chown(self, path, user, group):
path = encode_fs_path(path)
self.nn_client.chown(self.request_context, normpath(path), user, group)
@_coerce_exceptions
def get_namenode_info(self):
(capacity, used, available) = self.nn_client.df(self.request_context)
return dict(
usage=dict(capacity_bytes=capacity,
used_bytes=used,
available_bytes=available),
)
@_coerce_exceptions
def _get_blocks(self, path, offset, length):
"""
Get block locations from the Name Node. Returns an array of Block
instances that might look like:
[ Block(path='/user/todd/motd', genStamp=1001, blockId=5564389078175231298,
nodes=[DatanodeInfo(xceiverCount=1, capacity=37265149952, name='127.0.0.1:50010',
thriftPort=53417, state=1, remaining=18987925504, host='127.0.0.1',
storageID='DS-1238582576-127.0.1.1-50010-1240968238474', dfsUsed=36864)], numBytes=424)]
"""
path = encode_fs_path(path)
blocks = self.nn_client.getBlocks(self.request_context, normpath(path), offset, length)
def _fix_block(blk):
blk.path = decode_fs_path(blk.path)
return blk
return [_fix_block(blk) for blk in blocks]
def _hadoop_stat(self, path):
"""Returns None if file does not exist."""
path = encode_fs_path(path)
try:
stat = self.nn_client.stat(self.request_context, normpath(path))
stat.path = decode_fs_path(stat.path)
return stat
except IOException, ioe:
if ioe.clazz == 'java.io.FileNotFoundException':
return None
raise
@_coerce_exceptions
def _read_block(self, block, offset, len):
"""
Reads a chunk of data from the given block from the first available
datanode that serves it.
@param block a thrift Block object
@param offset offset from the beginning of the block (not file)
@param len the number of bytes to read
"""
errs = []
unipath = block.path
block.path = encode_fs_path(block.path)
try:
for node in block.nodes:
dn_conn = self._connect_dn(node)
try:
try:
data = dn_conn.readBlock(self.request_context, block, offset, len)
return data.data
except Exception, e:
errs.append(e)
finally:
dn_conn.close()
finally:
block.path = unipath
raise IOError("Could not read block %s from any replicas: %s" % (block, repr(errs)))
@_coerce_exceptions
def set_diskspace_quota(self, path, size):
"""
Set the diskspace quota of a given path.
@param path The path to the given hdfs resource
@param size The amount of bytes that a given subtree of files can grow to.
"""
path = encode_fs_path(path)
if normpath(path) == '/':
raise ValueError('Cannot set quota for "/"')
if size < 0:
raise ValueError("The size quota should be 0 or positive or unset")
self.nn_client.setQuota(self.request_context, normpath(path), QUOTA_DONT_SET, size)
@_coerce_exceptions
def set_namespace_quota(self, path, num_files):
"""
Set the maximum number of files of a given path.
@param path The path to the given hdfs resource
@param num_files The amount of files that can exist within that subtree.
"""
path = encode_fs_path(path)
if normpath(path) == '/':
raise ValueError('Cannot set quota for "/"')
if num_files < 0:
raise ValueError("The number of files quota should be 0 or positive or unset")
self.nn_client.setQuota(self.request_context, normpath(path), num_files, QUOTA_DONT_SET)
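# Illustrative usage (hypothetical fs object and sizes):
# fs.set_diskspace_quota("/user/todd", 10 * 1024 ** 3) limits the subtree's
# space quota to 10 GB, and fs.set_namespace_quota("/user/todd", 1000) caps it
# at 1000 files; both calls reject "/" and negative values as shown above.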
@_coerce_exceptions
def clear_diskspace_quota(self, path):
"""
Remove the diskspace quota at a given path
"""
path = encode_fs_path(path)
self.nn_client.setQuota(self.request_context, normpath(path), QUOTA_DONT_SET, QUOTA_RESET)
@_coerce_exceptions
def clear_namespace_quota(self, path):
"""
Remove the namespace quota at a given path
"""
path = encode_fs_path(path)
self.nn_client.setQuota(self.request_context, normpath(path), QUOTA_RESET, QUOTA_DONT_SET)
@_coerce_exceptions
def get_diskspace_quota(self, path):
"""
Get the current space quota in bytes for disk space. None if it is unset
"""
path = encode_fs_path(path)
space_quota = self.nn_client.getContentSummary(self.request_context, normpath(path)).spaceQuota
if space_quota == QUOTA_RESET or space_quota == QUOTA_DONT_SET:
return None
else:
return space_quota
@_coerce_exceptions
def get_namespace_quota(self, path):
"""
Get the current quota in number of files. None if it is unset
"""
path = encode_fs_path(path)
file_count_quota = self.nn_client.getContentSummary(self.request_context, normpath(path)).quota
if file_count_quota == QUOTA_RESET or file_count_quota == QUOTA_DONT_SET:
return None
else:
return file_count_quota
@_coerce_exceptions
def get_usage_and_quota(self, path):
"""
Returns a dictionary with "file_count", "file_quota",
"space_used", and "space_quota". The quotas
may be None.
"""
path = encode_fs_path(path)
summary = self.nn_client.getContentSummary(self.request_context, normpath(path))
ret = dict()
ret["file_count"] = summary.fileCount
ret["space_used"] = summary.spaceConsumed
if summary.quota in (QUOTA_RESET, QUOTA_DONT_SET):
ret["file_quota"] = None
else:
ret["file_quota"] = summary.quota
if summary.spaceQuota in (QUOTA_RESET, QUOTA_DONT_SET):
ret["space_quota"] = None
else:
ret["space_quota"] = summary.spaceQuota
return ret
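# Illustrative return value (hypothetical numbers): for a directory with 42
# files, 1.5 GB consumed, a 1000-file namespace quota and no space quota,
# get_usage_and_quota would return
# {'file_count': 42, 'file_quota': 1000,
#  'space_used': 1610612736, 'space_quota': None}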
@_coerce_exceptions
def get_delegation_token(self):
# TODO(atm): The second argument here should really be the Hue kerberos
# principal, which doesn't exist yet. Todd's working on that.
return self.nn_client.getDelegationToken(self.request_context, 'hadoop')
def _connect_dn(self, node):
dn_conf = thrift_util.ConnectionConfig(
Datanode.Client,
node.host,
node.thriftPort,
"HDFS Datanode Thrift",
use_sasl=self.security_enabled,
kerberos_principal=self.dn_kerberos_principal,
timeout_seconds=DN_THRIFT_TIMEOUT)
service, protocol, transport = \
thrift_util.connect_to_thrift(dn_conf)
transport.open()
service.close = lambda: transport.close()
return service
@staticmethod
def _unpack_stat(stat):
"""Unpack a Thrift "Stat" object into a dictionary that looks like fs.stat"""
mode = stat.perms
if stat.isDir:
mode |= statconsts.S_IFDIR
else:
mode |= statconsts.S_IFREG
return {
'path': decode_fs_path(stat.path),
'size': stat.length,
'mtime': stat.mtime / 1000,
'mode': mode,
'user': stat.owner,
'group': stat.group,
'atime': stat.atime
}
@staticmethod
def urlsplit(url):
"""
Take an HDFS path (hdfs://nn:port/foo) or just (/foo) and split it into
the standard urlsplit's 5-tuple.
"""
return Hdfs.urlsplit(url)
def require_open(func):
"""
Decorator that ensures that the file instance isn't closed when the
function is run.
"""
def wrapper(self, *args, **kwargs):
if self.closed:
raise IOError(errno.EBADF, "I/O operation on closed file")
return func(self, *args, **kwargs)
return wrapper
class File(object):
""" Represents an open file on HDFS. """
def __init__(self, fs, path, mode="r", buffering=False):
self.fs = fs
self.path = normpath(path)
self.pos = 0
self.closed = False
self._block_cache = BlockCache()
if buffering or mode != "r":
raise Exception("buffering and write support not yet implemented") # NYI
stat = self._stat()
if stat is None:
raise IOError(errno.ENOENT, "No such file or directory: '%s'" % path)
if stat.isDir:
raise IOError(errno.EISDIR, "Is a directory: '%s'" % path)
#TODO(todd) somehow we need to check permissions here - maybe we need an access() call?
# Minimal context manager implementation.
# See: http://www.python.org/doc/2.5.2/lib/typecontextmanager.html
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return False # don't suppress exceptions.
@require_open
def seek(self, offset, whence=0):
""" Set the file pointer to the given spot. @see file.seek """
if whence == SEEK_SET:
self.pos = offset
elif whence == SEEK_CUR:
self.pos += offset
elif whence == SEEK_END:
self.pos = self._stat().length + offset
else:
raise IOError(errno.EINVAL, "Invalid argument to seek for whence")
@require_open
def tell(self):
return self.pos
def _get_block(self, pos):
"""Return the Block instance that contains the given offset"""
cached_block = self._block_cache.find_block(pos)
if cached_block:
return cached_block
# Cache "miss" - fetch ahead 500MB worth of blocks
new_blocks = self.fs._get_blocks(self.path, pos, 500*1024*1024)
self._block_cache.insert_new_blocks(new_blocks)
result = self._block_cache.find_block(pos)
if not result:
raise IOError("No block for position %d in file %s" % (pos, self.path))
return result
@require_open
def _read_in_block(self, length=DEFAULT_READ_SIZE):
"""
Tries to read up to length bytes, but will often read fewer, since
a single call will not read across a block boundary.
"""
end_pos = min(self.pos + length, self._stat().length)
# If we're at EOF, return empty string
if end_pos == self.pos:
return ""
block = self._get_block(self.pos)
assert _block_contains_pos(block, self.pos)
assert block.path == self.path
in_block_pos = self.pos - block.startOffset
assert in_block_pos >= 0
in_block_len = min(length, block.numBytes - in_block_pos)
result = self.fs._read_block(block, in_block_pos, in_block_len)
self.pos += len(result)
assert self.pos <= end_pos
return result
@require_open
def read(self, length=DEFAULT_READ_SIZE):
"""
Read the given number of bytes from this file.
If EOF has been reached, returns the empty string.
@param length the number of bytes wanted
"""
result = []
read_so_far = 0
while read_so_far < length:
this_data = self._read_in_block(length - read_so_far)
if this_data == "": # eof
break
read_so_far += len(this_data)
result.append(this_data)
return "".join(result)
def close(self):
self.closed = True
def _stat(self):
if not hasattr(self, "_stat_cache"):
self._stat_cache = self.fs._hadoop_stat(self.path)
return self._stat_cache
class FileUpload(object):
"""A write-only file that supports no seeking and cannot exist prior to
opening.
"""
def __init__(self, fs, path, mode="w", block_size=None):
self.fs = fs
self.closed = False
assert mode == "w"
extra_confs = []
if block_size:
extra_confs.append("-Ddfs.block.size=%d" % block_size)
self.subprocess_cmd = [self.fs.hadoop_bin_path,
"jar",
hadoop.conf.SUDO_SHELL_JAR.get(),
self.fs.user,
"-Dfs.default.name=" + self.fs.uri] + \
extra_confs + \
["-put", "-", encode_fs_path(path)]
self.subprocess_env = i18n.make_utf8_env()
if self.subprocess_env.has_key('HADOOP_CLASSPATH'):
self.subprocess_env['HADOOP_CLASSPATH'] += ':' + hadoop.conf.HADOOP_EXTRA_CLASSPATH_STRING.get()
else:
self.subprocess_env['HADOOP_CLASSPATH'] = hadoop.conf.HADOOP_EXTRA_CLASSPATH_STRING.get()
if hadoop.conf.HADOOP_CONF_DIR.get():
self.subprocess_env['HADOOP_CONF_DIR'] = hadoop.conf.HADOOP_CONF_DIR.get()
self.path = path
self.putter = subprocess.Popen(self.subprocess_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
env=self.subprocess_env,
bufsize=WRITE_BUFFER_SIZE)
@require_open
def write(self, data):
"""May raise IOError, particularly EPIPE"""
self.putter.stdin.write(data)
@require_open
def close(self):
try:
(stdout, stderr) = self.putter.communicate()
except IOError, ioe:
logging.debug("Saw IOError writing %r" % self.path, exc_info=1)
if ioe.errno == errno.EPIPE:
stdout, stderr = self.putter.communicate()
self.closed = True
if stderr:
LOG.warn("HDFS FileUpload (cmd='%s', env='%s') outputted stderr:\n%s" %
(repr(self.subprocess_cmd), repr(self.subprocess_env), stderr))
if stdout:
LOG.info("HDFS FileUpload (cmd='%s', env='%s') outputted stdout:\n%s" %
(repr(self.subprocess_cmd), repr(self.subprocess_env), stdout))
if self.putter.returncode != 0:
raise IOError("hdfs put returned bad code: %d\nstderr: %s" %
(self.putter.returncode, stderr))
LOG.info("Completed upload: %s" % repr(self.subprocess_cmd))
@require_open
def flush(self):
self.putter.stdin.flush()
def _block_contains_pos(block, pos):
return pos >= block.startOffset and pos < block.startOffset + block.numBytes
class BlockCache(object):
"""
A cache of block locations used by a single HDFS input file.
Essentially this keeps the blocks in sorted order and does
binary search to find the block that contains a given offset.
It also provides the ability to merge the response of a NN
getBlocks call into the cache.
"""
def __init__(self):
self.blocks = []
def find_block(self, pos, _min_idx=0, _max_idx=None):
"""
Return the Block object that contains the specified
position pos, or None if it is not in the cache.
"""
if _max_idx is None:
_max_idx = len(self.blocks) - 1
if _max_idx < _min_idx:
return None
pivot_idx = (_max_idx + _min_idx) / 2
pivot_block = self.blocks[pivot_idx]
if pos < pivot_block.startOffset:
return self.find_block(pos, _min_idx, pivot_idx - 1)
elif pos >= pivot_block.startOffset + pivot_block.numBytes:
return self.find_block(pos, pivot_idx + 1, _max_idx)
else:
return pivot_block
def insert_new_blocks(self, new_blocks):
"""
Merge a list of Block objects from the NN into the list
of cached blocks.
If the set of blocks overlaps, the new blocks take precedence.
"""
# We could do a more efficient merge here since both lists
# are already sorted, but these data structures are small, so let's
# do the easy thing.
blocks_dict = dict( (b.blockId, b) for b in self.blocks )
# Merge in new data to dictionary
for nb in new_blocks:
blocks_dict[nb.blockId] = nb
# Convert back to sorted list
block_list = blocks_dict.values()
block_list.sort(cmp=lambda a,b: cmp(a.startOffset, b.startOffset))
# Update cache with new data
self.blocks = block_list
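# Minimal illustrative sketch of the BlockCache contract (hypothetical Block
# objects; the real ones come from the NameNode Thrift API and carry more
# fields):
#
# Block = namedtuple('Block', 'blockId startOffset numBytes')
# cache = BlockCache()
# cache.insert_new_blocks([Block(1, 0, 64), Block(2, 64, 64)])
# cache.find_block(70)   # -> Block(2, 64, 64)
# cache.find_block(200)  # -> None (offset not covered by any cached block)
# cache.insert_new_blocks([Block(2, 64, 128)])  # same blockId: new data wins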
|
|
# -*- coding: utf-8 -*-
#
import warnings
import numpy
from . import color as mycol
def draw_legend(data, obj):
'''Adds legend code.
'''
texts = []
children_alignment = []
for text in obj.texts:
texts.append('%s' % text.get_text())
children_alignment.append('%s' % text.get_horizontalalignment())
cont = 'legend entries={{%s}}' % '},{'.join(texts)
data['extra axis options'].add(cont)
# Get the location.
# http://matplotlib.org/api/legend_api.html
pad = 0.03
# pylint: disable=protected-access
loc = obj._loc
if loc == 0:
# best
# Create a renderer
from matplotlib.backends import backend_agg
renderer = backend_agg.RendererAgg(width=obj.figure.get_figwidth(),
height=obj.figure.get_figheight(),
dpi=obj.figure.dpi)
# Rectangles of the legend and of the axes
# Lower left and upper right points
# pylint: disable=protected-access
x0_legend, x1_legend = \
obj._legend_box.get_window_extent(renderer).get_points()
x0_axes, x1_axes = obj.axes.get_window_extent(renderer).get_points()
dimension_legend = x1_legend - x0_legend
dimension_axes = x1_axes - x0_axes
# To determine the actual position of the legend, check which corner
# (or center) of the legend is closest to the corresponding corner
# (or center) of the axes box.
# 1. Key points of the legend
lower_left_legend = x0_legend
lower_right_legend = numpy.array([x1_legend[0], x0_legend[1]],
dtype=numpy.float_)
upper_left_legend = numpy.array([x0_legend[0], x1_legend[1]],
dtype=numpy.float_)
upper_right_legend = x1_legend
center_legend = x0_legend + dimension_legend / 2.
center_left_legend = numpy.array(
[x0_legend[0], x0_legend[1] + dimension_legend[1] / 2.],
dtype=numpy.float_)
center_right_legend = numpy.array(
[x1_legend[0], x0_legend[1] + dimension_legend[1] / 2.],
dtype=numpy.float_)
lower_center_legend = numpy.array(
[x0_legend[0] + dimension_legend[0] / 2., x0_legend[1]],
dtype=numpy.float_)
upper_center_legend = numpy.array(
[x0_legend[0] + dimension_legend[0] / 2., x1_legend[1]],
dtype=numpy.float_)
# 2. Key points of the axes
lower_left_axes = x0_axes
lower_right_axes = numpy.array([x1_axes[0], x0_axes[1]],
dtype=numpy.float_)
upper_left_axes = numpy.array([x0_axes[0], x1_axes[1]],
dtype=numpy.float_)
upper_right_axes = x1_axes
center_axes = x0_axes + dimension_axes / 2.
center_left_axes = numpy.array(
[x0_axes[0], x0_axes[1] + dimension_axes[1] / 2.],
dtype=numpy.float_)
center_right_axes = numpy.array(
[x1_axes[0], x0_axes[1] + dimension_axes[1] / 2.],
dtype=numpy.float_)
lower_center_axes = numpy.array(
[x0_axes[0] + dimension_axes[0] / 2., x0_axes[1]],
dtype=numpy.float_)
upper_center_axes = numpy.array(
[x0_axes[0] + dimension_axes[0] / 2., x1_axes[1]],
dtype=numpy.float_)
# 3. Compute the distances between comparable points.
distances = {
1: upper_right_axes - upper_right_legend, # upper right
2: upper_left_axes - upper_left_legend, # upper left
3: lower_left_axes - lower_left_legend, # lower left
4: lower_right_axes - lower_right_legend, # lower right
# 5: not implemented           # right
6: center_left_axes - center_left_legend, # center left
7: center_right_axes - center_right_legend, # center right
8: lower_center_axes - lower_center_legend, # lower center
9: upper_center_axes - upper_center_legend, # upper center
10: center_axes - center_legend # center
}
for k, v in distances.items():
distances[k] = numpy.linalg.norm(v, ord=2)
# 4. Take the shortest distance between key points as the final
# location
loc = min(distances, key=distances.get)
if loc == 1:
# upper right
position = None
anchor = None
elif loc == 2:
# upper left
position = [pad, 1.0 - pad]
anchor = 'north west'
elif loc == 3:
# lower left
position = [pad, pad]
anchor = 'south west'
elif loc == 4:
# lower right
position = [1.0 - pad, pad]
anchor = 'south east'
elif loc == 5:
# right
position = [1.0 - pad, 0.5]
anchor = 'east'
elif loc == 6:
# center left
position = [3 * pad, 0.5]
anchor = 'west'
elif loc == 7:
# center right
position = [1.0 - 3 * pad, 0.5]
anchor = 'east'
elif loc == 8:
# lower center
position = [0.5, 3 * pad]
anchor = 'south'
elif loc == 9:
# upper center
position = [0.5, 1.0 - 3 * pad]
anchor = 'north'
else:
assert loc == 10
# center
position = [0.5, 0.5]
anchor = 'center'
# If a position was given via the bbox_to_anchor parameter, the legend
# center is overridden as follows:
if obj._bbox_to_anchor:
bbox_center = obj.get_bbox_to_anchor()._bbox._points[1]
position = [bbox_center[0], bbox_center[1]]
legend_style = []
if position:
legend_style.append('at={(%.15g,%.15g)}' % (position[0], position[1]))
if anchor:
legend_style.append('anchor=%s' % anchor)
# Get the edgecolor of the box
if obj.get_frame_on():
edgecolor = obj.get_frame().get_edgecolor()
data, frame_xcolor, _ = mycol.mpl_color2xcolor(data, edgecolor)
if frame_xcolor != 'black': # black is default
legend_style.append('draw=%s' % frame_xcolor)
else:
legend_style.append('draw=none')
# Get the facecolor of the box
facecolor = obj.get_frame().get_facecolor()
data, fill_xcolor, _ = mycol.mpl_color2xcolor(data, facecolor)
if fill_xcolor != 'white': # white is default
legend_style.append('fill=%s' % fill_xcolor)
# Get the horizontal alignment
try:
alignment = children_alignment[0]
except IndexError:
alignment = None
for child_alignment in children_alignment:
if alignment != child_alignment:
warnings.warn(
'Varying horizontal alignments in the legend. Using default.'
)
alignment = None
break
if alignment:
data['extra axis options'].add(
'legend cell align={{{}}}'.format(alignment)
)
if obj._ncol != 1:
data['extra axis options'].add(
'legend columns={}'.format(obj._ncol)
)
# Set color of lines in legend
for handle in obj.legendHandles:
try:
data, legend_color, _ = mycol.mpl_color2xcolor(data,
handle.get_color())
data['legend colors'].append('\\addlegendimage{no markers, %s}\n'
% legend_color)
except AttributeError:
pass
# Write styles to data
if legend_style:
style = 'legend style={%s}' % ', '.join(legend_style)
data['extra axis options'].add(style)
return data
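# Illustrative outcome (hypothetical figure): for a legend placed with
# loc='upper left', the code above adds roughly
#   legend style={at={(0.03,0.97)}, anchor=north west}
# to the extra axis options, since loc == 2 maps to position [pad, 1.0 - pad]
# with pad = 0.03 and anchor 'north west'; loc='best' (0) is first resolved to
# the nearest of the nine fixed locations before this mapping is applied.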
|
|
import warnings
from collections import namedtuple
from MySQLdb.constants import FIELD_TYPE
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.db.models.indexes import Index
from django.utils.datastructures import OrderedSet
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_text
FieldInfo = namedtuple('FieldInfo', FieldInfo._fields + ('extra',))
InfoLine = namedtuple('InfoLine', 'col_name data_type max_len num_prec num_scale extra column_default')
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = {
FIELD_TYPE.BLOB: 'TextField',
FIELD_TYPE.CHAR: 'CharField',
FIELD_TYPE.DECIMAL: 'DecimalField',
FIELD_TYPE.NEWDECIMAL: 'DecimalField',
FIELD_TYPE.DATE: 'DateField',
FIELD_TYPE.DATETIME: 'DateTimeField',
FIELD_TYPE.DOUBLE: 'FloatField',
FIELD_TYPE.FLOAT: 'FloatField',
FIELD_TYPE.INT24: 'IntegerField',
FIELD_TYPE.LONG: 'IntegerField',
FIELD_TYPE.LONGLONG: 'BigIntegerField',
FIELD_TYPE.SHORT: 'SmallIntegerField',
FIELD_TYPE.STRING: 'CharField',
FIELD_TYPE.TIME: 'TimeField',
FIELD_TYPE.TIMESTAMP: 'DateTimeField',
FIELD_TYPE.TINY: 'IntegerField',
FIELD_TYPE.TINY_BLOB: 'TextField',
FIELD_TYPE.MEDIUM_BLOB: 'TextField',
FIELD_TYPE.LONG_BLOB: 'TextField',
FIELD_TYPE.VAR_STRING: 'CharField',
}
def get_field_type(self, data_type, description):
field_type = super().get_field_type(data_type, description)
if 'auto_increment' in description.extra:
if field_type == 'IntegerField':
return 'AutoField'
elif field_type == 'BigIntegerField':
return 'BigAutoField'
return field_type
def get_table_list(self, cursor):
"""Return a list of table and view names in the current database."""
cursor.execute("SHOW FULL TABLES")
return [TableInfo(row[0], {'BASE TABLE': 't', 'VIEW': 'v'}.get(row[1]))
for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"""
Return a description of the table with the DB-API cursor.description
interface."
"""
# information_schema database gives more accurate results for some figures:
# - varchar length returned by cursor.description is an internal length,
# not visible length (#5725)
# - precision and scale (for decimal fields) (#5014)
# - auto_increment is not available in cursor.description
cursor.execute("""
SELECT column_name, data_type, character_maximum_length, numeric_precision,
numeric_scale, extra, column_default
FROM information_schema.columns
WHERE table_name = %s AND table_schema = DATABASE()""", [table_name])
field_info = {line[0]: InfoLine(*line) for line in cursor.fetchall()}
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
def to_int(i):
return int(i) if i is not None else i
fields = []
for line in cursor.description:
col_name = force_text(line[0])
fields.append(
FieldInfo(*(
(col_name,) +
line[1:3] +
(
to_int(field_info[col_name].max_len) or line[3],
to_int(field_info[col_name].num_prec) or line[4],
to_int(field_info[col_name].num_scale) or line[5],
line[6],
field_info[col_name].column_default,
field_info[col_name].extra,
)
))
)
return fields
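# Illustrative shape of the result (hypothetical column): each entry is the
# extended FieldInfo namedtuple defined above, i.e. the usual
# cursor.description fields (name, type_code, display_size, internal_size,
# precision, scale, null_ok, default) plus the MySQL 'extra' string, with
# internal_size, precision, scale and default taken from information_schema
# and 'extra' carrying flags such as 'auto_increment'.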
def get_relations(self, cursor, table_name):
"""
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
constraints = self.get_key_columns(cursor, table_name)
relations = {}
for my_fieldname, other_table, other_field in constraints:
relations[my_fieldname] = (other_field, other_table)
return relations
def get_key_columns(self, cursor, table_name):
"""
Return a list of (column_name, referenced_table_name, referenced_column_name)
for all key columns in the given table.
"""
key_columns = []
cursor.execute("""
SELECT column_name, referenced_table_name, referenced_column_name
FROM information_schema.key_column_usage
WHERE table_name = %s
AND table_schema = DATABASE()
AND referenced_table_name IS NOT NULL
AND referenced_column_name IS NOT NULL""", [table_name])
key_columns.extend(cursor.fetchall())
return key_columns
def get_indexes(self, cursor, table_name):
warnings.warn(
"get_indexes() is deprecated in favor of get_constraints().",
RemovedInDjango21Warning, stacklevel=2
)
cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
# Do a two-pass search for indexes: on first pass check which indexes
# are multicolumn, on second pass check which single-column indexes
# are present.
rows = list(cursor.fetchall())
multicol_indexes = set()
for row in rows:
if row[3] > 1:
multicol_indexes.add(row[2])
indexes = {}
for row in rows:
if row[2] in multicol_indexes:
continue
if row[4] not in indexes:
indexes[row[4]] = {'primary_key': False, 'unique': False}
# It's possible to have the unique and PK constraints in separate indexes.
if row[2] == 'PRIMARY':
indexes[row[4]]['primary_key'] = True
if not row[1]:
indexes[row[4]]['unique'] = True
return indexes
def get_storage_engine(self, cursor, table_name):
"""
Retrieve the storage engine for a given table. Return the default
storage engine if the table doesn't exist.
"""
cursor.execute(
"SELECT engine "
"FROM information_schema.tables "
"WHERE table_name = %s", [table_name])
result = cursor.fetchone()
if not result:
return self.connection.features._mysql_storage_engine
return result[0]
def get_constraints(self, cursor, table_name):
"""
Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns.
"""
constraints = {}
# Get the actual constraint names and columns
name_query = """
SELECT kc.`constraint_name`, kc.`column_name`,
kc.`referenced_table_name`, kc.`referenced_column_name`
FROM information_schema.key_column_usage AS kc
WHERE
kc.table_schema = DATABASE() AND
kc.table_name = %s
"""
cursor.execute(name_query, [table_name])
for constraint, column, ref_table, ref_column in cursor.fetchall():
if constraint not in constraints:
constraints[constraint] = {
'columns': OrderedSet(),
'primary_key': False,
'unique': False,
'index': False,
'check': False,
'foreign_key': (ref_table, ref_column) if ref_column else None,
}
constraints[constraint]['columns'].add(column)
# Now get the constraint types
type_query = """
SELECT c.constraint_name, c.constraint_type
FROM information_schema.table_constraints AS c
WHERE
c.table_schema = DATABASE() AND
c.table_name = %s
"""
cursor.execute(type_query, [table_name])
for constraint, kind in cursor.fetchall():
if kind.lower() == "primary key":
constraints[constraint]['primary_key'] = True
constraints[constraint]['unique'] = True
elif kind.lower() == "unique":
constraints[constraint]['unique'] = True
# Now add in the indexes
cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
for table, non_unique, index, colseq, column, type_ in [x[:5] + (x[10],) for x in cursor.fetchall()]:
if index not in constraints:
constraints[index] = {
'columns': OrderedSet(),
'primary_key': False,
'unique': False,
'check': False,
'foreign_key': None,
}
constraints[index]['index'] = True
constraints[index]['type'] = Index.suffix if type_ == 'BTREE' else type_.lower()
constraints[index]['columns'].add(column)
# Convert the sorted sets to lists
for constraint in constraints.values():
constraint['columns'] = list(constraint['columns'])
return constraints
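# Illustrative return value (hypothetical table): a table with a primary key
# 'id' and a BTREE index on 'name' would yield roughly
# {'PRIMARY': {'columns': ['id'], 'primary_key': True, 'unique': True,
#              'index': True, 'check': False, 'foreign_key': None, 'type': 'idx'},
#  'name_idx': {'columns': ['name'], 'primary_key': False, 'unique': False,
#               'index': True, 'check': False, 'foreign_key': None, 'type': 'idx'}}
# ('idx' is Index.suffix, substituted for BTREE indexes above).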
|
|
#!/usr/bin/env python
"""
Package for interfacing with the Dark Energy Survey (DES) wikis.
Currently supports Redmine access through the `DESRedmine` class.
"""
import os
import sys
import re
import requests
import getpass
import logging
from collections import OrderedDict as odict
# The python-redmine API changes between v1.5 and v2.0
try:
import redmine
except ImportError:
import redminelib as redmine
# Utility Functions
def get_des_config(desfile=None):
"""
Simplified version of despyServiceAccess from DESDM.
Access file description in DESDM-3:
https://opensource.ncsa.illinois.edu/confluence/x/lwCsAw
"""
from ConfigParser import SafeConfigParser
if not desfile: desfile = os.getenv('DES_SERVICES')
if not desfile: desfile = os.path.join(os.getenv('HOME'),'.desservices.ini')
# ConfigParser throws confusing error if file doesn't exist
open(desfile)
config = SafeConfigParser()
config.read(desfile)
return config
def confirm(question,default=True):
"""
Simple function for getting yes/no response from raw_input
From: http://stackoverflow.com/a/3041990
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False,
True:True, False:False}
if default is None:
prompt = " [y/n] "
elif valid[default]:
prompt = " [Y/n] "
elif not valid[default]:
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower().strip()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
class DESRedmine(redmine.Redmine):
baseurl = "https://cdcvs.fnal.gov/redmine"
def __init__(self,**kwargs):
""" Wrapper around redmine.Redmine """
kw = self.authenticate()
kw.update(**kwargs)
super(DESRedmine,self).__init__(**kw)
self.yes = kw.pop('yes',False)
def add_attachments(self,url,attachments,descriptions=None):
"""
Attach files to wiki page at the given url.
"""
page = self.wiki_page_from_url(url)
if isinstance(attachments,basestring):
attachments = [attachments]
default_desc='automated upload'
if descriptions is None:
descriptions = len(attachments)*[default_desc]
elif isinstance(descriptions,basestring):
descriptions = [descriptions]
uploads = []
for a,d in zip(attachments,descriptions):
uploads += [dict(path=a,filename=os.path.basename(a),description=d)]
fields = dict(
resource_id=page.internal_id,
project_id=page.manager.params.get('project_id',0),
text = page.text,
uploads = uploads,
)
logging.info("Attaching files:\n"+"\n".join(attachments))
return self.wiki_page.update(**fields)
def download_attachments(self,url,patterns=None):
"""
Download attachment(s) that match given pattern(s).
"""
attachments = self.attachments_from_patterns(url,patterns)
filenames = [a.filename for a in attachments]
# For large files:
# self.requests['stream'] = True
savepath = ''
for a in attachments:
outname = a.filename
if filenames.count(outname) > 1:
outname += '.%i'%a.internal_id
#print msg
if os.path.exists(os.path.join(savepath,outname)):
msg = "Found %s; skipping..."%outname
logging.info(msg)
continue
else:
msg = "Downloading %s..."%outname
logging.info(msg)
a.download(savepath='',filename=outname)
return True
def delete_attachments(self,url,patterns=None,force=False):
"""
Delete attachment(s) that match given pattern(s)
"""
LOGIN_BUTTON = "Login »"
# The Redmine API explicitly does not support DELETE requests
# for attachments. The workaround is to start a login session.
session = requests.session()
loginurl = self.url + '/login'
token = self.get_authenticity_token(session.get(loginurl))
data = dict(username=self.username,password=self.password,
login=LOGIN_BUTTON,authenticity_token=token)
response = session.post(loginurl,data=data)
token = self.get_authenticity_token(response)
# Grab the attachments that we are going to delete
attachments = self.attachments_from_patterns(url,patterns)
if not len(attachments):
msg = "No matching attachments found."
raise Exception(msg)
for a in attachments:
data = dict(_method='delete',authenticity_token=token)
if not force:
question = "Delete '%s/%s'?"%(a.url,a.filename)
if not confirm(question,default=True): continue
msg = "Deleting %s/%s..."%(a.url,a.filename)
logging.info(msg)
r = session.post(a.url,data=data,cookies=response.cookies)
self.status_code(r)
session.close()
return True
def create_wiki_page(self,url,force=False,**kwargs):
"""
Tiny wrapper around `redmine.wiki_page.create`
"""
project_id,resource_id = self.parse_url(url)
text = self.parse_text(kwargs.pop('text',None))
fields = dict(resource_id=resource_id,project_id=project_id,
title=resource_id.replace('_',' '),text=text)
fields.update(**kwargs)
if not force:
question = "Create '%s'?"%url
if not confirm(question,default=False):
return None
try:
self.delete_wiki_page(url,force=force)
except redmine.exceptions.ResourceNotFoundError as e:
logging.info(str(e))
logging.info("Creating %s..."%url)
return self.wiki_page.create(**fields)
def delete_wiki_page(self,url,force=False,**kwargs):
"""
Tiny wrapper around `redmine.wiki_page.delete`.
"""
project_id,resource_id = self.parse_url(url)
fields = dict(resource_id=resource_id,project_id=project_id)
fields.update(**kwargs)
if not force:
question = "Delete '%s'?"%url
if not confirm(question,default=False):
return None
logging.info("Deleting %s..."%url)
return self.wiki_page.delete(**fields)
def attachments_from_patterns(self,url,patterns=None):
"""
Attachments with filenames that match the given pattern(s).
"""
if patterns is None:
patterns = ['']
if isinstance(patterns,basestring):
patterns = [patterns]
page = self.wiki_page_from_url(url)
attachments = page.attachments
filenames = [a.filename for a in attachments]
index = [i for p in patterns for i,f in enumerate(filenames) if re.match(p,f)]
return [attachments[i] for i in index]
def parse_url(self, url):
if not url.startswith(self.url):
msg = "Requested URL not in DES domain: %s"%url
raise Exception(msg)
# Stripped url should now be of the form:
# /projects/<project_id>/.../<resource_id>
content = url[len(self.url):].strip('/').split('/')
project_id = content[1]
resource_id = content[-1]
return project_id,resource_id
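# Illustrative example (hypothetical project and page names): parse_url(
# "https://cdcvs.fnal.gov/redmine/projects/des-sci-release/wiki/Some_Page")
# returns ("des-sci-release", "Some_Page"); URLs outside self.url raise.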
def wiki_page_from_url(self,url):
project_id,resource_id = self.parse_url(url)
fields = dict(resource_id=resource_id,project_id=project_id)
return self.wiki_page.get(**fields)
def authenticate(self,section='redmine-des'):
"""
Grab redmine authentication
"""
auth = odict(
url = self.baseurl,
key = None,
user = None,
passwd = None,
)
def defaults(): return auth
config = get_des_config()
config.defaults = defaults
if section in config.sections():
auth = dict([(k,config.get(section,k)) for k in auth.keys()])
if (auth['user'] is None) or (auth['passwd'] is None):
auth['user'] = raw_input('Username: ')
auth['passwd'] = getpass.getpass()
auth['username'] = auth.pop('user')
auth['password'] = auth.pop('passwd')
return auth
@staticmethod
def parse_text(text):
""" Translate list or filename into string for `text` argument. """
if not text:
text = ' '
if not isinstance(text,basestring):
text = ' '.join(text)
if os.path.exists(text):
text = open(text,'r').read()
return text
@staticmethod
def get_authenticity_token(response):
""" Get the CSRF authenticity token from a response """
pattern = 'meta content="(.*)" name="csrf-token"'
token = re.search(pattern,str(response.content)).group(1)
return token
@staticmethod
def status_code(response):
if response.status_code in (200, 201):
return response
elif response.status_code == 401:
raise redmine.exceptions.AuthError
elif response.status_code == 403:
raise redmine.exceptions.ForbiddenError
elif response.status_code == 404:
raise redmine.exceptions.ResourceNotFoundError
elif response.status_code == 409:
raise redmine.exceptions.ConflictError
elif response.status_code == 412:
raise redmine.exceptions.ImpersonateError
elif response.status_code == 413:
raise redmine.exceptions.RequestEntityTooLargeError
elif response.status_code == 422:
errors = response.json()['errors']
raise redmine.exceptions.ValidationError(', '.join(e if isinstance(e, basestring) else ': '.join(e) for e in errors))
elif response.status_code == 500:
raise redmine.exceptions.ServerError
raise redmine.exceptions.UnknownError(response.status_code)
if __name__ == "__main__":
import argparse
description = "python script"
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-v','--verbose',action='store_true')
opts = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
if opts.verbose:
logging.getLogger().setLevel(logging.DEBUG)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Transforms a float-trained graph into an equivalent quantized version.
An example of command-line usage is:
bazel build tensorflow/contrib/quantization/tools:quantize_graph \
&& bazel-bin/tensorflow/contrib/quantization/tools/quantize_graph \
--input=tensorflow_inception_graph.pb \
--output_node_names="softmax2" --print_nodes --output=/tmp/quantized_graph.pb \
--mode=eightbit --logtostderr
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
import tensorflow as tf
from tensorflow.python.client import graph_util
from tensorflow.python.framework import tensor_util
# TODO(petewarden) - Remove this ugly hack to get around Python linking problems
# with Bazel.
# pylint: disable=g-bad-import-order
from tensorflow.contrib.quantization import load_quantized_ops_so
from tensorflow.contrib.quantization.kernels import load_quantized_kernels_so
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_boolean("print_nodes", False, """Lists all nodes in the model.""")
flags.DEFINE_string("input", "", """TensorFlow 'GraphDef' file to load.""")
flags.DEFINE_string("output_node_names", "",
"""Output node names, comma separated.""")
flags.DEFINE_string("output", "", """File to save the output graph to.""")
flags.DEFINE_integer("bitdepth", 8,
"""How many bits to quantize the graph to.""")
flags.DEFINE_string("mode", "round",
"""What transformation to apply (round, quantize,"""
""" eightbit, weights, or weights_rounded).""")
flags.DEFINE_string("test_input_dims", "1,224,224,3",
"""The size of the input tensor to use when testing a"""
""" graph loaded from a file.""")
flags.DEFINE_boolean("strip_redundant_quantization", True,
"""Removes redundant dequantize/quantize pairs.""")
def print_input_nodes(current_node, nodes_map, indent, already_visited):
print(" " * indent + current_node.op + ":" + current_node.name)
for input_node_name in current_node.input:
if input_node_name in already_visited:
continue
input_node = nodes_map[input_node_name]
print_input_nodes(input_node, nodes_map, indent + 1, already_visited)
already_visited[current_node.name] = True
def create_node(op, name, inputs):
new_node = tf.NodeDef()
new_node.op = op
new_node.name = name
for input_name in inputs:
new_node.input.extend([input_name])
return new_node
def create_constant_node(name, value, dtype, shape=None):
node = create_node("Const", name, [])
set_attr_dtype(node, "dtype", dtype)
set_attr_tensor(node, "value", value, dtype, shape)
return node
def copy_attr(node, key, attr_value):
try:
node.attr[key].CopyFrom(attr_value)
except KeyError:
pass
def set_attr_dtype(node, key, value):
try:
node.attr[key].CopyFrom(tf.AttrValue(type=value.as_datatype_enum))
except KeyError:
pass
def set_attr_tensor(node, key, value, dtype, shape=None):
try:
node.attr[key].CopyFrom(tf.AttrValue(
tensor=tensor_util.make_tensor_proto(value,
dtype=dtype,
shape=shape)))
except KeyError:
pass
def set_attr_string(node, key, value):
try:
node.attr[key].CopyFrom(tf.AttrValue(s=value))
except KeyError:
pass
def set_attr_int_list(node, key, value):
list_value = tf.AttrValue.ListValue(i=value)
try:
node.attr[key].CopyFrom(tf.AttrValue(list=list_value))
except KeyError:
pass
def set_attr_bool(node, key, value):
try:
node.attr[key].CopyFrom(tf.AttrValue(b=value))
except KeyError:
pass
def set_attr_int(node, key, value):
try:
node.attr[key].CopyFrom(tf.AttrValue(i=value))
except KeyError:
pass
def set_attr_float(node, key, value):
try:
node.attr[key].CopyFrom(tf.AttrValue(f=value))
except KeyError:
pass
def node_name_from_input(node_name):
"""Strips off ports and other decorations to get the underlying node name."""
if node_name.startswith("^"):
node_name = node_name[1:]
m = re.search(r"(.*):\d+$", node_name)
if m:
node_name = m.group(1)
return node_name
def ensure_tensor_name_has_port(node_name):
"""Makes sure that a tensor name has :0 if no explicit port exists."""
m = re.search(r"(.*):\d+$", node_name)
if m:
name_with_port = node_name
else:
name_with_port = node_name + ":0"
return name_with_port
def unique_node_name_from_input(node_name):
"""Replaces invalid characters in input names to get a unique node name."""
return node_name.replace(":", "__port__").replace("^", "__hat__")
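# Illustrative behaviour of the three name helpers above (hypothetical names):
#   node_name_from_input("^conv1/weights:1")      -> "conv1/weights"
#   ensure_tensor_name_has_port("conv1/weights")  -> "conv1/weights:0"
#   unique_node_name_from_input("^conv1/weights:1") -> "__hat__conv1/weights__port__1"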
def quantize_array(arr, num_buckets):
"""Quantizes a numpy array.
This function maps each scalar in arr to the center of one of num_buckets
buckets. For instance,
quantize_array([0, 0.3, 0.6, 1], 2) => [0.25, 0.25, 0.75, 0.75]
Args:
arr: The numpy array to quantize.
num_buckets: The number of buckets to map "arr" to.
Returns:
The quantized numpy array.
Raises:
ValueError: when num_buckets < 1.
"""
if num_buckets < 1:
raise ValueError("num_buckets must be >= 1")
arr_max = arr.max()
arr_min = arr.min()
if arr_max == arr_min:
return arr
bucket_width = (arr_max - arr_min) / num_buckets
# Map scalars to bucket indices. Take special care of max(arr).
bucket_indices = np.floor((arr - arr_min) / bucket_width)
bucket_indices[bucket_indices == num_buckets] = num_buckets - 1
# Map each scalar to the center of a bucket.
arr = arr_min + bucket_width * (bucket_indices + 0.5)
return arr
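# Worked example of the arithmetic above for the docstring case
# quantize_array([0, 0.3, 0.6, 1], 2): bucket_width = (1 - 0) / 2 = 0.5,
# floor((arr - 0) / 0.5) = [0, 0, 1, 2], the out-of-range index 2 is clamped
# to 1, and the bucket centers 0 + 0.5 * (indices + 0.5) give
# [0.25, 0.25, 0.75, 0.75].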
def quantize_weight_rounded(input_node):
"""Returns a replacement node for input_node containing bucketed floats."""
input_tensor = input_node.attr["value"].tensor
tensor_value = tensor_util.MakeNdarray(input_tensor)
tensor_shape = input_tensor.tensor_shape
# Currently, the parameter FLAGS.bitdepth is used to compute the
# number of buckets as 1 << FLAGS.bitdepth, meaning the number of
# buckets can only be a power of 2.
# This could be fixed by introducing a new parameter, num_buckets,
# which would allow for more flexibility in choosing the right model
# size/accuracy tradeoff. But I didn't want to add more parameters
# to this script than absolutely necessary.
num_buckets = 1 << FLAGS.bitdepth
tensor_value_rounded = quantize_array(tensor_value, num_buckets)
tensor_shape_list = tensor_util.TensorShapeProtoToList(tensor_shape)
return [create_constant_node(input_node.name, tensor_value_rounded,
tf.float32, shape=tensor_shape_list)]
def quantize_weight_eightbit(input_node, quantization_mode):
"""Returns replacement nodes for input_node using the Dequantize op."""
base_name = input_node.name + "_"
quint8_const_name = base_name + "quint8_const"
min_name = base_name + "min"
max_name = base_name + "max"
float_tensor = tensor_util.MakeNdarray(
input_node.attr["value"].tensor)
min_value = np.min(float_tensor.flatten())
max_value = np.max(float_tensor.flatten())
# min_value == max_value is a tricky case. It can occur for general
# tensors, and of course for scalars. The quantized ops cannot deal
# with this case, so we set max_value to something else.
# It's a tricky question what is the numerically best solution to
# deal with this degeneracy.
# TODO(petewarden): Better use a tolerance than a hard comparison?
if min_value == max_value:
if abs(min_value) < 0.000001:
max_value = min_value + 1.0
else:
max_value = 2 * min_value
sess = tf.Session()
with sess.as_default():
quantize_op = tf.contrib.quantization.python.quantize_v2(
float_tensor,
min_value,
max_value,
tf.quint8,
mode=quantization_mode)
quint8_tensor = quantize_op[0].eval()
shape = tensor_util.TensorShapeProtoToList(input_node.attr[
"value"].tensor.tensor_shape)
quint8_const_node = create_constant_node(quint8_const_name,
quint8_tensor,
tf.quint8,
shape=shape)
min_node = create_constant_node(min_name, min_value, tf.float32)
max_node = create_constant_node(max_name, max_value, tf.float32)
dequantize_node = create_node("Dequantize", input_node.name,
[quint8_const_name, min_name, max_name])
set_attr_dtype(dequantize_node, "T", tf.quint8)
set_attr_string(dequantize_node, "mode", quantization_mode)
return [quint8_const_node, min_node, max_node, dequantize_node]
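# Illustrative handling of the degenerate range above (hypothetical tensors):
# a weight tensor that is identically 0.0 gets max_value bumped to 1.0, while
# one that is identically 3.0 gets max_value = 6.0, so the QuantizeV2 call
# always sees a non-empty [min_value, max_value] range.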
class GraphRewriter(object):
"""Takes a float graph, and rewrites it in quantized form."""
def __init__(self, input_graph, mode):
"""Sets up the class to rewrite a float graph.
Args:
input_graph: A float graph to transform.
mode: A string controlling how quantization is performed -
round, quantize, eightbit, or weights.
Raises:
ValueError: Two nodes with the same name were found in the graph.
"""
self.input_graph = input_graph
self.nodes_map = self.create_nodes_map(input_graph)
self.output_graph = None
self.mode = mode
load_quantized_ops_so.Load()
load_quantized_kernels_so.Load()
def create_nodes_map(self, graph):
"""Builds a mapping of node names to their defs from the graph."""
nodes_map = {}
for node in graph.node:
if node.name not in nodes_map.keys():
nodes_map[node.name] = node
else:
raise ValueError("Duplicate node names detected.")
return nodes_map
def rewrite(self, output_node_names):
"""Triggers rewriting of the float graph.
Args:
output_node_names: A list of names of the nodes that produce the final
results.
Returns:
A quantized version of the float graph.
"""
self.output_graph = tf.GraphDef()
output_nodes = [self.nodes_map[output_node_name]
for output_node_name in output_node_names]
if self.mode == "round":
self.already_visited = {}
for output_node in output_nodes:
self.round_nodes_recursively(output_node)
elif self.mode == "quantize":
self.already_visited = {}
self.already_quantized = {}
for output_node in output_nodes:
self.quantize_nodes_recursively(output_node)
elif self.mode == "eightbit":
self.set_input_graph(self.remove_unneeded_nodes(self.input_graph))
self.already_visited = {}
self.layers_eightbitized = []
for output_node in output_nodes:
self.eightbitize_nodes_recursively(output_node)
self.output_graph = self.quantize_weights(self.output_graph, b"MIN_FIRST")
if FLAGS.strip_redundant_quantization:
self.output_graph = self.remove_redundant_quantization(
self.output_graph)
self.remove_dead_nodes(output_node_names)
elif self.mode == "weights":
self.output_graph = self.quantize_weights(self.input_graph,
b"MIN_COMBINED")
self.remove_dead_nodes(output_node_names)
elif self.mode == "weights_rounded":
self.output_graph = self.quantize_weights(self.input_graph, self.mode)
self.remove_dead_nodes(output_node_names)
else:
print("Bad mode - " + self.mode + ".")
return self.output_graph
def round_nodes_recursively(self, current_node):
"""The entry point for simple rounding quantization."""
for input_node_name in current_node.input:
input_node_name = node_name_from_input(input_node_name)
if input_node_name in self.already_visited:
continue
input_node = self.nodes_map[input_node_name]
self.round_nodes_recursively(input_node)
self.already_visited[current_node.name] = True
nodes_to_quantize = ["Conv2D", "BiasAdd", "MatMul"]
if any(current_node.op in s for s in nodes_to_quantize):
new_node = tf.NodeDef()
new_node.CopyFrom(current_node)
new_node.name = current_node.name + "_original"
self.add_output_graph_node(new_node)
levels = 1 << FLAGS.bitdepth
constant_name = current_node.name + "_round_depth"
constant_tensor = tf.constant(levels, dtype=tf.int32, name=constant_name)
constant_node = constant_tensor.op.node_def
self.add_output_graph_node(constant_node)
quantize_node = tf.NodeDef()
quantize_node.op = "RoundToSteps"
quantize_node.name = current_node.name
quantize_node.input.extend([current_node.name + "_original"])
quantize_node.input.extend([constant_node.name])
self.add_output_graph_node(quantize_node)
else:
new_node = tf.NodeDef()
new_node.CopyFrom(current_node)
self.add_output_graph_node(new_node)
def quantize_nodes_recursively(self, current_node):
"""The entry point for quantizing nodes to eight bit and back."""
for input_node_name in current_node.input:
input_node_name = node_name_from_input(input_node_name)
if input_node_name in self.already_visited:
continue
input_node = self.nodes_map[input_node_name]
self.quantize_nodes_recursively(input_node)
self.already_visited[current_node.name] = True
nodes_to_quantize = ["Conv2D", "BiasAdd", "MatMul"]
if any(current_node.op in s for s in nodes_to_quantize):
for input_name in current_node.input:
input_name = node_name_from_input(input_name)
input_node = self.nodes_map[input_name]
self.quantize_node(input_node)
self.quantize_node(current_node)
else:
new_node = tf.NodeDef()
new_node.CopyFrom(current_node)
self.add_output_graph_node(new_node)
def quantize_node(self, input_node):
"""Handles quantizing a single node."""
input_name = input_node.name
if input_name in self.already_quantized:
return
self.already_quantized[input_name] = True
original_input_name = input_name + "_original"
reshape_name = input_name + "_reshape"
reshape_dims_name = input_name + "_reshape_dims"
max_name = input_name + "_max"
min_name = input_name + "_min"
dims_name = input_name + "_dims"
quantize_name = input_name + "_quantize"
dequantize_name = input_name
original_input_node = tf.NodeDef()
original_input_node.CopyFrom(input_node)
original_input_node.name = original_input_name
self.add_output_graph_node(original_input_node)
reshape_dims_node = create_constant_node(reshape_dims_name, -1, tf.int32,
[1])
self.add_output_graph_node(reshape_dims_node)
reshape_node = create_node("Reshape", reshape_name, [original_input_name,
reshape_dims_name])
set_attr_dtype(reshape_node, "T", tf.float32)
self.add_output_graph_node(reshape_node)
dims_node = create_constant_node(dims_name, 0, tf.int32, [1])
self.add_output_graph_node(dims_node)
max_node = create_node("Max", max_name, [reshape_name, dims_name])
set_attr_dtype(max_node, "T", tf.float32)
set_attr_bool(max_node, "keep_dims", False)
self.add_output_graph_node(max_node)
min_node = create_node("Min", min_name, [reshape_name, dims_name])
set_attr_dtype(min_node, "T", tf.float32)
set_attr_bool(min_node, "keep_dims", False)
self.add_output_graph_node(min_node)
quantize_node = create_node("Quantize", quantize_name, [original_input_name,
min_name, max_name])
set_attr_dtype(quantize_node, "T", tf.quint8)
set_attr_string(quantize_node, "mode", b"MIN_FIRST")
self.add_output_graph_node(quantize_node)
dequantize_node = create_node("Dequantize", dequantize_name,
[quantize_name, min_name, max_name])
set_attr_dtype(dequantize_node, "T", tf.quint8)
set_attr_string(dequantize_node, "mode", b"MIN_FIRST")
self.add_output_graph_node(dequantize_node)
def eightbitize_nodes_recursively(self, current_node):
"""The entry point for transforming a graph into full eight bit."""
for input_node_name in current_node.input:
input_node_name = node_name_from_input(input_node_name)
if input_node_name in self.already_visited:
continue
input_node = self.nodes_map[input_node_name]
self.eightbitize_nodes_recursively(input_node)
self.already_visited[current_node.name] = True
if current_node.op == "MatMul":
self.eightbitize_mat_mul_node(current_node)
elif current_node.op == "Conv2D":
self.eightbitize_conv_node(current_node)
self.layers_eightbitized.append(current_node.name)
elif current_node.op == "BiasAdd":
self.eightbitize_bias_add_node(current_node)
elif current_node.op == "MaxPool" or current_node.op == "AvgPool":
self.eightbitize_single_input_tensor_node(current_node,
self.add_pool_function)
elif current_node.op == "Relu" or current_node.op == "Relu6":
self.eightbitize_single_input_tensor_node(current_node,
self.add_relu_function)
elif current_node.op == "Concat":
self.eightbitize_concat_node(current_node)
elif current_node.op == "BatchNormWithGlobalNormalization":
self.eightbitize_batch_norm_node(current_node)
else:
new_node = tf.NodeDef()
new_node.CopyFrom(current_node)
self.add_output_graph_node(new_node)
def add_eightbit_prologue_nodes(self, original_node):
"""Adds input conversion nodes to handle quantizing the underlying node."""
namespace_prefix = original_node.name + "_eightbit"
reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(
namespace_prefix)
input_names = []
min_max_names = []
for original_input_name in original_node.input:
quantize_input_name, min_input_name, max_input_name = (
self.eightbitize_input_to_node(namespace_prefix, original_input_name,
reshape_dims_name,
reduction_dims_name))
input_names.append(quantize_input_name)
min_max_names.append(min_input_name)
min_max_names.append(max_input_name)
all_input_names = []
all_input_names.extend(input_names)
all_input_names.extend(min_max_names)
return all_input_names
def add_common_quantization_nodes(self, namespace_prefix):
"""Builds constant nodes needed for quantization of inputs."""
reshape_dims_name = namespace_prefix + "_reshape_dims"
reduction_dims_name = namespace_prefix + "_reduction_dims"
reshape_dims_node = create_constant_node(reshape_dims_name, -1, tf.int32,
[1])
self.add_output_graph_node(reshape_dims_node)
reduction_dims_node = create_constant_node(reduction_dims_name, 0, tf.int32,
[1])
self.add_output_graph_node(reduction_dims_node)
return reshape_dims_name, reduction_dims_name
def eightbitize_input_to_node(self, namespace_prefix, original_input_name,
reshape_dims_name, reduction_dims_name):
"""Takes one float input to an op, and converts it to quantized form."""
unique_input_name = unique_node_name_from_input(original_input_name)
reshape_input_name = namespace_prefix + "_reshape_" + unique_input_name
min_input_name = namespace_prefix + "_min_" + unique_input_name
max_input_name = namespace_prefix + "_max_" + unique_input_name
quantize_input_name = namespace_prefix + "_quantize_" + unique_input_name
reshape_input_node = create_node("Reshape", reshape_input_name,
[original_input_name, reshape_dims_name])
set_attr_dtype(reshape_input_node, "T", tf.float32)
self.add_output_graph_node(reshape_input_node)
min_input_node = create_node("Min", min_input_name, [reshape_input_name,
reduction_dims_name])
set_attr_dtype(min_input_node, "T", tf.float32)
set_attr_bool(min_input_node, "keep_dims", False)
self.add_output_graph_node(min_input_node)
max_input_node = create_node("Max", max_input_name, [reshape_input_name,
reduction_dims_name])
set_attr_dtype(max_input_node, "T", tf.float32)
set_attr_bool(max_input_node, "keep_dims", False)
self.add_output_graph_node(max_input_node)
quantize_input_node = create_node("QuantizeV2", quantize_input_name,
[original_input_name, min_input_name,
max_input_name])
set_attr_dtype(quantize_input_node, "T", tf.quint8)
set_attr_string(quantize_input_node, "mode", b"MIN_FIRST")
self.add_output_graph_node(quantize_input_node)
min_output_name = quantize_input_name + ":1"
max_output_name = quantize_input_name + ":2"
return quantize_input_name, min_output_name, max_output_name
def add_quantize_down_node(self, original_node, quantized_output_name):
quantize_down_name = original_node.name + "_eightbit_quantize_down"
quantize_down_node = create_node(
"QuantizeDownAndShrinkRange", quantize_down_name,
[quantized_output_name, quantized_output_name + ":1",
quantized_output_name + ":2"])
set_attr_dtype(quantize_down_node, "Tinput", tf.qint32)
set_attr_dtype(quantize_down_node, "out_type", tf.quint8)
self.add_output_graph_node(quantize_down_node)
return quantize_down_name
def add_dequantize_result_node(self, quantized_output_name,
original_node_name):
dequantize_name = original_node_name
dequantize_node = create_node("Dequantize", dequantize_name,
[quantized_output_name,
quantized_output_name + ":1",
quantized_output_name + ":2"])
set_attr_dtype(dequantize_node, "T", tf.quint8)
set_attr_string(dequantize_node, "mode", b"MIN_FIRST")
self.add_output_graph_node(dequantize_node)
def eightbitize_mat_mul_node(self, original_node):
"""Replaces a MatMul node with the eight bit equivalent sub-graph."""
quantized_mat_mul_name = original_node.name + "_eightbit_quantized_mat_mul"
all_input_names = self.add_eightbit_prologue_nodes(original_node)
quantized_mat_mul_node = create_node(
"QuantizedMatMul", quantized_mat_mul_name,
all_input_names)
set_attr_dtype(quantized_mat_mul_node, "T1", tf.quint8)
set_attr_dtype(quantized_mat_mul_node, "T2", tf.quint8)
set_attr_dtype(quantized_mat_mul_node, "Toutput", tf.qint32)
copy_attr(quantized_mat_mul_node, "transpose_a",
original_node.attr["transpose_a"])
copy_attr(quantized_mat_mul_node, "transpose_b",
original_node.attr["transpose_b"])
self.add_output_graph_node(quantized_mat_mul_node)
quantize_down_name = self.add_quantize_down_node(original_node,
quantized_mat_mul_name)
self.add_dequantize_result_node(quantize_down_name, original_node.name)
def eightbitize_conv_node(self, original_node):
"""Replaces a Conv2D node with the eight bit equivalent sub-graph."""
all_input_names = self.add_eightbit_prologue_nodes(original_node)
quantized_conv_name = original_node.name + "_eightbit_quantized_conv"
quantized_conv_node = create_node("QuantizedConv2D", quantized_conv_name,
all_input_names)
copy_attr(quantized_conv_node, "strides", original_node.attr["strides"])
copy_attr(quantized_conv_node, "padding", original_node.attr["padding"])
set_attr_dtype(quantized_conv_node, "Tinput", tf.quint8)
set_attr_dtype(quantized_conv_node, "Tfilter", tf.quint8)
set_attr_dtype(quantized_conv_node, "out_type", tf.qint32)
self.add_output_graph_node(quantized_conv_node)
quantize_down_name = self.add_quantize_down_node(original_node,
quantized_conv_name)
self.add_dequantize_result_node(quantize_down_name, original_node.name)
def eightbitize_bias_add_node(self, original_node):
"""Replaces a BiasAdd node with the eight bit equivalent sub-graph."""
quantized_bias_add_name = (original_node.name +
"_eightbit_quantized_bias_add")
all_input_names = self.add_eightbit_prologue_nodes(original_node)
quantized_bias_add_node = create_node(
"QuantizedBiasAdd", quantized_bias_add_name,
all_input_names)
set_attr_dtype(quantized_bias_add_node, "T1", tf.quint8)
set_attr_dtype(quantized_bias_add_node, "T2", tf.quint8)
set_attr_dtype(quantized_bias_add_node, "out_type", tf.qint32)
self.add_output_graph_node(quantized_bias_add_node)
quantize_down_name = self.add_quantize_down_node(original_node,
quantized_bias_add_name)
self.add_dequantize_result_node(quantize_down_name, original_node.name)
def eightbitize_single_input_tensor_node(self, original_node,
add_op_function):
"""Replaces a single-tensor node with the eight bit equivalent sub-graph.
Converts a node like this:
Shape(f) Input(f)
| |
+--------v v
Operation
|
v
(f)
Into a quantized equivalent:
Input(f) ReshapeDims
+------v v-------------+
| Reshape
| |
| | ReductionDims
| +-----+ |
| | +---c---------+
| v v v v-------+
| Min Max
| +----+ |
v v v--------+
Quantize
|
v
QuantizedOperation
| | |
v v v
Dequantize
|
v
(f)
Args:
original_node: Float node to be converted.
add_op_function: Function that sets op-specific attributes on the new quantized node.
Returns:
Subgraph representing the quantized version of the original node.
"""
quantized_op_name = original_node.name + "_eightbit_quantized"
quantized_op_type = "Quantized" + original_node.op
all_input_names = self.add_eightbit_prologue_nodes(original_node)
quantized_op_node = create_node(
quantized_op_type, quantized_op_name, all_input_names)
add_op_function(original_node, quantized_op_node)
self.add_output_graph_node(quantized_op_node)
self.add_dequantize_result_node(quantized_op_name, original_node.name)
def add_pool_function(self, original_node, quantized_op_node):
set_attr_dtype(quantized_op_node, "T", tf.quint8)
copy_attr(quantized_op_node, "ksize", original_node.attr["ksize"])
copy_attr(quantized_op_node, "strides", original_node.attr["strides"])
copy_attr(quantized_op_node, "padding", original_node.attr["padding"])
def add_relu_function(self, unused_arg_node, quantized_op_node):
set_attr_dtype(quantized_op_node, "Tinput", tf.quint8)
def eightbitize_concat_node(self, original_node):
"""Replaces a Concat node with the eight bit equivalent sub-graph.
Converts a node like this:
Shape(f) Input0(f) Input1(f)
| | |
+--------v v v----------+
Concat
|
v
(f)
Into a quantized equivalent:
Shape(f) Input0(f) ReshapeDims Input1(f)
| +------v v--------------+------------------v v------+
| | Reshape Reshape |
| | | | |
| | | ReductionDims | |
| | +------+ | +--------+ |
| | | +---c---------+-----------c-----+ | |
| | +v v v v-------+---------v v v v+ |
| | Min Max Min Max |
| | +----+ | | +-----+ |
| v v v--------+ +----------v v v
| Quantize Quantize
| +------------------+ +----------------------+
+-------------------------------+ | |
v v v
QuantizedConcat
| | |
v v v
Dequantize
|
v
(f)
Args:
original_node: Float node to be converted.
Returns:
Subgraph representing the quantized version of the original node.
"""
namespace_prefix = original_node.name + "_eightbit"
quantized_concat_name = namespace_prefix + "_quantized_concat"
reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(
namespace_prefix)
shape_input_name = original_node.input[0]
original_inputs = original_node.input[1:]
input_names = []
min_names = []
max_names = []
for original_input_name in original_inputs:
quantize_input_name, min_input_name, max_input_name = (
self.eightbitize_input_to_node(namespace_prefix, original_input_name,
reshape_dims_name,
reduction_dims_name))
input_names.append(quantize_input_name)
min_names.append(min_input_name)
max_names.append(max_input_name)
all_input_names = [shape_input_name]
all_input_names.extend(input_names)
all_input_names.extend(min_names)
all_input_names.extend(max_names)
quantized_concat_node = create_node(
"QuantizedConcat", quantized_concat_name, all_input_names)
set_attr_int(quantized_concat_node, "N", len(original_inputs))
set_attr_dtype(quantized_concat_node, "T", tf.quint8)
self.add_output_graph_node(quantized_concat_node)
self.add_dequantize_result_node(quantized_concat_name, original_node.name)
def eightbitize_batch_norm_node(self, original_node):
"""Replaces a MatMul node with the eight bit equivalent sub-graph."""
namespace_prefix = original_node.name + "_eightbit"
original_input_name = original_node.input[0]
original_mean_name = original_node.input[1]
original_variance_name = original_node.input[2]
original_beta_name = original_node.input[3]
original_gamma_name = original_node.input[4]
quantized_batch_norm_name = namespace_prefix + "_quantized_batch_norm"
reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(
namespace_prefix)
quantize_input_name, min_input_name, max_input_name = (
self.eightbitize_input_to_node(namespace_prefix, original_input_name,
reshape_dims_name, reduction_dims_name))
quantize_mean_name, min_mean_name, max_mean_name = (
self.eightbitize_input_to_node(namespace_prefix, original_mean_name,
reshape_dims_name, reduction_dims_name))
quantize_variance_name, min_variance_name, max_variance_name = (
self.eightbitize_input_to_node(namespace_prefix, original_variance_name,
reshape_dims_name, reduction_dims_name))
quantize_beta_name, min_beta_name, max_beta_name = (
self.eightbitize_input_to_node(namespace_prefix, original_beta_name,
reshape_dims_name, reduction_dims_name))
quantize_gamma_name, min_gamma_name, max_gamma_name = (
self.eightbitize_input_to_node(namespace_prefix, original_gamma_name,
reshape_dims_name, reduction_dims_name))
quantized_batch_norm_node = create_node(
"QuantizedBatchNormWithGlobalNormalization", quantized_batch_norm_name,
[quantize_input_name, min_input_name, max_input_name,
quantize_mean_name, min_mean_name, max_mean_name,
quantize_variance_name, min_variance_name, max_variance_name,
quantize_beta_name, min_beta_name, max_beta_name, quantize_gamma_name,
min_gamma_name, max_gamma_name])
set_attr_dtype(quantized_batch_norm_node, "Tinput", tf.quint8)
set_attr_dtype(quantized_batch_norm_node, "out_type", tf.qint32)
copy_attr(quantized_batch_norm_node, "scale_after_normalization",
original_node.attr["scale_after_normalization"])
copy_attr(quantized_batch_norm_node, "variance_epsilon",
original_node.attr["variance_epsilon"])
self.add_output_graph_node(quantized_batch_norm_node)
quantize_down_name = self.add_quantize_down_node(original_node,
quantized_batch_norm_name)
self.add_dequantize_result_node(quantize_down_name, original_node.name)
def add_output_graph_node(self, output_node):
"""Inserts one node into the new graph."""
self.output_graph.node.extend([output_node])
def remove_redundant_quantization(self, old_graph):
"""Removes unneeded pairs of quantize/dequantize ops from the graph.
This is a bit of a tricky function, because it's attempting to spot the
pattern of dequantizing from eight bits up to float and then immediately
quantizing back down to eight bits again. That pattern is introduced by
previous passes that do 'keyhole' conversions of individual nodes but have
to convert back to float to match the previous output interface, since they
don't know whether the next op can handle quantized tensors.
It works by:
- Looking for Quantize nodes.
- Checking to see if their first input is a Dequantize node.
- Seeing if their min/max inputs come from Min/Max nodes.
- Making sure those Min/Max nodes are being fed from the same Dequantize.
- Or that the Min is indirectly being fed from the same Dequantize as Max.
- Making sure the Dequantize is going through a Reshape (which we add
during the previous pass when we create the quantize sub-graph).
- Looking for the dims Const op for the Min/Max dims.
If all of these conditions are met, then it's a sub-graph pattern that
we know how to optimize out (and is likely the common one we've introduced).
We then rewire the graph to skip it entirely, and then rely on the dead node
removal pass to get rid of any nodes that are no longer needed.
Args:
old_graph: The model we'll be stripping redundant nodes from.
Returns:
A graph with the unnecessary nodes removed.
Raises:
ValueError: Two nodes with the same name were found in the graph.
"""
old_nodes_map = self.create_nodes_map(old_graph)
self.output_graph = tf.GraphDef()
inputs_to_rename = {}
# We go through all the nodes, looking for any that match the patterns we
# know how to optimize away.
for node in old_graph.node:
# We always start with a Quantize node, and examine its inputs to see if
# they are in a form that can be removed.
if node.op not in ["Quantize", "QuantizeV2"]:
continue
dequantize_node_name = node_name_from_input(node.input[0])
if dequantize_node_name not in old_nodes_map:
raise ValueError("Input node name '" + dequantize_node_name +
"' not found in node '" + node.name + "'")
dequantize_node = old_nodes_map[dequantize_node_name]
# Do we have a Dequantize feeding in, with the same type as the Quantize?
if dequantize_node.op != "Dequantize":
continue
if node.attr["T"] != dequantize_node.attr["T"]:
continue
# Now look at the other inputs, and ensure they're Min/Max nodes.
min_node_name = node_name_from_input(node.input[1])
max_node_name = node_name_from_input(node.input[2])
min_node = old_nodes_map[min_node_name]
max_node = old_nodes_map[max_node_name]
is_min_right_type = (min_node.op in ["Min", "Dequantize"])
is_max_right_type = (max_node.op in ["Max", "Dequantize"])
if not is_min_right_type or not is_max_right_type:
print("Didn't find expected types on inputs : %s, %s." % (
min_node.op, max_node.op))
continue
min_node_input_name = node_name_from_input(min_node.input[0])
max_node_input_name = node_name_from_input(max_node.input[0])
# There are two different patterns for Min nodes we can recognize, one
# where the input comes directly from the same one as the Max, and
# another where we run it through another Min first, so check for both.
is_same_input = False
if min_node_input_name == max_node_input_name:
is_same_input = True
else:
first_min_node_input = old_nodes_map[min_node_input_name]
if first_min_node_input.op == "Concat":
second_min_node_name = node_name_from_input(
first_min_node_input.input[1])
second_min_node = old_nodes_map[second_min_node_name]
if second_min_node.op == "Min":
second_min_node_input_name = node_name_from_input(
second_min_node.input[0])
is_same_input = (second_min_node_input_name == max_node_input_name)
if not is_same_input:
print("Different min/max inputs: " + min_node_input_name)
continue
# We recognize this pattern, so mark the graph edges to be rewired to
# route around it entirely, since we know it's a no-op.
dequantize_source_name = node_name_from_input(dequantize_node.input[0])
node_tensor_name = ensure_tensor_name_has_port(node.name)
min_tensor_name = node.name + ":1"
max_tensor_name = node.name + ":2"
inputs_to_rename[node_tensor_name] = dequantize_source_name
inputs_to_rename[min_tensor_name] = dequantize_node.input[1]
inputs_to_rename[max_tensor_name] = dequantize_node.input[2]
# Finally we apply all the rewiring we've marked to the graph.
for node in old_graph.node:
for index, input_full_name in enumerate(node.input):
input_name = ensure_tensor_name_has_port(input_full_name)
if input_name in inputs_to_rename:
node.input[index] = inputs_to_rename[input_name]
self.add_output_graph_node(node)
return self.output_graph
def remove_dead_nodes(self, output_names):
"""Removes nodes that are no longer needed for inference from the graph."""
old_output_graph = self.output_graph
self.output_graph = graph_util.extract_sub_graph(old_output_graph,
output_names)
def quantize_weights(self, input_graph, quantization_mode):
"""Quantize float Const ops.
There are two modes of operations, both replace float Const ops with
quantized values.
1. If quantization_mode is "weights_rounded", this function replaces float
Const ops with quantized float Const ops - same as the original op, but
with each float value mapped to the center of one of 1<<FLAGS.bitdepth buckets.
This does not change the raw model size, but compression algorithms such as
zip (as used for compressing apks) or bzip2 will achieve a very good
compression ratio.
2. For other quantization modes ("MIN_COMBINED" or "MIN_FIRST"), float
Const ops are quantized and replaced by a tuple of four ops to perform
the dequantization at runtime:
* an eight-bit Const (bucket indices, same shape as the original float Const op)
* two float Const ops (min and max value of original float Const op)
* Dequantize op to convert the eight-bit consts to float tensors.
The quantization mode is important because we see accuracy problems in
different situations depending on the algorithm used to quantize the
weights. We haven't figured out exactly what the underlying cause is yet,
unfortunately.
Args:
input_graph: A GraphDef of the model containing float Const ops.
quantization_mode: How to quantize and dequantize the values.
Returns:
A GraphDef of the converted graph.
Raises:
ValueError: If quantization_mode is unsupported.
"""
output_graph = tf.GraphDef()
for input_node in input_graph.node:
should_quantize = False
if input_node.op == "Const":
dtype = tf.as_dtype(input_node.attr["dtype"].type)
if dtype == tf.float32:
should_quantize = True
if should_quantize:
if quantization_mode == "weights_rounded":
output_graph.node.extend(quantize_weight_rounded(input_node))
elif quantization_mode in (b"MIN_COMBINED", b"MIN_FIRST"):
output_graph.node.extend(quantize_weight_eightbit(input_node,
quantization_mode))
else:
raise ValueError("Unsupported quantization mode %s." %
quantization_mode)
else:
output_node = tf.NodeDef()
output_node.CopyFrom(input_node)
output_graph.node.extend([output_node])
return output_graph
def remove_unneeded_nodes(self, input_graph):
"""Prunes out nodes that aren't needed for inference.
There are nodes like Identity and CheckNumerics that are only useful
during training, and can be removed in graphs that will be used for
nothing but inference. Here we identify and remove them, returning an
equivalent graph.
Args:
input_graph: Model to analyze and prune.
Returns:
A list of nodes with the unnecessary ones removed.
"""
types_to_remove = {"CheckNumerics": True}
input_nodes = input_graph.node
names_to_remove = {}
for node in input_nodes:
if node.op in types_to_remove:
names_to_remove[node.name] = True
nodes_after_removal = []
for node in input_nodes:
if node.name in names_to_remove:
continue
new_node = tf.NodeDef()
new_node.CopyFrom(node)
input_before_removal = node.input
del new_node.input[:]
for full_input_name in input_before_removal:
input_name = re.sub(r"^\^", "", full_input_name)
if input_name in names_to_remove:
continue
new_node.input.append(full_input_name)
nodes_after_removal.append(new_node)
types_to_splice = {"Identity": True}
names_to_splice = {}
for node in nodes_after_removal:
if node.op in types_to_splice:
# We don't want to remove nodes that have control edge inputs, because
# they might be involved in subtle dependency issues that removing them
# will jeopardize.
has_control_edge = False
for input_name in node.input:
if re.match(r"^\^", input_name):
has_control_edge = True
if not has_control_edge:
names_to_splice[node.name] = node.input[0]
nodes_after_splicing = []
for node in nodes_after_removal:
if node.name in names_to_splice:
continue
new_node = tf.NodeDef()
new_node.CopyFrom(node)
input_before_removal = node.input
del new_node.input[:]
for full_input_name in input_before_removal:
input_name = re.sub(r"^\^", "", full_input_name)
if input_name in names_to_splice:
new_node.input.append(names_to_splice[input_name])
else:
new_node.input.append(full_input_name)
nodes_after_splicing.append(new_node)
output_graph = tf.GraphDef()
output_graph.node.extend(nodes_after_splicing)
return output_graph
def set_input_graph(self, new_input_graph):
self.input_graph = new_input_graph
self.nodes_map = self.create_nodes_map(self.input_graph)
def main(unused_args):
if not tf.gfile.Exists(FLAGS.input):
print("Input graph file '" + FLAGS.input + "' does not exist!")
return -1
known_modes = ["round", "quantize", "eightbit", "weights", "test",
"weights_rounded"]
if FLAGS.mode not in known_modes:
print("mode is '" + FLAGS.mode + "', not in " + ", ".join(known_modes) +
".")
return -1
tf_graph = tf.GraphDef()
with tf.gfile.Open(FLAGS.input, "r") as f:
data = f.read()
tf_graph.ParseFromString(data)
graph = tf.Graph()
with graph.as_default():
tf.import_graph_def(tf_graph, input_map={}, name="")
rewriter = GraphRewriter(tf_graph, FLAGS.mode)
output_graph = rewriter.rewrite(FLAGS.output_node_names.split(","))
f = tf.gfile.FastGFile(FLAGS.output, "w")
f.write(output_graph.SerializeToString())
return 0
if __name__ == "__main__":
tf.app.run()
|
|
import sys
import asyncio
import pulsar
from pulsar.utils.internet import nice_address, format_address
from .futures import multi_async, task, Future, ensure_future
from .events import EventHandler, AbortEvent
from .mixins import FlowControl, Timeout
__all__ = ['ProtocolConsumer',
'Protocol',
'DatagramProtocol',
'Connection',
'Producer',
'TcpServer',
'DatagramServer',
'AbortRequest']
class AbortRequest(AbortEvent):
pass
class ProtocolConsumer(EventHandler):
"""The consumer of data for a server or client :class:`.Connection`.
It is responsible for receiving incoming data from an end point via the
:meth:`Connection.data_received` method, decoding (parsing) and,
possibly, writing back to the client or server via
the :attr:`transport` attribute.
.. note::
For server consumers, :meth:`data_received` is the only method
to implement.
For client consumers, :meth:`start_request` should also be implemented.
A :class:`ProtocolConsumer` is a subclass of :class:`.EventHandler` and it
has two default :ref:`one time events <one-time-event>`:
* ``pre_request`` fired when the request is received (for servers) or
just before it is sent (for clients).
This occurs just before the :meth:`start_request` method.
* ``post_request`` fired when the request is done. The
:attr:`on_finished` attribute is a shortcut for the ``post_request``
:class:`.OneTime` event and therefore can be used to wait for
the request to have received a full response (clients).
In addition, it has two :ref:`many times events <many-times-event>`:
* ``data_received`` fired when new data is received from the transport but
not yet processed (before the :meth:`data_received` method is invoked)
* ``data_processed`` fired just after data has been consumed (after the
:meth:`data_received` method)
.. note::
A useful example on how to use the ``data_received`` event is
the :ref:`wsgi proxy server <tutorials-proxy-server>`.
"""
_connection = None
_data_received_count = 0
ONE_TIME_EVENTS = ('pre_request', 'post_request')
MANY_TIMES_EVENTS = ('data_received', 'data_processed')
@property
def connection(self):
"""The :class:`Connection` of this consumer.
"""
return self._connection
@property
def request(self):
"""The request.
Used for clients only and available only after the
:meth:`start` method is invoked.
"""
return getattr(self, '_request', None)
@property
def transport(self):
"""The :class:`Transport` of this consumer
"""
if self._connection:
return self._connection.transport
@property
def address(self):
if self._connection:
return self._connection.address
@property
def producer(self):
"""The :class:`Producer` of this consumer.
"""
if self._connection:
return self._connection.producer
@property
def on_finished(self):
"""Event fired once a full response to a request is received. It is
the ``post_request`` one time event.
"""
return self.event('post_request')
def connection_made(self, connection):
"""Called by a :class:`Connection` when it starts using this consumer.
By default it does nothing.
"""
def data_received(self, data):
"""Called when some data is received.
**This method must be implemented by subclasses** for both server and
client consumers.
The argument is a bytes object.
"""
def start_request(self):
"""Starts a new request.
Invoked by the :meth:`start` method to kick start the
request with remote server. For server :class:`ProtocolConsumer` this
method is not invoked at all.
**For clients this method should be implemented** and it is a critical
method where errors caused by stale socket connections can arise.
**This method should not be called directly.** Use :meth:`start`
instead. Typically one writes some data from the :attr:`request`
into the transport. Something like this::
self.transport.write(self.request.encode())
"""
raise NotImplementedError
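# A minimal client-side sketch of the two methods discussed above
# (``EchoConsumer`` is illustrative, not part of pulsar):
#
#   class EchoConsumer(ProtocolConsumer):
#
#       def start_request(self):
#           self.transport.write(self.request)
#
#       def data_received(self, data):
#           # a real consumer would buffer/parse here
#           self.finished(data)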
def start(self, request=None):
"""Starts processing the request for this protocol consumer.
There is no need to override this method,
implement :meth:`start_request` instead.
If either :attr:`connection` or :attr:`transport` are missing, a
:class:`RuntimeError` occurs.
For server side consumer, this method simply fires the ``pre_request``
event.
"""
if hasattr(self, '_request'):
raise RuntimeError('%s already requested %s' %
(self, self._request))
conn = self._connection
if not conn:
raise RuntimeError('Cannot start new request. No connection.')
if not conn._transport:
raise RuntimeError('%s has no transport.' % conn)
conn._processed += 1
if conn._producer:
p = getattr(conn._producer, '_requests_processed', 0)
conn._producer._requests_processed = p + 1
self.bind_event('post_request', self._finished)
self._request = request
return ensure_future(self._start(), loop=self._loop)
def abort_request(self):
"""Abort the request.
This method can only be called during the pre-request stage.
"""
future = self.events['pre_request']
if future.done():
raise RuntimeError('Request already sent')
future.add_done_callback(self._abort_request)
raise AbortRequest
@asyncio.coroutine
def _start(self):
try:
yield from self.fire_event('pre_request')
except AbortEvent:
self.logger.debug('Abort request %s', self.request)
else:
if self._request is not None:
try:
self.start_request()
except Exception as exc:
self.finished(exc=exc)
def connection_lost(self, exc):
"""Called by the :attr:`connection` when the transport is closed.
By default it calls the :meth:`finished` method. It can be overwritten
to handle the potential exception ``exc``.
"""
# TODO: decide how to handle connection_lost when no exception occurs
# Set the first positional parameter to None so that if the
# connection was dropped without exception it returns None
# rather than the protocol consumer
return self.finished(None)
def finished(self, *arg, **kw):
"""Fire the ``post_request`` event if it wasn't already fired.
"""
if not self.event('post_request').fired():
return self.fire_event('post_request', *arg, **kw)
def write(self, data):
"""Delegate writing to the underlying :class:`.Connection`
Return an empty tuple or a :class:`~asyncio.Future`
"""
c = self._connection
if c:
return c.write(data)
else:
raise RuntimeError('No connection')
def _data_received(self, data):
# Called by Connection, it updates the counters and invoke
# the high level data_received method which must be implemented
# by subclasses
if not hasattr(self, '_request'):
self.start()
self._data_received_count += 1
self.fire_event('data_received', data=data)
result = self.data_received(data)
self.fire_event('data_processed', data=data)
return result
def _finished(self, _, exc=None):
c = self._connection
if c and c._current_consumer is self:
c._current_consumer = None
@task
def _abort_request(self, fut):
exc = fut.exception()
for event in self.ONE_TIME_EVENTS:
if not self.event(event).fired():
try:
yield from self.fire_event(event, exc=exc)
except AbortRequest:
pass
class PulsarProtocol(EventHandler, FlowControl):
"""A mixin class for both :class:`.Protocol` and
:class:`.DatagramProtocol`.
A :class:`PulsarProtocol` is an :class:`.EventHandler` which has
two :ref:`one time events <one-time-event>`:
* ``connection_made``
* ``connection_lost``
"""
ONE_TIME_EVENTS = ('connection_made', 'connection_lost')
MANY_TIMES_EVENTS = ('data_received', 'data_processed',
'before_write', 'after_write')
_transport = None
_address = None
def __init__(self, loop, session=1, producer=None, logger=None, **kw):
super().__init__(loop)
FlowControl.__init__(self, **kw)
self._logger = logger
self._session = session
self._producer = producer
def __repr__(self):
address = self._address
if address:
return '%s session %s' % (nice_address(address), self._session)
else:
return '<pending> session %s' % self._session
__str__ = __repr__
@property
def session(self):
"""Connection session number.
Passed during initialisation by the :attr:`producer`.
Usually an integer representing the number of separate connections
the producer has processed at the time it created this
:class:`Protocol`.
"""
return self._session
@property
def transport(self):
"""The :ref:`transport <asyncio-transport>` for this protocol.
Available once the :meth:`connection_made` is called.
"""
return self._transport
@property
def sock(self):
"""The socket of :attr:`transport`.
"""
if self._transport:
return self._transport.get_extra_info('socket')
@property
def address(self):
"""The address of the :attr:`transport`.
"""
return self._address
@property
def producer(self):
"""The producer of this :class:`Protocol`.
"""
return self._producer
@property
def closed(self):
"""``True`` if the :attr:`transport` is closed.
"""
if self._transport:
if not getattr(self._transport, '_closing', False):
return False
return True
return True
def close(self):
"""Close by closing the :attr:`transport`
Return the ``connection_lost`` event which can be used to wait
for complete transport closure.
"""
if self._transport:
if self.debug:
self.logger.debug('Closing connection %s', self)
if self._transport.can_write_eof():
try:
self._transport.write_eof()
except Exception:
pass
try:
self._transport.close()
except Exception:
pass
return self.event('connection_lost')
def abort(self):
"""Abort by aborting the :attr:`transport`
"""
if self._transport:
self._transport.abort()
def connection_made(self, transport):
"""Sets the :attr:`transport`, fire the ``connection_made`` event
and adds a :attr:`timeout` for idle connections.
"""
self._transport = transport
addr = self._transport.get_extra_info('peername')
if not addr:
addr = self._transport.get_extra_info('sockname')
self._address = addr
# let everyone know we have a connection with endpoint
self.fire_event('connection_made')
def connection_lost(self, exc=None):
"""Fires the ``connection_lost`` event.
"""
if self.debug and not exc:
self.logger.debug('Lost connection %s', self)
self.fire_event('connection_lost')
def eof_received(self):
"""The socket was closed from the remote end
"""
def info(self):
info = {'connection': {'session': self._session}}
if self._producer:
info.update(self._producer.info())
return info
class Protocol(PulsarProtocol, asyncio.Protocol):
"""An :class:`asyncio.Protocol` with :ref:`events <event-handling>`
"""
_data_received_count = 0
def write(self, data):
"""Write ``data`` into the wire.
Returns an empty tuple or a :class:`~asyncio.Future` if this
protocol has paused writing.
"""
t = self._transport
if t:
if self._paused:
# Note: this reaches into the transport's private buffer.
# This occurs when the protocol is paused from writing
# but another data ready callback is fired in the same
# event-loop frame
self.logger.debug('protocol cannot write, add data to the '
'transport buffer')
t._buffer.extend(data)
else:
self.fire_event('before_write')
t.write(data)
self.fire_event('after_write')
return self._write_waiter or ()
else:
raise ConnectionResetError('No Transport')
class DatagramProtocol(PulsarProtocol, asyncio.DatagramProtocol):
"""An ``asyncio.DatagramProtocol`` with events`
"""
class Connection(Protocol, Timeout):
"""A :class:`.FlowControl` to handle multiple TCP requests/responses.
It is a class which acts as a bridge between a
:ref:`transport <asyncio-transport>` and a :class:`.ProtocolConsumer`.
It routes data arriving from the transport to the
:meth:`current_consumer`.
.. attribute:: _consumer_factory
A factory of :class:`.ProtocolConsumer`.
.. attribute:: _processed
number of separate requests processed.
"""
def __init__(self, consumer_factory=None, timeout=None,
low_limit=None, high_limit=None, **kw):
super().__init__(**kw)
self.bind_event('connection_lost', self._connection_lost)
self._processed = 0
self._current_consumer = None
self._consumer_factory = consumer_factory
self.timeout = timeout
@property
def requests_processed(self):
return self._processed
def current_consumer(self):
"""The :class:`ProtocolConsumer` currently handling incoming data.
This instance will receive data when this connection gets data
from the :attr:`~PulsarProtocol.transport` via the
:meth:`data_received` method.
If no consumer is available, build a new one and return it.
"""
if self._current_consumer is None:
self._build_consumer(None)
return self._current_consumer
def data_received(self, data):
"""Delegates handling of data to the :meth:`current_consumer`.
Once done set a timeout for idle connections when a
:attr:`~Protocol.timeout` is a positive number (of seconds).
"""
self._data_received_count = self._data_received_count + 1
self.fire_event('data_received', data=data)
toprocess = data
while toprocess:
consumer = self.current_consumer()
toprocess = consumer._data_received(toprocess)
if isinstance(toprocess, Future):
break
self.fire_event('data_processed', data=data)
def upgrade(self, consumer_factory):
"""Upgrade the :func:`_consumer_factory` callable.
This method can be used when the protocol specification changes
during a response (an example is a WebSocket request/response,
or HTTP tunneling).
This method adds a ``post_request`` callback to the
:meth:`current_consumer` to build a new consumer with the new
:func:`_consumer_factory`.
:param consumer_factory: the new consumer factory (a callable
accepting no parameters)
:return: ``None``.
"""
self._consumer_factory = consumer_factory
consumer = self._current_consumer
if consumer:
consumer.bind_event('post_request', self._build_consumer)
else:
self._build_consumer(None)
def info(self):
info = super().info()
c = info['connection']
c['request_processed'] = self._processed
c['data_processed_count'] = self._data_received_count
c['timeout'] = self.timeout
return info
def _build_consumer(self, _, exc=None):
if not exc or isinstance(exc, AbortEvent):
consumer = self._producer.build_consumer(self._consumer_factory)
assert self._current_consumer is None, 'Consumer is not None'
self._current_consumer = consumer
consumer._connection = self
consumer.connection_made(self)
def _connection_lost(self, _, exc=None):
"""It performs these actions in the following order:
* Fires the ``connection_lost`` :ref:`one time event <one-time-event>`
if not fired before, with ``exc`` as event data.
* Cancel the idle timeout if set.
* Invokes the :meth:`ProtocolConsumer.connection_lost` method in the
:meth:`current_consumer`.
"""
if self._current_consumer:
self._current_consumer.connection_lost(exc)
class Producer(EventHandler):
"""An Abstract :class:`.EventHandler` class for all producers of
socket (client and servers)
"""
protocol_factory = None
"""A callable producing protocols.
The signature of the protocol factory callable must be::
protocol_factory(session, producer, **params)
"""
def __init__(self, loop=None, protocol_factory=None, name=None,
max_requests=None, logger=None):
super().__init__(loop or asyncio.get_event_loop())
self.protocol_factory = protocol_factory or self.protocol_factory
self._name = name or self.__class__.__name__
self._requests_processed = 0
self._sessions = 0
self._max_requests = max_requests
self._logger = logger
@property
def sessions(self):
"""Total number of protocols created by the :class:`Producer`.
"""
return self._sessions
@property
def requests_processed(self):
"""Total number of requests processed.
"""
return self._requests_processed
def create_protocol(self, **kw):
"""Create a new protocol via the :meth:`protocol_factory`
This method increases the count of :attr:`sessions` and builds
the protocol, passing ``self`` as the producer.
"""
self._sessions = self._sessions + 1
kw['session'] = self.sessions
kw['producer'] = self
kw['loop'] = self._loop
kw['logger'] = self._logger
return self.protocol_factory(**kw)
def build_consumer(self, consumer_factory):
"""Build a consumer for a protocol.
This method can be used by protocols which handle several requests,
for example the :class:`Connection` class.
:param consumer_factory: consumer factory to use.
"""
consumer = consumer_factory(loop=self._loop)
consumer._logger = self._logger
consumer.copy_many_times_events(self)
return consumer
class TcpServer(Producer):
"""A :class:`.Producer` of server :class:`Connection` for TCP servers.
.. attribute:: _server
A :class:`.Server` managed by this Tcp wrapper.
Available once the :meth:`start_serving` method has returned.
"""
ONE_TIME_EVENTS = ('start', 'stop')
MANY_TIMES_EVENTS = ('connection_made', 'pre_request', 'post_request',
'connection_lost')
_server = None
_started = None
def __init__(self, protocol_factory, loop, address=None,
name=None, sockets=None, max_requests=None,
keep_alive=None, logger=None):
super().__init__(loop, protocol_factory, name=name,
max_requests=max_requests, logger=logger)
self._params = {'address': address, 'sockets': sockets}
self._keep_alive = max(keep_alive or 0, 0)
self._concurrent_connections = set()
def __repr__(self):
address = self.address
if address:
return '%s %s' % (self.__class__.__name__, address)
else:
return self.__class__.__name__
__str__ = __repr__
@property
def address(self):
"""Socket address of this server.
It is obtained from the first socket ``getsockname`` method.
"""
if self._server is not None:
return self._server.sockets[0].getsockname()
@task
def start_serving(self, backlog=100, sslcontext=None):
"""Start serving.
:param backlog: Number of maximum connections
:param sslcontext: optional SSLContext object.
:return: a :class:`.Future` called back when the server is
serving the socket.
"""
assert not self._server
if hasattr(self, '_params'):
address = self._params['address']
sockets = self._params['sockets']
del self._params
create_server = self._loop.create_server
try:
if sockets:
server = None
for sock in sockets:
srv = yield from create_server(self.create_protocol,
sock=sock,
backlog=backlog,
ssl=sslcontext)
if server:
server.sockets.extend(srv.sockets)
else:
server = srv
else:
if isinstance(address, tuple):
server = yield from create_server(self.create_protocol,
host=address[0],
port=address[1],
backlog=backlog,
ssl=sslcontext)
else:
raise NotImplementedError
self._server = server
self._started = self._loop.time()
for sock in server.sockets:
address = sock.getsockname()
self.logger.info('%s serving on %s', self._name,
format_address(address))
self._loop.call_soon(self.fire_event, 'start')
except Exception as exc:
self.fire_event('start', exc=exc)
def close(self):
"""Stop serving the :attr:`.Server.sockets`.
"""
if self._server:
server, self._server = self._server, None
return self._close(server)
elif not self.fired_event('stop'):
return self.fire_event('stop')
def info(self):
sockets = []
up = int(self._loop.time() - self._started) if self._started else 0
server = {'pulsar_version': pulsar.__version__,
'python_version': sys.version,
'uptime_in_seconds': up,
'sockets': sockets,
'max_requests': self._max_requests,
'keep_alive': self._keep_alive}
clients = {'processed_clients': self._sessions,
'connected_clients': len(self._concurrent_connections),
'requests_processed': self._requests_processed}
if self._server:
for sock in self._server.sockets:
sockets.append({
'address': format_address(sock.getsockname())})
return {'server': server,
'clients': clients}
def create_protocol(self):
"""Override :meth:`Producer.create_protocol`.
"""
protocol = super().create_protocol(timeout=self._keep_alive)
protocol.bind_event('connection_made', self._connection_made)
protocol.bind_event('connection_lost', self._connection_lost)
protocol.copy_many_times_events(self)
if (self._server and self._max_requests and
self._sessions >= self._max_requests):
self.logger.info('Reached maximum number of connections %s. '
'Stop serving.' % self._max_requests)
self.close()
return protocol
# INTERNALS
def _connection_made(self, connection, exc=None):
if not exc:
self._concurrent_connections.add(connection)
def _connection_lost(self, connection, exc=None):
self._concurrent_connections.discard(connection)
def _close_connections(self, connection=None):
"""Close ``connection`` if specified, otherwise close all connections.
Return a list of :class:`.Future` called back once the connection/s
are closed.
"""
all = []
if connection:
all.append(connection.event('connection_lost'))
connection.close()
else:
connections = list(self._concurrent_connections)
self._concurrent_connections = set()
for connection in connections:
all.append(connection.event('connection_lost'))
connection.close()
if all:
self.logger.info('%s closing %d connections', self, len(all))
return multi_async(all)
@task
def _close(self, server):
"""Stop serving the :attr:`.Server.sockets` and close all
concurrent connections.
"""
server.close()
coro = self._close_connections()
if coro:
yield from coro
self.fire_event('stop')
class DatagramServer(Producer):
"""An :class:`.Producer` for serving UDP sockets.
.. attribute:: _transports
A list of :class:`.DatagramTransport`.
Available once the :meth:`create_endpoint` method has returned.
"""
_transports = None
_started = None
ONE_TIME_EVENTS = ('start', 'stop')
MANY_TIMES_EVENTS = ('pre_request', 'post_request')
def __init__(self, protocol_factory, loop=None, address=None,
name=None, sockets=None, max_requests=None,
logger=None):
super().__init__(loop, protocol_factory, name=name,
max_requests=max_requests, logger=logger)
self._params = {'address': address, 'sockets': sockets}
@task
def create_endpoint(self, **kw):
"""create the server endpoint.
:return: a :class:`~asyncio.Future` called back when the server is
serving the socket.
"""
if hasattr(self, '_params'):
address = self._params['address']
sockets = self._params['sockets']
del self._params
try:
transports = []
if sockets:
for transport in sockets:
proto = self.create_protocol()
transports.append(transport(self._loop, proto))
else:
loop = self._loop
transport, _ = yield from loop.create_datagram_endpoint(
self.protocol_factory, local_addr=address)
transports.append(transport)
self._transports = transports
self._started = self._loop.time()
for transport in self._transports:
address = transport.get_extra_info('sockname')
self.logger.info('%s serving on %s', self._name,
format_address(address))
self.fire_event('start')
except Exception as exc:
self.logger.exception('Error while starting UDP server')
self.fire_event('start', exc=exc)
self.fire_event('stop')
@task
def close(self):
"""Stop serving the :attr:`.Server.sockets` and close all
concurrent connections.
"""
if not self.fired_event('stop'):
transports, self._transports = self._transports, None
if transports:
for transport in transports:
transport.close()
self.fire_event('stop')
def info(self):
sockets = []
up = int(self._loop.time() - self._started) if self._started else 0
server = {'pulsar_version': pulsar.__version__,
'python_version': sys.version,
'uptime_in_seconds': up,
'sockets': sockets,
'max_requests': self._max_requests}
clients = {'requests_processed': self._requests_processed}
if self._transports:
for transport in self._transports:
sockets.append({
'address': format_address(transport._sock.getsockname())})
return {'server': server,
'clients': clients}
|
|
"""
Implement -f aka looponfailing for py.test.
NOTE that we try to avoid loading and depending on application modules
within the controlling process (the one that repeatedly starts test
processes), otherwise changes to source code can crash
the controlling process, which should ideally never happen.
"""
import py, pytest
import sys
import execnet
def pytest_addoption(parser):
group = parser.getgroup("xdist", "distributed and subprocess testing")
group._addoption('-f', '--looponfail',
action="store_true", dest="looponfail", default=False,
help="run tests in subprocess, wait for modified files "
"and re-run failing test set until all pass.")
def pytest_cmdline_main(config):
if config.getoption("looponfail"):
looponfail_main(config)
return 2 # looponfail can only be stopped with ctrl-C anyway
def looponfail_main(config):
remotecontrol = RemoteControl(config)
rootdirs = config.getini("looponfailroots")
statrecorder = StatRecorder(rootdirs)
try:
while 1:
remotecontrol.loop_once()
if not remotecontrol.failures and remotecontrol.wasfailing:
continue # the last failures passed, let's immediately rerun all
repr_pytest_looponfailinfo(
failreports=remotecontrol.failures,
rootdirs=rootdirs)
statrecorder.waitonchange(checkinterval=2.0)
except KeyboardInterrupt:
print()
class RemoteControl(object):
def __init__(self, config):
self.config = config
self.failures = []
def trace(self, *args):
if self.config.option.debug:
msg = " ".join([str(x) for x in args])
py.builtin.print_("RemoteControl:", msg)
def initgateway(self):
return execnet.makegateway("popen")
def setup(self, out=None):
if out is None:
out = py.io.TerminalWriter()
if hasattr(self, 'gateway'):
raise ValueError("already have gateway %r" % self.gateway)
self.trace("setting up slave session")
self.gateway = self.initgateway()
self.channel = channel = self.gateway.remote_exec(init_slave_session,
args=self.config.args,
option_dict=vars(self.config.option),
)
remote_outchannel = channel.receive()
def write(s):
out._file.write(s)
out._file.flush()
remote_outchannel.setcallback(write)
def ensure_teardown(self):
if hasattr(self, 'channel'):
if not self.channel.isclosed():
self.trace("closing", self.channel)
self.channel.close()
del self.channel
if hasattr(self, 'gateway'):
self.trace("exiting", self.gateway)
self.gateway.exit()
del self.gateway
def runsession(self):
try:
self.trace("sending", self.failures)
self.channel.send(self.failures)
try:
return self.channel.receive()
except self.channel.RemoteError:
e = sys.exc_info()[1]
self.trace("ERROR", e)
raise
finally:
self.ensure_teardown()
def loop_once(self):
self.setup()
self.wasfailing = self.failures and len(self.failures)
result = self.runsession()
failures, reports, collection_failed = result
if collection_failed:
pass # "Collection failed, keeping previous failure set"
else:
uniq_failures = []
for failure in failures:
if failure not in uniq_failures:
uniq_failures.append(failure)
self.failures = uniq_failures
def repr_pytest_looponfailinfo(failreports, rootdirs):
tr = py.io.TerminalWriter()
if failreports:
tr.sep("#", "LOOPONFAILING", bold=True)
for report in failreports:
if report:
tr.line(report, red=True)
tr.sep("#", "waiting for changes", bold=True)
for rootdir in rootdirs:
tr.line("### Watching: %s" %(rootdir,), bold=True)
def init_slave_session(channel, args, option_dict):
import os, sys
outchannel = channel.gateway.newchannel()
sys.stdout = sys.stderr = outchannel.makefile('w')
channel.send(outchannel)
# prune sys.path to not contain relative paths
newpaths = []
for p in sys.path:
if p:
if not os.path.isabs(p):
p = os.path.abspath(p)
newpaths.append(p)
sys.path[:] = newpaths
#fullwidth, hasmarkup = channel.receive()
from _pytest.config import Config
config = Config.fromdictargs(option_dict, list(args))
config.args = args
from xdist.looponfail import SlaveFailSession
SlaveFailSession(config, channel).main()
class SlaveFailSession:
def __init__(self, config, channel):
self.config = config
self.channel = channel
self.recorded_failures = []
self.collection_failed = False
config.pluginmanager.register(self)
config.option.looponfail = False
config.option.usepdb = False
def DEBUG(self, *args):
if self.config.option.debug:
print(" ".join(map(str, args)))
def pytest_collection(self, session):
self.session = session
self.trails = self.current_command
hook = self.session.ihook
try:
items = session.perform_collect(self.trails or None)
except pytest.UsageError:
items = session.perform_collect(None)
hook.pytest_collection_modifyitems(session=session, config=session.config, items=items)
hook.pytest_collection_finish(session=session)
return True
def pytest_runtest_logreport(self, report):
if report.failed:
self.recorded_failures.append(report)
def pytest_collectreport(self, report):
if report.failed:
self.recorded_failures.append(report)
self.collection_failed = True
def main(self):
self.DEBUG("SLAVE: received configuration, waiting for command trails")
try:
command = self.channel.receive()
except KeyboardInterrupt:
return # in the slave we can't do much about this
self.DEBUG("received", command)
self.current_command = command
self.config.hook.pytest_cmdline_main(config=self.config)
trails, failreports = [], []
for rep in self.recorded_failures:
trails.append(rep.nodeid)
loc = rep.longrepr
loc = str(getattr(loc, 'reprcrash', loc))
failreports.append(loc)
self.channel.send((trails, failreports, self.collection_failed))
class StatRecorder:
def __init__(self, rootdirlist):
self.rootdirlist = rootdirlist
self.statcache = {}
self.check() # snapshot state
def fil(self, p):
return p.check(file=1, dotfile=0) and p.ext != ".pyc"
def rec(self, p):
return p.check(dotfile=0)
def waitonchange(self, checkinterval=1.0):
while 1:
changed = self.check()
if changed:
return
py.std.time.sleep(checkinterval)
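# Sketch of how the recorder is driven by ``looponfail_main`` above (the
# root path is illustrative; roots are py.path.local directories):
#   recorder = StatRecorder([py.path.local('src')])
#   recorder.waitonchange(checkinterval=2.0)  # returns once a file changed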
def check(self, removepycfiles=True):
changed = False
statcache = self.statcache
newstat = {}
for rootdir in self.rootdirlist:
for path in rootdir.visit(self.fil, self.rec):
oldstat = statcache.pop(path, None)
try:
newstat[path] = curstat = path.stat()
except py.error.ENOENT:
if oldstat:
changed = True
else:
if oldstat:
if oldstat.mtime != curstat.mtime or \
oldstat.size != curstat.size:
changed = True
py.builtin.print_("# MODIFIED", path)
if removepycfiles and path.ext == ".py":
pycfile = path + "c"
if pycfile.check():
pycfile.remove()
else:
changed = True
if statcache:
changed = True
self.statcache = newstat
return changed
|
|
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import hashlib
import json
import math
import sys
import time
from six.moves import zip
import tabulate
SELF_TIME = object()
TIME_FROM_SCOPE_START = object()
TIME_TO_SCOPE_END = object()
TIME_FROM_STACK_START = object()
TIME_TO_STACK_END = object()
TIME_FROM_LAST_IMPORTANT = object()
argp = argparse.ArgumentParser(
description='Process output of basic_prof builds')
argp.add_argument('--source', default='latency_trace.txt', type=str)
argp.add_argument('--fmt', choices=tabulate.tabulate_formats, default='simple')
argp.add_argument('--out', default='-', type=str)
args = argp.parse_args()
class LineItem(object):
def __init__(self, line, indent):
self.tag = line['tag']
self.indent = indent
self.start_time = line['t']
self.end_time = None
self.important = line['imp']
self.filename = line['file']
self.fileline = line['line']
self.times = {}
class ScopeBuilder(object):
def __init__(self, call_stack_builder, line):
self.call_stack_builder = call_stack_builder
self.indent = len(call_stack_builder.stk)
self.top_line = LineItem(line, self.indent)
call_stack_builder.lines.append(self.top_line)
self.first_child_pos = len(call_stack_builder.lines)
def mark(self, line):
line_item = LineItem(line, self.indent + 1)
line_item.end_time = line_item.start_time
self.call_stack_builder.lines.append(line_item)
def finish(self, line):
assert line['tag'] == self.top_line.tag, (
'expected %s, got %s; thread=%s; t0=%f t1=%f' %
(self.top_line.tag, line['tag'], line['thd'],
self.top_line.start_time, line['t']))
final_time_stamp = line['t']
assert self.top_line.end_time is None
self.top_line.end_time = final_time_stamp
self.top_line.important = self.top_line.important or line['imp']
assert SELF_TIME not in self.top_line.times
self.top_line.times[
SELF_TIME] = final_time_stamp - self.top_line.start_time
for line in self.call_stack_builder.lines[self.first_child_pos:]:
if TIME_FROM_SCOPE_START not in line.times:
line.times[
TIME_FROM_SCOPE_START] = line.start_time - self.top_line.start_time
line.times[TIME_TO_SCOPE_END] = final_time_stamp - line.end_time
class CallStackBuilder(object):
def __init__(self):
self.stk = []
self.signature = hashlib.md5()
self.lines = []
def finish(self):
start_time = self.lines[0].start_time
end_time = self.lines[0].end_time
self.signature = self.signature.hexdigest()
last_important = start_time
for line in self.lines:
line.times[TIME_FROM_STACK_START] = line.start_time - start_time
line.times[TIME_TO_STACK_END] = end_time - line.end_time
line.times[
TIME_FROM_LAST_IMPORTANT] = line.start_time - last_important
if line.important:
last_important = line.end_time
last_important = end_time
def add(self, line):
line_type = line['type']
self.signature.update(line_type.encode('UTF-8'))
self.signature.update(line['tag'].encode('UTF-8'))
if line_type == '{':
self.stk.append(ScopeBuilder(self, line))
return False
elif line_type == '}':
assert self.stk, (
'expected non-empty stack for closing %s; thread=%s; t=%f' %
(line['tag'], line['thd'], line['t']))
self.stk.pop().finish(line)
if not self.stk:
self.finish()
return True
return False
elif line_type == '.' or line_type == '!':
if self.stk:
self.stk[-1].mark(line)
return False
else:
raise Exception('Unknown line type: \'%s\'' % line_type)
class CallStack(object):
def __init__(self, initial_call_stack_builder):
self.count = 1
self.signature = initial_call_stack_builder.signature
self.lines = initial_call_stack_builder.lines
for line in self.lines:
for key, val in line.times.items():
line.times[key] = [val]
def add(self, call_stack_builder):
assert self.signature == call_stack_builder.signature
self.count += 1
assert len(self.lines) == len(call_stack_builder.lines)
for lsum, line in zip(self.lines, call_stack_builder.lines):
assert lsum.tag == line.tag
assert lsum.times.keys() == line.times.keys()
for k, lst in lsum.times.items():
lst.append(line.times[k])
def finish(self):
for line in self.lines:
for lst in line.times.values():
lst.sort()
builder = collections.defaultdict(CallStackBuilder)
call_stacks = collections.defaultdict(CallStack)
lines = 0
start = time.time()
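# Each trace line is a JSON object; an illustrative (made-up) record using
# only the keys consumed above:
#   {"t": 12.000485, "thd": "7", "type": "{", "tag": "GRPC_PTAG_HTTP_CALL",
#    "imp": false, "file": "src/core/lib/surface/call.c", "line": 1021}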
with open(args.source) as f:
for line in f:
lines += 1
inf = json.loads(line)
thd = inf['thd']
cs = builder[thd]
if cs.add(inf):
if cs.signature in call_stacks:
call_stacks[cs.signature].add(cs)
else:
call_stacks[cs.signature] = CallStack(cs)
del builder[thd]
time_taken = time.time() - start
call_stacks = sorted(call_stacks.values(),
key=lambda cs: cs.count,
reverse=True)
total_stacks = 0
for cs in call_stacks:
total_stacks += cs.count
cs.finish()
def percentile(N, percent, key=lambda x: x):
"""
Find the percentile of an already sorted list of values.
@parameter N - is a list of values. MUST be already sorted.
@parameter percent - a float value from [0.0,1.0].
@parameter key - optional key function to compute value from each element of N.
@return - the percentile of the values
"""
if not N:
return None
float_idx = (len(N) - 1) * percent
idx = int(float_idx)
result = key(N[idx])
if idx < len(N) - 1:
# interpolate with the next element's value
result += (float_idx - idx) * (key(N[idx + 1]) - key(N[idx]))
return result
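# For example, on an already sorted list:
#   percentile([1, 2, 3, 4], 0.5) == 2.5  (interpolated between 2 and 3)
#   percentile([1, 2, 3, 4], 1.0) == 4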
def tidy_tag(tag):
if tag[0:10] == 'GRPC_PTAG_':
return tag[10:]
return tag
def time_string(values):
num_values = len(values)
return '%.1f/%.1f/%.1f' % (1e6 * percentile(values, 0.5), 1e6 * percentile(
values, 0.9), 1e6 * percentile(values, 0.99))
def time_format(idx):
def ent(line, idx=idx):
if idx in line.times:
return time_string(line.times[idx])
return ''
return ent
BANNER = {'simple': 'Count: %(count)d', 'html': '<h1>Count: %(count)d</h1>'}
FORMAT = [
('TAG', lambda line: '..' * line.indent + tidy_tag(line.tag)),
('LOC', lambda line: '%s:%d' %
(line.filename[line.filename.rfind('/') + 1:], line.fileline)),
('IMP', lambda line: '*' if line.important else ''),
('FROM_IMP', time_format(TIME_FROM_LAST_IMPORTANT)),
('FROM_STACK_START', time_format(TIME_FROM_STACK_START)),
('SELF', time_format(SELF_TIME)),
('TO_STACK_END', time_format(TIME_TO_STACK_END)),
('FROM_SCOPE_START', time_format(TIME_FROM_SCOPE_START)),
('SELF', time_format(SELF_TIME)),
('TO_SCOPE_END', time_format(TIME_TO_SCOPE_END)),
]
out = sys.stdout
if args.out != '-':
out = open(args.out, 'w')
if args.fmt == 'html':
out.write('<html>')
out.write('<head>')
out.write('<title>Profile Report</title>')
out.write('</head>')
accounted_for = 0
for cs in call_stacks:
out.write('\n')
if args.fmt in BANNER:
out.write(BANNER[args.fmt] % {
'count': cs.count,
})
header, _ = zip(*FORMAT)
table = []
for line in cs.lines:
fields = []
for _, fn in FORMAT:
fields.append(fn(line))
table.append(fields)
out.write(tabulate.tabulate(table, header, tablefmt=args.fmt))
accounted_for += cs.count
if accounted_for > .99 * total_stacks:
break
if args.fmt == 'html':
print('</html>')
|
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
from tests_config import *
#If imported values are not defined then set to zero (or disabled)
if not vars().has_key('ENABLE_WALLET'):
ENABLE_WALLET=0
if not vars().has_key('ENABLE_BITCOIND'):
ENABLE_BITCOIND=0
if not vars().has_key('ENABLE_UTILS'):
ENABLE_UTILS=0
if not vars().has_key('ENABLE_ZMQ'):
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passOn string
opts = set()
passOn = ""
p = re.compile("^--")
bold = ("","")
if (os.name == 'posix'):
bold = ('\033[0m', '\033[1m')
for arg in sys.argv[1:]:
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif (p.match(arg) or arg == "-h"):
passOn += " " + arg
else:
opts.add(arg)
#Set env vars
buildDir = BUILDDIR
os.environ["BITCOIND"] = buildDir + '/src/bitcoind' + EXEEXT
os.environ["BITCOINCLI"] = buildDir + '/src/bitcoin-cli' + EXEEXT
#Disable Windows tests by default
if EXEEXT == ".exe" and "-win" not in opts:
print "Win tests currently disabled. Use -win option to enable"
sys.exit(0)
#Tests
testScripts = [
'wallet.py',
'listtransactions.py',
'receivedby.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rawtransactions.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'httpbasics.py',
'multi_rpc.py',
'zapwallettxes.py',
'proxy_test.py',
'merkle_blocks.py',
'fundrawtransaction.py',
'signrawtransactions.py',
'walletbackup.py',
'nodehandling.py',
'reindex.py',
'decodescript.py',
'p2p-fullblocktest.py',
'blockchain.py',
'disablewallet.py',
'sendheaders.py',
'keypool.py',
'prioritise_transaction.py',
]
testScriptsExt = [
'bip65-cltv.py',
'bip65-cltv-p2p.py',
'bipdersig-p2p.py',
'bipdersig.py',
'getblocktemplate_longpoll.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'pruning.py',
'forknotify.py',
'invalidateblock.py',
# 'rpcbind_test.py', #temporary, bug in libevent, see #6655
'smartfees.py',
'maxblocksinflight.py',
'invalidblockrequest.py',
'p2p-acceptblock.py',
'mempool_packages.py',
'maxuploadtarget.py',
'replace-by-fee.py',
]
#Enable ZMQ tests
if ENABLE_ZMQ == 1:
testScripts.append('zmq_test.py')
def runtests():
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
if(ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
rpcTestDir = buildDir + '/qa/rpc-tests/'
run_extended = '-extended' in opts
cov_flag = coverage.flag if coverage else ''
flags = " --srcdir %s/src %s %s" % (buildDir, cov_flag, passOn)
#Run Tests
for i in range(len(testScripts)):
if (len(opts) == 0
or (len(opts) == 1 and "-win" in opts )
or run_extended
or testScripts[i] in opts
or re.sub(".py$", "", testScripts[i]) in opts ):
print("Running testscript %s%s%s ..." % (bold[1], testScripts[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScripts[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
# exit if help is called so we print just one set of
# instructions
p = re.compile(" -h| --help")
if p.match(passOn):
sys.exit(0)
# Run Extended Tests
for i in range(len(testScriptsExt)):
if (run_extended or testScriptsExt[i] in opts
or re.sub(".py$", "", testScriptsExt[i]) in opts):
print(
"Running 2nd level testscript "
+ "%s%s%s ..." % (bold[1], testScriptsExt[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScriptsExt[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
else:
print "No rpc tests to run. Wallet, utils, and bitcoind must all be enabled"
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir %s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `qa/rpc-tests/test_framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
|
|
import collections
import functools
from itertools import chain, combinations, imap, count
import math
from operator import mul
from fractions import gcd
import random
import numpy as np
MILLION = 1000000
BILLION = 1000000000
PHI = 0.5 + 0.5 * np.sqrt(5)
def product(list):
return functools.reduce(mul, list, 1)
def is_square(x):
return is_int(math.sqrt(x))
def is_int(x):
if math.isnan(x):
return False
return int(x) == x
def digital_root(x):
return int(x - 9*int((x-1)/9))
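# Example: digital_root(1234) == 1, the repeated digit sum 1+2+3+4 = 10 -> 1.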
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
class memoized(object):
'''Decorator. Caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned
(not reevaluated).
'''
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
if not isinstance(args, collections.Hashable):
# uncacheable. a list, for instance.
# better to not cache than blow up.
return self.func(*args)
if args in self.cache:
return self.cache[args]
else:
value = self.func(*args)
self.cache[args] = value
return value
def __repr__(self):
'''Return the function's docstring.'''
return self.func.__doc__
def __get__(self, obj, objtype):
'''Support instance methods.'''
return functools.partial(self.__call__, obj)
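# Illustrative use of the decorator (a sketch; `fib` is a hypothetical function,
# not defined elsewhere in this module):
#   @memoized
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#   fib(100)  # fast, because every intermediate fib(k) is cached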
def promote_digits(digits):
"""
see problem 20, 16, 104
"""
for i in xrange(digits.shape[0]):
d = digits[i]
if d >= 10 and digits.shape[0] > i + 1:
remainder = d % 10
divisor = d / 10
digits[i] = remainder
digits[i+1] += divisor
return digits
def digits(n):
return map(int, str(n))
def reverse_number(n):
reversed_n = 0
power = int(np.log10(n))
while power >= 0:
n, r = divmod(n, 10)
reversed_n += r * 10 ** power
power -= 1
return reversed_n
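# Example: reverse_number(1230) == 321 (trailing zeros are dropped).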
def to_digit(array):
return sum(10**(len(array) - i - 1)*int(array[i]) for i in range(len(array)))
def nCr(n, r):
if r > n:
return 0
if r < 0 or n < 0:
return 0
f = math.factorial
return f(n) / f(r) / f(n-r)
def infinite_product(iterx, itery):
intermediate_x, intermediate_y = [], []
intercept = 0
while True:
try:
intermediate_y.append(itery.next())
intermediate_x.append(iterx.next())
for x in range(intercept+1):
y = intercept - x
yield (intermediate_x[x], intermediate_y[y])
intercept += 1
except StopIteration:
break
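# Example: infinite_product(count(), count()) enumerates the grid by
# anti-diagonals, yielding (0, 0), (0, 1), (1, 0), (0, 2), (1, 1), (2, 0), ...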
def random_choice_from_dist(a):
r = random.random()
a = np.array(a)
a = 1.0*a/a.sum()
running_sum = 0
i = -1
while r > running_sum:
i += 1
running_sum += a[i]
return i
def decreasing_elements(array, M, max_length):
"""
Replaces code like
M = 20
for a1 in xrange(M, 0, -1):
for a2 in xrange(a1, 0, -1):
for a3 in xrange(a2, 0, -1):
for a4 in xrange(a3, 0, -1):
yield (a1,a2,a3,a4)
with
decreasing_elements([], M, 4)
"""
if len(array) == max_length:
yield array
else:
for x in range(M+1):
for _array in decreasing_elements(array + [x], x, max_length):
yield _array
def iflatmap(func, iterable):
return chain.from_iterable((func(x) for x in iterable))
def continued_fraction_expansion(x, tol=10e-8):
# this is breaking due to precision errors. If computing the expansion of a
# square root, use sqrt_continued_fraction_expansion instead
while abs(x - int(x)) > tol:
yield int(x)
x = 1./(x - int(x))
def sqrt_continued_fraction_expansion(n):
"""
determines the continued fraction expansion of sqrt(n),
that is, return ai s.t.
a1 + 1/(a2 + 1/(a3 + 1/(....))) == sqrt(n)
"""
m = 0.
d = 1.
a0 = a = int(math.sqrt(n))
yield a
while True:
m = d*a - m
d = (n - m**2) / d
a = int((a0 + m)/d)
yield a
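# Example: for n = 2 the generator yields 1, 2, 2, 2, ... i.e. the continued
# fraction expansion [1; 2, 2, 2, ...] of sqrt(2).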
def continued_fraction_convergents(x):
"""
https://en.wikipedia.org/wiki/Continued_fraction#Infinite_continued_fractions_and_convergents
returns the values
a1, a1 + 1/a2, a1 + 1/(a2 + (1/a3)), ... == x
"""
hn_1, hn_2 = 1, 0
kn_1, kn_2 = 0, 1
for an in continued_fraction_expansion(x):
hn = an*hn_1 + hn_2
kn = an*kn_1 + kn_2
yield (hn, kn)
hn_1, hn_2 = hn, hn_1
kn_1, kn_2 = kn, kn_1
def sqrt_continued_fraction_convergents(n):
"""
https://en.wikipedia.org/wiki/Continued_fraction#Infinite_continued_fractions_and_convergents
returns the values
a1, a1 + 1/a2, a1 + 1/(a2 + (1/a3)), ... == sqrt(n)
"""
hn_1, hn_2 = 1, 0
kn_1, kn_2 = 0, 1
for an in sqrt_continued_fraction_expansion(n):
hn = an*hn_1 + hn_2
kn = an*kn_1 + kn_2
yield (hn, kn)
hn_1, hn_2 = hn, hn_1
kn_1, kn_2 = kn, kn_1
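# Example: sqrt_continued_fraction_convergents(2) yields (1, 1), (3, 2), (7, 5),
# (17, 12), ... i.e. successively better rational approximations h/k of sqrt(2).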
def sqrt_continued_fraction_to_decimal_expansion(n):
hn_1, hn_2 = 1, 0
kn_1, kn_2 = 0, 1
for an in sqrt_continued_fraction_expansion(n):
hn = an*hn_1 + hn_2
kn = an*kn_1 + kn_2
if kn_1 > 0 and hn/kn == hn_1/kn_1:
d = hn/kn
yield d
hn -= d*kn
hn *= 10
hn_1 -= d*kn_1
hn_1 *= 10
hn_1, hn_2 = hn, hn_1
kn_1, kn_2 = kn, kn_1
def polygonal_iterator(d):
if d == 3:
f = lambda n: n*(n+1)/2
elif d == 4:
f = lambda n: n**2
elif d == 5:
f = lambda n: n*(3*n-1)/2
elif d == 6:
f = lambda n: n*(2*n-1)
elif d == 7:
f = lambda n: n*(5*n-3)/2
elif d == 8:
f = lambda n: n*(3*n-2)
else:
raise ValueError("no function for %d" % d)
return imap(f, count(start=1))
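# Example: polygonal_iterator(3) yields the triangular numbers 1, 3, 6, 10, 15, ...
# and polygonal_iterator(4) yields the squares 1, 4, 9, 16, ...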
# http://stackoverflow.com/questions/16344284/how-to-generate-a-list-of-palindrome-numbers-within-a-given-range
def palindrome_number_generator():
yield 0
lower = 1
while True:
higher = lower*10
for i in xrange(lower, higher):
s = str(i)
yield int(s+s[-2::-1])
for i in xrange(lower, higher):
s = str(i)
yield int(s+s[::-1])
lower = higher
def palindromes(lower, upper):
all_palindrome_numbers = palindrome_number_generator()
for p in all_palindrome_numbers:
if p >= lower:
break
palindrome_list = [p]
for p in all_palindrome_numbers:
# Because we use the same generator object,
# p continues where the previous loop halted.
if p >= upper:
break
palindrome_list.append(p)
return palindrome_list
def is_terminating(num, denom):
if gcd(num, denom) != 1:
denom = denom/gcd(num, denom)
while denom % 5 == 0:
denom = denom / 5
while denom % 2 == 0:
denom = denom / 2
return denom == 1
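# A fraction terminates in decimal exactly when its reduced denominator has no
# prime factors other than 2 and 5, e.g. is_terminating(3, 40) is True (0.075)
# while is_terminating(1, 3) is False.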
def fast_expon_mod_m(x, n, m):
if n == 1:
return x % m
elif n == 0:
return 1
elif n % 2 == 0:
return fast_expon_mod_m(x * x % m, n / 2, m)
elif n % 2 == 1:
return x * fast_expon_mod_m(x * x % m, (n - 1) / 2, m) % m
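# Example: fast_expon_mod_m(3, 45, 7) == 6, since 3**45 % 7 == 6.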
def fast_matrix_expon_mod_m(M, n, m):
"""
Compute M**n mod m for square numpy matrix/vector M
"""
if n == 0:
return np.eye(M.shape[0], dtype=int)
if n % 2 == 1:
return (M.dot(fast_matrix_expon_mod_m(M, n-1, m))) % m
else:
D = fast_matrix_expon_mod_m(M, n/2, m)
return (D.dot(D)) % m
@memoized
def fast_2matrix_expon_mod_m(M, n, m):
"""
Compute M**n mod m for square (2x2) list of lists
"""
def matrix_mul(
((a11, a12), (a21, a22)),
((b11, b12), (b21, b22)),
):
return (
(a11*b11 + a12*b21, a11*b12 + a12*b22),
(a21*b11 + a22*b21, a21*b12 + a22*b22)
)
def matrix_mod(((a11, a12), (a21, a22)), m):
return ((a11 % m, a12 % m), (a21 % m, a22 % m))
if n == 0:
return ((1, 0), (0, 1))
if n % 2 == 1:
return matrix_mod(matrix_mul(M, fast_2matrix_expon_mod_m(M, n-1, m)), m)
else:
D = fast_2matrix_expon_mod_m(M, n/2, m)
return matrix_mod(matrix_mul(D, D), m)
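# Example: fast_2matrix_expon_mod_m(((1, 1), (1, 0)), 10, 1000) == ((89, 55), (55, 34)),
# the Fibonacci numbers F(11), F(10), F(9) reduced mod 1000.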
|
|
from .leaderboard import Leaderboard
from .leaderboard import grouper
from redis import StrictRedis, Redis, ConnectionPool
import math
class TieRankingLeaderboard(Leaderboard):
DEFAULT_TIES_NAMESPACE = 'ties'
def __init__(self, leaderboard_name, **options):
'''
Initialize a connection to a specific leaderboard. By default, will use a
redis connection pool for any unique host:port:db pairing.
The options and their default values (if any) are:
host : the host to connect to if creating a new handle ('localhost')
port : the port to connect to if creating a new handle (6379)
db : the redis database to connect to if creating a new handle (0)
page_size : the default number of items to return in each page (25)
connection : an existing redis handle if re-using for this leaderboard
connection_pool : redis connection pool to use if creating a new handle
'''
self.options = options
self.ties_namespace = self.options.pop(
'ties_namespace',
self.DEFAULT_TIES_NAMESPACE)
self.leaderboard_name = leaderboard_name
super(TieRankingLeaderboard, self).__init__(
leaderboard_name, **options)
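# Illustrative usage (a sketch; the leaderboard name and connection options are
# assumptions, not values required by the class):
#   lb = TieRankingLeaderboard('highscores', host='localhost', port=6379, db=0)
#   lb.rank_member_in('highscores', 'alice', 100)
#   lb.rank_member_in('highscores', 'bob', 100)
#   lb.rank_for_in('highscores', 'alice')  # same rank as 'bob'; ties share a rank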
def delete_leaderboard_named(self, leaderboard_name):
'''
Delete the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
'''
pipeline = self.redis_connection.pipeline()
pipeline.delete(leaderboard_name)
pipeline.delete(self._member_data_key(leaderboard_name))
pipeline.delete(self._ties_leaderboard_key(leaderboard_name))
pipeline.execute()
def change_score_for_member_in(self, leaderboard_name, member, delta, member_data=None):
'''
Change the score for a member in the named leaderboard by a delta which can be positive or negative.
@param leaderboard_name [String] Name of the leaderboard.
@param member [String] Member name.
@param delta [float] Score change.
@param member_data [String] Optional member data.
'''
previous_score = self.score_for(member)
new_score = (previous_score or 0) + delta
total_members_at_previous_score = []
if previous_score is not None:
total_members_at_previous_score = self.redis_connection.zrevrangebyscore(leaderboard_name, previous_score, previous_score)
pipeline = self.redis_connection.pipeline()
if isinstance(self.redis_connection, Redis):
pipeline.zadd(leaderboard_name, member, new_score)
pipeline.zadd(self._ties_leaderboard_key(leaderboard_name), str(float(new_score)), new_score)
else:
pipeline.zadd(leaderboard_name, new_score, member)
pipeline.zadd(self._ties_leaderboard_key(leaderboard_name), new_score, str(float(new_score)))
if member_data:
pipeline.hset(
self._member_data_key(leaderboard_name),
member,
member_data)
pipeline.execute()
if len(total_members_at_previous_score) == 1:
self.redis_connection.zrem(self._ties_leaderboard_key(leaderboard_name), str(float(previous_score)))
def rank_member_in(
self, leaderboard_name, member, score, member_data=None):
'''
Rank a member in the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param member [String] Member name.
@param score [float] Member score.
@param member_data [String] Optional member data.
'''
member_score = None or self.redis_connection.zscore(leaderboard_name, member)
can_delete_score = member_score is not None and\
(len(self.members_from_score_range_in(leaderboard_name, member_score, member_score)) == 1) and\
member_score != score
pipeline = self.redis_connection.pipeline()
if isinstance(self.redis_connection, Redis):
pipeline.zadd(leaderboard_name, member, score)
pipeline.zadd(self._ties_leaderboard_key(leaderboard_name),
str(float(score)), score)
else:
pipeline.zadd(leaderboard_name, score, member)
pipeline.zadd(self._ties_leaderboard_key(leaderboard_name),
score, str(float(score)))
if can_delete_score:
pipeline.zrem(self._ties_leaderboard_key(leaderboard_name),
str(float(member_score)))
if member_data:
pipeline.hset(
self._member_data_key(leaderboard_name),
member,
member_data)
pipeline.execute()
def rank_member_across(
self, leaderboards, member, score, member_data=None):
'''
Rank a member across multiple leaderboards.
@param leaderboards [Array] Leaderboard names.
@param member [String] Member name.
@param score [float] Member score.
@param member_data [String] Optional member data.
'''
for leaderboard_name in leaderboards:
self.rank_member_in(leaderboard_name, member, score, member_data)
def rank_members_in(self, leaderboard_name, members_and_scores):
'''
Rank an array of members in the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param members_and_scores [Array] Variable list of members and scores.
'''
for member, score in grouper(2, members_and_scores):
self.rank_member_in(leaderboard_name, member, score)
def remove_member_from(self, leaderboard_name, member):
'''
Remove a member from the named leaderboard, along with its tie score (when no other member shares it) and any associated member data.
@param leaderboard_name [String] Name of the leaderboard.
@param member [String] Member name.
'''
member_score = None or self.redis_connection.zscore(
leaderboard_name, member)
can_delete_score = member_score and len(
self.members_from_score_range_in(leaderboard_name, member_score, member_score)) == 1
pipeline = self.redis_connection.pipeline()
pipeline.zrem(leaderboard_name, member)
if can_delete_score:
pipeline.zrem(self._ties_leaderboard_key(leaderboard_name),
str(float(member_score)))
pipeline.hdel(self._member_data_key(leaderboard_name), member)
pipeline.execute()
def rank_for_in(self, leaderboard_name, member):
'''
Retrieve the rank for a member in the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param member [String] Member name.
@return the rank for a member in the leaderboard.
'''
member_score = self.score_for_in(leaderboard_name, member)
if self.order == self.ASC:
try:
return self.redis_connection.zrank(
self._ties_leaderboard_key(leaderboard_name), str(float(member_score))) + 1
except:
return None
else:
try:
return self.redis_connection.zrevrank(
self._ties_leaderboard_key(leaderboard_name), str(float(member_score))) + 1
except:
return None
def remove_members_in_score_range_in(
self, leaderboard_name, min_score, max_score):
'''
Remove members from the named leaderboard in a given score range.
@param leaderboard_name [String] Name of the leaderboard.
@param min_score [float] Minimum score.
@param max_score [float] Maximum score.
'''
pipeline = self.redis_connection.pipeline()
pipeline.zremrangebyscore(
leaderboard_name,
min_score,
max_score)
pipeline.zremrangebyscore(
self._ties_leaderboard_key(leaderboard_name),
min_score,
max_score)
pipeline.execute()
def expire_leaderboard_for(self, leaderboard_name, seconds):
'''
Expire the given leaderboard in a set number of seconds. The ties leaderboard
and the member data key are expired along with the main leaderboard key.
@param leaderboard_name [String] Name of the leaderboard.
@param seconds [int] Number of seconds after which the leaderboard will be expired.
'''
pipeline = self.redis_connection.pipeline()
pipeline.expire(leaderboard_name, seconds)
pipeline.expire(self._ties_leaderboard_key(leaderboard_name), seconds)
pipeline.expire(self._member_data_key(leaderboard_name), seconds)
pipeline.execute()
def expire_leaderboard_at_for(self, leaderboard_name, timestamp):
'''
Expire the given leaderboard at a specific UNIX timestamp. The ties leaderboard
and the member data key are expired along with the main leaderboard key.
@param leaderboard_name [String] Name of the leaderboard.
@param timestamp [int] UNIX timestamp at which the leaderboard will be expired.
'''
pipeline = self.redis_connection.pipeline()
pipeline.expireat(leaderboard_name, timestamp)
pipeline.expireat(
self._ties_leaderboard_key(leaderboard_name), timestamp)
pipeline.expireat(self._member_data_key(leaderboard_name), timestamp)
pipeline.execute()
def ranked_in_list_in(self, leaderboard_name, members, **options):
'''
Retrieve a page of leaders from the named leaderboard for a given list of members.
@param leaderboard_name [String] Name of the leaderboard.
@param members [Array] Member names.
@param options [Hash] Options to be used when retrieving the page from the named leaderboard.
@return a page of leaders from the named leaderboard for a given list of members.
'''
ranks_for_members = []
pipeline = self.redis_connection.pipeline()
for member in members:
if self.order == self.ASC:
pipeline.zrank(leaderboard_name, member)
else:
pipeline.zrevrank(leaderboard_name, member)
pipeline.zscore(leaderboard_name, member)
responses = pipeline.execute()
for index, member in enumerate(members):
data = {}
data[self.MEMBER_KEY] = member
score = responses[index * 2 + 1]
if score is not None:
score = float(score)
data[self.SCORE_KEY] = score
if self.order == self.ASC:
data[self.RANK_KEY] = self.redis_connection.zrank(
self._ties_leaderboard_key(leaderboard_name), str(data[self.SCORE_KEY]))
else:
data[self.RANK_KEY] = self.redis_connection.zrevrank(
self._ties_leaderboard_key(leaderboard_name), str(data[self.SCORE_KEY]))
if data[self.RANK_KEY] is not None:
data[self.RANK_KEY] += 1
else:
if not options.get('include_missing', True):
continue
ranks_for_members.append(data)
if ('with_member_data' in options) and (True == options['with_member_data']):
for index, member_data in enumerate(self.members_data_for_in(leaderboard_name, members)):
try:
ranks_for_members[index][self.MEMBER_DATA_KEY] = member_data
except:
pass
if 'sort_by' in options:
if self.RANK_KEY == options['sort_by']:
ranks_for_members = sorted(
ranks_for_members,
key=lambda member: member[
self.RANK_KEY])
elif self.SCORE_KEY == options['sort_by']:
ranks_for_members = sorted(
ranks_for_members,
key=lambda member: member[
self.SCORE_KEY])
return ranks_for_members
def _ties_leaderboard_key(self, leaderboard_name):
'''
Key for ties leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@return a key in the form of +leaderboard_name:ties_namespace+
'''
return '%s:%s' % (leaderboard_name, self.ties_namespace)
|
|
from typing import Dict
from CommonServerPython import * # noqa: E402 lgtm [py/polluting-import]
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
# CONSTANTS
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
class pXScoring:
"""
Class to handle all current and future scoring for PerimeterX objects
"""
@staticmethod
def dbotscore_from_risk(risk_score: int, thresholds: Dict[str, Any]) -> int:
"""
Map a PerimeterX risk score to the corresponding DBotScore value using the configured thresholds.
:type risk_score: ``int``
:param risk_score: PerimeterX provided risk score for the IP Address
:return: Returns the relevant DBotScore value for the provided PerimeterX risk score
:rtype: ``int``
"""
if risk_score > thresholds['bad_threshold']:
dbot_score = Common.DBotScore.BAD # bad
elif risk_score > thresholds['suspicious_threshold']:
dbot_score = Common.DBotScore.SUSPICIOUS # suspicious
elif risk_score > thresholds['good_threshold']:
dbot_score = Common.DBotScore.GOOD # good
else:
dbot_score = Common.DBotScore.NONE # unknown
return dbot_score
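# For example, with thresholds of {'bad_threshold': 80, 'suspicious_threshold': 50,
# 'good_threshold': 0} (illustrative values, not the integration's defaults), a
# risk score of 90 maps to Common.DBotScore.BAD and a score of 60 to
# Common.DBotScore.SUSPICIOUS.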
@staticmethod
def get_ip_score(ip_address: str, risk_score: int, thresholds: Dict[str, Any]):
"""
Create the DBotScore structure first using the Common.DBotScore class.
:type ip_address: ``str``
:param ip_address: IP Address to be used as the indicator for this entry
:type risk_score: ``int``
:param risk_score: PerimeterX provided risk score for the IP Address
:return: Updated List of Common IP Addresses with this entry appended
:rtype: ``List[Common.IP]``
"""
# create the DBotScore object and populate it with the needed values
dbot_score = Common.DBotScore(
indicator=ip_address,
indicator_type=DBotScoreType.IP,
integration_name='PerimeterX',
score=pXScoring.dbotscore_from_risk(risk_score, thresholds),
malicious_description='High risk score indicates high probability that the requests from the IP are '
'malicious '
)
# Create the IP Standard Context structure using Common.IP and add
# dbot_score to it.
return Common.IP(
ip=ip_address,
# asn=ip_data.get('asn'),
dbot_score=dbot_score
)
class Client(BaseClient):
"""
Client will implement the service API, and should not contain any Demisto logic.
Should only do requests and return data.
"""
def test_api_connection(self, api_key: str) -> Dict[str, Any]:
"""
Makes a call to the status API path to confirm the proper URL and Authorization token were provided
"""
headers = {
'Authorization': f'Bearer {api_key}',
'Content-Type': 'application/json'
}
return self._http_request(
method='GET',
full_url=f'{self._base_url}?search=ip:1.1.1.1&tops=path',
headers=headers
)
def post_investigate_by_ip(self, ip_type: str, ip_address: str, api_key: str) -> Dict[str, Any]:
"""
Query the PerimeterX API to get the relevant details regarding the provided IP within a particular customer's own data
:type ip_address: ``str``
:param ip_address: IP Address to be used as the indicator for this entry
:type ip_type: ``str``
:param ip_type: The type of IP address we will be querying (true_ip or socket_ip)
:return: The JSON response body from the PerimeterX API
:rtype: ``Dict[str, Any]``
"""
headers = {
'Authorization': f'Bearer {api_key}',
'Content-Type': 'application/json'
}
request_params: Dict[str, Any] = {}
if ip_type:
request_params['ip_type'] = ip_type
if ip_address:
request_params['ip_address'] = f'search=ip:{ip_address}'
return self._http_request(
method='GET',
full_url=f'{self._base_url}?{request_params["ip_address"]}&tops=user-agent,path,socket_ip_classification',
headers=headers
)
def post_investigate_by_name(self, name_type: str, name: str) -> Dict[str, Any]:
"""
THIS IS NOT CURRENTLY IMPLEMENTED
Query the PerimeterX API to get the relevant details regarding the provided name within a particular customer's own data
:type name: ``str``
:param name: name to be used as the indicator for this entry
:type name_type: ``str``
:param name_type: The type of name we will be querying (domain or param)
:return: The JSON response body from the PerimeterX API
:rtype: ``Dict[str, Any]``
"""
request_params: Dict[str, Any] = {}
if name_type:
request_params['name_type'] = name_type
if name:
request_params['name'] = name
return self._http_request(
method='POST',
full_url=self._base_url,
json_data=request_params
)
def test_module(client: Client, api_key):
"""
Returning 'ok' indicates that the integration works like it is supposed to. Connection to the service is successful.
Args:
client: PerimeterX client
Returns:
'ok' if test passed, anything else will fail the test.
"""
try:
result = client.test_api_connection(api_key)
if result['success']:
return 'ok'
else:
return 'Connection to api failed: ' + result['errors']
except DemistoException as de:
return 'Connection to api failed with exception: ' + de.message
def perimeterx_get_investigate_details(client: Client, args: Dict[str, Any],
thresholds: Dict[str, Any], api_key: str) -> CommandResults:
"""
Collect the required details to query the PerimeterX API to get the relevant details regarding the provided
search term within a particular customer's own data
:type search_term: ``str``
:param search_term: This is the entry that we'll be querying for against the PerimeterX API
:type search_type: ``str``
:param search_type: The type of query that will be run against the PerimeterX API
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains Domains
:rtype: ``CommandResults``
"""
# Check for the field to query for
search_type = args.get('search_type', None)
if not search_type:
raise ValueError('No search_type specified')
# Check to make sure we have a query term
search_term = args.get('search_term', None)
if not search_term:
raise ValueError('No search_term specified')
# Check for an IP based investigation
supported_ip_search_types = ['true_ip', 'socket_ip']
if search_type in supported_ip_search_types:
"""
Run an IP based search if the search type is one supported by the IP types
"""
result = client.post_investigate_by_ip(ip_type=search_type, ip_address=search_term, api_key=api_key)
indicator = pXScoring.get_ip_score(ip_address=search_term, risk_score=result['max_risk_score'],
thresholds=thresholds)
return CommandResults(
outputs_prefix='PerimeterX',
outputs_key_field='',
outputs=result,
indicator=indicator
)
elif search_type == 'name':
"""
THIS IS NOT CURRENTLY IMPLEMENTED
Run a name based search if the search type is name
"""
result = client.post_investigate_by_name(name_type=search_type, name=search_term)
else:
"""
Generate an error because the search type is not supported
"""
raise ValueError('Invalid search_type provided')
return CommandResults(
outputs_prefix='PerimeterX',
outputs_key_field='',
outputs=result
)
def ip(client: Client, args, thresholds: Dict[str, Any], api_key):
"""
Collect the details to run an IP Reputation query against the PerimeterX API
:type ip: ``str``
:param ip: Results will be provided for list of IPs
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains Domains
:rtype: ``CommandResults``
"""
# Check to make sure we have a query term
ip_list = argToList(args.get('ip'))
results = []
for ip_address in ip_list:
result = client.post_investigate_by_ip(ip_type='true_ip', ip_address=ip_address, api_key=api_key)
indicator = pXScoring.get_ip_score(ip_address=ip_address, risk_score=result['max_risk_score'],
thresholds=thresholds)
readable_output = f'{indicator}'
cr = CommandResults(
readable_output=readable_output,
outputs_prefix='PerimeterX',
outputs_key_field='',
outputs=result,
indicator=indicator
)
results.append(cr)
return results
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
api_key = params.get('apikey')
# get the service API url
base_url = urljoin(params['url'], '/v1/bot-defender/')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
# get the DBot Thresholds
thresholds = {
"good_threshold": int(params.get('dbotGoodThreshold')),
"suspicious_threshold": int(params.get('dbotSuspiciousThreshold')),
"bad_threshold": int(params.get('dbotBadThreshold')),
"unknown_threshold": 0
}
command = demisto.command()
LOG(f'Command being called is {command}')
try:
headers = {
'Authorization': f'Bearer {api_key}',
'Content-Type': 'application/json'
}
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if command == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client, api_key)
demisto.results(result)
elif command == 'ip':
return_results(ip(client, demisto.args(), thresholds=thresholds, api_key=api_key))
elif command == 'perimeterx_get_investigate_details':
return_results(perimeterx_get_investigate_details(client=client, args=demisto.args(), thresholds=thresholds,
api_key=api_key))
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import training
class BackpropTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testAggregateGradients(self):
def fn(x):
ind1 = constant_op.constant(np.array([0, 1]))
ind2 = constant_op.constant(np.array([2, 3]))
ind3 = constant_op.constant(np.array([1, 3]))
# A mixture of IndexedSlices and dense tensor to aggregate.
g1 = embedding_ops.embedding_lookup(x, ind1)
g2 = embedding_ops.embedding_lookup(x, ind2)
g3 = embedding_ops.embedding_lookup(x, ind3)
g4 = math_ops.reduce_sum(x * constant_op.constant(2.0))
return g1 * g2 * g3 * g4
var_np = np.random.rand(4, 2).astype(np.float32)
var = constant_op.constant(var_np)
grad = backprop.gradients_function(fn, [0])(var)[0]
grad = self.evaluate(ops.convert_to_tensor(grad))
if not context.executing_eagerly():
tf_var = array_ops.constant(var_np, dtypes.float32)
tf_ind1 = array_ops.constant([0, 1])
tf_ind2 = array_ops.constant([2, 3])
tf_ind3 = array_ops.constant([1, 3])
tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2)
tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3)
tf_g4 = math_ops.reduce_sum(tf_var * 2.0, reduction_indices=(0, 1))
tf_y = tf_g1 * tf_g2 * tf_g3 * tf_g4
tf_grad = gradients.gradients(tf_y, [tf_var])[0]
tf_dense_grad = math_ops.unsorted_segment_sum(
tf_grad.values, tf_grad.indices, tf_grad.dense_shape[0])
self.assertAllClose(grad, self.evaluate(tf_dense_grad))
def testImplicitGradWithResourceVariable(self):
x = resource_variable_ops.ResourceVariable(
initial_value=constant_op.constant(1.0), name='x')
def fn():
b = constant_op.constant(2.0)
c = math_ops.add(x.value(), b)
return math_ops.add(c, constant_op.constant(3.0))
grads_and_vars = backprop.implicit_grad(fn)()
self.assertAllEqual(grads_and_vars[0][0], 1.0)
self.assertAllEqual(id(grads_and_vars[0][1]), id(x))
def testGradientInsideLoop(self):
with ops.Graph().as_default():
v = resource_variable_ops.ResourceVariable(1.0)
def body(_):
_ = v + 1.0 # This reads the variable inside the loop context
with backprop.GradientTape() as t:
result = v * 2
self.assertTrue(t.gradient(result, v) is not None)
return 1.0
control_flow_ops.while_loop(lambda i: False, body, [1.0])
def testWhereGradient(self):
# Note: where is special because only some of its arguments are of
# differentiable dtypes.
def f(x):
return array_ops.where(x < 10, x, x * x)
g = backprop.gradients_function(f)
self.assertAllEqual(g(5.)[0], 1.0)
self.assertAllEqual(g(50.)[0], 100.0)
def testTwoTargets(self):
with backprop.GradientTape() as t:
x = constant_op.constant(3.0)
y = constant_op.constant(2.0)
t.watch([x, y])
xx = 2 * x
yy = 3 * y
dx, dy = t.gradient([xx, yy], [x, y])
self.assertAllEqual(dx, 2.0)
self.assertAllEqual(dy, 3.0)
def testOutputGradUsedInComputation(self):
with backprop.GradientTape() as t:
x = constant_op.constant(3.0)
y = constant_op.constant(2.0)
t.watch([x, y])
loss = x * y
dx, = t.gradient([loss, x], [x], output_gradients=[1.0, 2.0])
self.assertAllEqual(dx, 4.0)
def testDy(self):
def f(x):
return x
grad_fn = backprop.gradients_function(f)
self.assertAllEqual(2., grad_fn(1., dy=2.)[0])
def testGradientInteger(self):
def f(x):
return x + x
int_tensor = constant_op.constant(1)
self.assertEqual(backprop.gradients_function(f)(int_tensor)[0], None)
def testErrors(self):
@custom_gradient.custom_gradient
def f(x):
def grad(_):
raise RuntimeError('x')
return x, grad
# TODO(apassos) raise the right error here
with self.assertRaises(RuntimeError):
backprop.gradients_function(f)(constant_op.constant(1.0))
def testGradientsFunctionInCustomGradient(self):
@custom_gradient.custom_gradient
def f(x):
(y,) = backprop.gradients_function(lambda x: x * x)(x)
def grad(dy):
return [2 * dy]
return y, grad
self.assertAllEqual(f(1.0), 2.0)
def testImplicitGradOverEmbeddingLookup(self):
batch_size = 8
embedding_size = 512
vocab_size = 1000
lrn_rate = 0.1
random_init = random_ops.random_uniform([vocab_size, embedding_size])
x = array_ops.ones((batch_size), dtypes.int64)
embedding = resource_variable_ops.ResourceVariable(
initial_value=random_init, dtype=dtypes.float32, name='embedding')
def f():
embedded_x = embedding_ops.embedding_lookup(embedding, x)
return constant_op.constant(1.0, dtypes.float32) - embedded_x
grad = backprop.implicit_grad(f)()[0][0]
opt = training.GradientDescentOptimizer(lrn_rate)
with ops.Graph().as_default(), self.cached_session():
tf_x = array_ops.ones((batch_size), dtypes.int64)
# TODO(ashankar,apassos): Change to ResourceVariable.
tf_embedding = variables.Variable(
random_init.numpy(), name='tf_embedding')
tf_embedded_x = embedding_ops.embedding_lookup(tf_embedding, tf_x)
tf_y = 1.0 - tf_embedded_x
tf_grad = gradients.gradients(tf_y, [tf_embedding])[0]
tf_opt = training.GradientDescentOptimizer(0.1)
tf_embedding.initializer.run()
self.assertAllClose(tf_grad.indices.eval(), grad.indices)
self.assertAllClose(tf_grad.values.eval(), grad.values)
tf_opt.apply_gradients([(tf_grad, tf_embedding)]).run()
expected = tf_embedding.eval()
opt.apply_gradients([(grad, embedding)])
self.assertAllClose(expected, embedding.read_value())
def testImplicitGradOrdering(self):
v0 = resource_variable_ops.ResourceVariable(1.0)
v1 = resource_variable_ops.ResourceVariable(2.0)
def f():
x = v1 * v1
y = v0 * v0
return x + y
grads = backprop.implicit_grad(f)()
ordered_variables = [x[1] for x in grads]
self.assertTrue(ordered_variables[0] is v0)
self.assertTrue(ordered_variables[1] is v1)
def testTapeStopRecording(self):
with backprop.GradientTape() as t:
x = resource_variable_ops.ResourceVariable(1.0)
with t.stop_recording():
y = x * x
self.assertEqual(t.gradient(y, x), None)
def testTapeStopStartRecording(self):
with backprop.GradientTape(persistent=True) as t:
x = resource_variable_ops.ResourceVariable(1.0)
x2 = x * 2 # This should be differentiated through.
with t.stop_recording():
y = x2 * x2
z = x2 * x2
self.assertEqual(t.gradient(y, x2), None)
# If the x*2 was not differentiated through, this would be 2.0, not 4.0
self.assertEqual(t.gradient(z, x2).numpy(), 4.0)
def testTapeReset(self):
with backprop.GradientTape() as t:
v = resource_variable_ops.ResourceVariable(1.0)
loss = v * v
t.reset()
loss += v * v
self.assertAllEqual(t.gradient(loss, v), 2.0)
def testAutomaticWatchedVariables(self):
with backprop.GradientTape() as t:
self.assertEqual(0, len(t.watched_variables()))
v = resource_variable_ops.ResourceVariable(1.0)
loss = v * v
self.assertAllEqual([v], t.watched_variables())
t.reset()
self.assertEqual(0, len(t.watched_variables()))
loss += v * v
self.assertAllEqual([v], t.watched_variables())
def testExplicitWatchedVariables(self):
with backprop.GradientTape() as t:
self.assertEqual(0, len(t.watched_variables()))
v = resource_variable_ops.ResourceVariable(1.0)
t.watch(v)
self.assertAllEqual([v], t.watched_variables())
t.reset()
self.assertEqual(0, len(t.watched_variables()))
t.watch(v)
self.assertAllEqual([v], t.watched_variables())
@test_util.assert_no_new_tensors
def testGradientNone(self):
def loss(x, l):
return math_ops.reduce_mean(
nn_ops.softmax_cross_entropy_with_logits(logits=x, labels=l),
constant_op.constant([0]))
logits = constant_op.constant([[0.0, 0.0]])
labels = constant_op.constant([[1.0, 0.0]])
# softmax_cross_entropy_with_logits returns two outputs and in this case the
# gradient wrt the second is None.
g, = backprop.gradients_function(loss, [0])(logits, labels)
self.assertAllEqual(g.numpy(), [[-0.5, 0.5]])
@test_util.run_in_graph_and_eager_modes
def testGradientWithinTapeBlock(self):
v1 = resource_variable_ops.ResourceVariable(1.)
self.evaluate(v1.initializer)
with backprop.GradientTape() as t:
loss = 2 * v1
grad = t.gradient(loss, v1)
self.assertAllEqual(self.evaluate(grad), 2.0)
with backprop.GradientTape(persistent=True) as t:
loss = 2 * v1
grad = t.gradient(loss, v1)
self.assertAllEqual(self.evaluate(grad), 2.0)
@test_util.run_in_graph_and_eager_modes
def testNestedSelfContexts(self):
v1 = resource_variable_ops.ResourceVariable(1.)
self.evaluate(v1.initializer)
with backprop.GradientTape() as t:
with self.assertRaises(ValueError):
with t:
pass
@test_util.assert_no_new_tensors
def testSecondGrad(self):
def first(x):
l = constant_op.constant([[0.0]])
x = nn_ops.softmax_cross_entropy_with_logits(labels=l, logits=x)
x = math_ops.reduce_sum(x, constant_op.constant([0]))
return x
def second(x):
grad = backprop.gradients_function(first, [0])(x)[0]
return math_ops.reduce_sum(grad, constant_op.constant([0]))
f = constant_op.constant([[0.1]])
grad = backprop.gradients_function(second, [0])(f)[0]
self.assertAllEqual([[0.0]], grad)
@test_util.run_in_graph_and_eager_modes
def testWatchingIsTapeLocal(self):
x1 = resource_variable_ops.ResourceVariable(2.0, trainable=False)
x2 = resource_variable_ops.ResourceVariable(2.0, trainable=False)
with backprop.GradientTape() as tape1:
with backprop.GradientTape() as tape2:
tape1.watch(x1)
tape2.watch([x1, x2])
y = x1 ** 3
z = x2 ** 2
dy, dz = tape2.gradient([y, z], [x1, x2])
d2y, d2z = tape1.gradient([dy, dz], [x1, x2])
self.evaluate([x1.initializer, x2.initializer])
self.assertEqual(self.evaluate(d2y), 12.0)
self.assertIsNone(d2z)
@test_util.assert_no_new_tensors
def testMakeVJP(self):
def f(x):
return x * x
wrapped_fn = backprop.make_vjp(f, persistent=False)
result, vjp = wrapped_fn(constant_op.constant(3.0))
self.assertAllEqual(result, 9.0)
self.assertAllEqual(vjp(2.0)[0], 12.0)
def testPersistentMakeVJP(self):
def f(x):
return x * x
wrapped_fn = backprop.make_vjp(f, persistent=True)
_, vjp = wrapped_fn(constant_op.constant(3.0))
vjp_result1 = vjp(2.0)[0]
vjp_result2 = vjp(2.0)[0]
self.assertAllEqual(vjp_result1, vjp_result2, 12.0)
@test_util.assert_no_new_tensors
def testGradGrad(self):
def sq(x):
return x * x
def grad(x):
value = backprop.gradients_function(sq, [0])(x)[0]
return value
gradgrad = backprop.gradients_function(grad, [0])
self.assertAllEqual(gradgrad(constant_op.constant(3.0))[0], 2.0)
@test_util.assert_no_new_tensors
def testGradGradExp(self):
def grad(x):
value = backprop.gradients_function(math_ops.exp, [0])(x)[0]
return value
gradgrad = backprop.gradients_function(grad, [0])
self.assertAllEqual(gradgrad(constant_op.constant(0.0))[0], 1.0)
@test_util.assert_no_new_tensors
def testStopGradient(self):
grad = backprop.gradients_function(
lambda x: array_ops.stop_gradient(math_ops.argmax(x)))
self.assertAllEqual(grad([0.0])[0], None)
@test_util.assert_no_new_tensors
def testArgmax(self):
def argmax(x):
i = math_ops.argmax(x)
return array_ops.stop_gradient(i)
grad = backprop.gradients_function(argmax)
self.assertAllEqual(grad([0.0])[0], None)
@test_util.assert_no_new_tensors
def testGPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
def fn(x):
with context.device('/gpu:0'):
b = constant_op.constant(2.0)
c = math_ops.add(x.gpu(), b)
# TODO(apassos): remove cpu below by making TensorVSpace aware
# of devices.
return math_ops.add(c, constant_op.constant(3.0)).cpu()
grad = backprop.gradients_function(fn, [0])(constant_op.constant(1.0))[0]
self.assertAllEqual(grad, 1.0)
@test_util.assert_no_new_tensors
def testGPUImplicitGrad(self):
if not context.context().num_gpus():
self.skipTest('No GPU found')
with context.device('gpu:0'):
v = resource_variable_ops.ResourceVariable(
constant_op.constant(1.0), name='v')
def f():
with context.device('gpu:0'):
return v.read_value()
self.assertEqual(
backprop.implicit_grad(f)()[0][0].cpu().numpy(), 1.0)
@test_util.assert_no_new_tensors
def testCPU(self):
def fn(x):
b = constant_op.constant(2.0)
c = math_ops.add(x, b)
return math_ops.add(c, constant_op.constant(3.0))
grad = backprop.gradients_function(fn, [0])(constant_op.constant(1.0))[0]
self.assertAllEqual(grad, 1.0)
@test_util.assert_no_new_tensors
def testTensorCopyGPU2CPU2GPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
def f(a, b):
return a.cpu() + b.cpu()
with context.device('/gpu:0'):
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
grad = backprop.gradients_function(f, [0])(a, b)[0]
self.assertAllEqual(grad, 1.0)
@test_util.assert_no_new_tensors
def testEmptyParams(self):
def fn(a, b):
return a * b
x = constant_op.constant(1.0)
y = constant_op.constant(2.0)
dx, dy = backprop.gradients_function(fn)(x, y)
self.assertAllEqual(dx, y.numpy())
self.assertAllEqual(dy, x.numpy())
@test_util.assert_no_new_tensors
def testUnconnectedNone(self):
v = resource_variable_ops.ResourceVariable(
1.0, name='testUnconnectedNone')
def f():
v.read_value()
return constant_op.constant(1.0)
self.assertEqual(backprop.implicit_grad(f)()[0][0], None)
@test_util.assert_no_new_tensors
def testGradientTapeReEnterContext(self):
g = backprop.GradientTape()
with g:
x = constant_op.constant(3.0)
g.watch(x)
y = 2*x
with g:
z = 2*y
grad = g.gradient(target=z, sources=[x])
self.assertEqual(self.evaluate(grad), [4.0])
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testGradientTapeRepeatedSource(self):
with backprop.GradientTape(persistent=False) as g:
x = constant_op.constant(3.0)
g.watch(x)
y = 2 * x
grad = g.gradient(target=y, sources=[x, x])
self.assertEqual(self.evaluate(grad), [2.0, 2.0])
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testPersistentGradientTapeRepeatedSource(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant(3.0)
y = constant_op.constant(5.0)
g.watch(x)
g.watch(y)
z = x * x + x * y
grad = g.gradient(target=z, sources=[x, x])
self.assertEqual(self.evaluate(grad), [11.0, 11.0])
grad = g.gradient(target=z, sources=[y, x])
self.assertEqual(self.evaluate(grad), [3.0, 11.0])
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testGradientTapeStructure(self):
with backprop.GradientTape(persistent=True) as g:
# Using different constant values because constant tensors are
# cached, leading to a different gradient than what one might expect.
x1 = constant_op.constant(3.0)
x2 = constant_op.constant(3.1)
x3 = constant_op.constant(3.2)
g.watch(x1)
g.watch(x2)
g.watch(x3)
y = x1 + 2 * x2 + 3 * x3
self.assertEqual(self.evaluate(g.gradient(y, x1)), [1.0])
self.assertEqual(self.evaluate(g.gradient(y, (x1,))), (1.0,))
self.assertEqual(self.evaluate(g.gradient(y, (x1, x2))), (1.0, 2.0))
self.assertEqual(self.evaluate(g.gradient(y, [(x1, x2), (x2, x3)])),
[(1.0, 2.0), (2.0, 3.0)])
self.assertEqual(self.evaluate(g.gradient(y, (x1, x2, [x1, x3]))),
(1.0, 2.0, [1.0, 3.0]))
self.assertEqual(self.evaluate(g.gradient(y, [x1, {'x2': x2, 'x3': x3}])),
[1.0, {'x2': 2.0, 'x3': 3.0}])
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testGradientTape(self):
with backprop.GradientTape() as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x * x
with backprop.GradientTape() as gg:
gg.watch(y)
z = 2 * y
inner_grad = gg.gradient(z, [y])[0]
self.assertEqual(self.evaluate(inner_grad), 2.0)
y += inner_grad
grad = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(grad), 6.0)
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testGradientTapeCalledOnConstantTarget(self):
with backprop.GradientTape() as g:
x = variables.Variable([3.0])
y = variables.Variable([2.0])
with self.assertRaisesRegexp(
ValueError,
'GradientTape.gradient is not supported for variable targets.'):
g.gradient(x, y)
@test_util.run_in_graph_and_eager_modes
def testGradientTapeWithCond(self):
x = constant_op.constant(3.0)
def true_fn():
return x
def false_fn():
return x * x
with backprop.GradientTape() as g:
g.watch(x)
y = control_flow_ops.cond(x < x, true_fn, false_fn)
if not context.executing_eagerly():
with self.assertRaisesRegexp(NotImplementedError, 'tf.gradients'):
dy = g.gradient(y, [x])[0]
else:
dy = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(dy), 6.0)
@test_util.run_in_graph_and_eager_modes
def testGradientTapeWithWhileLoop(self):
i = constant_op.constant(1)
x = constant_op.constant(2.)
def cond(i, _):
return i < 3
def body(i, x):
return i + 1, x * 2
with backprop.GradientTape() as g:
g.watch([x])
_, y = control_flow_ops.while_loop(cond, body, [i, x])
if not context.executing_eagerly():
with self.assertRaisesRegexp(NotImplementedError, 'tf.gradients'):
dy = g.gradient(y, [x])[0]
else:
dy = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(dy), 4.0)
@test_util.assert_no_new_tensors
def testGradientTapeGradientCalledMultipleTimes(self):
with backprop.GradientTape() as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x * x
z = y * y
g.gradient(z, [x])
with self.assertRaisesRegexp(
RuntimeError, 'GradientTape.gradient can only be called once'):
g.gradient(y, [x])
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testPersistentTape(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x * x
z = y * y
dz_dx = g.gradient(z, [x])[0]
self.assertEqual(self.evaluate(dz_dx), 4 * 3 * 3 * 3)
dy_dx = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(dy_dx), 2 * 3)
del g
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testHigherOrderGradient(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x ** 3 # y := x^3
dy_dx = g.gradient(y, x) # dy/dx := 3x^2
d2y_dx2 = g.gradient(dy_dx, x) # d2y/dx2 := 6x
d3y_dx3 = g.gradient(d2y_dx2, x) # d3y/dx3 := 6
x = 3
self.assertEqual(self.evaluate(y), x ** 3)
self.assertEqual(self.evaluate(dy_dx), 3 * x ** 2)
self.assertEqual(self.evaluate(d2y_dx2), 6 * x)
self.assertEqual(self.evaluate(d3y_dx3), 6)
del g
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testPersistentNestedTape(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x * x
with backprop.GradientTape(persistent=True) as gg:
gg.watch(y)
z = 2 * y
for _ in range(2):
inner_grad = gg.gradient(z, [y])[0]
self.assertEqual(self.evaluate(inner_grad), 2.0)
y += inner_grad
del gg
grad = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(grad), 6.0)
grad = g.gradient(z, [x])[0]
self.assertEqual(self.evaluate(grad), 12.0)
del g
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testGradientTapeVariable(self):
v = resource_variable_ops.ResourceVariable(1.0, name='v')
self.evaluate(v.initializer)
with backprop.GradientTape() as g:
y = v * v
grad = g.gradient(y, [v])[0]
self.assertAllEqual(self.evaluate(grad), 2.0)
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testNestedGradients(self):
x = constant_op.constant(3.0)
with backprop.GradientTape() as g:
g.watch(x)
y = x * x
z = y * y
dz_dx, dz_dy = g.gradient(z, [x, y])
self.assertEqual(self.evaluate(dz_dx), 108.0)
self.assertEqual(self.evaluate(dz_dy), 18.0)
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testUnconnectedGradientsDefault(self):
x = constant_op.constant(1.0)
y = constant_op.constant(3.0)
with backprop.GradientTape() as g:
g.watch([x, y])
z = y * 2
dz_dx = g.gradient(z, x)
self.assertEqual(dz_dx, None)
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testUnconnectedGradientsZeros(self):
x = constant_op.constant(1.0, shape=[2, 2])
y = constant_op.constant(3.0)
with backprop.GradientTape() as g:
g.watch([x, y])
z = y * 2
dz_dx = g.gradient(z, x, unconnected_gradients='zero')
self.assertAllEqual([[0.0, 0.0], [0.0, 0.0]], self.evaluate(dz_dx))
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testUnknownUnconnectedGradientsValueGiven(self):
x = constant_op.constant(1.0)
y = constant_op.constant(1.0)
with backprop.GradientTape() as g:
g.watch([x, y])
z = y * 2
with self.assertRaisesRegexp(
ValueError, "Unknown value for unconnected_gradients: 'nonsense'"):
g.gradient(z, x, unconnected_gradients='nonsense')
@test_util.run_in_graph_and_eager_modes
def testUnconnectedGradientsNestedDefunZeros(self):
@function.defun
def f(x):
return x * x
@function.defun
def h(y):
z = f(y)
return array_ops.stop_gradient(z)
x = constant_op.constant(1.0)
with backprop.GradientTape() as g:
g.watch(x)
y = h(x)
dy_dx = g.gradient(y, x, unconnected_gradients='zero')
self.assertEqual(0.0, self.evaluate(dy_dx))
@test_util.assert_no_new_tensors
def testEmptyParamsForValueAndGradFunction(self):
def fn(a, b):
return a * b
val_and_grads_fn = backprop.val_and_grad_function(fn)
x = 2.0
y = 3.0
val, (dx, dy) = val_and_grads_fn(x, y)
self.assertAllClose(val, x * y)
self.assertAllEqual(dx, y)
self.assertAllEqual(dy, x)
@test_util.assert_no_new_tensors
def testNonEmptyParamsForValueAndGradFunction(self):
def fn(a, b):
return a * b
val_and_grad_fn = backprop.val_and_grad_function(fn, params=[1])
x = 2.0
y = 3.0
val, grads = val_and_grad_fn(x, y)
self.assertAllClose(val, x * y)
self.assertEqual(1, len(grads))
self.assertAllEqual(grads[0], x)
@test_util.assert_no_new_tensors
def testTensorCopyCPU2GPU2CPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
# forward: a (cpu->gpu) -> add (gpu) -> c (gpu->cpu) -> add (cpu) -> e (cpu)
# back: e (cpu) -> add (cpu) -> c (cpu->gpu) -> add (gpu) -> grad (gpu->cpu)
def f(a, b):
with context.device('/gpu:0'):
c = math_ops.add(a.gpu(0), b.gpu(0))
return math_ops.add(c.cpu(), constant_op.constant(3.0))
with context.device('/cpu:0'):
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
grad = backprop.gradients_function(f, [0])(a, b)[0]
self.assertAllEqual(grad, 1.0)
def testGetAttrType(self):
typ = backprop.op_attr_type('Add', 'T')
self.assertEqual(typ, pywrap_tensorflow.TF_ATTR_TYPE)
def testGetAttrList(self):
typ = backprop.op_attr_type('MaxPool', 'ksize')
self.assertEqual(typ, [pywrap_tensorflow.TF_ATTR_INT])
def testMakeAttrType(self):
self.assertEqual(dtypes.float32,
backprop.make_attr(pywrap_tensorflow.TF_ATTR_TYPE, 1))
def testMakeAttrTypeList(self):
self.assertEqual([dtypes.float32],
backprop.make_attr([pywrap_tensorflow.TF_ATTR_TYPE], [1]))
def testMulType(self):
def mul(x):
return math_ops._mul_dispatch(x, x) # pylint: disable=protected-access
self.assertAllEqual(
backprop.gradients_function(mul)(3.0)[0].numpy(),
6.0)
def testMakeAttrShape(self):
for s in ([], None, [1, 2, 3], [None, None], [1, None, 3]):
expected = tensor_shape.TensorShape(s).as_proto()
actual = backprop.make_attr(pywrap_tensorflow.TF_ATTR_SHAPE, s)
self.assertEqual(
expected,
actual,
msg=('For shape %r, expected %r != %r actual' % (s, expected,
actual)))
def testMakeAttrShapeList(self):
shape_list = [[], None, [1, 2, 3], [None, None], [1, None, 3]]
self.assertEqual(
[tensor_shape.TensorShape(s).as_proto() for s in shape_list],
backprop.make_attr([pywrap_tensorflow.TF_ATTR_SHAPE], shape_list))
def testArgsGradientFunction(self):
def f(*args):
return args[0] * args[0]
grad = backprop.gradients_function(f)
self.assertAllEqual(grad(1.0)[0], 2.0)
def testPartial(self):
def f(x, y):
return x * y
part = functools.partial(f, constant_op.constant(2.0))
self.assertAllEqual(
backprop.gradients_function(part)(constant_op.constant(1.0))[0],
2.0)
def testReturnSameThing(self):
def f(x):
return x, 2 * x
self.assertAllEqual(backprop.gradients_function(f)(1.0)[0], 3.0)
@test_util.assert_no_new_tensors
def testExceptionSafety(self):
def f(unused_x):
raise ValueError()
try:
backprop.gradients_function(f)(1.0)
except ValueError:
pass
def real_f(x):
return x * x
self.assertAllEqual(backprop.gradients_function(real_f)(1.0)[0], 2.0)
@test_util.assert_no_new_tensors
def testMultiValueConvertToTensor(self):
x = resource_variable_ops.ResourceVariable(
initial_value=array_ops.constant([1.0]), name='x')
def fn():
a = math_ops.add(x.value(), 1.0)
# Make sure convert_to_tensor works correctly with list of TensorNodes.
b = array_ops.stack([a, a], axis=0)
return math_ops.reduce_mean(b)
grad = backprop.implicit_grad(fn)()[0][0]
self.assertAllEqual([1.0], grad)
def testOutput(self):
def multiout(x):
return x + 2, x * x
x = constant_op.constant([0.0, 1.0, 2.0])
grad = backprop.gradients_function(multiout)(x)[0]
self.assertAllEqual([1.0, 3.0, 5.0], grad)
def testMultiValuePreservesIfNotDiffedAgainst(self):
def tfe_conv2d(timage, tkernel, conv2dstrides):
return nn_ops.conv2d(timage, tkernel, conv2dstrides, 'SAME')
i = constant_op.constant([[[[1.0]]]])
k = constant_op.constant([[[[2.0]]]])
s = [1, 1, 1, 1]
grad = backprop.gradients_function(tfe_conv2d, params=(0,))(i, k, s)[0]
self.assertAllEqual([[[[2.0]]]], grad)
def testSameObjectForMultipleArguments(self):
def f(x, y):
return math_ops.multiply(x, y)
g = backprop.gradients_function(f)
def np_g(x, y):
dx, dy = g(x, y)
return [dx.numpy(), dy.numpy()]
x = constant_op.constant(1.)
self.assertAllEqual([1., 1.], np_g(x, x))
x = 1.
self.assertAllEqual([1., 1.], np_g(x, x))
x = constant_op.constant([[1.]])
self.assertAllEqual([[[1.]], [[1.]]], np_g(x, x))
x = [[1.]]
self.assertAllEqual([[[1.]], [[1.]]], np_g(x, x))
v = resource_variable_ops.ResourceVariable(
initial_value=1., name='testSameObjectForMultipleArguments.Variable')
self.assertAllEqual([1., 1.], np_g(v, v))
@test_util.assert_no_new_tensors
def testImplicitGradientsCustomGradientAndCachedVariableValue(self):
@custom_gradient.custom_gradient
def my_square(x):
result = math_ops.square(x)
def grad(dr):
return 2 * dr * x + 1
return result, grad
x = resource_variable_ops.ResourceVariable(
initial_value=3., name='X.' + self.id())
def f():
return my_square(x)
g = backprop.implicit_grad(f)
grads_and_vars = g()
self.assertEqual(1, len(grads_and_vars))
grad, var = grads_and_vars[0]
self.assertAllEqual(7, grad)
self.assertAllEqual(x, var)
@test_util.assert_no_new_tensors
def testCustomGradient(self):
@custom_gradient.custom_gradient
def my_mul(x, y):
result = x*y
def grad(dr):
return [dr*y, dr*x]
return result, grad
lr = 0.25
x = resource_variable_ops.ResourceVariable(2., name='x')
def loss(x):
return my_mul(2., x.read_value())
loss_grads_fn = backprop.implicit_val_and_grad(loss)
losses = []
for _ in range(5):
loss, grads_and_vars = loss_grads_fn(x)
losses.append(loss.numpy())
for (grad, var) in grads_and_vars:
var.assign_sub(lr*grad)
self.assertAllEqual(losses, [4.0, 3., 2., 1., 0.])
@test_util.assert_no_new_tensors
def testCustomGradientIdentity(self):
@custom_gradient.custom_gradient
def my_identity(x):
def grad(dresult):
return [2 * dresult]
return x, grad
self.assertAllEqual(backprop.gradients_function(my_identity)(1.0)[0], 2.0)
def testDifferentiatingFunctionThatReturnsNone(self):
def fn(x, y):
result = x*y # pylint: disable=unused-variable
x = constant_op.constant(1)
y = constant_op.constant(2)
loss_grads_fn = backprop.implicit_val_and_grad(fn)
with self.assertRaisesRegexp(
ValueError, 'Cannot differentiate a function that returns None; '
'did you forget to return a value from fn?'):
loss_grads_fn(x, y)
val_and_grads_fn = backprop.val_and_grad_function(fn)
with self.assertRaisesRegexp(
ValueError, 'Cannot differentiate a function that returns None; '
'did you forget to return a value from fn?'):
val_and_grads_fn(x, y)
def testZerosCacheDoesntLeakAcrossGraphs(self):
with ops.Graph().as_default():
def get_grad():
with ops.Graph().as_default(), self.cached_session():
t = constant_op.constant(1, dtype=dtypes.float32, shape=(10, 4))
x = constant_op.constant(2, dtype=dtypes.float32, shape=(10, 4))
with backprop.GradientTape() as tape:
tape.watch(x)
x1, _ = array_ops.split(x, num_or_size_splits=2, axis=1)
y1 = x1**2
y = array_ops.concat([y1, t], axis=1)
return self.evaluate(tape.gradient(y, x))
grad1 = get_grad()
grad2 = get_grad()
self.assertAllEqual(grad1, grad2)
@test_util.run_in_graph_and_eager_modes
def testSelectivelyWatchVariables(self):
x1 = resource_variable_ops.ResourceVariable(1.0)
x2 = resource_variable_ops.ResourceVariable(1.0)
with backprop.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(x2)
y = x1**2
z = x2**3
self.assertTupleEqual(tape.watched_variables(), (x2,))
dy, dz = tape.gradient([y, z], [x1, x2])
self.evaluate([x1.initializer, x2.initializer])
self.assertIsNone(dy)
self.assertEqual(self.evaluate(dz), 3.0)
@test_util.run_in_graph_and_eager_modes
def testDifferentiatingScalarCache(self):
    # In the following test, if x2 were x1 (i.e. the exact same object), then
    # y would essentially be 2*x1 and dy/dx1 would be 2.
    # When eager had a pure scalar cache, this was the case. This test
    # prevents us from going back to that behavior.
with backprop.GradientTape(persistent=False) as g:
x1 = constant_op.constant(3.0)
x2 = constant_op.constant(3.0)
g.watch(x1)
g.watch(x2)
y = x1 + x2
grad = g.gradient(target=y, sources=[x1])
self.assertEqual(self.evaluate(grad), [1.0])
def testVariablesAndConstantsProduceTheSameGradients(self):
# In the following test, differentiating [y, z] against [a, b] gives:
# (dy/da + dz/da, dy/db + dz/db).
    # If a and b were the same cached constant, dz/da would not be 0 (which it
    # should be).
    # This is solved by using variables, since a read_value produces a new
    # tensor and a corresponding TensorHandle instead of reusing the same
    # tensor (which would happen if we cached and reused EagerTensor objects).
def get_grads(a, b):
with backprop.GradientTape() as tape:
tape.watch([a, b])
y = a**3
z = b**2
return tape.gradient([y, z], [a, b])
gradients_constants = get_grads(
constant_op.constant(2.0), constant_op.constant(2.0))
gradients_variables = get_grads(
resource_variable_ops.ResourceVariable(2.0),
resource_variable_ops.ResourceVariable(2.0))
self.assertAllEqual(gradients_constants, gradients_variables)
def testUnknownShapes(self):
with ops.Graph().as_default():
with backprop.GradientTape() as tape:
a = array_ops.placeholder(dtype=dtypes.float32, shape=None)
tape.watch(a)
b = a**3
db_da = tape.gradient(b, a)
with self.cached_session() as sess:
self.assertEqual((8.0, 12.0), sess.run((b, db_da), feed_dict={a: 2.0}))
@test_util.run_in_graph_and_eager_modes
def testCustomGradientInEagerAndGraph(self):
@custom_gradient.custom_gradient
def f(x):
y = x * x
def grad(dy):
return [4 * dy]
return y, grad
with backprop.GradientTape() as t:
c = constant_op.constant(1.0)
t.watch(c)
g = f(c)
self.assertAllEqual(self.evaluate(t.gradient(g, c)), 4.0)
if __name__ == '__main__':
test.main()
|
|
'''
Linux file chooser
------------------
'''
from plyer.facades import FileChooser
from distutils.spawn import find_executable as which
import os
import subprocess as sp
import time
class SubprocessFileChooser(object):
    '''A file chooser implementation that drives subprocess-based
    back-ends.
    Subclasses normally only need to override _gen_cmdline, executable,
    separator and successretcode (see the illustrative sketch after this
    class).
    '''
executable = ""
'''The name of the executable of the back-end.
'''
separator = "|"
'''The separator used by the back-end. Override this for automatic
splitting, or override _split_output.
'''
successretcode = 0
    '''The return code reported by the back-end when the user actually
    chooses something (rather than cancelling the dialog) and the process
    exits cleanly.
'''
path = None
multiple = False
filters = []
preview = False
title = None
icon = None
show_hidden = False
def __init__(self, **kwargs):
# Simulate Kivy's behavior
for i in kwargs:
setattr(self, i, kwargs[i])
_process = None
def _run_command(self, cmd):
self._process = sp.Popen(cmd, stdout=sp.PIPE)
while True:
ret = self._process.poll()
if ret is not None:
if ret == self.successretcode:
out = self._process.communicate()[0].strip().decode('utf8')
self.selection = self._split_output(out)
return self.selection
else:
return None
time.sleep(0.1)
def _split_output(self, out):
        '''This method receives the output of the back-end and turns
it into a list of paths.
'''
return out.split(self.separator)
def _gen_cmdline(self):
'''Returns the command line of the back-end, based on the current
properties. You need to override this.
'''
raise NotImplementedError()
def run(self):
return self._run_command(self._gen_cmdline())
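# Illustrative sketch (not an actual plyer back-end): a minimal subclass only
# has to name its executable, declare the separator and success return code it
# uses, and build a command line from the current properties. The
# "example-dialog" binary and its flags are hypothetical; it is assumed to
# print one selected path per line and to exit with 0 on success.
class _ExampleFileChooser(SubprocessFileChooser):
    '''Minimal example back-end; not registered in CHOOSERS below.'''
    executable = "example-dialog"
    separator = "\n"
    successretcode = 0
    def _gen_cmdline(self):
        # Translate the chooser properties into command-line flags.
        cmdline = [which(self.executable), "--choose-file"]
        if self.multiple:
            cmdline += ["--multiple"]
        if self.path:
            cmdline += ["--start-dir", self.path]
        return cmdline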
class ZenityFileChooser(SubprocessFileChooser):
'''A FileChooser implementation using Zenity (on GNU/Linux).
Not implemented features:
* show_hidden
* preview
'''
executable = "zenity"
separator = "|"
successretcode = 0
def _gen_cmdline(self):
cmdline = [
which(self.executable),
"--file-selection",
"--confirm-overwrite"
]
if self.multiple:
cmdline += ["--multiple"]
if self.mode == "save":
cmdline += ["--save"]
elif self.mode == "dir":
cmdline += ["--directory"]
if self.path:
cmdline += ["--filename", self.path]
if self.title:
cmdline += ["--name", self.title]
if self.icon:
cmdline += ["--window-icon", self.icon]
for f in self.filters:
if type(f) == str:
cmdline += ["--file-filter", f]
else:
cmdline += [
"--file-filter",
"{name} | {flt}".format(name=f[0], flt=" ".join(f[1:]))
]
return cmdline
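    # For illustration (derived from _gen_cmdline above, nothing extra): a
    # chooser created as ZenityFileChooser(mode="open", multiple=True,
    # filters=["*.png"]) would run roughly
    #
    #     zenity --file-selection --confirm-overwrite --multiple \
    #         --file-filter *.png
    #
    # and _split_output() would then split zenity's stdout on "|".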
class KDialogFileChooser(SubprocessFileChooser):
'''A FileChooser implementation using KDialog (on GNU/Linux).
Not implemented features:
* show_hidden
* preview
'''
executable = "kdialog"
separator = "\n"
successretcode = 0
def _gen_cmdline(self):
cmdline = [which(self.executable)]
filt = []
for f in self.filters:
if type(f) == str:
filt += [f]
else:
filt += list(f[1:])
if self.mode == "dir":
cmdline += [
"--getexistingdirectory",
(self.path if self.path else os.path.expanduser("~"))
]
elif self.mode == "save":
cmdline += [
"--getopenfilename",
(self.path if self.path else os.path.expanduser("~")),
" ".join(filt)
]
else:
cmdline += [
"--getopenfilename",
(self.path if self.path else os.path.expanduser("~")),
" ".join(filt)
]
if self.multiple:
cmdline += ["--multiple", "--separate-output"]
if self.title:
cmdline += ["--title", self.title]
if self.icon:
cmdline += ["--icon", self.icon]
return cmdline
class YADFileChooser(SubprocessFileChooser):
    '''A FileChooser implementation using YAD (on GNU/Linux).
Not implemented features:
* show_hidden
'''
executable = "yad"
separator = "|?|"
successretcode = 0
def _gen_cmdline(self):
cmdline = [
which(self.executable),
"--file-selection",
"--confirm-overwrite",
"--geometry",
"800x600+150+150"
]
if self.multiple:
cmdline += ["--multiple", "--separator", self.separator]
if self.mode == "save":
cmdline += ["--save"]
elif self.mode == "dir":
cmdline += ["--directory"]
if self.preview:
cmdline += ["--add-preview"]
if self.path:
cmdline += ["--filename", self.path]
if self.title:
cmdline += ["--name", self.title]
if self.icon:
cmdline += ["--window-icon", self.icon]
for f in self.filters:
if type(f) == str:
cmdline += ["--file-filter", f]
else:
cmdline += [
"--file-filter",
"{name} | {flt}".format(name=f[0], flt=" ".join(f[1:]))
]
return cmdline
CHOOSERS = {
"gnome": ZenityFileChooser,
"kde": KDialogFileChooser,
"yad": YADFileChooser
}
class LinuxFileChooser(FileChooser):
    '''FileChooser implementation for GNU/Linux. Accepts one additional
keyword argument, *desktop_override*, which, if set, overrides the
back-end that will be used. Set it to "gnome" for Zenity, to "kde"
for KDialog and to "yad" for YAD (Yet Another Dialog).
If set to None or not set, a default one will be picked based on
the running desktop environment and installed back-ends.
'''
desktop = None
if (str(os.environ.get("XDG_CURRENT_DESKTOP")).lower() == "kde"
and which("kdialog")):
desktop = "kde"
elif which("yad"):
desktop = "yad"
elif which("zenity"):
desktop = "gnome"
def _file_selection_dialog(self, desktop_override=desktop, **kwargs):
if not desktop_override:
desktop_override = desktop
# This means we couldn't find any back-end
if not desktop_override:
raise OSError("No back-end available. Please install one.")
chooser = CHOOSERS[desktop_override]
c = chooser(**kwargs)
return c.run()
def instance():
return LinuxFileChooser()
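# Hedged usage sketch (not part of the facade above): exercising the chooser
# through the internal helper defined in LinuxFileChooser. The mode value is
# one of "open", "save" or "dir" as interpreted by the back-ends; the other
# keyword arguments shown are assumptions for illustration only.
if __name__ == "__main__":
    chooser = instance()
    # Returns a list of selected paths on success, or None if cancelled.
    selection = chooser._file_selection_dialog(
        mode="open", multiple=True, title="Pick some files")
    print(selection)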
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2016 Supreeth Herle
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VBS RRC Stats Module."""
from protobuf_to_dict import protobuf_to_dict
from empower.vbsp.messages import statistics_pb2
from empower.vbsp.messages import configs_pb2
from empower.vbsp.messages import main_pb2
from empower.core.ue import UE
from empower.datatypes.etheraddress import EtherAddress
from empower.vbsp.vbspserver import ModuleVBSPWorker
from empower.core.module import ModuleTrigger
from empower.vbs_stats import RRC_STATS_RAT_TYPE
from empower.vbs_stats import RRC_STATS_REPORT_CONF_TYPE
from empower.vbs_stats import RRC_STATS_TRIGGER_QUANT
from empower.vbs_stats import RRC_STATS_BW
from empower.vbs_stats import RRC_STATS_REPORT_INTR
from empower.vbs_stats import RRC_STATS_NUM_REPORTS
from empower.vbs_stats import RRC_STATS_EVENT_THRESHOLD_TYPE
from empower.vbs_stats import PRT_VBSP_RRC_STATS
from empower.events.ueleave import ueleave
from empower.ue_confs.ue_rrc_meas_confs import ue_rrc_meas_confs
from empower.vbsp.vbspconnection import create_header
from empower.core.utils import ether_to_hex
from empower.main import RUNTIME
class VBSRRCStats(ModuleTrigger):
""" VBSRRCStats object. """
MODULE_NAME = "vbs_rrc_stats"
REQUIRED = ['module_type', 'worker', 'tenant_id', 'vbs', 'ue', 'meas_req']
def __init__(self):
ModuleTrigger.__init__(self)
# parameters
self._vbs = None
self._ue = None
self._meas_req = None
self._meas_reply = None
self._meas = {}
def ue_leave_callback(self, ue):
"""Called when an UE disconnects from a VBS."""
self.log.info("UE %s disconnected" % ue.rnti)
worker = RUNTIME.components[VBSRRCStatsWorker.__module__]
module_ids = []
module_ids.extend(worker.modules.keys())
for module_id in module_ids:
# Module object
m = worker.modules[module_id]
            # Remove all the modules pertaining to the disconnected UE
if m.ue == ue.rnti and EtherAddress(m.vbs) == ue.vbs.addr:
m.unload()
@property
def ue(self):
"""Return UE."""
return self._ue
@ue.setter
def ue(self, value):
"""Set UE."""
self._ue = value
@property
def vbs(self):
"""Return VBS."""
return self._vbs
@vbs.setter
def vbs(self, value):
"""Set VBSP."""
vbses = RUNTIME.tenants[self.tenant_id].vbses
if EtherAddress(value) not in vbses:
raise ValueError("Invalid vbs parameter")
self._vbs = EtherAddress(value)
@property
def meas_req(self):
"""Return configuration of RRC measurements requested."""
return self._meas_req
@meas_req.setter
def meas_req(self, value):
"""Set configuration of RRC measurements requested."""
if self.meas_req:
raise ValueError("Cannot update configuration")
if "rat_type" not in value:
raise ValueError("Missing measurement RAT type")
if value["rat_type"] not in RRC_STATS_RAT_TYPE:
raise ValueError("Invalid measurement RAT type")
if "bandwidth" not in value:
raise ValueError("Missing measurement bandwidth (num. of RBs)")
if value["bandwidth"] not in RRC_STATS_BW:
raise ValueError("Invalid measurement bandwidth (num. of RBs)")
if "carrier_freq" not in value:
raise ValueError("Missing frequency (EARFCN) to measure parameter")
if "cells_to_measure" in value and len(value["cells_to_measure"]) > 32:
raise ValueError("Num. of cells to measure must be < 32")
if "blacklist_cells" in value and len(value["blacklist_cells"]) > 32:
raise ValueError("Num. of blacklist cells must be < 32")
if "report_type" not in value:
raise ValueError("Missing measurement report type")
if value["report_type"] not in RRC_STATS_REPORT_CONF_TYPE:
raise ValueError("Invalid measurement report type")
if value["report_type"] == "A3" and "a3_offset" not in value:
raise ValueError("Missing a3_offset parameter for A3 event")
if value["report_type"] in ["A1", "A2", "A4", "A5"] and "threshold1" not in value:
raise ValueError("Missing threshold1 parameter for given event")
if value["report_type"] in ["A1", "A2", "A4", "A5"]:
if "type" not in value["threshold1"]:
raise ValueError("Missing threshold1 type parameter")
if value["threshold1"]["type"] not in RRC_STATS_EVENT_THRESHOLD_TYPE:
raise ValueError("Invalid threshold1 type parameter")
if "value" not in value["threshold1"]:
raise ValueError("Missing threshold1 value parameter")
if value["report_type"] == "A5" and "threshold2" not in value:
raise ValueError("Missing threshold2 parameter for A5 event")
if value["report_type"] == "A5":
if "type" not in value["threshold2"]:
raise ValueError("Missing threshold2 type parameter")
if value["threshold2"]["type"] not in RRC_STATS_EVENT_THRESHOLD_TYPE:
raise ValueError("Invalid threshold2 type parameter")
if "value" not in value["threshold2"]:
raise ValueError("Missing threshold2 value parameter")
if value["threshold2"]["type"] != value["threshold1"]["type"]:
raise ValueError("threshold1 and threshold2 must be equal")
if "report_interval" not in value:
raise ValueError("Missing interval between reports parameter")
if value["report_interval"] not in RRC_STATS_REPORT_INTR:
raise ValueError("Interval b/w reports must be b/w 1 & 3600 secs")
if "trigger_quantity" not in value:
raise ValueError("Missing trigger_quantity parameter")
if value["trigger_quantity"] not in RRC_STATS_TRIGGER_QUANT:
raise ValueError("Invalid trigger_quantity parameter")
if "num_of_reports" not in value:
raise ValueError("Missing num_of_reports parameter")
if value["num_of_reports"] not in RRC_STATS_NUM_REPORTS:
raise ValueError("Invalid Num. of measurement reports to report")
if "max_report_cells" not in value:
raise ValueError("Missing max_report_cells parameter")
if value["max_report_cells"] > 8 or value["max_report_cells"] < 1:
raise ValueError("Max. cells to report must be b/w 1 & 8")
self._meas_req = value
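    # Illustrative sketch of a request that would pass the validation above.
    # The key names come from this setter; the concrete values are assumptions
    # and must correspond to entries in the RRC_STATS_* maps imported at the
    # top of this module (e.g. RRC_STATS_BW, RRC_STATS_REPORT_INTR):
    #
    #     meas_req = {
    #         "rat_type": "EUTRA",
    #         "carrier_freq": 6400,        # EARFCN to measure
    #         "bandwidth": 50,             # number of RBs
    #         "report_type": "A3",
    #         "a3_offset": 3,              # required because report_type is A3
    #         "trigger_quantity": "RSRP",
    #         "report_interval": 480,
    #         "num_of_reports": "infinite",
    #         "max_report_cells": 3,       # must be between 1 and 8
    #     }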
@property
def meas(self):
"""Return all the RRC measurements for this module."""
return self._meas
@property
def meas_reply(self):
"""Return RRC measurements reply."""
return self._meas_reply
@meas_reply.setter
def meas_reply(self, response):
"""Set RRC measurements reply."""
tenant = RUNTIME.tenants[self.tenant_id]
ue_addr = (self.vbs, self.ue)
ue = tenant.ues[ue_addr]
self._meas_reply = protobuf_to_dict(response)
event_type = response.WhichOneof("event_types")
meas = self._meas_reply[event_type]["mRRC_meas"]["repl"]
if "PCell_rsrp" in meas:
ue.pcell_rsrp = meas["PCell_rsrp"]
if "PCell_rsrq" in meas:
ue.pcell_rsrq = meas["PCell_rsrq"]
if "neigh_meas" in meas:
for k in meas["neigh_meas"].keys():
if k == "EUTRA_meas":
# EUTRA measurement result
for m in meas["neigh_meas"][k]:
if m["phys_cell_id"] not in ue.rrc_meas:
self._meas[m["phys_cell_id"]] = {}
ue.rrc_meas[m["phys_cell_id"]] = {}
ue.rrc_meas[m["phys_cell_id"]]["RAT_type"] = "EUTRA"
if "meas_result" in m:
if "rsrp" in m["meas_result"]:
self._meas[m["phys_cell_id"]]["rsrp"] = \
m["meas_result"]["rsrp"]
ue.rrc_meas[m["phys_cell_id"]]["rsrp"] = \
m["meas_result"]["rsrp"]
else:
self._meas[m["phys_cell_id"]]["rsrp"] = -139
ue.rrc_meas[m["phys_cell_id"]]["rsrp"] = -139
if "rsrq" in m["meas_result"]:
self._meas[m["phys_cell_id"]]["rsrq"] = \
m["meas_result"]["rsrq"]
ue.rrc_meas[m["phys_cell_id"]]["rsrq"] = \
m["meas_result"]["rsrq"]
else:
self._meas[m["phys_cell_id"]]["rsrq"] = -19
ue.rrc_meas[m["phys_cell_id"]]["rsrq"] = -19
else:
self._meas[m["phys_cell_id"]]["rsrp"] = -139
self._meas[m["phys_cell_id"]]["rsrq"] = -19
ue.rrc_meas[m["phys_cell_id"]]["rsrp"] = -139
ue.rrc_meas[m["phys_cell_id"]]["rsrq"] = -19
self._meas_reply = meas
def __eq__(self, other):
return super().__eq__(other) and self.vbs == other.vbs and \
self.ue == other.ue and self.meas_req == other.meas_req
def to_dict(self):
""" Return a JSON-serializable."""
out = super().to_dict()
out['vbs'] = self.vbs
out['tenant'] = self.tenant_id
out['ue'] = self.ue
out['meas_req'] = self.meas_req
out['measurements'] = self.meas
out['meas_reply'] = self.meas_reply
return out
def run_once(self):
"""Send out RRC measurements request."""
if self.tenant_id not in RUNTIME.tenants:
self.log.info("Tenant %s not found", self.tenant_id)
self.unload()
return
tenant = RUNTIME.tenants[self.tenant_id]
ue_addr = (self.vbs, self.ue)
if ue_addr not in tenant.ues:
self.log.info("UE %s not found", ue_addr)
return
ue = tenant.ues[ue_addr]
if not ue.vbs.connection or ue.vbs.connection.stream.closed():
self.log.info("VBS %s not connected", ue.vbs.addr)
return
st_req = self.meas_req
rrc_m_req = main_pb2.emage_msg()
enb_id = ether_to_hex(self.vbs)
create_header(self.module_id, enb_id, rrc_m_req.head)
# Creating a trigger message to fetch UE's RRC measurements
trigger_msg = rrc_m_req.te
trigger_msg.action = main_pb2.EA_ADD
rrc_m_msg = trigger_msg.mRRC_meas
rrc_m_req_msg = rrc_m_msg.req
rrc_m_req_msg.rnti = ue.rnti
rrc_m_req_msg.rat = RRC_STATS_RAT_TYPE[st_req["rat_type"]]
rrc_m_req_msg.measId = 0
rrc_m_req_msg.m_obj.measObjId = 0
rrc_m_req_msg.r_conf.reportConfId = 0
if st_req["rat_type"] == "EUTRA":
m_obj = rrc_m_req_msg.m_obj
measObj_EUTRA = m_obj.measObj_EUTRA
measObj_EUTRA.carrier_freq = st_req["carrier_freq"]
measObj_EUTRA.meas_bw = RRC_STATS_BW[st_req["bandwidth"]]
if "cells_to_measure" in st_req:
for c in st_req["cells_to_measure"]:
measObj_EUTRA.cells.append(st_req["cells_to_measure"][c])
if "blacklist_cells" in st_req:
for c in st_req["blacklist_cells"]:
measObj_EUTRA.bkl_cells.append(st_req["blacklist_cells"][c])
if st_req["rat_type"] == "EUTRA":
# EUTRA report configuration
r_conf = rrc_m_req_msg.r_conf
rc_EUTRA = r_conf.rc_EUTRA
# Setting default values
rc_EUTRA.hysteresis = 0
rc_EUTRA.trigg_time = configs_pb2.TTRIG_ms0
rc_EUTRA.report_quant = configs_pb2.REPQ_BOTH
rc_EUTRA.ue_rxtx_time_diff = configs_pb2.UERXTXTD_SETUP
rc_EUTRA.trigg_quant = \
RRC_STATS_TRIGGER_QUANT[st_req["trigger_quantity"]]
rc_EUTRA.max_rep_cells = st_req["max_report_cells"]
rc_EUTRA.rep_interval = \
RRC_STATS_REPORT_INTR[st_req["report_interval"]]
rc_EUTRA.rep_amount = \
RRC_STATS_NUM_REPORTS[st_req["num_of_reports"]]
if st_req["report_type"] == "periodical_ref_signal":
rc_EUTRA.periodical.purpose = \
configs_pb2.PERRP_REPORT_STRONGEST_CELLS
elif st_req["report_type"] == "A1":
a1 = rc_EUTRA.a1
if st_req["threshold1"]["type"] == "RSRP":
a1.a1_threshold.RSRP = st_req["threshold1"]["value"]
else:
a1.a1_threshold.RSRQ = st_req["threshold1"]["value"]
elif st_req["report_type"] == "A2":
a2 = rc_EUTRA.a2
if st_req["threshold1"]["type"] == "RSRP":
a2.a2_threshold.RSRP = st_req["threshold1"]["value"]
else:
a2.a2_threshold.RSRQ = st_req["threshold1"]["value"]
elif st_req["report_type"] == "A3":
a3 = rc_EUTRA.a3
a3.a3_offset = st_req["a3_offset"]
a3.report_on_leave = 1
elif st_req["report_type"] == "A4":
a4 = rc_EUTRA.a4
if st_req["threshold1"]["type"] == "RSRP":
a4.a4_threshold.RSRP = st_req["threshold1"]["value"]
else:
a4.a4_threshold.RSRQ = st_req["threshold1"]["value"]
elif st_req["report_type"] == "A5":
a5 = rc_EUTRA.a5
if st_req["threshold1"]["type"] == "RSRP":
a5.a5_threshold1.RSRP = st_req["threshold1"]["value"]
else:
a5.a5_threshold1.RSRQ = st_req["threshold1"]["value"]
if st_req["threshold2"]["type"] == "RSRP":
a5.a5_threshold2.RSRP = st_req["threshold2"]["value"]
else:
a5.a5_threshold2.RSRQ = st_req["threshold2"]["value"]
self.log.info("Sending RRC stats request to %s (id=%u)", ue.vbs.addr,
self.module_id)
ue.vbs.connection.stream_send(rrc_m_req)
ueleave(tenant_id=self.tenant_id, callback=self.ue_leave_callback)
def cleanup(self):
"""Remove this module."""
self.log.info("Cleanup %s (id=%u)", self.module_type, self.module_id)
vbses = RUNTIME.tenants[self.tenant_id].vbses
if self.vbs not in vbses:
return
vbs = vbses[self.vbs]
ue_addr = (self.vbs, self.ue)
if ue_addr not in RUNTIME.tenants[self.tenant_id].ues:
return
ue = RUNTIME.tenants[self.tenant_id].ues[ue_addr]
if not vbs.connection or vbs.connection.stream.closed():
self.log.info("VBS %s not connected", vbs.addr)
return
meas = self.meas
for m in meas.keys():
if m in ue.rrc_meas:
del ue.rrc_meas[m]
rrc_m_req = main_pb2.emage_msg()
enb_id = ether_to_hex(self.vbs)
# Transaction identifier is zero by default.
create_header(self.module_id, enb_id, rrc_m_req.head)
# Creating a trigger message to delete UE's RRC measurements trigger
trigger_msg = rrc_m_req.te
trigger_msg.action = main_pb2.EA_DEL
rrc_m_msg = trigger_msg.mRRC_meas
rrc_m_req_msg = rrc_m_msg.req
rrc_m_req_msg.rnti = ue.rnti
rrc_m_req_msg.measId = 0
rrc_m_req_msg.m_obj.measObjId = 0
rrc_m_req_msg.r_conf.reportConfId = 0
meas_req = self.meas_req
rrc_m_req_msg.rat = RRC_STATS_RAT_TYPE[meas_req["rat_type"]]
connection = vbs.connection
enb_id = connection.vbs.enb_id
vbs.connection.stream_send(rrc_m_req)
def handle_response(self, response):
"""Handle an incoming stats response message.
Args:
message, a stats response message
Returns:
None
"""
# update cache
self.meas_reply = response
# call callback
self.handle_callback(self)
class VBSRRCStatsWorker(ModuleVBSPWorker):
""" Counter worker. """
pass
def vbs_rrc_stats(**kwargs):
"""Create a new module."""
return \
RUNTIME.components[VBSRRCStatsWorker.__module__].add_module(**kwargs)
def bound_vbs_rrc_stats(self, **kwargs):
"""Create a new module (app version)."""
kwargs['tenant_id'] = self.tenant.tenant_id
kwargs['ue'] = self.addr
kwargs['vbs'] = self.vbs.addr
    return vbs_rrc_stats(**kwargs)
setattr(UE, VBSRRCStats.MODULE_NAME, bound_vbs_rrc_stats)
def launch():
""" Initialize the module. """
return VBSRRCStatsWorker(VBSRRCStats, PRT_VBSP_RRC_STATS)
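# Hedged usage sketch: after launch() registers the worker, an EmPOWER app can
# request RRC measurements through the helper bound onto UE objects above. The
# callback keyword follows the generic module callback convention and the
# names used here are hypothetical:
#
#     meas_req = {...}  # see VBSRRCStats.meas_req for the expected keys
#     ue.vbs_rrc_stats(meas_req=meas_req,
#                      callback=self.rrc_measurements_callback)
#
# which is equivalent to calling vbs_rrc_stats() directly with tenant_id, ue
# and vbs filled in from the UE object.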
|
|
"""The tests for the emulated Hue component."""
import json
import unittest
from aiohttp.hdrs import CONTENT_TYPE
import defusedxml.ElementTree as ET
import requests
from homeassistant import const, setup
from homeassistant.components import emulated_hue
from homeassistant.components.emulated_hue import upnp
from homeassistant.const import CONTENT_TYPE_JSON, HTTP_OK
from tests.common import get_test_home_assistant, get_test_instance_port
HTTP_SERVER_PORT = get_test_instance_port()
BRIDGE_SERVER_PORT = get_test_instance_port()
BRIDGE_URL_BASE = f"http://127.0.0.1:{BRIDGE_SERVER_PORT}" + "{}"
JSON_HEADERS = {CONTENT_TYPE: const.CONTENT_TYPE_JSON}
class MockTransport:
"""Mock asyncio transport."""
def __init__(self):
"""Create a place to store the sends."""
self.sends = []
def sendto(self, response, addr):
"""Mock sendto."""
self.sends.append((response, addr))
class TestEmulatedHue(unittest.TestCase):
"""Test the emulated Hue component."""
hass = None
@classmethod
def setUpClass(cls):
"""Set up the class."""
cls.hass = hass = get_test_home_assistant()
setup.setup_component(
hass,
emulated_hue.DOMAIN,
{emulated_hue.DOMAIN: {emulated_hue.CONF_LISTEN_PORT: BRIDGE_SERVER_PORT}},
)
cls.hass.start()
@classmethod
def tearDownClass(cls):
"""Stop the class."""
cls.hass.stop()
def test_upnp_discovery_basic(self):
"""Tests the UPnP basic discovery response."""
upnp_responder_protocol = upnp.UPNPResponderProtocol(
None, None, "192.0.2.42", 8080
)
mock_transport = MockTransport()
upnp_responder_protocol.transport = mock_transport
"""Original request emitted by the Hue Bridge v1 app."""
request = """M-SEARCH * HTTP/1.1
HOST:239.255.255.250:1900
ST:ssdp:all
Man:"ssdp:discover"
MX:3
"""
encoded_request = request.replace("\n", "\r\n").encode("utf-8")
upnp_responder_protocol.datagram_received(encoded_request, 1234)
expected_response = """HTTP/1.1 200 OK
CACHE-CONTROL: max-age=60
EXT:
LOCATION: http://192.0.2.42:8080/description.xml
SERVER: FreeRTOS/6.0.5, UPnP/1.0, IpBridge/1.16.0
hue-bridgeid: 001788FFFE23BFC2
ST: urn:schemas-upnp-org:device:basic:1
USN: uuid:2f402f80-da50-11e1-9b23-001788255acc
"""
expected_send = expected_response.replace("\n", "\r\n").encode("utf-8")
assert mock_transport.sends == [(expected_send, 1234)]
def test_upnp_discovery_rootdevice(self):
"""Tests the UPnP rootdevice discovery response."""
upnp_responder_protocol = upnp.UPNPResponderProtocol(
None, None, "192.0.2.42", 8080
)
mock_transport = MockTransport()
upnp_responder_protocol.transport = mock_transport
"""Original request emitted by Busch-Jaeger free@home SysAP."""
request = """M-SEARCH * HTTP/1.1
HOST: 239.255.255.250:1900
MAN: "ssdp:discover"
MX: 40
ST: upnp:rootdevice
"""
encoded_request = request.replace("\n", "\r\n").encode("utf-8")
upnp_responder_protocol.datagram_received(encoded_request, 1234)
expected_response = """HTTP/1.1 200 OK
CACHE-CONTROL: max-age=60
EXT:
LOCATION: http://192.0.2.42:8080/description.xml
SERVER: FreeRTOS/6.0.5, UPnP/1.0, IpBridge/1.16.0
hue-bridgeid: 001788FFFE23BFC2
ST: upnp:rootdevice
USN: uuid:2f402f80-da50-11e1-9b23-001788255acc::upnp:rootdevice
"""
expected_send = expected_response.replace("\n", "\r\n").encode("utf-8")
assert mock_transport.sends == [(expected_send, 1234)]
def test_upnp_no_response(self):
"""Tests the UPnP does not response on an invalid request."""
upnp_responder_protocol = upnp.UPNPResponderProtocol(
None, None, "192.0.2.42", 8080
)
mock_transport = MockTransport()
upnp_responder_protocol.transport = mock_transport
"""Original request emitted by the Hue Bridge v1 app."""
request = """INVALID * HTTP/1.1
HOST:239.255.255.250:1900
ST:ssdp:all
Man:"ssdp:discover"
MX:3
"""
encoded_request = request.replace("\n", "\r\n").encode("utf-8")
upnp_responder_protocol.datagram_received(encoded_request, 1234)
assert mock_transport.sends == []
def test_description_xml(self):
"""Test the description."""
result = requests.get(BRIDGE_URL_BASE.format("/description.xml"), timeout=5)
assert result.status_code == HTTP_OK
assert "text/xml" in result.headers["content-type"]
# Make sure the XML is parsable
try:
root = ET.fromstring(result.text)
ns = {"s": "urn:schemas-upnp-org:device-1-0"}
assert root.find("./s:device/s:serialNumber", ns).text == "001788FFFE23BFC2"
except: # noqa: E722 pylint: disable=bare-except
self.fail("description.xml is not valid XML!")
def test_create_username(self):
"""Test the creation of an username."""
request_json = {"devicetype": "my_device"}
result = requests.post(
BRIDGE_URL_BASE.format("/api"), data=json.dumps(request_json), timeout=5
)
assert result.status_code == HTTP_OK
assert CONTENT_TYPE_JSON in result.headers["content-type"]
resp_json = result.json()
success_json = resp_json[0]
assert "success" in success_json
assert "username" in success_json["success"]
def test_unauthorized_view(self):
"""Test unauthorized view."""
request_json = {"devicetype": "my_device"}
result = requests.get(
BRIDGE_URL_BASE.format("/api/unauthorized"),
data=json.dumps(request_json),
timeout=5,
)
assert result.status_code == HTTP_OK
assert CONTENT_TYPE_JSON in result.headers["content-type"]
resp_json = result.json()
assert len(resp_json) == 1
success_json = resp_json[0]
assert len(success_json) == 1
assert "error" in success_json
error_json = success_json["error"]
assert len(error_json) == 3
assert "/" in error_json["address"]
assert "unauthorized user" in error_json["description"]
assert "1" in error_json["type"]
def test_valid_username_request(self):
"""Test request with a valid username."""
request_json = {"invalid_key": "my_device"}
result = requests.post(
BRIDGE_URL_BASE.format("/api"), data=json.dumps(request_json), timeout=5
)
assert result.status_code == 400
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `MultiDeviceIterator` and `OwnedMultiDeviceIterator` API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import multi_device_iterator_ops
from tensorflow.python.eager import def_function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
class MultiDeviceIteratorTest(test_base.DatasetTestBase,
parameterized.TestCase):
def setUp(self):
super(MultiDeviceIteratorTest, self).setUp()
self._devices = self.configureDevicesForMultiDeviceTest(3)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(num_inits=[0, 1, 42])))
def testInitOnly(self, num_inits):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
for _ in range(num_inits):
self.evaluate(multi_device_iterator.initializer)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
max_buffer_size=[0, 1, 10], prefetch_buffer_size=[0, 1, 10])))
def testBasic(self, prefetch_buffer_size, max_buffer_size):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]],
max_buffer_size=max_buffer_size,
prefetch_buffer_size=prefetch_buffer_size)
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@combinations.generate(test_base.default_test_combinations())
def testOneOnSameDevice(self):
dataset = dataset_ops.Dataset.range(12)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[0], self._devices[1], self._devices[2]])
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 12, 3):
elem_on_0, elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_0))
self.assertEqual(i + 1, self.evaluate(elem_on_1))
self.assertEqual(i + 2, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_0, elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_0)
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@combinations.generate(test_base.default_test_combinations())
def testRepeatDevices(self):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[1]])
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elements = multi_device_iterator.get_next()
elem_on_1, elem_on_2 = elements
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elements = multi_device_iterator.get_next()
elem_on_1, elem_on_2 = elements
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@combinations.generate(test_base.default_test_combinations())
def testNotFullyDivisible(self):
dataset = dataset_ops.Dataset.range(9)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 8, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
elem_on_1 = multi_device_iterator.get_next(self._devices[1])
self.assertEqual(8, self.evaluate(elem_on_1))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@combinations.generate(test_base.default_test_combinations())
def testGetNextAsOptional(self):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next_as_optional()
has_elem_1, get_elem_1 = self.evaluate(
[elem_on_1.has_value(), elem_on_1.get_value()])
has_elem_2, get_elem_2 = self.evaluate(
[elem_on_2.has_value(), elem_on_2.get_value()])
self.assertTrue(has_elem_1)
self.assertEqual(i, get_elem_1)
self.assertTrue(has_elem_2)
self.assertEqual(i + 1, get_elem_2)
elem_on_1, elem_on_2 = multi_device_iterator.get_next_as_optional()
has_elem_1 = elem_on_1.has_value()
has_elem_2 = elem_on_2.has_value()
self.assertFalse(self.evaluate(has_elem_1))
self.assertFalse(self.evaluate(has_elem_2))
with self.assertRaises(errors.InvalidArgumentError):
elem_1 = elem_on_1.get_value()
self.evaluate(elem_1)
with self.assertRaises(errors.InvalidArgumentError):
elem_2 = elem_on_2.get_value()
self.evaluate(elem_2)
@combinations.generate(test_base.default_test_combinations())
def testUneven(self):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]], max_buffer_size=4)
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1 = multi_device_iterator.get_next(self._devices[1])
self.assertEqual(i, self.evaluate(elem_on_1))
for i in range(0, 10, 2):
elem_on_2 = multi_device_iterator.get_next(self._devices[2])
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@combinations.generate(test_base.graph_only_combinations())
def testMultipleInitializationsGraph(self):
dataset1 = dataset_ops.Dataset.range(1000)
dataset2 = dataset_ops.Dataset.range(1000)
dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]], prefetch_buffer_size=4)
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
for _ in range(5):
self.evaluate(multi_device_iterator.initializer)
self.assertEqual([(0, 0), (1, 1)], self.evaluate([elem_on_1, elem_on_2]))
@combinations.generate(test_base.eager_only_combinations())
def testMultipleInitializationsEager(self):
dataset1 = dataset_ops.Dataset.range(1000)
dataset2 = dataset_ops.Dataset.range(1000)
dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
for _ in range(5):
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]], prefetch_buffer_size=4)
self.evaluate(multi_device_iterator.initializer)
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual([(0, 0), (1, 1)], self.evaluate([elem_on_1, elem_on_2]))
@combinations.generate(test_base.default_test_combinations())
def testOptimization(self):
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(testing.assert_next(["MemoryCacheImpl"]))
dataset = dataset.skip(0) # this should be optimized away
dataset = dataset.cache()
options = dataset_ops.Options()
options.experimental_optimization.noop_elimination = True
dataset = dataset.with_options(options)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
class OwnedMultiDeviceIteratorTest(test_base.DatasetTestBase,
parameterized.TestCase):
def setUp(self):
super(OwnedMultiDeviceIteratorTest, self).setUp()
self._devices = self.configureDevicesForMultiDeviceTest(3)
@combinations.generate(
combinations.times(
test_base.eager_only_combinations(),
combinations.combine(
max_buffer_size=[0, 1, 10], prefetch_buffer_size=[0, 1, 10])))
def testBasic(self, max_buffer_size, prefetch_buffer_size):
dataset = dataset_ops.Dataset.range(1000)
mdi = multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]],
max_buffer_size=max_buffer_size,
prefetch_buffer_size=prefetch_buffer_size)
for i, el in enumerate(mdi):
self.assertEqual([i * 2, i * 2 + 1], [el[0].numpy(), el[1].numpy()])
@combinations.generate(test_base.eager_only_combinations())
def testBasicFunction(self):
queue = data_flow_ops.FIFOQueue(10, dtypes.int64)
@def_function.function
def fn():
with ops.device(self._devices[0]):
dataset = dataset_ops.Dataset.range(10)
iterator = multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
for _ in range(5):
el0, el1 = next(iterator)
queue.enqueue(el0)
queue.enqueue(el1)
fn()
for i in range(10):
self.assertEqual(queue.dequeue().numpy(), i)
@combinations.generate(test_base.eager_only_combinations())
def testFunctionError(self):
# In this test we verify that a function that raises an error ends up
# properly deallocating the iterator resource.
queue = data_flow_ops.FIFOQueue(10, dtypes.int64)
queue.enqueue(0)
def init_fn(n):
return n
def next_fn(_):
ds = dataset_ops.Dataset.range(0)
return next(iter(ds))
def finalize_fn(n):
queue.enqueue(0)
return n
@def_function.function
def fn():
dataset = dataset_ops._GeneratorDataset(
1,
init_fn,
next_fn,
finalize_fn,
output_signature=tensor_spec.TensorSpec([], dtypes.int64))
iterator = multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
next(iterator)
with self.assertRaises(errors.OutOfRangeError):
fn()
self.assertEqual(queue.size().numpy(), 2)
@combinations.generate(test_base.eager_only_combinations())
def testMultipleInitializations(self):
dataset = dataset_ops.Dataset.range(1000)
for _ in range(5):
multi_device_iterator = (
multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]]))
for i, el in enumerate(multi_device_iterator):
self.assertEqual([i * 2, i * 2 + 1], [el[0].numpy(), el[1].numpy()])
@combinations.generate(test_base.eager_only_combinations())
def testLimitedRetracing(self):
trace_count = [0]
@def_function.function
def f(iterator):
trace_count[0] += 1
counter = np.int64(0)
for _ in range(5):
elem = next(iterator)
counter += elem[0]
counter += elem[1]
return counter
dataset = dataset_ops.Dataset.range(10)
dataset2 = dataset_ops.Dataset.range(20)
for _ in range(10):
multi_device_iterator = (
multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]]))
self.assertEqual(self.evaluate(f(multi_device_iterator)), 45)
multi_device_iterator2 = (
multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset2, [self._devices[1], self._devices[2]]))
self.assertEqual(self.evaluate(f(multi_device_iterator2)), 45)
self.assertEqual(trace_count[0], 1)
if __name__ == "__main__":
test.main()
|
|
#!/usr/bin/env vpython3
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import io
import os
import sys
import tempfile
import unittest
# Mutates sys.path.
import test_env
# third_party/
from depot_tools import auto_stub
import isolate_format
from utils import file_path
FAKE_DIR = (
u'z:\\path\\to\\non_existing'
if sys.platform == 'win32' else u'/path/to/non_existing')
class IsolateFormatTest(auto_stub.TestCase):
def test_unknown_key(self):
try:
isolate_format.verify_variables({'foo': [],})
self.fail()
except AssertionError:
pass
def test_unknown_var(self):
try:
isolate_format.verify_condition({'variables': {'foo': [],}}, {})
self.fail()
except AssertionError:
pass
def test_eval_content(self):
try:
# Intrinsics are not available.
isolate_format.eval_content('map(str, [1, 2])')
self.fail()
except NameError:
pass
def test_load_isolate_as_config_empty(self):
expected = {
(): {
'isolate_dir': FAKE_DIR,
},
}
self.assertEqual(
expected,
isolate_format.load_isolate_as_config(FAKE_DIR, {}, None).flatten())
def test_load_isolate_as_config(self):
value = {
'conditions': [
['OS=="amiga" or OS=="atari" or OS=="coleco" or OS=="dendy"', {
'variables': {
'files': ['a', 'b', 'touched'],
},
}],
['OS=="atari"', {
'variables': {
'files': ['c', 'd', 'touched_a', 'x'],
'command': ['echo', 'Hello World'],
'read_only': 2,
},
}],
['OS=="amiga" or OS=="coleco" or OS=="dendy"', {
'variables': {
'files': ['e', 'f', 'touched_e', 'x'],
'command': ['echo', 'You should get an Atari'],
},
}],
['OS=="amiga"', {
'variables': {
'files': ['g'],
'read_only': 1,
},
}],
['OS=="amiga" or OS=="atari" or OS=="dendy"', {
'variables': {
'files': ['h'],
},
}],
],
}
expected = {
(None,): {
'isolate_dir': FAKE_DIR,
},
('amiga',): {
'files': ['a', 'b', 'e', 'f', 'g', 'h', 'touched', 'touched_e', 'x'],
'command': ['echo', 'You should get an Atari'],
'isolate_dir': FAKE_DIR,
'read_only': 1,
},
('atari',): {
'files': ['a', 'b', 'c', 'd', 'h', 'touched', 'touched_a', 'x'],
'command': ['echo', 'Hello World'],
'isolate_dir': FAKE_DIR,
'read_only': 2,
},
('coleco',): {
'files': ['a', 'b', 'e', 'f', 'touched', 'touched_e', 'x'],
'command': ['echo', 'You should get an Atari'],
'isolate_dir': FAKE_DIR,
},
('dendy',): {
'files': ['a', 'b', 'e', 'f', 'h', 'touched', 'touched_e', 'x'],
'command': ['echo', 'You should get an Atari'],
'isolate_dir': FAKE_DIR,
},
}
self.assertEqual(
expected, isolate_format.load_isolate_as_config(
FAKE_DIR, value, None).flatten())
def test_load_isolate_as_config_duplicate_command(self):
value = {
'variables': {
'command': ['rm', '-rf', '/'],
},
'conditions': [
['OS=="atari"', {
'variables': {
'command': ['echo', 'Hello World'],
},
}],
],
}
try:
isolate_format.load_isolate_as_config(FAKE_DIR, value, None)
self.fail()
except AssertionError:
pass
def test_load_isolate_as_config_no_variable(self):
value = {
'variables': {
'command': ['echo', 'You should get an Atari'],
'files': ['a', 'b', 'touched'],
'read_only': 1,
},
}
# The key is the empty tuple, since there is no variable to bind to.
expected = {
(): {
'command': ['echo', 'You should get an Atari'],
'files': ['a', 'b', 'touched'],
'isolate_dir': FAKE_DIR,
'read_only': 1,
},
}
self.assertEqual(
expected, isolate_format.load_isolate_as_config(
FAKE_DIR, value, None).flatten())
def test_merge_two_empty(self):
actual = isolate_format.Configs(None, ()).union(
isolate_format.load_isolate_as_config(FAKE_DIR, {}, None)).union(
isolate_format.load_isolate_as_config(FAKE_DIR, {}, None))
expected = {
(): {
'isolate_dir': FAKE_DIR,
},
}
self.assertEqual(expected, actual.flatten())
def test_load_two_conditions(self):
linux = {
'conditions': [
['OS=="linux"', {
'variables': {
'files': [
'file_linux',
'file_common',
],
},
}],
],
}
mac = {
'conditions': [
['OS=="mac"', {
'variables': {
'files': [
'file_mac',
'file_common',
],
},
}],
],
}
expected = {
(None,): {
'isolate_dir': FAKE_DIR,
},
('linux',): {
'files': ['file_common', 'file_linux'],
'isolate_dir': FAKE_DIR,
},
('mac',): {
'files': ['file_common', 'file_mac'],
'isolate_dir': FAKE_DIR,
},
}
configs = isolate_format.Configs(None, ()).union(
isolate_format.load_isolate_as_config(FAKE_DIR, linux, None)).union(
isolate_format.load_isolate_as_config(FAKE_DIR, mac, None)
).flatten()
self.assertEqual(expected, configs)
def test_load_three_conditions(self):
linux = {
'conditions': [
['OS=="linux" and chromeos==1', {
'variables': {
'files': [
'file_linux',
'file_common',
],
},
}],
],
}
mac = {
'conditions': [
['OS=="mac" and chromeos==0', {
'variables': {
'files': [
'file_mac',
'file_common',
],
},
}],
],
}
win = {
'conditions': [
['OS=="win" and chromeos==0', {
'variables': {
'files': [
'file_win',
'file_common',
],
},
}],
],
}
expected = {
(None, None): {
'isolate_dir': FAKE_DIR,
},
('linux', 1): {
'files': ['file_common', 'file_linux'],
'isolate_dir': FAKE_DIR,
},
('mac', 0): {
'files': ['file_common', 'file_mac'],
'isolate_dir': FAKE_DIR,
},
('win', 0): {
'files': ['file_common', 'file_win'],
'isolate_dir': FAKE_DIR,
},
}
configs = isolate_format.Configs(None, ()).union(
isolate_format.load_isolate_as_config(FAKE_DIR, linux, None)).union(
isolate_format.load_isolate_as_config(FAKE_DIR, mac, None)).union(
isolate_format.load_isolate_as_config(FAKE_DIR, win, None))
self.assertEqual(expected, configs.flatten())
def test_safe_index(self):
self.assertEqual(1, isolate_format._safe_index(('a', 'b'), 'b'))
self.assertEqual(None, isolate_format._safe_index(('a', 'b'), 'c'))
def test_get_map_keys(self):
self.assertEqual(
(0, None, 1), isolate_format._get_map_keys(('a', 'b', 'c'), ('a', 'c')))
def test_map_keys(self):
self.assertEqual(
('a', None, 'c'),
isolate_format._map_keys((0, None, 1), ('a', 'c')))
def test_load_multi_variables(self):
# Load an .isolate with different condition on different variables.
data = {
'conditions': [
['OS=="abc"', {
'variables': {
'command': ['bar'],
},
}],
['CHROMEOS=="1"', {
'variables': {
'command': ['foo'],
},
}],
],
}
configs = isolate_format.load_isolate_as_config(FAKE_DIR, data, None)
self.assertEqual(('CHROMEOS', 'OS'), configs.config_variables)
flatten = dict((k, v.flatten()) for k, v in configs._by_config.items())
expected = {
(None, None): {
'isolate_dir': FAKE_DIR,
},
(None, 'abc'): {
'command': ['bar'],
'isolate_dir': FAKE_DIR,
},
('1', None): {
'command': ['foo'],
'isolate_dir': FAKE_DIR,
},
# TODO(maruel): It is a conflict.
('1', 'abc'): {
'command': ['bar'],
'isolate_dir': FAKE_DIR,
},
}
self.assertEqual(expected, flatten)
def test_union_multi_variables(self):
data1 = {
'conditions': [
['OS=="abc"', {
'variables': {
'command': ['bar'],
},
}],
],
}
data2 = {
'conditions': [
['CHROMEOS=="1"', {
'variables': {
'command': ['foo'],
},
}],
],
}
configs1 = isolate_format.load_isolate_as_config(FAKE_DIR, data1, None)
configs2 = isolate_format.load_isolate_as_config(FAKE_DIR, data2, None)
configs = configs1.union(configs2)
self.assertEqual(('CHROMEOS', 'OS'), configs.config_variables)
flatten = dict((k, v.flatten()) for k, v in configs._by_config.items())
expected = {
(None, None): {
'isolate_dir': FAKE_DIR,
},
(None, 'abc'): {
'command': ['bar'],
'isolate_dir': FAKE_DIR,
},
('1', None): {
'command': ['foo'],
'isolate_dir': FAKE_DIR,
},
}
self.assertEqual(expected, flatten)
def test_ConfigSettings_union(self):
lhs_values = {}
rhs_values = {'files': ['data/', 'test/data/']}
lhs = isolate_format.ConfigSettings(lhs_values, '/src/net/third_party/nss')
rhs = isolate_format.ConfigSettings(rhs_values, '/src/base')
out = lhs.union(rhs)
expected = {
'files': ['data/', 'test/data/'],
'isolate_dir': '/src/base',
}
self.assertEqual(expected, out.flatten())
def test_configs_comment(self):
configs = isolate_format.load_isolate_as_config(
FAKE_DIR, {}, '# Yo dawg!\n# Chill out.\n').union(
isolate_format.load_isolate_as_config(FAKE_DIR, {}, None))
self.assertEqual('# Yo dawg!\n# Chill out.\n', configs.file_comment)
configs = isolate_format.load_isolate_as_config(FAKE_DIR, {}, None).union(
isolate_format.load_isolate_as_config(
FAKE_DIR, {}, '# Yo dawg!\n# Chill out.\n'))
self.assertEqual('# Yo dawg!\n# Chill out.\n', configs.file_comment)
# Only keep the first one.
configs = isolate_format.load_isolate_as_config(
FAKE_DIR, {}, '# Yo dawg!\n').union(
isolate_format.load_isolate_as_config(
FAKE_DIR, {}, '# Chill out.\n'))
self.assertEqual('# Yo dawg!\n', configs.file_comment)
def test_extract_comment(self):
self.assertEqual(
'# Foo\n# Bar\n', isolate_format.extract_comment('# Foo\n# Bar\n{}'))
self.assertEqual('', isolate_format.extract_comment('{}'))
def _test_pretty_print_impl(self, value, expected):
actual = io.BytesIO()
isolate_format.pretty_print(value, actual)
self.assertEqual(expected.splitlines(), actual.getvalue().splitlines())
def test_pretty_print_empty(self):
self._test_pretty_print_impl({}, b'{\n}\n')
def test_pretty_print_simple(self):
self._test_pretty_print_impl({'a': 'b'}, b'{\n \'a\': \'b\',\n}')
def test_pretty_print_mid_size(self):
value = {
'variables': {
'files': [
'file1',
'file2',
],
},
'conditions': [
[
'OS==\"foo\"',
{
'variables': {
'files': [
'dir1/',
'dir2/',
'file3',
'file4',
],
'command': ['python', '-c', 'print("H\\i\'")'],
'read_only': 2,
},
}
],
['OS==\"bar\"', {
'variables': {},
}],
],
}
isolate_format.verify_root(value, {})
# This is an .isolate format.
expected = (b"{\n"
b" 'variables': {\n"
b" 'files': [\n"
b" 'file1',\n"
b" 'file2',\n"
b" ],\n"
b" },\n"
b" 'conditions': [\n"
b" ['OS==\"foo\"', {\n"
b" 'variables': {\n"
b" 'command': [\n"
b" 'python',\n"
b" '-c',\n"
b" 'print(\"H\\i\'\")',\n"
b" ],\n"
b" 'files': [\n"
b" 'dir1/',\n"
b" 'dir2/',\n"
b" 'file3',\n"
b" 'file4',\n"
b" ],\n"
b" 'read_only': 2,\n"
b" },\n"
b" }],\n"
b" ['OS==\"bar\"', {\n"
b" 'variables': {\n"
b" },\n"
b" }],\n"
b" ],\n"
b"}\n")
self._test_pretty_print_impl(value, expected)
def test_convert_old_to_new_else(self):
isolate_with_else_clauses = {
'conditions': [
['OS=="mac"', {
'variables': {'foo': 'bar'},
}, {
'variables': {'x': 'y'},
}],
],
}
with self.assertRaises(isolate_format.IsolateError):
isolate_format.load_isolate_as_config(
FAKE_DIR, isolate_with_else_clauses, None)
def test_match_configs(self):
expectations = [
(
('OS=="win"', ('OS',), [('win',), ('mac',), ('linux',)]),
[('win',)],
),
(
(
'(foo==1 or foo==2) and bar=="b"',
['foo', 'bar'],
[(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b')],
),
[(1, 'b'), (2, 'b')],
),
(
(
'bar=="b"',
['foo', 'bar'],
[(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b')],
),
# TODO(maruel): When a free variable match is found, it should not
# list all the bounded values in addition. The problem is when an
# intersection of two different bound variables that are tested
# singly in two different conditions.
[(1, 'b'), (2, 'b'), (None, 'b')],
),
(
(
'foo==1 or bar=="b"',
['foo', 'bar'],
[(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b')],
),
# TODO(maruel): (None, 'b') would match.
# It is hard in this case to realize that each of the variables
# 'foo' and 'bar' can be unbounded in a specific case.
[(1, 'a'), (1, 'b'), (1, None), (2, 'b')],
),
]
for data, expected in expectations:
self.assertEqual(expected, isolate_format.match_configs(*data))
def test_load_with_globals(self):
values = {
'variables': {
'files': [
'file_common',
],
},
'conditions': [
['OS=="linux"', {
'variables': {
'files': [
'file_linux',
],
'read_only': 1,
},
}],
['OS=="mac" or OS=="win"', {
'variables': {
'files': [
'file_non_linux',
],
'read_only': 0,
},
}],
],
}
expected = {
(None,): {
'files': [
'file_common',
],
'isolate_dir': FAKE_DIR,
},
('linux',): {
'files': [
'file_linux',
],
'isolate_dir': FAKE_DIR,
'read_only': 1,
},
('mac',): {
'files': [
'file_non_linux',
],
'isolate_dir': FAKE_DIR,
'read_only': 0,
},
('win',): {
'files': [
'file_non_linux',
],
'isolate_dir': FAKE_DIR,
'read_only': 0,
},
}
actual = isolate_format.load_isolate_as_config(FAKE_DIR, values, None)
self.assertEqual(expected, actual.flatten())
def test_and_or_bug(self):
a = {
'conditions': [
['use_x11==0', {
'variables': {
'command': ['foo', 'x11=0'],
},
}],
['OS=="linux" and chromeos==0', {
'variables': {
'command': ['foo', 'linux'],
},
}],
],
}
def load_included_isolate(isolate_dir, _isolate_path):
return isolate_format.load_isolate_as_config(isolate_dir, a, None)
self.mock(isolate_format, 'load_included_isolate', load_included_isolate)
b = {
'conditions': [
['use_x11==1', {
'variables': {
'command': ['foo', 'x11=1'],
},
}],
],
'includes': [
'a',
],
}
variables = {'use_x11': 1, 'OS': 'linux', 'chromeos': 0}
config = isolate_format.load_isolate_for_config('/', str(b), variables)
self.assertEqual((['foo', 'x11=1'], [], None, '/'), config)
variables = {'use_x11': 0, 'OS': 'linux', 'chromeos': 0}
config = isolate_format.load_isolate_for_config('/', str(b), variables)
self.assertEqual(([], [], None, '/'), config)
class IsolateFormatTmpDirTest(unittest.TestCase):
def setUp(self):
super(IsolateFormatTmpDirTest, self).setUp()
self.tempdir = tempfile.mkdtemp(prefix=u'isolate_')
def tearDown(self):
try:
file_path.rmtree(self.tempdir)
finally:
super(IsolateFormatTmpDirTest, self).tearDown()
def test_load_with_includes(self):
included_isolate = {
'variables': {
'files': [
'file_common',
],
},
'conditions': [
['OS=="linux"', {
'variables': {
'files': [
'file_linux',
],
'read_only': 1,
},
}],
['OS=="mac" or OS=="win"', {
'variables': {
'files': [
'file_non_linux',
],
'read_only': 0,
},
}],
],
}
with open(os.path.join(self.tempdir, 'included.isolate'), 'wb') as f:
isolate_format.pretty_print(included_isolate, f)
values = {
'includes': ['included.isolate'],
'variables': {
'files': [
'file_less_common',
],
},
'conditions': [
['OS=="mac"', {
'variables': {
'files': [
'file_mac',
],
'read_only': 2,
},
}],
],
}
actual = isolate_format.load_isolate_as_config(self.tempdir, values, None)
expected = {
(None,): {
'files': [
'file_common',
'file_less_common',
],
'isolate_dir': self.tempdir,
},
('linux',): {
'files': [
'file_linux',
],
'isolate_dir': self.tempdir,
'read_only': 1,
},
('mac',): {
'files': [
'file_mac',
'file_non_linux',
],
'isolate_dir': self.tempdir,
'read_only': 2,
},
('win',): {
'files': [
'file_non_linux',
],
'isolate_dir': self.tempdir,
'read_only': 0,
},
}
self.assertEqual(expected, actual.flatten())
def test_load_with_includes_with_commands(self):
# This one is messy. Check that isolate_dir is the expected value. To
# achieve this, put the .isolate files into subdirectories.
dir_1 = os.path.join(self.tempdir, '1')
dir_3 = os.path.join(self.tempdir, '3')
dir_3_2 = os.path.join(self.tempdir, '3', '2')
os.mkdir(dir_1)
os.mkdir(dir_3)
os.mkdir(dir_3_2)
isolate1 = {
'conditions': [
['OS=="amiga" or OS=="win"', {
'variables': {
'command': [
'foo', 'amiga_or_win',
],
},
}],
['OS=="linux"', {
'variables': {
'command': [
'foo', 'linux',
],
'files': [
'file_linux',
],
},
}],
['OS=="mac" or OS=="win"', {
'variables': {
'files': [
'file_non_linux',
],
},
}],
],
}
isolate2 = {
'conditions': [
['OS=="linux" or OS=="mac"', {
'variables': {
'command': [
'foo', 'linux_or_mac',
],
'files': [
'other/file',
],
},
}],
],
}
    # Do not define a command in isolate3, otherwise the commands in the other
    # included .isolate files would be ignored.
isolate3 = {
'includes': [
'../1/isolate1.isolate',
'2/isolate2.isolate',
],
'conditions': [
['OS=="amiga"', {
'variables': {
'files': [
'file_amiga',
],
},
}],
['OS=="mac"', {
'variables': {
'files': [
'file_mac',
],
},
}],
],
}
# No need to write isolate3.
with open(os.path.join(dir_1, 'isolate1.isolate'), 'wb') as f:
isolate_format.pretty_print(isolate1, f)
with open(os.path.join(dir_3_2, 'isolate2.isolate'), 'wb') as f:
isolate_format.pretty_print(isolate2, f)
    # The 'isolate_dir' values are important: they determine the final
    # isolate_dir used to run the command in the .isolated file.
actual = isolate_format.load_isolate_as_config(dir_3, isolate3, None)
expected = {
(None,): {
# TODO(maruel): See TODO in ConfigSettings.flatten().
# TODO(maruel): If kept, in this case dir_3 should be selected.
'isolate_dir': dir_1,
},
('amiga',): {
'command': ['foo', 'amiga_or_win'],
'files': [
          # Note that the file was rebased from isolate1. This is important:
          # isolate1 represents the canonical root path because it is the one
          # that defined the command.
'../3/file_amiga',
],
'isolate_dir': dir_1,
},
('linux',): {
        # The last include takes precedence. The *command comes from isolate2*,
        # so isolate2 becomes the canonical root and the file from isolate1 is
        # referenced via '../../1'.
'command': ['foo', 'linux_or_mac'],
'files': [
'../../1/file_linux',
'other/file',
],
'isolate_dir': dir_3_2,
},
('mac',): {
'command': ['foo', 'linux_or_mac'],
'files': [
'../../1/file_non_linux',
'../file_mac',
'other/file',
],
'isolate_dir': dir_3_2,
},
('win',): {
# command comes from isolate1.
'command': ['foo', 'amiga_or_win'],
'files': [
# While this may be surprising, this is because the command was
# defined in isolate1, not isolate3.
'file_non_linux',
],
'isolate_dir': dir_1,
},
}
self.assertEqual(expected, actual.flatten())
def test_load_with_includes_with_commands_and_variables(self):
    # This one is the pinnacle of fun. Check that isolate_dir is the expected
# value. To achieve this, put the .isolate files into subdirectories.
dir_1 = os.path.join(self.tempdir, '1')
dir_3 = os.path.join(self.tempdir, '3')
dir_3_2 = os.path.join(self.tempdir, '3', '2')
os.mkdir(dir_1)
os.mkdir(dir_3)
os.mkdir(dir_3_2)
isolate1 = {
'conditions': [
['OS=="amiga" or OS=="win"', {
'variables': {
'command': [
'foo', 'amiga_or_win', '<(PATH)',
],
},
}],
['OS=="linux"', {
'variables': {
'command': [
'foo', 'linux', '<(PATH)',
],
'files': [
'<(PATH)/file_linux',
],
},
}],
['OS=="mac" or OS=="win"', {
'variables': {
'files': [
'<(PATH)/file_non_linux',
],
},
}],
],
}
isolate2 = {
'conditions': [
['OS=="linux" or OS=="mac"', {
'variables': {
'command': [
'foo', 'linux_or_mac', '<(PATH)',
],
'files': [
'<(PATH)/other/file',
],
},
}],
],
}
    # Do not define a command in isolate3, otherwise the commands in the other
    # included .isolate files would be ignored.
isolate3 = {
'includes': [
'../1/isolate1.isolate',
'2/isolate2.isolate',
],
'conditions': [
['OS=="amiga"', {
'variables': {
'files': [
'<(PATH)/file_amiga',
],
},
}],
['OS=="mac"', {
'variables': {
'files': [
'<(PATH)/file_mac',
],
},
}],
],
}
# No need to write isolate3.
with open(os.path.join(dir_1, 'isolate1.isolate'), 'wb') as f:
isolate_format.pretty_print(isolate1, f)
with open(os.path.join(dir_3_2, 'isolate2.isolate'), 'wb') as f:
isolate_format.pretty_print(isolate2, f)
    # The 'isolate_dir' values are important: they determine the final
    # isolate_dir used to run the command in the .isolated file.
actual = isolate_format.load_isolate_as_config(dir_3, isolate3, None)
expected = {
(None,): {
'isolate_dir': dir_1,
},
('amiga',): {
'command': ['foo', 'amiga_or_win', '<(PATH)'],
'files': [
'<(PATH)/file_amiga',
],
'isolate_dir': dir_1,
},
('linux',): {
        # The last include takes precedence. The *command comes from isolate2*,
        # so isolate2 becomes the canonical root and the file from isolate1 is
        # referenced via '../../1'.
'command': ['foo', 'linux_or_mac', '<(PATH)'],
'files': [
'<(PATH)/file_linux',
'<(PATH)/other/file',
],
'isolate_dir': dir_3_2,
},
('mac',): {
'command': ['foo', 'linux_or_mac', '<(PATH)'],
'files': [
'<(PATH)/file_mac',
'<(PATH)/file_non_linux',
'<(PATH)/other/file',
],
'isolate_dir': dir_3_2,
},
('win',): {
# command comes from isolate1.
'command': ['foo', 'amiga_or_win', '<(PATH)'],
'files': [
'<(PATH)/file_non_linux',
],
'isolate_dir': dir_1,
},
}
self.assertEqual(expected, actual.flatten())
if __name__ == '__main__':
test_env.main()
|
|
import struct
import time
import cherrypy
from cherrypy._cpcompat import basestring, BytesIO, ntob, set, unicodestr
from cherrypy.lib import file_generator
from cherrypy.lib import set_vary_header
def decode(encoding=None, default_encoding='utf-8'):
"""Replace or extend the list of charsets used to decode a request entity.
Either argument may be a single string or a list of strings.
encoding
If not None, restricts the set of charsets attempted while decoding
a request entity to the given set (even if a different charset is given in
the Content-Type request header).
default_encoding
Only in effect if the 'encoding' argument is not given.
If given, the set of charsets attempted while decoding a request entity is
*extended* with the given value(s).
"""
body = cherrypy.request.body
if encoding is not None:
if not isinstance(encoding, list):
encoding = [encoding]
body.attempt_charsets = encoding
elif default_encoding:
if not isinstance(default_encoding, list):
default_encoding = [default_encoding]
body.attempt_charsets = body.attempt_charsets + default_encoding
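# A minimal configuration sketch (illustrative; `Root` is a hypothetical
# application class): decode() above backs the 'decode' tool, so it is normally
# enabled through config rather than called directly, e.g.:
#
#   config = {
#       '/': {
#           'tools.decode.on': True,
#           'tools.decode.encoding': 'utf-8',
#       },
#   }
#   cherrypy.quickstart(Root(), '/', config)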
class ResponseEncoder:
default_encoding = 'utf-8'
failmsg = "Response body could not be encoded with %r."
encoding = None
errors = 'strict'
text_only = True
add_charset = True
debug = False
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
self.attempted_charsets = set()
request = cherrypy.serving.request
if request.handler is not None:
# Replace request.handler with self
if self.debug:
cherrypy.log('Replacing request.handler', 'TOOLS.ENCODE')
self.oldhandler = request.handler
request.handler = self
def encode_stream(self, encoding):
"""Encode a streaming response body.
Use a generator wrapper, and just pray it works as the stream is
being written out.
"""
if encoding in self.attempted_charsets:
return False
self.attempted_charsets.add(encoding)
def encoder(body):
for chunk in body:
if isinstance(chunk, unicodestr):
chunk = chunk.encode(encoding, self.errors)
yield chunk
self.body = encoder(self.body)
return True
def encode_string(self, encoding):
"""Encode a buffered response body."""
if encoding in self.attempted_charsets:
return False
self.attempted_charsets.add(encoding)
try:
body = []
for chunk in self.body:
if isinstance(chunk, unicodestr):
chunk = chunk.encode(encoding, self.errors)
body.append(chunk)
self.body = body
except (LookupError, UnicodeError):
return False
else:
return True
def find_acceptable_charset(self):
request = cherrypy.serving.request
response = cherrypy.serving.response
if self.debug:
cherrypy.log('response.stream %r' % response.stream, 'TOOLS.ENCODE')
if response.stream:
encoder = self.encode_stream
else:
encoder = self.encode_string
if "Content-Length" in response.headers:
# Delete Content-Length header so finalize() recalcs it.
# Encoded strings may be of different lengths from their
# unicode equivalents, and even from each other. For example:
# >>> t = u"\u7007\u3040"
# >>> len(t)
# 2
# >>> len(t.encode("UTF-8"))
# 6
# >>> len(t.encode("utf7"))
# 8
del response.headers["Content-Length"]
# Parse the Accept-Charset request header, and try to provide one
# of the requested charsets (in order of user preference).
encs = request.headers.elements('Accept-Charset')
charsets = [enc.value.lower() for enc in encs]
if self.debug:
cherrypy.log('charsets %s' % repr(charsets), 'TOOLS.ENCODE')
if self.encoding is not None:
# If specified, force this encoding to be used, or fail.
encoding = self.encoding.lower()
if self.debug:
cherrypy.log('Specified encoding %r' % encoding, 'TOOLS.ENCODE')
if (not charsets) or "*" in charsets or encoding in charsets:
if self.debug:
cherrypy.log('Attempting encoding %r' % encoding, 'TOOLS.ENCODE')
if encoder(encoding):
return encoding
else:
if not encs:
if self.debug:
cherrypy.log('Attempting default encoding %r' %
self.default_encoding, 'TOOLS.ENCODE')
# Any character-set is acceptable.
if encoder(self.default_encoding):
return self.default_encoding
else:
raise cherrypy.HTTPError(500, self.failmsg % self.default_encoding)
else:
for element in encs:
if element.qvalue > 0:
if element.value == "*":
# Matches any charset. Try our default.
if self.debug:
cherrypy.log('Attempting default encoding due '
'to %r' % element, 'TOOLS.ENCODE')
if encoder(self.default_encoding):
return self.default_encoding
else:
encoding = element.value
if self.debug:
cherrypy.log('Attempting encoding %s (qvalue >'
'0)' % element, 'TOOLS.ENCODE')
if encoder(encoding):
return encoding
if "*" not in charsets:
# If no "*" is present in an Accept-Charset field, then all
# character sets not explicitly mentioned get a quality
# value of 0, except for ISO-8859-1, which gets a quality
# value of 1 if not explicitly mentioned.
iso = 'iso-8859-1'
if iso not in charsets:
if self.debug:
cherrypy.log('Attempting ISO-8859-1 encoding',
'TOOLS.ENCODE')
if encoder(iso):
return iso
# No suitable encoding found.
ac = request.headers.get('Accept-Charset')
if ac is None:
msg = "Your client did not send an Accept-Charset header."
else:
msg = "Your client sent this Accept-Charset header: %s." % ac
msg += " We tried these charsets: %s." % ", ".join(self.attempted_charsets)
raise cherrypy.HTTPError(406, msg)
def __call__(self, *args, **kwargs):
response = cherrypy.serving.response
self.body = self.oldhandler(*args, **kwargs)
if isinstance(self.body, basestring):
# strings get wrapped in a list because iterating over a single
# item list is much faster than iterating over every character
# in a long string.
if self.body:
self.body = [self.body]
else:
# [''] doesn't evaluate to False, so replace it with [].
self.body = []
elif hasattr(self.body, 'read'):
self.body = file_generator(self.body)
elif self.body is None:
self.body = []
ct = response.headers.elements("Content-Type")
if self.debug:
cherrypy.log('Content-Type: %r' % [str(h) for h in ct], 'TOOLS.ENCODE')
if ct:
ct = ct[0]
if self.text_only:
if ct.value.lower().startswith("text/"):
if self.debug:
cherrypy.log('Content-Type %s starts with "text/"' % ct,
'TOOLS.ENCODE')
do_find = True
else:
if self.debug:
cherrypy.log('Not finding because Content-Type %s does '
'not start with "text/"' % ct,
'TOOLS.ENCODE')
do_find = False
else:
if self.debug:
cherrypy.log('Finding because not text_only', 'TOOLS.ENCODE')
do_find = True
if do_find:
# Set "charset=..." param on response Content-Type header
ct.params['charset'] = self.find_acceptable_charset()
if self.add_charset:
if self.debug:
cherrypy.log('Setting Content-Type %s' % ct,
'TOOLS.ENCODE')
response.headers["Content-Type"] = str(ct)
return self.body
# GZIP
def compress(body, compress_level):
"""Compress 'body' at the given compress_level."""
import zlib
# See http://www.gzip.org/zlib/rfc-gzip.html
yield ntob('\x1f\x8b') # ID1 and ID2: gzip marker
yield ntob('\x08') # CM: compression method
yield ntob('\x00') # FLG: none set
# MTIME: 4 bytes
yield struct.pack("<L", int(time.time()) & int('FFFFFFFF', 16))
yield ntob('\x02') # XFL: max compression, slowest algo
yield ntob('\xff') # OS: unknown
crc = zlib.crc32(ntob(""))
size = 0
zobj = zlib.compressobj(compress_level,
zlib.DEFLATED, -zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL, 0)
for line in body:
size += len(line)
crc = zlib.crc32(line, crc)
yield zobj.compress(line)
yield zobj.flush()
# CRC32: 4 bytes
yield struct.pack("<L", crc & int('FFFFFFFF', 16))
# ISIZE: 4 bytes
yield struct.pack("<L", size & int('FFFFFFFF', 16))
def decompress(body):
import gzip
zbuf = BytesIO()
zbuf.write(body)
zbuf.seek(0)
zfile = gzip.GzipFile(mode='rb', fileobj=zbuf)
data = zfile.read()
zfile.close()
return data
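# A minimal round-trip sketch showing how compress() and decompress() relate.
# The helper below is illustrative only (it is not part of the public CherryPy
# API) and is never called by this module.
def _gzip_round_trip_example(payload=ntob('hello world')):
    """Return True if gzipping `payload` and gunzipping it gives it back."""
    gzipped = ntob('').join(compress([payload], compress_level=5))
    return decompress(gzipped) == payload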
def gzip(compress_level=5, mime_types=['text/html', 'text/plain'], debug=False):
"""Try to gzip the response body if Content-Type in mime_types.
cherrypy.response.headers['Content-Type'] must be set to one of the
values in the mime_types arg before calling this function.
The provided list of mime-types must be of one of the following form:
* type/subtype
* type/*
* type/*+subtype
No compression is performed if any of the following hold:
* The client sends no Accept-Encoding request header
* No 'gzip' or 'x-gzip' is present in the Accept-Encoding header
* No 'gzip' or 'x-gzip' with a qvalue > 0 is present
* The 'identity' value is given with a qvalue > 0.
"""
request = cherrypy.serving.request
response = cherrypy.serving.response
set_vary_header(response, "Accept-Encoding")
if not response.body:
# Response body is empty (might be a 304 for instance)
if debug:
cherrypy.log('No response body', context='TOOLS.GZIP')
return
# If returning cached content (which should already have been gzipped),
# don't re-zip.
if getattr(request, "cached", False):
if debug:
cherrypy.log('Not gzipping cached response', context='TOOLS.GZIP')
return
acceptable = request.headers.elements('Accept-Encoding')
if not acceptable:
# If no Accept-Encoding field is present in a request,
# the server MAY assume that the client will accept any
# content coding. In this case, if "identity" is one of
# the available content-codings, then the server SHOULD use
# the "identity" content-coding, unless it has additional
# information that a different content-coding is meaningful
# to the client.
if debug:
cherrypy.log('No Accept-Encoding', context='TOOLS.GZIP')
return
ct = response.headers.get('Content-Type', '').split(';')[0]
for coding in acceptable:
if coding.value == 'identity' and coding.qvalue != 0:
if debug:
cherrypy.log('Non-zero identity qvalue: %s' % coding,
context='TOOLS.GZIP')
return
if coding.value in ('gzip', 'x-gzip'):
if coding.qvalue == 0:
if debug:
cherrypy.log('Zero gzip qvalue: %s' % coding,
context='TOOLS.GZIP')
return
if ct not in mime_types:
# If the list of provided mime-types contains tokens
# such as 'text/*' or 'application/*+xml',
# we go through them and find the most appropriate one
# based on the given content-type.
# The pattern matching is only caring about the most
# common cases, as stated above, and doesn't support
# for extra parameters.
found = False
if '/' in ct:
ct_media_type, ct_sub_type = ct.split('/')
for mime_type in mime_types:
if '/' in mime_type:
media_type, sub_type = mime_type.split('/')
if ct_media_type == media_type:
if sub_type == '*':
found = True
break
elif '+' in sub_type and '+' in ct_sub_type:
ct_left, ct_right = ct_sub_type.split('+')
left, right = sub_type.split('+')
if left == '*' and ct_right == right:
found = True
break
if not found:
if debug:
cherrypy.log('Content-Type %s not in mime_types %r' %
(ct, mime_types), context='TOOLS.GZIP')
return
if debug:
cherrypy.log('Gzipping', context='TOOLS.GZIP')
# Return a generator that compresses the page
response.headers['Content-Encoding'] = 'gzip'
response.body = compress(response.body, compress_level)
if "Content-Length" in response.headers:
# Delete Content-Length header so finalize() recalcs it.
del response.headers["Content-Length"]
return
if debug:
cherrypy.log('No acceptable encoding found.', context='GZIP')
cherrypy.HTTPError(406, "identity, gzip").set_response()
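# Typical usage sketch (illustrative; the mime_types listed are an assumption,
# not a recommendation): gzip() above backs the 'gzip' tool, so it is usually
# enabled via config:
#
#   config = {
#       '/': {
#           'tools.gzip.on': True,
#           'tools.gzip.compress_level': 5,
#           'tools.gzip.mime_types': ['text/html', 'text/plain'],
#       },
#   }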
|
|
# -*- coding: utf-8 -*-
import os
import urllib
import uuid
from pymongo import MongoClient
import requests
from framework.mongo.utils import to_mongo_key
from website.models import Node
from website.addons.wiki import settings as wiki_settings
from website.addons.wiki.exceptions import InvalidVersionError
def generate_private_uuid(node, wname):
"""
Generate private uuid for internal use in sharejs namespacing.
    Note that this will NEVER be passed to the client or sharejs.
"""
private_uuid = str(uuid.uuid1())
wiki_key = to_mongo_key(wname)
node.wiki_private_uuids[wiki_key] = private_uuid
node.save()
return private_uuid
def get_sharejs_uuid(node, wname):
"""
Format private uuid into the form used in mongo and sharejs.
    This includes the node's primary ID to prevent fork namespace collisions.
"""
wiki_key = to_mongo_key(wname)
private_uuid = node.wiki_private_uuids.get(wiki_key)
return str(uuid.uuid5(
uuid.UUID(private_uuid),
str(node._id)
)) if private_uuid else None
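# Illustrative note (namespace value hypothetical): uuid5 is deterministic over
# (namespace, name), so the same private uuid yields a different sharejs id for
# each node, which is the fork collision-avoidance described above:
#
#   ns = uuid.UUID('12345678-1234-5678-1234-567812345678')
#   uuid.uuid5(ns, 'node_abc') != uuid.uuid5(ns, 'node_def')  # True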
def delete_share_doc(node, wname):
"""Deletes share document and removes namespace from model."""
db = share_db()
sharejs_uuid = get_sharejs_uuid(node, wname)
db['docs'].remove({'_id': sharejs_uuid})
db['docs_ops'].remove({'name': sharejs_uuid})
wiki_key = to_mongo_key(wname)
del node.wiki_private_uuids[wiki_key]
node.save()
def migrate_uuid(node, wname):
"""Migrates uuid to new namespace."""
db = share_db()
old_sharejs_uuid = get_sharejs_uuid(node, wname)
broadcast_to_sharejs('lock', old_sharejs_uuid)
generate_private_uuid(node, wname)
new_sharejs_uuid = get_sharejs_uuid(node, wname)
doc_item = db['docs'].find_one({'_id': old_sharejs_uuid})
if doc_item:
doc_item['_id'] = new_sharejs_uuid
db['docs'].insert(doc_item)
db['docs'].remove({'_id': old_sharejs_uuid})
ops_items = [item for item in db['docs_ops'].find({'name': old_sharejs_uuid})]
if ops_items:
for item in ops_items:
item['_id'] = item['_id'].replace(old_sharejs_uuid, new_sharejs_uuid)
item['name'] = new_sharejs_uuid
db['docs_ops'].insert(ops_items)
db['docs_ops'].remove({'name': old_sharejs_uuid})
write_contributors = [
user._id for user in node.contributors
if node.has_permission(user, 'write')
]
broadcast_to_sharejs('unlock', old_sharejs_uuid, data=write_contributors)
def share_db():
"""Generate db client for sharejs db"""
client = MongoClient(wiki_settings.SHAREJS_DB_URL)
return client[wiki_settings.SHAREJS_DB_NAME]
def get_sharejs_content(node, wname):
db = share_db()
sharejs_uuid = get_sharejs_uuid(node, wname)
doc_item = db['docs'].find_one({'_id': sharejs_uuid})
return doc_item['_data'] if doc_item else ''
def broadcast_to_sharejs(action, sharejs_uuid, node=None, wiki_name='home', data=None):
"""
Broadcast an action to all documents connected to a wiki.
Actions include 'lock', 'unlock', 'redirect', and 'delete'
'redirect' and 'delete' both require a node to be specified
'unlock' requires data to be a list of contributors with write permission
"""
url = 'http://{host}:{port}/{action}/{id}/'.format(
host=wiki_settings.SHAREJS_HOST,
port=wiki_settings.SHAREJS_PORT,
action=action,
id=sharejs_uuid
)
if action == 'redirect' or action == 'delete':
redirect_url = urllib.quote(
node.web_url_for('project_wiki_view', wname=wiki_name, _guid=True),
safe='',
)
url = os.path.join(url, redirect_url)
try:
requests.post(url, json=data)
except requests.ConnectionError:
pass # Assume sharejs is not online
def format_wiki_version(version, num_versions, allow_preview):
"""
:param str version: 'preview', 'current', 'previous', '1', '2', ...
:param int num_versions:
:param allow_preview: True if view, False if compare
"""
if not version:
return
if version.isdigit():
version = int(version)
if version > num_versions or version < 1:
raise InvalidVersionError
elif version == num_versions:
return 'current'
elif version == num_versions - 1:
return 'previous'
elif version != 'current' and version != 'previous':
if allow_preview and version == 'preview':
return version
raise InvalidVersionError
elif version == 'previous' and num_versions == 0:
raise InvalidVersionError
return version
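# Examples of the mapping above (illustrative only):
#   format_wiki_version('3', num_versions=3, allow_preview=True)   -> 'current'
#   format_wiki_version('2', num_versions=3, allow_preview=True)   -> 'previous'
#   format_wiki_version('1', num_versions=3, allow_preview=True)   -> 1
#   format_wiki_version('preview', 3, allow_preview=True)          -> 'preview'
#   format_wiki_version('preview', 3, allow_preview=False)         raises InvalidVersionError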
def serialize_wiki_settings(user, node_ids):
""" Format wiki data for project settings page
:param user: modular odm User object
:param node_ids: list of parent project ids
:return: treebeard-formatted data
"""
items = []
for node_id in node_ids:
node = Node.load(node_id)
assert node, '{} is not a valid Node.'.format(node_id)
can_read = node.has_permission(user, 'read')
include_wiki_settings = node.include_wiki_settings(user)
if not include_wiki_settings:
continue
children = []
if node.admin_public_wiki(user):
children.append({
'select': {
'title': 'permission',
'permission':
'public'
if node.get_addon('wiki').is_publicly_editable
else 'private'
},
})
children.extend(serialize_wiki_settings(
user,
[
n._id
for n in node.nodes
if n.primary and
not n.is_deleted
]
))
item = {
'node': {
'id': node_id,
'url': node.url if can_read else '',
'title': node.title if can_read else 'Private Project',
},
'children': children,
'kind': 'folder' if not node.parent_node or not node.parent_node.has_permission(user, 'read') else 'node',
'nodeType': node.project_or_component,
'category': node.category,
'permissions': {
'view': can_read,
},
}
items.append(item)
return items
|
|
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 5000
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.8,
one_target_per_seq=False,
n_seq_per_batch=16,
subsample_target=8,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
# input_padding=8,
lag=0
# output_central_value=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-3,
learning_rate_changes_by_iteration={
# 200: 1e-2,
# 400: 1e-3,
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
auto_reshape=False
# plotter=CentralOutputPlotter
# plotter=MDNPlotter
)
"""
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
12345678901234567890
"""
def exp_a(name):
global source
    # Build the data source here; `source` must exist before it is handed to
    # the net below.
    source_dict_copy = deepcopy(source_dict)
    source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
output_shape = source.output_shape_after_processing()
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 64,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 64,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': 1024,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': 512,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': output_shape[1] * output_shape[2],
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
|
|
"""Test check_config script."""
import asyncio
import logging
import os
import unittest
import homeassistant.scripts.check_config as check_config
from tests.common import patch_yaml_files, get_test_config_dir
_LOGGER = logging.getLogger(__name__)
BASE_CONFIG = (
'homeassistant:\n'
' name: Home\n'
' latitude: -26.107361\n'
' longitude: 28.054500\n'
' elevation: 1600\n'
' unit_system: metric\n'
' time_zone: GMT\n'
'\n\n'
)
def change_yaml_files(check_dict):
"""Change the ['yaml_files'] property and remove the config path.
Also removes other files like service.yaml that gets loaded
"""
root = get_test_config_dir()
keys = check_dict['yaml_files'].keys()
check_dict['yaml_files'] = []
for key in sorted(keys):
if not key.startswith('/'):
check_dict['yaml_files'].append(key)
if key.startswith(root):
check_dict['yaml_files'].append('...' + key[len(root):])
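# Example of the rewrite above (paths hypothetical): if root is '/test/config/',
#   {'yaml_files': {'/test/config/light.yaml': True, 'light.yaml': True}}
# is reduced to
#   {'yaml_files': ['.../light.yaml', 'light.yaml']}
# since sorted() puts the absolute path first and only its '...'-prefixed form
# is kept for keys under the config root.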
def tearDownModule():  # pylint: disable=invalid-name
"""Clean files."""
# .HA_VERSION created during bootstrap's config update
path = get_test_config_dir('.HA_VERSION')
if os.path.isfile(path):
os.remove(path)
class TestCheckConfig(unittest.TestCase):
"""Tests for the homeassistant.scripts.check_config module."""
def setUp(self):
"""Prepare the test."""
# Somewhere in the tests our event loop gets killed,
# this ensures we have one.
try:
asyncio.get_event_loop()
except (RuntimeError, AssertionError):
# Py35: RuntimeError
# Py34: AssertionError
asyncio.set_event_loop(asyncio.new_event_loop())
# pylint: disable=no-self-use,invalid-name
def test_config_platform_valid(self):
"""Test a valid platform setup."""
files = {
'light.yaml': BASE_CONFIG + 'light:\n platform: demo',
}
with patch_yaml_files(files):
res = check_config.check(get_test_config_dir('light.yaml'))
change_yaml_files(res)
self.assertDictEqual({
'components': {'light': [{'platform': 'demo'}]},
'except': {},
'secret_cache': {},
'secrets': {},
'yaml_files': ['.../light.yaml']
}, res)
def test_config_component_platform_fail_validation(self):
"""Test errors if component & platform not found."""
files = {
'component.yaml': BASE_CONFIG + 'http:\n password: err123',
}
with patch_yaml_files(files):
res = check_config.check(get_test_config_dir('component.yaml'))
change_yaml_files(res)
self.assertDictEqual({}, res['components'])
res['except'].pop(check_config.ERROR_STR)
self.assertDictEqual(
{'http': {'password': 'err123'}},
res['except']
)
self.assertDictEqual({}, res['secret_cache'])
self.assertDictEqual({}, res['secrets'])
self.assertListEqual(['.../component.yaml'], res['yaml_files'])
files = {
'platform.yaml': (BASE_CONFIG + 'mqtt:\n\n'
'light:\n platform: mqtt_json'),
}
with patch_yaml_files(files):
res = check_config.check(get_test_config_dir('platform.yaml'))
change_yaml_files(res)
self.assertDictEqual(
{'mqtt': {
'keepalive': 60,
'port': 1883,
'protocol': '3.1.1',
'discovery': False,
'discovery_prefix': 'homeassistant',
},
'light': []},
res['components']
)
self.assertDictEqual(
{'light.mqtt_json': {'platform': 'mqtt_json'}},
res['except']
)
self.assertDictEqual({}, res['secret_cache'])
self.assertDictEqual({}, res['secrets'])
self.assertListEqual(['.../platform.yaml'], res['yaml_files'])
def test_component_platform_not_found(self):
"""Test errors if component or platform not found."""
files = {
'badcomponent.yaml': BASE_CONFIG + 'beer:',
'badplatform.yaml': BASE_CONFIG + 'light:\n platform: beer',
}
with patch_yaml_files(files):
res = check_config.check(get_test_config_dir('badcomponent.yaml'))
change_yaml_files(res)
self.assertDictEqual({}, res['components'])
self.assertDictEqual({
check_config.ERROR_STR: [
'Component not found: beer',
'Setup failed for beer: Component not found.']
}, res['except'])
self.assertDictEqual({}, res['secret_cache'])
self.assertDictEqual({}, res['secrets'])
self.assertListEqual(['.../badcomponent.yaml'], res['yaml_files'])
res = check_config.check(get_test_config_dir('badplatform.yaml'))
change_yaml_files(res)
assert res['components'] == {'light': []}
assert res['except'] == {
check_config.ERROR_STR: [
'Platform not found: light.beer',
]}
self.assertDictEqual({}, res['secret_cache'])
self.assertDictEqual({}, res['secrets'])
self.assertListEqual(['.../badplatform.yaml'], res['yaml_files'])
def test_secrets(self):
"""Test secrets config checking method."""
files = {
get_test_config_dir('secret.yaml'): (
BASE_CONFIG +
'http:\n'
' api_password: !secret http_pw'),
'secrets.yaml': ('logger: debug\n'
'http_pw: abc123'),
}
self.maxDiff = None
with patch_yaml_files(files):
config_path = get_test_config_dir('secret.yaml')
secrets_path = get_test_config_dir('secrets.yaml')
res = check_config.check(config_path)
change_yaml_files(res)
# convert secrets OrderedDict to dict for assertequal
for key, val in res['secret_cache'].items():
res['secret_cache'][key] = dict(val)
self.assertDictEqual({
'components': {'http': {'api_password': 'abc123',
'cors_allowed_origins': [],
'development': '0',
'ip_ban_enabled': True,
'login_attempts_threshold': -1,
'server_host': '0.0.0.0',
'server_port': 8123,
'ssl_certificate': None,
'ssl_key': None,
'trusted_networks': [],
'use_x_forwarded_for': False}},
'except': {},
'secret_cache': {secrets_path: {'http_pw': 'abc123'}},
'secrets': {'http_pw': 'abc123'},
'yaml_files': ['.../secret.yaml', '.../secrets.yaml']
}, res)
def test_package_invalid(self): \
# pylint: disable=no-self-use,invalid-name
"""Test a valid platform setup."""
files = {
'bad.yaml': BASE_CONFIG + (' packages:\n'
' p1:\n'
' group: ["a"]'),
}
with patch_yaml_files(files):
res = check_config.check(get_test_config_dir('bad.yaml'))
change_yaml_files(res)
err = res['except'].pop('homeassistant.packages.p1')
assert res['except'] == {}
assert err == {'group': ['a']}
assert res['yaml_files'] == ['.../bad.yaml']
assert res['components'] == {}
assert res['secret_cache'] == {}
assert res['secrets'] == {}
|
|
# PLAN
# for each GNS placename shapefile
# for each year
# add relationship to gaul adm3 for that year
# add relationship to gaul adm2 for that year
# add relationship to gaul adm1 for that year
# add relationship to gaul country for that year
# for each gaul country relationship
# save as country year specific placename shapefile
# - gaul country
# - 1990
# - 1991
# - ....
# ...geomatch routine...
# for row in data:
# matches = []
# prov3data = getprovdata(findgaulcountryname(row[country]))
# for refrow in prov3data:
# citymatch = similar(row[city],refrow[city])
# if citymatch > 80:
# refrow[countrymatch] = similar(row[country],refrow[country])
# refrow[adm1match] = similar(row[adm1],refrow[adm1])
# matches.append(refrow)
import sqlite3
import cPickle as pickle
from itertools import izip, groupby
import shapefile as pyshp
import shapely
from shapely.geometry.geo import asShape, mapping
from shapely.prepared import prep
import rtree
# refdata has unlimited fields containing match names
class RefData:
def __init__(self, filepath, encoding="utf8"):
self.reader = pyshp.Reader(filepath)
self.fields = [f[0] for f in self.reader.fields[1:]]
self.encoding = encoding
self.relationships = dict()
def add_relationship(self, tag, refdata, namefield, idfields=None):
# refdata can either exist from before, or be created via spatial overlap
self.relationships[tag] = dict(refdata=refdata, namefield=namefield)
def create(self, savepath):
# TODO: ALLOW TIME DIMENSION AND OTHER SUBGROUPINGS...
# MAYBE EVEN NESTED SUBFOLDERS FOR SUPER QUICK ACCESS
# MOST IMPORTANT: IF SELF IS POINTS AND OTHER IS POLYGON, THEN MUCH FASTER TO LOOP POLYS THEN JOIN WITH ALL MATCHING POINTS
# PROB ALSO GOOD TO JOIN ENTIRE ROWS MULTIPLE TIMES INSTEAD OF KEEPING ONE ROW AND ONLY WRITING A SINGLE DELIMITED STRING
# ...
def rowdecode(row):
return [val.decode(self.encoding) if isinstance(val, basestring) else val for val in row]
# routine for geomatching one relationship
def geomatch(tag, subdict):
# optimize if self is points or has more items
if self.reader.shapeType in (pyshp.POINT, pyshp.MULTIPOINT) or len(self.reader) > len(subdict["refdata"].reader):
# create spindex
if not hasattr(self, "spindex"):
self.spindex = rtree.index.Index()
for i,shp in enumerate(self.reader.iterShapes()):
bbox = [shp.points[0][0],shp.points[0][1],shp.points[0][0],shp.points[0][1]] if shp.shapeType == pyshp.POINT else shp.bbox
self.spindex.insert(i, bbox)
# first reverse matching for speed
def revmatches():
for row,shp in izip(subdict["refdata"].reader.iterRecords(), subdict["refdata"].reader.iterShapes()):
#if dict(zip(self.fields, row))["SOV0NAME"] != "Russia": continue
#print "...",row[1] #row[4],row[14]
print str(row)[:100]
prepped = prep(asShape(shp)) # prepares geometry for many intersection tests (maybe only useful if is polygon and other is points, but not sure)
bbox = [shp.points[0][0],shp.points[0][1],shp.points[0][0],shp.points[0][1]] if shp.shapeType == pyshp.POINT else shp.bbox
ilist = self.spindex.intersection(bbox)
for i in ilist:
othershp = self.reader.shape(i)
if prepped.intersects(asShape(othershp)):
yield row,i
lookups = dict()
key = lambda(row,i): i
for i,items in groupby(sorted(revmatches(), key=key), key=key):
names = []
for matchrow,i in items:
name = dict(zip(subdict["refdata"].fields, matchrow))[subdict["namefield"]]
name = name.decode(subdict["refdata"].encoding) if isinstance(name, basestring) else name
names.append(name)
matches = "|".join(names)
lookups[i] = matches
for i,row in enumerate(self.reader.iterRecords()):
matches = lookups.get(i, None)
print i, matches
yield matches
else:
# create spindex
subdict["spindex"] = rtree.index.Index()
for i,shp in enumerate(subdict["refdata"].reader.iterShapes()):
bbox = [shp.points[0][0],shp.points[0][1],shp.points[0][0],shp.points[0][1]] if shp.shapeType == pyshp.POINT else shp.bbox
subdict["spindex"].insert(i, bbox)
# match each
for row,shp in izip(self.reader.iterRecords(), self.reader.iterShapes()):
#if dict(zip(self.fields, row))["SOV0NAME"] != "Russia": continue
#print "...",row[1] #row[4],row[14]
prepped = prep(asShape(shp)) # prepares geometry for many intersection tests (maybe only useful if is polygon and other is points, but not sure)
subdict["matches"] = []
refdata = subdict["refdata"]
namefield = subdict["namefield"]
spindex = subdict["spindex"]
bbox = [shp.points[0][0],shp.points[0][1],shp.points[0][0],shp.points[0][1]] if shp.shapeType == pyshp.POINT else shp.bbox
ilist = spindex.intersection(bbox)
for i in ilist:
othershp = refdata.reader.shape(i)
#for otherrow,othershp in zip(refdata.reader.iterRecords(), refdata.reader.iterShapes()):
if prepped.intersects(asShape(othershp)):
otherrow = refdata.reader.record(i)
otherrow = rowdecode(otherrow)
name = dict(zip(refdata.fields, otherrow))[namefield]
subdict["matches"].append(name)
subdict["matches"] = "|".join(subdict["matches"])
print row, subdict["matches"]
yield subdict["matches"]
# create iterators for all relationships
reliters = []
for tag,subdict in self.relationships.items():
reliters.append(list(geomatch(tag,subdict))) # must be list, to avoid simultaneous iterating
# finally, zip original rows with all relationship iterators
def outrows():
matchrows = izip(*reliters)
for feat,matchrow in izip(self.reader.iterShapeRecords(), matchrows):
newrow = rowdecode(feat.record) + [feat.shape.__geo_interface__] + list(matchrow)
yield newrow
# setup db writer (AND ADD REL FIELDS...)
import os
        if os.path.exists(savepath) and raw_input("Overwrite %s? " % savepath):
os.remove(savepath)
db = sqlite3.connect(savepath, detect_types=sqlite3.PARSE_DECLTYPES)
sqlite3.register_adapter(dict, pickle.dumps)
sqlite3.register_converter("dict", pickle.loads)
def field2col(name,typ,size,deci):
if typ == "C":
return name,"text"
elif typ == "N" and deci == 0:
return name,"int"
elif (typ == "N" and deci > 0) or typ == "F":
return name,"real"
else:
raise Exception("Unknown type %s" % typ)
fields = [f for f in self.reader.fields[1:]]
columns = [field2col(name,typ,size,deci) for name,typ,size,deci in fields]
columns += [("geojson","dict")]
columns += [(tag,"text") for tag in self.relationships.keys()] # add extra fields (force text)
columnstring = ", ".join(("%s %s" % (name,typ) for name,typ in columns))
print columnstring
db.execute("""
CREATE TABLE data (%s);
""" % columnstring
)
# batch write to file
question_marks = ", ".join("?" for _ in range(len(columns)))
db.executemany("""
INSERT INTO data VALUES (%s);
""" % question_marks, outrows())
db.commit()
db.close()
class StreamData(object):
def __init__(self, fields, featgen):
self.fields = fields
self.featgen = featgen
def __iter__(self):
for feat in self.featgen():
yield feat
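# Intended usage sketch (all paths and field names are hypothetical):
#
#   places = RefData("gns_placenames.shp")
#   adm1 = RefData("gaul_adm1_1990.shp")
#   places.add_relationship("adm1_1990", adm1, namefield="ADM1_NAME")
#   places.create("placenames_with_adm1.db")
#
# create() spatially joins each placename against the adm1 geometries via the
# rtree index and writes each original record, its geojson geometry and one
# pipe-delimited match column per relationship to the sqlite database.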
|
|
from datetime import date, datetime, timedelta
from itertools import permutations
import logging
"""
The str(f|p)time format for ISO datetime strings with minute-precision and the
expected length for such strings.
"""
ISO_FORMAT = '%Y-%m-%dT%H:%M'
ISO_FORMAT_LEN = 16
class Parser:
"""
Provides methods for converting user input into time units.
"""
def __init__(self, now=None):
"""
Constructor. Expects a datetime instance as argument; the Parser does
not check the time itself.
"""
self.now = now
self.log = logging.getLogger(__name__)
def _try(self, funcs, args):
"""
Expects a [] of one-argument functions and a [] of arguments. Keeping
the order of the funcs, tries all permutations of the args and returns
[(func_result,),] of the combinations that did not raise ValueError.
Of course, the number of funcs and args must be equal.
"""
combos = []
for args_li in permutations(args):
res_li = []
try:
for index, arg in enumerate(args_li):
res_li.append(funcs[index](arg))
except ValueError:
pass
else:
combos.append(tuple(res_li))
return combos
def _get_year(self, s):
"""
Returns the year represented by the given string by trying the year
strptime directives. Raises ValueError if unsuccessful.
"""
for code in ['%Y', '%y']:
try:
dt = datetime.strptime(s, code)
except ValueError:
continue
else:
return dt.year
else:
raise ValueError('Could not extract year: {}'.format(s))
def _get_month(self, s):
"""
Returns the month represented by the given string by trying the month
strptime directives. Raises ValueError if unsuccessful.
"""
s = s.lower()
for code in ['%m', '%b', '%B']:
try:
dt = datetime.strptime(s, code)
except ValueError:
continue
else:
return dt.month
else:
raise ValueError('Could not extract month: {}'.format(s))
def _get_day(self, s):
"""
Returns the day represented by the given string by trying the day
strptime directive. Raises ValueError if unsuccessful.
"""
try:
dt = datetime.strptime(s, '%d')
except ValueError:
raise ValueError('Could not extract day: {}'.format(s))
else:
return dt.day
def extract_year(self, s):
"""
Returns the year extracted from the given string. Raises ValueError if
unsuccessful.
"""
if not len(s) or s.lower() == 'this':
return self.now.year
if s.lower() == 'last':
return self.now.year - 1
return self._get_year(s)
def extract_month(self, s):
"""
Returns a (year, month) tuple extracted form the given string. Raises
ValueError if unsuccessful.
"""
li = s.split()
if len(li) == 0:
return self.now.year, self.now.month
elif len(li) == 1:
if s.lower() == 'last':
month = self.now.month - 1 or 12
year = self.now.year - 1 if month == 12 else self.now.year
elif s.lower() == 'this':
month = self.now.month
year = self.now.year
else:
month = self._get_month(s)
year = self.now.year
return year, month
elif len(li) == 2:
combos = self._try([self._get_year, self._get_month], li)
if len(combos) != 1:
raise ValueError('Could not infer month: {}'.format(s))
return combos[0][0], combos[0][1]
else:
raise ValueError('Could not infer month: {}'.format(s))
def extract_week(self, s):
"""
Returns a (date1, date2) tuple with date1 being a Monday and date2
being a Sunday defining the week extracted from the given string.
Raises ValueError if unsuccessful.
"""
monday = self.now.date() - timedelta(days=self.now.weekday())
sunday = monday + timedelta(days=6)
li = s.split()
if len(li) == 0:
return monday, sunday
elif len(li) == 1:
if s.lower() == 'last':
monday = monday - timedelta(days=7)
sunday = sunday - timedelta(days=7)
elif s.lower() == 'this':
pass
else:
raise ValueError('Could not infer week: {}'.format(s))
return monday, sunday
else:
raise ValueError('Could not infer week: {}'.format(s))
def extract_date(self, s):
"""
Returns a date instance extracted from the given string. Raises
ValueError if unsuccessful.
Apart from the _get_(year|month|day) permutations, this method also
recognises the ISO date format and words like today and yesterday.
"""
li = s.split()
if len(li) == 0:
return self.now.date()
elif len(li) == 1:
try:
dt = datetime.strptime(s, '%Y-%m-%d')
except ValueError:
pass
else:
return dt.date()
if s.lower() in ['last', 'yesterday']:
return self.now.date() - timedelta(days=1)
elif s.lower() in ['this', 'today']:
return self.now.date()
else:
day = self._get_day(li[0])
return date(self.now.year, self.now.month, day)
elif len(li) == 2:
combos = self._try([self._get_month, self._get_day], li)
if len(combos) != 1:
raise ValueError('Could not infer date: {}'.format(s))
return date(self.now.year, combos[0][0], combos[0][1])
elif len(li) == 3:
combos = self._try([self._get_year, self._get_month, self._get_day], li)
if len(combos) != 1:
raise ValueError('Could not infer date: {}'.format(s))
return date(combos[0][0], combos[0][1], combos[0][2])
else:
raise ValueError('Could not infer date: {}'.format(s))
def extract_datetime(self, s):
"""
Returns a datetime instance extracted from the given string. Unlike the
previous few methods, this one only accepts ISO format strings (the
seconds being optional).
"""
try:
dt = datetime.strptime(s[:ISO_FORMAT_LEN], ISO_FORMAT)
except ValueError:
raise ValueError('Could not infer datetime: {}'.format(s))
return dt
def _extract_span(self, tokens):
"""
Helper for the extract_span method. Parses the already tokenised input
string and returns two date instances. This method does not try to
catch any exceptions: this is done by extract_span.
"""
tags = []
for token in tokens:
if token.isdigit():
if len(token) == 4: tags.append('y')
else: tags.append('d')
else: tags.append('m')
if set(tags[-3:]) == set(['d', 'm', 'y']):
assert len(tokens) in [3, 4, 5, 6]
d2 = date(self._get_year(tokens[-3:][tags[-3:].index('y')]),
self._get_month(tokens[-3:][tags[-3:].index('m')]),
self._get_day(tokens[-3:][tags[-3:].index('d')]))
if len(tokens) == 6:
assert set(tags[:3]) == set(['d', 'm', 'y'])
d1 = date(self._get_year(tokens[tags[:3].index('y')]),
self._get_month(tokens[tags[:3].index('m')]),
self._get_day(tokens[tags[:3].index('d')]))
elif len(tokens) == 5:
assert set(tags[:2]) == set(['d', 'm'])
d1 = date(d2.year,
self._get_month(tokens[tags[:2].index('m')]),
self._get_day(tokens[tags[:2].index('d')]))
elif len(tokens) == 4:
assert tags[0] == 'd'
                d1 = date(d2.year, d2.month, self._get_day(tokens[0]))
elif len(tokens) == 3:
d1 = self.now.date()
elif set(tags[-2:]) == set(['d', 'm']):
assert len(tokens) in [2, 3, 4]
d2 = date(self.now.year,
self._get_month(tokens[-2:][tags[-2:].index('m')]),
self._get_day(tokens[-2:][tags[-2:].index('d')]))
if len(tokens) == 4:
assert set(tags[:2]) == set(['d', 'm'])
d1 = date(d2.year,
self._get_month(tokens[tags[:2].index('m')]),
self._get_day(tokens[tags[:2].index('d')]))
elif len(tokens) == 3:
assert tags[0] == 'd'
d1 = date(d2.year, d2.month, self._get_day(tokens[0]))
elif len(tokens) == 2:
d1 = self.now.date()
elif tags[-1] == 'd':
assert len(tokens) in [1, 2]
d2 = date(self.now.year, self.now.month, self._get_day(tokens[-1]))
if len(tokens) == 2:
assert tags[0] == 'd'
d1 = date(d2.year, d2.month, self._get_day(tokens[0]))
elif len(tokens) == 1:
d1 = self.now.date()
else:
raise ValueError
return d1, d2
def extract_span(self, s):
"""
Returns a (d1, d2) tuple of date instances extracted from the given
string. The earlier date will be first even if it was not first in the
string.
"""
tokens = s.split()
if len(tokens) < 1 or len(tokens) > 6:
raise ValueError('Could not infer dates: {}'.format(s))
try:
d1, d2 = self._extract_span(tokens)
except (AssertionError, ValueError) as err:
self.log.debug(str(err))
raise ValueError('Could not infer dates: {}'.format(s))
if d2 < d1:
d1, d2 = d2, d1
return d1, d2
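# A brief usage sketch (illustrative; `now` is an arbitrary reference datetime):
#
#   parser = Parser(now=datetime(2015, 6, 15, 12, 0))
#   parser.extract_month('last')         # -> (2015, 5)
#   parser.extract_date('2015-06-01')    # -> date(2015, 6, 1)
#   parser.extract_span('1 jun 15 jun')  # -> (date(2015, 6, 1), date(2015, 6, 15))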
"""
Functions that convert time units into pretty strings for human consumption
"""
def prettify_date(year, month, day=None):
"""
Returns a human-readable string representing the date defined by the given
parameters.
"""
if day:
d = date(year, month, day)
s = d.strftime('%d %b %Y')
else:
d = date(year, month, 1)
s = d.strftime('%b %Y')
return s.lower()
def prettify_datetime(dt):
"""
Returns a pretty string representing the given datetime instance. Units
smaller than minutes are not included.
"""
return ' '.join([
prettify_date(dt.year, dt.month, dt.day),
dt.strftime('%H:%M')
])
def prettify_delta(delta):
"""
Returns a human-readable string representing the given timedelta. The
biggest unit is the hour, because a working day is too ambiguous.
"""
d = {}
d['minutes'], d['seconds'] = divmod(int(delta.total_seconds()), 60)
d['hours'], d['minutes'] = divmod(d['minutes'], 60)
li = []
for unit in ('hours', 'minutes', 'seconds'):
if d[unit]:
s = str(d[unit])+' '+unit
if d[unit] == 1:
s = s[:-1]
li.append(s)
s = ', '.join(li)
if not s: s = '-'
return s
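# Example outputs (illustrative):
#   prettify_delta(timedelta(hours=1, minutes=5))  -> '1 hour, 5 minutes'
#   prettify_delta(timedelta(seconds=1))           -> '1 second'
#   prettify_delta(timedelta(0))                   -> '-'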
|
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from datetime import timedelta, datetime
import json
import pytz
from django.test import TestCase, Client, override_settings
from django.contrib.auth.models import User
from django.core import mail
from django.core.management import call_command
from django.conf import settings
from django.utils.timezone import now
from django.core.urlresolvers import reverse
from decisions.subscriptions.models import (
UserProfile,
Subscription,
SubscriptionUser,
SubscriptionHit
)
@override_settings(
EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'
)
class RegistrationTest(TestCase):
def setUp(self):
self.c = Client()
def testSubmitInvalidRegistrationForm(self):
resp = self.c.post(reverse('register'), {
'username': '',
'email': 'not an email',
'password': 'password',
'password_again': 'password'
})
# We are not redirected
self.assertEqual(resp.status_code, 200)
# There is at least one error
self.assertTrue(resp.context["form"].errors)
def _register(self, user_info):
base_info = {
'username': '',
'email': 'test@example.org',
'password': 'p4ssw0rd@',
'password_again': 'p4ssw0rd@',
}
base_info.update(user_info)
return self.c.post(reverse('register'), base_info, follow=True)
def testSubmitRegistrationWithoutUsername(self):
resp = self._register({})
# The final page is 200 OK
self.assertEqual(resp.status_code, 200)
# We're redirected
self.assertEqual(len(resp.redirect_chain), 1)
(url, code), = resp.redirect_chain
self.assertEqual(url, '/')
# We get a success message
self.assertEqual(len(resp.context["messages"]), 1)
        # A user is created with some username
u = User.objects.get(email='test@example.org')
self.assertTrue(u.username)
def testSubmitRegistrationWithUsername(self):
resp = self._register({"username": "tester"})
# The final page is 200 OK
self.assertEqual(resp.status_code, 200)
# We're redirected
self.assertEqual(len(resp.redirect_chain), 1)
(url, code), = resp.redirect_chain
self.assertEqual(url, '/')
# We get a success message
self.assertEqual(len(resp.context["messages"]), 1)
        # A user is created with the specific username we gave
u = User.objects.get(email='test@example.org')
self.assertEqual(u.username, "tester")
def testActivationEmailActivates(self):
resp = self._register({})
u = User.objects.get(email='test@example.org')
self.assertTrue(getattr(mail, "outbox", False))
the_mail = mail.outbox.pop()
self.assertEqual(the_mail.to, ['test@example.org'])
self.assertIn(u.profile.email_confirm_code, the_mail.body)
resp = self.c.get(
reverse('confirm-email', args=(u.profile.email_confirm_code,)),
follow=True
)
self.assertEqual(resp.status_code, 200)
self.assertTrue(
User.objects.get(email='test@example.org').profile.email_confirmed
)
def testActivationEmailExpires(self):
self._register({})
u = User.objects.get(email='test@example.org')
u.profile.email_confirm_sent_on = now() - timedelta(days=5*365)
u.profile.save()
resp = self.c.get(
reverse('confirm-email', args=(u.profile.email_confirm_code,)),
follow=True
)
self.assertEqual(resp.status_code, 200)
self.assertIn('subscriptions/not_confirmed.html', [t.name for t in resp.templates])
self.assertFalse(
User.objects.get(email='test@example.org').profile.email_confirmed
)
@override_settings(
EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend',
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'STORAGE': 'ram'
},
}
)
class SubscriptionTest(TestCase):
"""TODO: Test subscribing to search results and getting a
notification. Also test getting a notification email."""
def setUp(self):
self.u = User.objects.create_user(
username="tester",
email="test@example.org",
password="test_password"
)
UserProfile.objects.create(user=self.u, email_confirmed=now())
self.c = Client()
self.c.login(email="test@example.org",
password="test_password")
# ---- Utilities
def _addAgendaItems(self):
from decisions.ahjo.models import AgendaItem
import os.path
base_path = os.path.dirname(os.path.abspath(__file__))
agenda_json_path = os.path.join(base_path, "agenda_items.json")
with open(agenda_json_path) as f:
for item in json.load(f)["objects"]:
AgendaItem.objects.create_from_json(item)
def _indexAndProcess(self):
call_command("rebuild_index", verbosity=0, interactive=False)
call_command("process_subscriptions", verbosity=0)
# --- Actual tests
def testSaveASearch(self):
self.assertEqual(
Subscription.objects.filter(subscribed_users=self.u).count(),
0
)
resp = self.c.post("/subscriptions/add/", {
"search_backend": 0,
"search_term": "omena",
"send_mail": False
}, follow=True)
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.redirect_chain), 1)
self.assertEqual(
Subscription.objects.filter(subscribed_users=self.u).count(),
1
)
sub = SubscriptionUser.objects.get(user=self.u)
self.assertFalse(sub.send_mail)
self.assertEqual(sub.subscription.search_term, "omena")
def testSaveASearchWithEmail(self):
self.assertEqual(
Subscription.objects.filter(subscribed_users=self.u).count(),
0
)
resp = self.c.post("/subscriptions/add/", {
"search_backend": 0,
"search_term": "omena",
"send_mail": True
}, follow=True)
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.redirect_chain), 1)
self.assertEqual(
Subscription.objects.filter(subscribed_users=self.u).count(),
1
)
sub = SubscriptionUser.objects.get(user=self.u)
self.assertTrue(sub.send_mail)
self.assertEqual(sub.subscription.search_term, "omena")
def testNewItemDoesNotNotify(self):
"""Just adding and indexing new items that won't match doesn't create
notifications
"""
resp = self.c.post("/subscriptions/add/", {
"search_term": "omena",
"send_mail": True
}, follow=True)
self.assertEqual(resp.status_code, 200)
self._addAgendaItems()
self._indexAndProcess()
self.assertEqual(len(getattr(mail, "outbox", ())), 0)
self.assertEqual(
SubscriptionHit.objects.filter(notified_users=self.u).count(),
0
)
# XXX Fails for some reason on Travis
def _testNewHitCreatesNotification(self):
self.assertEqual(Subscription.objects.count(), 0)
resp = self.c.post("/subscriptions/add/", {
"search_term": "asukasvalinnat",
"send_mail": False
}, follow=True)
self.assertEqual(resp.status_code, 200)
self.assertEqual(Subscription.objects.count(), 1)
s = Subscription.objects.get(search_term="asukasvalinnat")
s.created = datetime(2014, 3, 27, 12, 16, 54, 858182,
tzinfo=pytz.UTC)
s.save()
self._addAgendaItems()
self._indexAndProcess()
self.assertEqual(
SubscriptionHit.objects.filter(notified_users=self.u).count(),
1
)
self.assertEqual(len(getattr(mail, "outbox", ())), 0)
# XXX Fails for some reason on Travis
def _testNewHitSendsNotificationEmail(self):
self.assertEqual(Subscription.objects.count(), 0)
resp = self.c.post("/subscriptions/add/", {
"search_term": "asukasvalinnat",
"send_mail": True
}, follow=True)
self.assertEqual(resp.status_code, 200)
self.assertEqual(Subscription.objects.count(), 1)
s = Subscription.objects.get(search_term="asukasvalinnat")
s.created = datetime(2014, 3, 27, 12, 16, 54, 858182,
tzinfo=pytz.UTC)
s.save()
self._addAgendaItems()
self._indexAndProcess()
self.assertEqual(len(getattr(mail, "outbox", ())), 1)
self.assertEqual(
SubscriptionHit.objects.filter(notified_users=self.u).count(),
1
)
|
|
"""A library of helper functions for the CherryPy test suite."""
import datetime
import logging
log = logging.getLogger(__name__)
import os
thisdir = os.path.abspath(os.path.dirname(__file__))
serverpem = os.path.join(os.getcwd(), thisdir, 'test.pem')
import unittest
import re
import sys
import time
import warnings
import io
import six
import cherrypy
from cherrypy._cpcompat import text_or_bytes, copyitems, HTTPSConnection, ntob
from cherrypy.lib import httputil
from cherrypy.lib import gctools
from cherrypy.lib.reprconf import unrepr
from cherrypy.test import webtest
# Use subprocess module from Python 2.7 on Python 2.3-2.6
if sys.version_info < (2, 7):
import cherrypy._cpcompat_subprocess as subprocess
else:
import subprocess
import nose
_testconfig = None
def get_tst_config(overconf={}):
global _testconfig
if _testconfig is None:
conf = {
'scheme': 'http',
'protocol': "HTTP/1.1",
'port': 54583,
'host': '127.0.0.1',
'validate': False,
'server': 'wsgi',
}
try:
import testconfig
_conf = testconfig.config.get('supervisor', None)
if _conf is not None:
for k, v in _conf.items():
if isinstance(v, text_or_bytes):
_conf[k] = unrepr(v)
conf.update(_conf)
except ImportError:
pass
_testconfig = conf
conf = _testconfig.copy()
conf.update(overconf)
return conf
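# Example (illustrative): get_tst_config({'port': 8080}) returns the defaults
# above with 'port' overridden to 8080; when the nose-testconfig plugin provides
# a 'supervisor' section, those values are merged in before the override.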
class Supervisor(object):
"""Base class for modeling and controlling servers during testing."""
def __init__(self, **kwargs):
for k, v in kwargs.items():
            if k == 'port':
                # Coerce the port to int; `continue` so the generic assignment
                # below does not overwrite it with the original string value.
                setattr(self, k, int(v))
                continue
            setattr(self, k, v)
def log_to_stderr(msg, level):
    return sys.stderr.write(msg + os.linesep)
class LocalSupervisor(Supervisor):
"""Base class for modeling/controlling servers which run in the same
process.
When the server side runs in a different process, start/stop can dump all
state between each test module easily. When the server side runs in the
same process as the client, however, we have to do a bit more work to
ensure config and mounted apps are reset between tests.
"""
using_apache = False
using_wsgi = False
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
cherrypy.server.httpserver = self.httpserver_class
        # This is perhaps the wrong place for this call but this is the only
        # place that I've found so far that I KNOW is early enough to set this.
cherrypy.config.update({'log.screen': False})
engine = cherrypy.engine
if hasattr(engine, "signal_handler"):
engine.signal_handler.subscribe()
if hasattr(engine, "console_control_handler"):
engine.console_control_handler.subscribe()
#engine.subscribe('log', log_to_stderr)
def start(self, modulename=None):
"""Load and start the HTTP server."""
if modulename:
# Unhook httpserver so cherrypy.server.start() creates a new
# one (with config from setup_server, if declared).
cherrypy.server.httpserver = None
cherrypy.engine.start()
self.sync_apps()
def sync_apps(self):
"""Tell the server about any apps which the setup functions mounted."""
pass
def stop(self):
td = getattr(self, 'teardown', None)
if td:
td()
cherrypy.engine.exit()
for name, server in copyitems(getattr(cherrypy, 'servers', {})):
server.unsubscribe()
del cherrypy.servers[name]
class NativeServerSupervisor(LocalSupervisor):
"""Server supervisor for the builtin HTTP server."""
httpserver_class = "cherrypy._cpnative_server.CPHTTPServer"
using_apache = False
using_wsgi = False
def __str__(self):
return "Builtin HTTP Server on %s:%s" % (self.host, self.port)
class LocalWSGISupervisor(LocalSupervisor):
"""Server supervisor for the builtin WSGI server."""
httpserver_class = "cherrypy._cpwsgi_server.CPWSGIServer"
using_apache = False
using_wsgi = True
def __str__(self):
return "Builtin WSGI Server on %s:%s" % (self.host, self.port)
def sync_apps(self):
"""Hook a new WSGI app into the origin server."""
cherrypy.server.httpserver.wsgi_app = self.get_app()
def get_app(self, app=None):
"""Obtain a new (decorated) WSGI app to hook into the origin server."""
if app is None:
app = cherrypy.tree
if self.validate:
try:
from wsgiref import validate
except ImportError:
warnings.warn(
"Error importing wsgiref. The validator will not run.")
else:
# wraps the app in the validator
app = validate.validator(app)
return app
def get_cpmodpy_supervisor(**options):
from cherrypy.test import modpy
sup = modpy.ModPythonSupervisor(**options)
sup.template = modpy.conf_cpmodpy
return sup
def get_modpygw_supervisor(**options):
from cherrypy.test import modpy
sup = modpy.ModPythonSupervisor(**options)
sup.template = modpy.conf_modpython_gateway
sup.using_wsgi = True
return sup
def get_modwsgi_supervisor(**options):
from cherrypy.test import modwsgi
return modwsgi.ModWSGISupervisor(**options)
def get_modfcgid_supervisor(**options):
from cherrypy.test import modfcgid
return modfcgid.ModFCGISupervisor(**options)
def get_modfastcgi_supervisor(**options):
from cherrypy.test import modfastcgi
return modfastcgi.ModFCGISupervisor(**options)
def get_wsgi_u_supervisor(**options):
cherrypy.server.wsgi_version = ('u', 0)
return LocalWSGISupervisor(**options)
class CPWebCase(webtest.WebCase):
script_name = ""
scheme = "http"
available_servers = {'wsgi': LocalWSGISupervisor,
'wsgi_u': get_wsgi_u_supervisor,
'native': NativeServerSupervisor,
'cpmodpy': get_cpmodpy_supervisor,
'modpygw': get_modpygw_supervisor,
'modwsgi': get_modwsgi_supervisor,
'modfcgid': get_modfcgid_supervisor,
'modfastcgi': get_modfastcgi_supervisor,
}
default_server = "wsgi"
@classmethod
def _setup_server(cls, supervisor, conf):
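        """Log environment details, merge ``conf`` with the supervisor's
        socket settings and return the resulting base configuration dict.
        """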
v = sys.version.split()[0]
log.info("Python version used to run this test script: %s" % v)
log.info("CherryPy version: %s" % cherrypy.__version__)
if supervisor.scheme == "https":
ssl = " (ssl)"
else:
ssl = ""
log.info("HTTP server version: %s%s" % (supervisor.protocol, ssl))
log.info("PID: %s" % os.getpid())
cherrypy.server.using_apache = supervisor.using_apache
cherrypy.server.using_wsgi = supervisor.using_wsgi
if sys.platform[:4] == 'java':
cherrypy.config.update({'server.nodelay': False})
if isinstance(conf, text_or_bytes):
parser = cherrypy.lib.reprconf.Parser()
conf = parser.dict_from_file(conf).get('global', {})
else:
conf = conf or {}
baseconf = conf.copy()
baseconf.update({'server.socket_host': supervisor.host,
'server.socket_port': supervisor.port,
'server.protocol_version': supervisor.protocol,
'environment': "test_suite",
})
if supervisor.scheme == "https":
#baseconf['server.ssl_module'] = 'builtin'
baseconf['server.ssl_certificate'] = serverpem
baseconf['server.ssl_private_key'] = serverpem
# helper must be imported lazily so the coverage tool
# can run against module-level statements within cherrypy.
# Also, we have to do "from cherrypy.test import helper",
# exactly like each test module does, because a relative import
# would stick a second instance of webtest in sys.modules,
# and we wouldn't be able to globally override the port anymore.
if supervisor.scheme == "https":
webtest.WebCase.HTTP_CONN = HTTPSConnection
return baseconf
@classmethod
def setup_class(cls):
''
# Creates a server
conf = get_tst_config()
supervisor_factory = cls.available_servers.get(
conf.get('server', 'wsgi'))
if supervisor_factory is None:
raise RuntimeError('Unknown server in config: %s' % conf['server'])
supervisor = supervisor_factory(**conf)
# Copied from "run_test_suite"
cherrypy.config.reset()
baseconf = cls._setup_server(supervisor, conf)
cherrypy.config.update(baseconf)
setup_client()
if hasattr(cls, 'setup_server'):
# Clear the cherrypy tree and clear the wsgi server so that
# it can be updated with the new root
cherrypy.tree = cherrypy._cptree.Tree()
cherrypy.server.httpserver = None
cls.setup_server()
# Add a resource for verifying there are no refleaks
# to *every* test class.
cherrypy.tree.mount(gctools.GCRoot(), '/gc')
cls.do_gc_test = True
supervisor.start(cls.__module__)
cls.supervisor = supervisor
@classmethod
def teardown_class(cls):
''
if hasattr(cls, 'setup_server'):
cls.supervisor.stop()
do_gc_test = False
def test_gc(self):
if not self.do_gc_test:
return
self.getPage("/gc/stats")
try:
self.assertBody("Statistics:")
        except Exception:
            # Failures occur intermittently. See #1420
            pass
def prefix(self):
return self.script_name.rstrip("/")
def base(self):
if ((self.scheme == "http" and self.PORT == 80) or
(self.scheme == "https" and self.PORT == 443)):
port = ""
else:
port = ":%s" % self.PORT
return "%s://%s%s%s" % (self.scheme, self.HOST, port,
self.script_name.rstrip("/"))
def exit(self):
sys.exit()
def getPage(self, url, headers=None, method="GET", body=None,
protocol=None, raise_subcls=None):
"""Open the url. Return status, headers, body.
`raise_subcls` must be a tuple with the exceptions classes
or a single exception class that are not going to be considered
a socket.error regardless that they were are subclass of a
socket.error and therefore not considered for a connection retry.
"""
if self.script_name:
url = httputil.urljoin(self.script_name, url)
return webtest.WebCase.getPage(self, url, headers, method, body,
protocol, raise_subcls)
def skip(self, msg='skipped '):
raise nose.SkipTest(msg)
def assertErrorPage(self, status, message=None, pattern=''):
"""Compare the response body with a built in error page.
The function will optionally look for the regexp pattern,
within the exception embedded in the error page."""
# This will never contain a traceback
page = cherrypy._cperror.get_error_page(status, message=message)
# First, test the response body without checking the traceback.
# Stick a match-all group (.*) in to grab the traceback.
def esc(text):
return re.escape(ntob(text))
epage = re.escape(page)
epage = epage.replace(
esc('<pre id="traceback"></pre>'),
esc('<pre id="traceback">') + ntob('(.*)') + esc('</pre>'))
m = re.match(epage, self.body, re.DOTALL)
if not m:
self._handlewebError(
'Error page does not match; expected:\n' + page)
return
# Now test the pattern against the traceback
if pattern is None:
# Special-case None to mean that there should be *no* traceback.
if m and m.group(1):
self._handlewebError('Error page contains traceback')
else:
if (m is None) or (
not re.search(ntob(re.escape(pattern), self.encoding),
m.group(1))):
msg = 'Error page does not contain %s in traceback'
self._handlewebError(msg % repr(pattern))
date_tolerance = 2
def assertEqualDates(self, dt1, dt2, seconds=None):
"""Assert abs(dt1 - dt2) is within Y seconds."""
if seconds is None:
seconds = self.date_tolerance
if dt1 > dt2:
diff = dt1 - dt2
else:
diff = dt2 - dt1
if not diff < datetime.timedelta(seconds=seconds):
raise AssertionError('%r and %r are not within %r seconds.' %
(dt1, dt2, seconds))
def _test_method_sorter(_, x, y):
"""Monkeypatch the test sorter to always run test_gc last in each suite."""
if x == 'test_gc':
return 1
if y == 'test_gc':
return -1
if x > y:
return 1
if x < y:
return -1
return 0
unittest.TestLoader.sortTestMethodsUsing = _test_method_sorter
def setup_client():
"""Set up the WebCase classes to match the server's socket settings."""
webtest.WebCase.PORT = cherrypy.server.socket_port
webtest.WebCase.HOST = cherrypy.server.socket_host
if cherrypy.server.ssl_certificate:
CPWebCase.scheme = 'https'
# --------------------------- Spawning helpers --------------------------- #
class CPProcess(object):
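    """Helper for spawning a ``cherryd`` subprocess and controlling it during tests."""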
pid_file = os.path.join(thisdir, 'test.pid')
config_file = os.path.join(thisdir, 'test.conf')
config_template = """[global]
server.socket_host: '%(host)s'
server.socket_port: %(port)s
checker.on: False
log.screen: False
log.error_file: r'%(error_log)s'
log.access_file: r'%(access_log)s'
%(ssl)s
%(extra)s
"""
error_log = os.path.join(thisdir, 'test.error.log')
access_log = os.path.join(thisdir, 'test.access.log')
def __init__(self, wait=False, daemonize=False, ssl=False,
socket_host=None, socket_port=None):
self.wait = wait
self.daemonize = daemonize
self.ssl = ssl
self.host = socket_host or cherrypy.server.socket_host
self.port = socket_port or cherrypy.server.socket_port
def write_conf(self, extra=""):
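        """Render ``config_template`` and write it to ``self.config_file``."""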
if self.ssl:
serverpem = os.path.join(thisdir, 'test.pem')
ssl = """
server.ssl_certificate: r'%s'
server.ssl_private_key: r'%s'
""" % (serverpem, serverpem)
else:
ssl = ""
conf = self.config_template % {
'host': self.host,
'port': self.port,
'error_log': self.error_log,
'access_log': self.access_log,
'ssl': ssl,
'extra': extra,
}
with io.open(self.config_file, 'w', encoding='utf-8') as f:
f.write(six.text_type(conf))
def start(self, imports=None):
"""Start cherryd in a subprocess."""
cherrypy._cpserver.wait_for_free_port(self.host, self.port)
args = [
os.path.join(thisdir, '..', 'cherryd'),
'-c', self.config_file,
'-p', self.pid_file,
]
if not isinstance(imports, (list, tuple)):
imports = [imports]
for i in imports:
if i:
args.append('-i')
args.append(i)
if self.daemonize:
args.append('-d')
env = os.environ.copy()
# Make sure we import the cherrypy package in which this module is
# defined.
grandparentdir = os.path.abspath(os.path.join(thisdir, '..', '..'))
if env.get('PYTHONPATH', ''):
env['PYTHONPATH'] = os.pathsep.join(
(grandparentdir, env['PYTHONPATH']))
else:
env['PYTHONPATH'] = grandparentdir
self._proc = subprocess.Popen([sys.executable] + args, env=env)
if self.wait:
self.exit_code = self._proc.wait()
else:
cherrypy._cpserver.wait_for_occupied_port(self.host, self.port)
# Give the engine a wee bit more time to finish STARTING
if self.daemonize:
time.sleep(2)
else:
time.sleep(1)
def get_pid(self):
if self.daemonize:
return int(open(self.pid_file, 'rb').read())
return self._proc.pid
def join(self):
"""Wait for the process to exit."""
if self.daemonize:
return self._join_daemon()
self._proc.wait()
def _join_daemon(self):
try:
try:
# Mac, UNIX
os.wait()
except AttributeError:
# Windows
try:
pid = self.get_pid()
except IOError:
# Assume the subprocess deleted the pidfile on shutdown.
pass
else:
os.waitpid(pid, 0)
except OSError:
x = sys.exc_info()[1]
if x.args != (10, 'No child processes'):
raise
|
|
# Copyright (c) 2015, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty
from .validators import (integer, boolean, s3_bucket_name, notification_type,
notification_event, json_checker, task_type,
operating_system, compliance_level)
class NotificationConfig(AWSProperty):
props = {
'NotificationArn': (basestring, False),
'NotificationEvents': ([notification_event], False),
'NotificationType': (notification_type, False),
}
class LoggingInfo(AWSProperty):
props = {
'Region': (basestring, True),
'S3Bucket': (s3_bucket_name, True),
'S3Prefix': (basestring, False),
}
class MaintenanceWindowAutomationParameters(AWSProperty):
props = {
'DocumentVersion': (basestring, False),
'Parameters': (dict, False),
}
class MaintenanceWindowLambdaParameters(AWSProperty):
props = {
'ClientContext': (basestring, False),
'Payload': (json_checker, False),
'Qualifier': (basestring, False),
}
class MaintenanceWindowRunCommandParameters(AWSProperty):
props = {
'Comment': (basestring, False),
'DocumentHash': (basestring, False),
'DocumentHashType': (basestring, False),
'NotificationConfig': (NotificationConfig, False),
'OutputS3BucketName': (s3_bucket_name, False),
'OutputS3KeyPrefix': (basestring, False),
'Parameters': (dict, False),
'ServiceRoleArn': (basestring, False),
'TimeoutSeconds': (integer, False),
}
class MaintenanceWindowStepFunctionsParameters(AWSProperty):
props = {
'Input': (basestring, False),
'Name': (basestring, False),
}
class PatchFilter(AWSProperty):
props = {
'Key': (basestring, True),
'Values': ([basestring], True),
}
class PatchFilterGroup(AWSProperty):
props = {
'PatchFilters': ([PatchFilter], False),
}
class Rule(AWSProperty):
props = {
'ApproveAfterDays': (integer, False),
'ComplianceLevel': (compliance_level, False),
        'PatchFilterGroup': (PatchFilterGroup, False),
}
class RuleGroup(AWSProperty):
props = {
'PatchRules': ([Rule], False),
}
class TaskInvocationParameters(AWSProperty):
props = {
'MaintenanceWindowAutomationParameters':
(MaintenanceWindowAutomationParameters, False),
'MaintenanceWindowLambdaParameters':
(MaintenanceWindowLambdaParameters, False),
'MaintenanceWindowRunCommandParameters':
(MaintenanceWindowRunCommandParameters, False),
'MaintenanceWindowStepFunctionsParameters':
(MaintenanceWindowStepFunctionsParameters, False),
}
class Targets(AWSProperty):
props = {
'Key': (basestring, True),
'Values': ([basestring], True),
}
class Association(AWSObject):
resource_type = "AWS::SSM::Association"
props = {
'DocumentVersion': (basestring, False),
'InstanceId': (basestring, False),
'Name': (basestring, True),
'Parameters': (dict, False),
'ScheduleExpression': (basestring, False),
'Targets': ([Targets], False),
}
class Document(AWSObject):
resource_type = "AWS::SSM::Document"
props = {
# Need a better implementation of the SSM Document
'Content': (dict, True),
'DocumentType': (basestring, False),
}
class MaintenanceWindow(AWSObject):
resource_type = "AWS::SSM::MaintenanceWindow"
props = {
'AllowUnassociatedTargets': (boolean, True),
'Cutoff': (integer, True),
'Description': (basestring, False),
'Duration': (integer, True),
'Name': (basestring, True),
'Schedule': (basestring, True),
}
class MaintenanceWindowTarget(AWSObject):
resource_type = "AWS::SSM::MaintenanceWindowTarget"
props = {
'Description': (basestring, False),
'Name': (basestring, False),
'OwnerInformation': (basestring, False),
'ResourceType': (basestring, True),
'Targets': ([Targets], True),
'WindowId': (basestring, True),
}
class MaintenanceWindowTask(AWSObject):
resource_type = "AWS::SSM::MaintenanceWindowTask"
props = {
'Description': (basestring, False),
'LoggingInfo': (LoggingInfo, False),
'MaxConcurrency': (integer, False),
'MaxErrors': (integer, True),
'Name': (basestring, False),
'Priority': (integer, True),
'ServiceRoleArn': (basestring, True),
'Targets': ([Targets], True),
'TaskArn': (basestring, True),
'TaskInvocationParameters': (TaskInvocationParameters, False),
'TaskParameters': (dict, False),
'TaskType': (task_type, True),
'WindowId': (basestring, False),
}
class Parameter(AWSObject):
resource_type = "AWS::SSM::Parameter"
props = {
'Description': (basestring, False),
'Name': (basestring, False),
'Type': (basestring, True),
'Value': (basestring, True),
}
class PatchBaseline(AWSObject):
resource_type = "AWS::SSM::PatchBaseline"
props = {
'ApprovalRules': (RuleGroup, False),
'ApprovedPatches': ([basestring], False),
'ApprovedPatchesComplianceLevel': (compliance_level, False),
'Description': (basestring, False),
'GlobalFilters': (PatchFilterGroup, False),
'Name': (basestring, True),
'OperatingSystem': (operating_system, False),
'PatchGroups': ([basestring], False),
'RejectedPatches': ([basestring], False),
}
|
|
# Copyright 2017, Major Hayden <major@mhtx.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the output methods."""
import json
from monitorstack.common import formatters
SAMPLE_RESULT = {
'exit_code': 0,
'message': 'uptime is ok',
'measurement_name': 'system_uptime',
'meta': {
'platform': 'example_platform',
},
'variables': {
'uptime': '29587.75'
}
}
SAMPLE_RESULT_ERROR = {
'exit_code': 1,
'message': 'uptime failed',
'measurement_name': 'system_uptime',
'meta': {},
'variables': {}
}
SAMPLE_RESULT_MEASUREMENT_TYPE = {
'exit_code': 0,
'message': 'uptime is ok',
'measurement_name': 'system_uptime',
'measurement_type': 'testType',
'meta': {
'platform': 'example_platform',
},
'variables': {
'uptime': '29587.75'
}
}
SAMPLE_RESULT_MEASUREMENT_UNITS = {
'exit_code': 0,
'message': 'uptime is ok',
'measurement_name': 'system_uptime',
'measurement_units': 'testUnits',
'meta': {
'platform': 'example_platform',
},
'variables': {
'uptime': '29587.75'
}
}
SAMPLE_RESULT_NO_META = {
'exit_code': 0,
'message': 'uptime is ok',
'measurement_name': 'system_uptime',
'variables': {
'uptime': '29587.75'
}
}
SAMPLE_RESULT_NO_META_WITH_FLOAT = {
'exit_code': 0,
'message': 'uptime is ok',
'measurement_name': 'system_uptime',
'variables': {
'uptime': float(29587.75)
}
}
class TestFormatters(object):
"""Tests for the base cli module."""
def test_current_time(self):
"""Test current_time()."""
result = formatters._current_time()
assert isinstance(result, int)
assert result > 0
def test__get_value_types_int32(self):
"""Test _get_value_types() with int."""
value, m_type = formatters._get_value_types(1)
assert value == 1
assert m_type == 'int32'
def test__get_value_types_int32_str(self):
"""Test _get_value_types() with int."""
value, m_type = formatters._get_value_types('1')
assert value == 1
assert m_type == 'int32'
def test__get_value_types_int64(self):
"""Test _get_value_types() with int."""
value, m_type = formatters._get_value_types(9999999999)
assert value == 9999999999
assert m_type == 'int64'
def test__get_value_types_int64_str(self):
"""Test _get_value_types() with int."""
value, m_type = formatters._get_value_types('9999999999')
assert value == 9999999999
assert m_type == 'int64'
def test__get_value_types_float(self):
"""Test _get_value_types() with float."""
value, m_type = formatters._get_value_types(1.1)
assert value == 1.1
assert m_type == 'float'
def test__get_value_types_float_str(self):
"""Test _get_value_types() with float."""
value, m_type = formatters._get_value_types('1.1')
assert value == 1.1
assert m_type == 'float'
def test__get_value_types_set_m_type(self):
"""Test _get_value_types() with float."""
value, m_type = formatters._get_value_types('1.1', 'double')
assert value == 1.1
assert m_type == 'double'
def test__get_value_types_string(self):
"""Test _get_value_types() with str."""
value, m_type = formatters._get_value_types('TestString')
assert value == 'TestString'
assert m_type == 'string'
def test_write_json(self, capsys):
"""Test write_json() module."""
formatters.write_json(SAMPLE_RESULT)
out, err = capsys.readouterr()
result_json = json.loads(out)
assert isinstance(result_json, dict)
assert result_json['measurement_name'] == \
SAMPLE_RESULT['measurement_name']
def test_write_line(self, capsys):
"""Test write_line() module."""
formatters.write_line(SAMPLE_RESULT)
out, err = capsys.readouterr()
assert out == "uptime {}\n".format(
SAMPLE_RESULT['variables']['uptime']
)
def test_write_telegraf(self, capsys):
"""Test write_telegraf() module."""
formatters.write_telegraf(SAMPLE_RESULT)
out, err = capsys.readouterr()
assert out.startswith(SAMPLE_RESULT['measurement_name'])
def test_write_telegraf_without_meta(self, capsys):
"""Test write_telegrat() module without meta in result."""
formatters.write_telegraf(SAMPLE_RESULT_NO_META)
out, err = capsys.readouterr()
assert out.startswith(SAMPLE_RESULT['measurement_name'])
def test_write_telegraf_line_format_with_float(self):
"""Test _telegraf_line_format() with float in meta."""
sets = {
'platform': 'example_platform',
'othervar': float(3)
}
result = formatters._telegraf_line_format(sets=sets, quote=True)
assert isinstance(result, str)
assert 'othervar=3' in result
assert 'platform="example_platform"' in result
def test_write_rax_maas(self, capsys):
"""Test write_telegraf() module."""
formatters.write_rax_maas(SAMPLE_RESULT)
out, err = capsys.readouterr()
assert SAMPLE_RESULT['message'] in out
assert 'metric uptime float 29587.75' in out
def test_write_rax_maas_with_types(self, capsys):
"""Test write_telegraf() module."""
formatters.write_rax_maas(SAMPLE_RESULT_MEASUREMENT_TYPE)
out, err = capsys.readouterr()
assert SAMPLE_RESULT['message'] in out
assert 'metric uptime testType 29587.75' in out
def test_write_rax_maas_with_units(self, capsys):
"""Test write_telegraf() module."""
formatters.write_rax_maas(SAMPLE_RESULT_MEASUREMENT_UNITS)
out, err = capsys.readouterr()
out_split = out.splitlines()
assert [i for i in out_split if SAMPLE_RESULT['message'] in i]
assert 'metric uptime float 29587.75 testUnits' in out_split
def test_write_rax_maas_with_error(self, capsys):
"""Test write_telegraf() module."""
formatters.write_rax_maas(SAMPLE_RESULT_ERROR)
out, err = capsys.readouterr()
out_split = out.splitlines()
assert [i for i in out_split if 'status error' in i]
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.billing.budgets_v1.types import budget_model
from google.cloud.billing.budgets_v1.types import budget_service
from google.protobuf import empty_pb2 # type: ignore
from .base import BudgetServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import BudgetServiceGrpcTransport
class BudgetServiceGrpcAsyncIOTransport(BudgetServiceTransport):
"""gRPC AsyncIO backend transport for BudgetService.
BudgetService stores Cloud Billing budgets, which define a
budget plan and rules to execute as we track spend against that
plan.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "billingbudgets.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "billingbudgets.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def create_budget(
self,
) -> Callable[[budget_service.CreateBudgetRequest], Awaitable[budget_model.Budget]]:
r"""Return a callable for the create budget method over gRPC.
Creates a new budget. See `Quotas and
limits <https://cloud.google.com/billing/quotas>`__ for more
information on the limits of the number of budgets you can
create.
Returns:
Callable[[~.CreateBudgetRequest],
Awaitable[~.Budget]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_budget" not in self._stubs:
self._stubs["create_budget"] = self.grpc_channel.unary_unary(
"/google.cloud.billing.budgets.v1.BudgetService/CreateBudget",
request_serializer=budget_service.CreateBudgetRequest.serialize,
response_deserializer=budget_model.Budget.deserialize,
)
return self._stubs["create_budget"]
@property
def update_budget(
self,
) -> Callable[[budget_service.UpdateBudgetRequest], Awaitable[budget_model.Budget]]:
r"""Return a callable for the update budget method over gRPC.
Updates a budget and returns the updated budget.
WARNING: There are some fields exposed on the Google
Cloud Console that aren't available on this API. Budget
fields that are not exposed in this API will not be
changed by this method.
Returns:
Callable[[~.UpdateBudgetRequest],
Awaitable[~.Budget]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_budget" not in self._stubs:
self._stubs["update_budget"] = self.grpc_channel.unary_unary(
"/google.cloud.billing.budgets.v1.BudgetService/UpdateBudget",
request_serializer=budget_service.UpdateBudgetRequest.serialize,
response_deserializer=budget_model.Budget.deserialize,
)
return self._stubs["update_budget"]
@property
def get_budget(
self,
) -> Callable[[budget_service.GetBudgetRequest], Awaitable[budget_model.Budget]]:
r"""Return a callable for the get budget method over gRPC.
Returns a budget.
WARNING: There are some fields exposed on the Google
Cloud Console that aren't available on this API. When
reading from the API, you will not see these fields in
the return value, though they may have been set in the
Cloud Console.
Returns:
Callable[[~.GetBudgetRequest],
Awaitable[~.Budget]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_budget" not in self._stubs:
self._stubs["get_budget"] = self.grpc_channel.unary_unary(
"/google.cloud.billing.budgets.v1.BudgetService/GetBudget",
request_serializer=budget_service.GetBudgetRequest.serialize,
response_deserializer=budget_model.Budget.deserialize,
)
return self._stubs["get_budget"]
@property
def list_budgets(
self,
) -> Callable[
[budget_service.ListBudgetsRequest],
Awaitable[budget_service.ListBudgetsResponse],
]:
r"""Return a callable for the list budgets method over gRPC.
Returns a list of budgets for a billing account.
WARNING: There are some fields exposed on the Google
Cloud Console that aren't available on this API. When
reading from the API, you will not see these fields in
the return value, though they may have been set in the
Cloud Console.
Returns:
Callable[[~.ListBudgetsRequest],
Awaitable[~.ListBudgetsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_budgets" not in self._stubs:
self._stubs["list_budgets"] = self.grpc_channel.unary_unary(
"/google.cloud.billing.budgets.v1.BudgetService/ListBudgets",
request_serializer=budget_service.ListBudgetsRequest.serialize,
response_deserializer=budget_service.ListBudgetsResponse.deserialize,
)
return self._stubs["list_budgets"]
@property
def delete_budget(
self,
) -> Callable[[budget_service.DeleteBudgetRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete budget method over gRPC.
Deletes a budget. Returns successfully if already
deleted.
Returns:
Callable[[~.DeleteBudgetRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_budget" not in self._stubs:
self._stubs["delete_budget"] = self.grpc_channel.unary_unary(
"/google.cloud.billing.budgets.v1.BudgetService/DeleteBudget",
request_serializer=budget_service.DeleteBudgetRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_budget"]
def close(self):
return self.grpc_channel.close()
__all__ = ("BudgetServiceGrpcAsyncIOTransport",)
|
|
# -*- coding: utf-8 -*-
import operator
from copy import deepcopy
from operator import itemgetter
from typing import Callable, Iterable, List, Dict
from alamo_common.parser.exceptions import ParseResultError
class BaseOperator(object):
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, str(self))
def safe_compare(value: float,
criteria: float,
condition: Callable,
default: bool = False) -> bool:
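    """Apply ``condition(value, criteria)`` and return ``default`` when the
    comparison raises ``TypeError`` (e.g. when ``value`` is ``None``).
    """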
try:
return condition(value, criteria)
except TypeError:
        # Comparing against `None` is not treated as a failure; use the default.
return default
class CompareMixin(object):
def filter(self,
values: Iterable,
threshold: float,
condition: Callable) -> List:
return list(filter(
lambda v: safe_compare(v, threshold, condition), values
))
class Hysteresis(CompareMixin):
"""It's a virtual data type that performs comparison
to some criteria and hysteresis and
result wraps as `ResultBucket` object.
"""
__slots__ = ('values', 'hysteresis', 'rule')
def __init__(self, values, hysteresis, rule):
self.values = values
self.hysteresis = hysteresis
self.rule = rule
def __str__(self):
return '{}'.format(self.values)
def __lt__(self, other):
return self.cmp(other, other + self.hysteresis, operator.lt)
def __le__(self, other):
return self.cmp(other, other + self.hysteresis, operator.le)
def __eq__(self, other):
return self.cmp(other, other, operator.eq)
def __ne__(self, other):
return self.cmp(other, other, operator.ne)
def __gt__(self, other):
return self.cmp(other, other - self.hysteresis, operator.gt)
def __ge__(self, other):
return self.cmp(other, other - self.hysteresis, operator.ge)
def cmp(self, other, hysteresis, condition) -> 'ResultBucket':
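        # Filter once against the raw threshold and once against the
        # hysteresis-shifted threshold, then report both in a single bucket.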
_threshold = self.filter(self.values, other, condition)
_hysteresis = self.filter(self.values, hysteresis, condition)
return ResultBucket(
bool(_threshold),
value_meta=_threshold,
hysteresis=bool(_hysteresis),
hysteresis_meta=_hysteresis
)
class Array(CompareMixin):
"""It's a list imitation with ability to return filtered sub-list
by applying comparison operator (eg >=, <, ...).
"""
__slots__ = ('values',)
def __init__(self, values: Iterable = None):
self.values = tuple(values or ()) # type: tuple
def __str__(self):
return '[{}]'.format(', '.join(map(str, self)))
__repr__ = __str__
def __iter__(self):
return iter(self.values)
def __getitem__(self, index):
return self.values[index]
def __lt__(self, other):
return self.cmp(other, operator.lt)
def __le__(self, other):
return self.cmp(other, operator.le)
def __eq__(self, other):
return self.cmp(other, operator.eq)
def __ne__(self, other):
return self.cmp(other, operator.ne)
def __gt__(self, other):
return self.cmp(other, operator.gt)
def __ge__(self, other):
return self.cmp(other, operator.ge)
def cmp(self, other, condition) -> List:
threshold = self.filter(self, other, condition)
return threshold
def append(self, item):
self.values += (item,)
class NamedArray(Array):
"""An array with a name.
    Comparison operations return a NamedArray object instead of a plain list.
"""
__slots__ = ('name', 'values')
def __init__(self, name: str, values: Iterable):
self.name = name
self.values = tuple(values or ())
def cmp(self, other, condition) -> 'NamedArray':
return NamedArray(name=self.name, values=super().cmp(other, condition))
def __str__(self):
return '{}: {}'.format(self.name.__repr__(), super().__str__())
__repr__ = __str__
def __bool__(self):
return bool(self.values)
class Series(Array):
"""A collection of named arrays."""
def __init__(self, values: Iterable):
self.values = tuple(values)
@classmethod
def from_dict(cls, data: Dict) -> 'Series':
return Series(NamedArray(name=k, values=v) for k, v in data.items())
    def cmp(self, other, condition) -> 'Series':
filtered = self.filter(self.values, other, condition)
threshold = (condition(array, other) for array in filtered)
return Series(threshold)
def __bool__(self):
return bool(self.values)
def __str__(self):
return '{{{}}}'.format(', '.join(map(str, self)))
class ResultBucket(BaseOperator):
"""Result bucket to wrap all kinds of results
and also keep information about hysteresis.
"""
__slots__ = (
'value',
'value_meta',
'messages',
'hysteresis',
'hysteresis_meta',
'hysteresis_message',
)
def __init__(self, value,
value_meta: List = None,
hysteresis: bool = None,
hysteresis_meta: List = None,
hysteresis_message: str = None,
message: str = None,
optional: str = None):
self.value = value
self.messages = []
if message:
self.messages.append(message)
if value and optional:
self.messages.append(optional)
self.hysteresis = hysteresis
self.hysteresis_meta = hysteresis_meta
self.hysteresis_message = hysteresis_message
self.value_meta = value_meta
def __str__(self):
return '{}'.format(self.value)
def __bool__(self):
return bool(self.value)
def __neg__(self):
neg = deepcopy(self)
# negation drops all messages
neg.messages.clear()
neg.value = not self.value
return neg
def _copy_(self, value, other) -> 'ResultBucket':
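        # Combine two buckets: keep this bucket's hysteresis data if present,
        # otherwise fall back to the other's, and merge the message lists.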
hysteresis = hysteresis_meta = hysteresis_message = None
if self.hysteresis:
hysteresis = self.hysteresis
hysteresis_meta = self.hysteresis_meta
hysteresis_message = self.hysteresis_message
elif other.hysteresis:
hysteresis = other.hysteresis
hysteresis_meta = other.hysteresis_meta
hysteresis_message = other.hysteresis_message
result = ResultBucket(
value,
hysteresis=hysteresis,
hysteresis_meta=hysteresis_meta,
hysteresis_message=hysteresis_message,
)
result.messages.extend(self.messages)
result.messages.extend(other.messages)
return result
def __and__(self, other):
value = self.value and other.value
return self._copy_(value, other)
def __or__(self, other):
value = self.value or other.value
return self._copy_(value, other)
def __lt__(self, other):
return self.value < other
def __le__(self, other):
return self.value <= other
def __eq__(self, other):
return self.value == other
def __ne__(self, other):
return self.value != other
def __gt__(self, other):
return self.value > other
def __ge__(self, other):
return self.value >= other
def is_valid(self, raise_exception: bool = False) -> bool:
valid = not bool(self.value)
if raise_exception and not valid:
message = ', '.join(self.messages) or 'Unknown reason'
raise ParseResultError(message)
return valid
class DictsArray(Array):
"""It's a list of dicts with ability to compare any
dict value by dict key.
"""
__slots__ = ('values', 'key',)
def __init__(self, values: Iterable, key: str):
self.key = key
self.values = tuple(values)
def __str__(self):
return "[{}]".format(
','.join(map(str, map(itemgetter(self.key), self.values)))
)
def __bool__(self):
return bool(self.values)
def filter(self,
values: Iterable,
threshold: float,
condition: Callable):
return DictsArray(
list(filter(
lambda v: safe_compare(v[self.key], threshold, condition),
values
)),
self.key
)
|
|
import re
import socket
import struct
from threading import Thread
import traceback
import time
from src.connection.connection import Connection
from src.connection.websocket import WebSocket
from src.helpers import websocket_helper
from src.logic.file_transfer import FileTransfer
from src.utility.exceptions import PasswordException, NewPasswordException, OperationError, HostnameResolutionError
class WifiConnection(Connection):
WEBREPL_REQ_S = "<2sBBQLH64s"
WEBREPL_PUT_FILE = 1
WEBREPL_GET_FILE = 2
WEBREPL_GET_VER = 3
def __init__(self, host, port, terminal, password_prompt):
Connection.__init__(self, terminal)
self._host = host
self._port = port
self.s = None
self.ws = None
try:
self._start_connection()
except Exception as e:
self._clear()
raise e
if not self.handle_password(password_prompt):
self._clear()
raise PasswordException()
self._reader_thread = Thread(target=self._reader_thread_routine)
self._reader_thread.start()
def _start_connection(self):
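        """Open a TCP socket to the device, perform the WebSocket handshake
        and wrap the socket in a WebSocket object.
        """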
self.s = socket.socket()
self.s.settimeout(3)
try:
errno = self.s.connect_ex((self._host, self._port))
except socket.gaierror: # Failed to resolve hostname
raise HostnameResolutionError()
if errno != 0:
raise ConnectionError("Failed to connect to the device.")
self.s.settimeout(None)
# Test if connection is working
try:
websocket_helper.client_handshake(self.s)
except (ConnectionResetError, ConnectionAbortedError):
raise ConnectionError("Device refused connection.")
self.s.setblocking(0)
self.ws = WebSocket(self.s)
return True
def _clear(self):
self.ws = None
if self.s:
self.s.close()
self.s = None
def set_password(self):
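        """Answer the device's first-run "New password:" prompt, confirming
        the hardcoded password "passw".
        """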
password = "passw".encode("utf-8") + b"\r"
self.ws.write(password)
response = self.ws.read_all().decode("utf-8")
if response.find("Confirm password:") < 0:
return False
self.ws.write(password)
try:
response = self.ws.read_all().decode("utf-8")
return response.find("Password successfully set") >= 0
except ConnectionAbortedError:
# If connection was aborted, password was set
return True
def login(self, password):
self.ws.write(password.encode("utf-8") + b"\r")
try:
response = self.ws.read_all().decode("utf-8")
return response.find("WebREPL connected") >= 0
except ConnectionAbortedError:
return False
def handle_password(self, password_prompt):
content = self.ws.read_all().decode("utf-8")
if content.find("New password:") >= 0:
self.set_password()
raise NewPasswordException()
elif content.find("Password:") >= 0:
return self.login(password_prompt("Enter WebREPL password"))
else:
return False
def is_connected(self):
return self.ws is not None
def disconnect(self):
if self.is_connected():
if self._reader_thread.is_alive():
self._reader_running = False
self._reader_thread.join()
self.s.close()
self.s = None
def read_all(self):
x = self.ws.read_all().decode("utf-8", errors="replace")
if x and self._terminal is not None:
self._terminal.add(x)
return x
# TODO: Join with serial implementation? At least the special character handling
def read_line(self):
x = self.ws.read_all(0.2)
if x and self._terminal is not None:
if x == b'\x08\x1b[K':
x = b'\x08'
if x[:3] == b'\x1b[': # Control sequence
                # TODO: regex first match 14D on x[3:]
pass
self._terminal.add(x.decode("utf-8", errors="replace"))
return x
def read_junk(self):
self.ws.read_all(0)
def read_one_byte(self):
return self.ws.read(1)
def send_character(self, char):
assert isinstance(char, str)
self.ws.write(char)
def send_bytes(self, binary):
self.ws.write(binary)
def send_line(self, line_text, ending="\r\n"):
assert isinstance(line_text, str)
assert isinstance(ending, str)
self.ws.write(line_text + ending)
@staticmethod
def read_resp(ws):
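        """Read a 4-byte WebREPL response (b"WB" signature plus a
        little-endian status code) and return the status; callers here
        treat 0 as success.
        """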
data = ws.read(4)
sig, code = struct.unpack("<2sH", data)
assert sig == b"WB"
return code
def _read_file_job(self, file_name, transfer):
assert isinstance(transfer, FileTransfer)
try:
file_size = self.get_file_size(file_name)
except OperationError:
transfer.mark_error("Failed to determine file size.")
return
if isinstance(file_name, str):
file_name = file_name.encode("utf-8")
ret = b""
rec = struct.pack(WifiConnection.WEBREPL_REQ_S, b"WA", WifiConnection.WEBREPL_GET_FILE,
0, 0, 0, len(file_name), file_name)
self._auto_reader_lock.acquire()
self._auto_read_enabled = False
self.read_junk()
self.ws.write(rec, True)
assert self.read_resp(self.ws) == 0
while True:
# Confirm message
self.ws.write(b"\1", True)
(sz,) = struct.unpack("<H", self.ws.read(2))
if sz == 0:
break
while sz:
buf = self.ws.read(sz)
if not buf:
raise OSError()
ret += buf
sz -= len(buf)
transfer.progress = len(ret) / file_size
if self.read_resp(self.ws) == 0:
transfer.mark_finished()
transfer.read_result.binary_data = ret
else:
transfer.mark_error()
transfer.read_result.binary_data = None
self._auto_read_enabled = True
self._auto_reader_lock.release()
def _write_file_job(self, file_name, text, transfer):
def mark_error_and_release():
transfer.mark_error()
self._auto_read_enabled = True
self._auto_reader_lock.release()
assert isinstance(transfer, FileTransfer)
if isinstance(file_name, str):
file_name = file_name.encode("utf-8")
if isinstance(text, str):
text = text.encode("utf-8")
sz = len(text)
rec = struct.pack(WifiConnection.WEBREPL_REQ_S, b"WA", WifiConnection.WEBREPL_PUT_FILE, 0, 0, sz,
len(file_name), file_name)
self._auto_reader_lock.acquire()
self._auto_read_enabled = False
self.read_junk()
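        # The request record is written in two chunks split at byte 10, which
        # appears to mirror the reference webrepl_cli client; the split point
        # is not a field boundary in the struct layout.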
self.ws.write(rec[:10], file_transfer=True)
self.ws.write(rec[10:], file_transfer=True)
try:
if self.read_resp(self.ws) != 0:
mark_error_and_release()
return
except TimeoutError:
mark_error_and_release()
return
cnt = 0
# Increase timeout from default value which gives
# more time to MCU to process large files.
original_timeout = self.ws.recv_timeout
self.ws.recv_timeout = 30
try:
while True:
buf = text[cnt:cnt + 256]
if not buf:
break
self.ws.write(buf, file_transfer=True)
cnt += len(buf)
transfer.progress = cnt / sz
if self.read_resp(self.ws) == 0:
transfer.mark_finished()
else:
transfer.mark_error()
except ConnectionResetError:
transfer.mark_error("Connection was reset.")
except ConnectionError:
transfer.mark_error()
except Exception as generalException:
info = "Unexpected error, report this on project's github issues page\n{}: {}\n{}".format(
type(generalException).__name__,
str(generalException),
"".join(traceback.format_tb(generalException.__traceback__))
)
transfer.mark_error(info)
finally:
self.ws.recv_timeout = original_timeout
self._auto_read_enabled = True
self._auto_reader_lock.release()
|
|
import os
import re
import traceback
import requests
import zipfile
from xml.dom import minidom
from tempfile import NamedTemporaryFile
from urlparse import urljoin
from django.conf import settings
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.mail import mail_admins
from django.utils.translation import ugettext as _
from django.core.files.storage import get_storage_class
import common_tags as tag
SLASH = u"/"
class MyError(Exception):
pass
def image_urls_for_form(xform):
return sum([
image_urls(s) for s in xform.surveys.all()
], [])
def get_path(path, suffix):
fileName, fileExtension = os.path.splitext(path)
return fileName + suffix + fileExtension
def image_urls(instance):
default_storage = get_storage_class()()
urls = []
suffix = settings.THUMB_CONF['medium']['suffix']
for a in instance.attachments.all():
if default_storage.exists(get_path(a.media_file.name, suffix)):
url = default_storage.url(
get_path(a.media_file.name, suffix))
else:
url = a.media_file.url
urls.append(url)
return urls
def parse_xform_instance(xml_str):
"""
'xml_str' is a str object holding the XML of an XForm
instance. Return a python object representation of this XML file.
"""
xml_obj = minidom.parseString(xml_str)
root_node = xml_obj.documentElement
# go through the xml object creating a corresponding python object
# NOTE: THIS WILL DESTROY ANY DATA COLLECTED WITH REPEATABLE NODES
# THIS IS OKAY FOR OUR USE CASE, BUT OTHER USERS SHOULD BEWARE.
survey_data = dict(_path_value_pairs(root_node))
assert len(list(_all_attributes(root_node))) == 1, \
_(u"There should be exactly one attribute in this document.")
survey_data.update({
tag.XFORM_ID_STRING: root_node.getAttribute(u"id"),
tag.INSTANCE_DOC_NAME: root_node.nodeName,
})
return survey_data
def _path(node):
n = node
levels = []
while n.nodeType != n.DOCUMENT_NODE:
levels = [n.nodeName] + levels
n = n.parentNode
return SLASH.join(levels[1:])
def _path_value_pairs(node):
"""
Using a depth first traversal of the xml nodes build up a python
object in parent that holds the tree structure of the data.
"""
if len(node.childNodes) == 0:
# there's no data for this leaf node
yield _path(node), None
elif len(node.childNodes) == 1 and \
node.childNodes[0].nodeType == node.TEXT_NODE:
# there is data for this leaf node
yield _path(node), node.childNodes[0].nodeValue
else:
# this is an internal node
for child in node.childNodes:
for pair in _path_value_pairs(child):
yield pair
def _all_attributes(node):
"""
Go through an XML document returning all the attributes we see.
"""
if hasattr(node, "hasAttributes") and node.hasAttributes():
for key in node.attributes.keys():
yield key, node.getAttribute(key)
for child in node.childNodes:
for pair in _all_attributes(child):
yield pair
def report_exception(subject, info, exc_info=None):
if exc_info:
cls, err = exc_info[:2]
info += _(u"Exception in request: %(class)s: %(error)s") \
% {'class': cls.__name__, 'error': err}
info += u"".join(traceback.format_exception(*exc_info))
if settings.DEBUG:
print subject
print info
else:
mail_admins(subject=subject, message=info)
def django_file(path, field_name, content_type):
# adapted from here:
# http://groups.google.com/group/django-users/browse_thread/thread/834f988876ff3c45/
f = open(path)
return InMemoryUploadedFile(
file=f,
field_name=field_name,
name=f.name,
content_type=content_type,
size=os.path.getsize(path),
charset=None
)
def export_def_from_filename(filename):
from odk_viewer.models.export import Export
path, ext = os.path.splitext(filename)
ext = ext[1:]
# try get the def from extension
mime_type = Export.EXPORT_MIMES[ext]
return ext, mime_type
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def enketo_url(form_url, id_string, instance_xml=None,
instance_id=None, return_url=None):
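    """Request a survey (or edit) URL for ``id_string`` from the Enketo API.
    Returns the URL on success, False when Enketo is not configured or no
    usable URL is returned, and raises Exception when the API responds with
    an explicit error message.
    """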
if not hasattr(settings, 'ENKETO_URL')\
and not hasattr(settings, 'ENKETO_API_SURVEY_PATH'):
return False
url = urljoin(settings.ENKETO_URL, settings.ENKETO_API_SURVEY_PATH)
values = {
'form_id': id_string,
'server_url': form_url
}
if instance_id is not None and instance_xml is not None:
url = urljoin(settings.ENKETO_URL, settings.ENKETO_API_INSTANCE_PATH)
values.update({
'instance': instance_xml,
'instance_id': instance_id,
'return_url': return_url
})
req = requests.post(url, data=values,
auth=(settings.ENKETO_API_TOKEN, '')) #, verify=False)
import pprint
pprint.pprint(url)
pprint.pprint(req.json())
if req.status_code in [200, 201]:
try:
response = req.json()
except ValueError:
pass
else:
if 'edit_url' in response:
return response['edit_url']
if 'url' in response:
return response['url']
else:
try:
response = req.json()
except ValueError:
pass
else:
if 'message' in response:
raise Exception(response['message'])
return False
def create_attachments_zipfile(attachments):
# create zip_file
tmp = NamedTemporaryFile(delete=False)
z = zipfile.ZipFile(tmp, 'w', zipfile.ZIP_DEFLATED, allowZip64=True)
for attachment in attachments:
default_storage = get_storage_class()()
if default_storage.exists(attachment.media_file.name):
try:
z.write(attachment.full_filepath, attachment.media_file.name)
except Exception, e:
report_exception("Create attachment zip exception", e)
z.close()
return tmp.name
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import json
import time
from bson import json_util
from tests import base
from girder import events
from girder.constants import AccessType
from girder.exceptions import ValidationException
from girder.models.user import User
from girder.models.token import Token
from girder_jobs.constants import JobStatus, REST_CREATE_JOB_TOKEN_SCOPE
from girder_jobs.models.job import Job
def setUpModule():
base.enabledPlugins.append('jobs')
base.startServer()
def tearDownModule():
base.stopServer()
class JobsTestCase(base.TestCase):
def setUp(self):
base.TestCase.setUp(self)
self.users = [User().createUser(
'usr' + str(n), 'passwd', 'tst', 'usr', 'u%d@u.com' % n)
for n in range(3)]
self.jobModel = Job()
def testJobs(self):
self.job = None
def schedule(event):
self.job = event.info
if self.job['handler'] == 'my_handler':
self.job['status'] = JobStatus.RUNNING
self.job = self.jobModel.save(self.job)
self.assertEqual(self.job['args'], ('hello', 'world'))
self.assertEqual(self.job['kwargs'], {'a': 'b'})
events.bind('jobs.schedule', 'test', schedule)
# Create a job
job = self.jobModel.createJob(
title='Job Title', type='my_type', args=('hello', 'world'),
kwargs={'a': 'b'}, user=self.users[1], handler='my_handler',
public=False)
self.assertEqual(self.job, None)
self.assertEqual(job['status'], JobStatus.INACTIVE)
# Schedule the job, make sure our handler was invoked
self.jobModel.scheduleJob(job)
self.assertEqual(self.job['_id'], job['_id'])
self.assertEqual(self.job['status'], JobStatus.RUNNING)
# Since the job is not public, user 2 should not have access
path = '/job/%s' % job['_id']
resp = self.request(path, user=self.users[2])
self.assertStatus(resp, 403)
resp = self.request(path, user=self.users[2], method='PUT')
self.assertStatus(resp, 403)
resp = self.request(path, user=self.users[2], method='DELETE')
self.assertStatus(resp, 403)
# If no user is specified, we should get a 401 error
resp = self.request(path, user=None)
self.assertStatus(resp, 401)
# Make sure user who created the job can see it
resp = self.request(path, user=self.users[1])
self.assertStatusOk(resp)
# We should be able to update the job as the user who created it
resp = self.request(path, method='PUT', user=self.users[1], params={
'log': 'My log message\n'
})
self.assertStatusOk(resp)
# We should be able to create a job token and use that to update it too
token = self.jobModel.createJobToken(job)
resp = self.request(path, method='PUT', params={
'log': 'append message',
'token': token['_id']
})
self.assertStatusOk(resp)
# We shouldn't get the log back in this case
self.assertNotIn('log', resp.json)
# Do a fetch on the job itself to get the log
resp = self.request(path, user=self.users[1])
self.assertStatusOk(resp)
self.assertEqual(
resp.json['log'], ['My log message\n', 'append message'])
# Test overwriting the log and updating status
resp = self.request(path, method='PUT', params={
'log': 'overwritten log',
'overwrite': 'true',
'status': JobStatus.SUCCESS,
'token': token['_id']
})
self.assertStatusOk(resp)
self.assertNotIn('log', resp.json)
self.assertEqual(resp.json['status'], JobStatus.SUCCESS)
job = self.jobModel.load(job['_id'], force=True, includeLog=True)
self.assertEqual(job['log'], ['overwritten log'])
# We should be able to delete the job as the user who created it
resp = self.request(path, user=self.users[1], method='DELETE')
self.assertStatusOk(resp)
job = self.jobModel.load(job['_id'], force=True)
self.assertIsNone(job)
def testLegacyLogBehavior(self):
# Force save a job with a string log to simulate a legacy job record
job = self.jobModel.createJob(
title='legacy', type='legacy', user=self.users[1], save=False)
job['log'] = 'legacy log'
job = self.jobModel.save(job, validate=False)
self.assertEqual(job['log'], 'legacy log')
# Load the record, we should now get the log as a list
job = self.jobModel.load(job['_id'], force=True, includeLog=True)
self.assertEqual(job['log'], ['legacy log'])
def testListJobs(self):
job = self.jobModel.createJob(title='A job', type='t', user=self.users[1], public=False)
anonJob = self.jobModel.createJob(title='Anon job', type='t')
# Ensure timestamp for public job is strictly higher (ms resolution)
time.sleep(0.1)
publicJob = self.jobModel.createJob(
title='Anon job', type='t', public=True)
# User 1 should be able to see their own jobs
resp = self.request('/job', user=self.users[1], params={
'userId': self.users[1]['_id']
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
self.assertEqual(resp.json[0]['_id'], str(job['_id']))
# User 2 should not see user 1's jobs in the list
resp = self.request('/job', user=self.users[2], params={
'userId': self.users[1]['_id']
})
self.assertEqual(resp.json, [])
# Omitting a userId should assume current user
resp = self.request('/job', user=self.users[1])
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
self.assertEqual(resp.json[0]['_id'], str(job['_id']))
# Explicitly passing "None" should show anonymous jobs
resp = self.request('/job', user=self.users[0], params={
'userId': 'none'
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 2)
self.assertEqual(resp.json[0]['_id'], str(publicJob['_id']))
self.assertEqual(resp.json[1]['_id'], str(anonJob['_id']))
# Non-admins should only see public anon jobs
resp = self.request('/job', params={'userId': 'none'})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
self.assertEqual(resp.json[0]['_id'], str(publicJob['_id']))
def testListAllJobs(self):
self.jobModel.createJob(title='user 0 job', type='t', user=self.users[0], public=False)
self.jobModel.createJob(title='user 1 job', type='t', user=self.users[1], public=False)
self.jobModel.createJob(title='user 1 job', type='t', user=self.users[1], public=True)
self.jobModel.createJob(title='user 2 job', type='t', user=self.users[2])
self.jobModel.createJob(title='anonymous job', type='t')
self.jobModel.createJob(title='anonymous public job', type='t2', public=True)
# User 0, as a site admin, should be able to see all jobs
resp = self.request('/job/all', user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 6)
# Test deprecated listAll method
jobs = list(self.jobModel.listAll(limit=0, offset=0, sort=None, currentUser=self.users[0]))
self.assertEqual(len(jobs), 6)
# get with filter
resp = self.request('/job/all', user=self.users[0], params={
'types': json.dumps(['t']),
'statuses': json.dumps([0])
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 5)
# get with unmet filter conditions
resp = self.request('/job/all', user=self.users[0], params={
'types': json.dumps(['nonexisttype'])
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 0)
# User 1, as non site admin, should encounter http 403 (Forbidden)
resp = self.request('/job/all', user=self.users[1])
self.assertStatus(resp, 403)
# Not authenticated user should encounter http 401 (unauthorized)
resp = self.request('/job/all')
self.assertStatus(resp, 401)
def testFiltering(self):
job = self.jobModel.createJob(title='A job', type='t', user=self.users[1], public=True)
job['_some_other_field'] = 'foo'
job = self.jobModel.save(job)
resp = self.request('/job/%s' % job['_id'])
self.assertStatusOk(resp)
self.assertTrue('created' in resp.json)
self.assertTrue('_some_other_field' not in resp.json)
self.assertTrue('kwargs' not in resp.json)
self.assertTrue('args' not in resp.json)
resp = self.request('/job/%s' % job['_id'], user=self.users[0])
self.assertTrue('kwargs' in resp.json)
self.assertTrue('args' in resp.json)
self.jobModel.exposeFields(level=AccessType.READ, fields={'_some_other_field'})
self.jobModel.hideFields(level=AccessType.READ, fields={'created'})
resp = self.request('/job/%s' % job['_id'])
self.assertStatusOk(resp)
self.assertEqual(resp.json['_some_other_field'], 'foo')
self.assertTrue('created' not in resp.json)
def testJobProgressAndNotifications(self):
job = self.jobModel.createJob(title='a job', type='t', user=self.users[1], public=True)
path = '/job/%s' % job['_id']
resp = self.request(path)
self.assertEqual(resp.json['progress'], None)
self.assertEqual(resp.json['timestamps'], [])
resp = self.request(path, method='PUT', user=self.users[1], params={
'progressTotal': 100,
'progressCurrent': 3,
'progressMessage': 'Started',
'notify': 'false',
'status': JobStatus.QUEUED
})
self.assertStatusOk(resp)
self.assertEqual(resp.json['progress'], {
'total': 100,
'current': 3,
'message': 'Started',
'notificationId': None
})
# The status update should make it so we now have a timestamp
self.assertEqual(len(resp.json['timestamps']), 1)
self.assertEqual(resp.json['timestamps'][0]['status'], JobStatus.QUEUED)
self.assertIn('time', resp.json['timestamps'][0])
# If the status does not change on update, no timestamp should be added
resp = self.request(path, method='PUT', user=self.users[1], params={
'status': JobStatus.QUEUED
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['timestamps']), 1)
self.assertEqual(resp.json['timestamps'][0]['status'], JobStatus.QUEUED)
# We passed notify=false, so we should only have the job creation notification
resp = self.request(path='/notification/stream', method='GET',
user=self.users[1], isJson=False,
params={'timeout': 0})
messages = self.getSseMessages(resp)
self.assertEqual(len(messages), 1)
# Update progress with notify=true (the default)
resp = self.request(path, method='PUT', user=self.users[1], params={
'progressCurrent': 50,
'progressMessage': 'Something bad happened',
'status': JobStatus.ERROR
})
self.assertStatusOk(resp)
self.assertNotEqual(resp.json['progress']['notificationId'], None)
# We should now see three notifications (job created + job status + progress)
resp = self.request(path='/notification/stream', method='GET',
user=self.users[1], isJson=False,
params={'timeout': 0})
messages = self.getSseMessages(resp)
job = self.jobModel.load(job['_id'], force=True)
self.assertEqual(len(messages), 3)
creationNotify = messages[0]
progressNotify = messages[1]
statusNotify = messages[2]
self.assertEqual(creationNotify['type'], 'job_created')
self.assertEqual(creationNotify['data']['_id'], str(job['_id']))
self.assertEqual(statusNotify['type'], 'job_status')
self.assertEqual(statusNotify['data']['_id'], str(job['_id']))
self.assertEqual(int(statusNotify['data']['status']), JobStatus.ERROR)
self.assertNotIn('kwargs', statusNotify['data'])
self.assertNotIn('log', statusNotify['data'])
self.assertEqual(progressNotify['type'], 'progress')
self.assertEqual(progressNotify['data']['title'], job['title'])
self.assertEqual(progressNotify['data']['current'], float(50))
self.assertEqual(progressNotify['data']['state'], 'error')
self.assertEqual(progressNotify['_id'], str(job['progress']['notificationId']))
def testDotsInKwargs(self):
kwargs = {
'$key.with.dots': 'value',
'foo': [{
'moar.dots': True
}]
}
job = self.jobModel.createJob(title='dots', type='x', user=self.users[0], kwargs=kwargs)
# Make sure we can update a job and notification creation works
self.jobModel.updateJob(job, status=JobStatus.QUEUED, notify=True)
self.assertEqual(job['kwargs'], kwargs)
resp = self.request('/job/%s' % job['_id'], user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(resp.json['kwargs'], kwargs)
job = self.jobModel.load(job['_id'], force=True)
self.assertEqual(job['kwargs'], kwargs)
job = self.jobModel.filter(job, self.users[0])
self.assertEqual(job['kwargs'], kwargs)
job = self.jobModel.filter(job, self.users[1])
self.assertFalse('kwargs' in job)
def testLocalJob(self):
job = self.jobModel.createLocalJob(
title='local', type='local', user=self.users[0], kwargs={
'hello': 'world'
}, module='plugin_tests.local_job_impl')
self.jobModel.scheduleJob(job)
job = self.jobModel.load(job['_id'], force=True, includeLog=True)
self.assertEqual(job['log'], ['job ran!'])
job = self.jobModel.createLocalJob(
title='local', type='local', user=self.users[0], kwargs={
'hello': 'world'
}, module='plugin_tests.local_job_impl', function='fail')
self.jobModel.scheduleJob(job)
job = self.jobModel.load(job['_id'], force=True, includeLog=True)
self.assertEqual(job['log'], ['job failed'])
def testValidateCustomStatus(self):
job = self.jobModel.createJob(title='test', type='x', user=self.users[0])
def validateStatus(event):
if event.info == 1234:
event.preventDefault().addResponse(True)
def validTransitions(event):
if event.info['status'] == 1234:
event.preventDefault().addResponse([JobStatus.INACTIVE])
with self.assertRaises(ValidationException):
self.jobModel.updateJob(job, status=1234) # Should fail
with events.bound('jobs.status.validate', 'test', validateStatus), \
events.bound('jobs.status.validTransitions', 'test', validTransitions):
self.jobModel.updateJob(job, status=1234) # Should work
with self.assertRaises(ValidationException):
self.jobModel.updateJob(job, status=4321) # Should fail
def testValidateCustomStrStatus(self):
job = self.jobModel.createJob(title='test', type='x', user=self.users[0])
def validateStatus(event):
states = ['a', 'b', 'c']
if event.info in states:
event.preventDefault().addResponse(True)
def validTransitions(event):
if event.info['status'] == 'a':
event.preventDefault().addResponse([JobStatus.INACTIVE])
with self.assertRaises(ValidationException):
self.jobModel.updateJob(job, status='a')
with events.bound('jobs.status.validate', 'test', validateStatus), \
events.bound('jobs.status.validTransitions', 'test', validTransitions):
self.jobModel.updateJob(job, status='a')
self.assertEqual(job['status'], 'a')
with self.assertRaises(ValidationException), \
events.bound('jobs.status.validate', 'test', validateStatus):
self.jobModel.updateJob(job, status='foo')
def testUpdateOtherFields(self):
job = self.jobModel.createJob(title='test', type='x', user=self.users[0])
job = self.jobModel.updateJob(job, otherFields={'other': 'fields'})
self.assertEqual(job['other'], 'fields')
def testCancelJob(self):
job = self.jobModel.createJob(title='test', type='x', user=self.users[0])
# add to the log
job = self.jobModel.updateJob(job, log='entry 1\n')
# Reload without the log
job = self.jobModel.load(id=job['_id'], force=True)
self.assertEqual(len(job.get('log', [])), 0)
# Cancel
job = self.jobModel.cancelJob(job)
self.assertEqual(job['status'], JobStatus.CANCELED)
# Reloading should still have the log and be canceled
job = self.jobModel.load(id=job['_id'], force=True, includeLog=True)
self.assertEqual(job['status'], JobStatus.CANCELED)
self.assertEqual(len(job.get('log', [])), 1)
def testCancelJobEndpoint(self):
job = self.jobModel.createJob(title='test', type='x', user=self.users[0])
# Ensure requires write perms
jobCancelUrl = '/job/%s/cancel' % job['_id']
resp = self.request(jobCancelUrl, user=self.users[1], method='PUT')
self.assertStatus(resp, 403)
# Try again with the right user
jobCancelUrl = '/job/%s/cancel' % job['_id']
resp = self.request(jobCancelUrl, user=self.users[0], method='PUT')
self.assertStatusOk(resp)
self.assertEqual(resp.json['status'], JobStatus.CANCELED)
def testJobsTypesAndStatuses(self):
self.jobModel.createJob(title='user 0 job', type='t1', user=self.users[0], public=False)
self.jobModel.createJob(title='user 1 job', type='t2', user=self.users[1], public=False)
self.jobModel.createJob(title='user 1 job', type='t3', user=self.users[1], public=True)
self.jobModel.createJob(title='user 2 job', type='t4', user=self.users[2])
self.jobModel.createJob(title='anonymous job', type='t5')
self.jobModel.createJob(title='anonymous public job', type='t6', public=True)
# User 1, as non site admin, should encounter http 403 (Forbidden)
resp = self.request('/job/typeandstatus/all', user=self.users[1])
self.assertStatus(resp, 403)
# Admin user gets all types and statuses
resp = self.request('/job/typeandstatus/all', user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['types']), 6)
self.assertEqual(len(resp.json['statuses']), 1)
# standard user gets types and statuses of its own jobs
resp = self.request('/job/typeandstatus', user=self.users[1])
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['types']), 2)
self.assertEqual(len(resp.json['statuses']), 1)
def testDefaultParentId(self):
job = self.jobModel.createJob(title='Job', type='Job', user=self.users[0])
# If not specified, parentId should be None
self.assertEqual(job['parentId'], None)
def testIsParentIdCorrect(self):
parentJob = self.jobModel.createJob(
title='Parent Job', type='Parent Job', user=self.users[0])
childJob = self.jobModel.createJob(
title='Child Job', type='Child Job', user=self.users[0], parentJob=parentJob)
# During initialization parent job should be set correctly
self.assertEqual(childJob['parentId'], parentJob['_id'])
def testSetParentCorrectly(self):
parentJob = self.jobModel.createJob(
title='Parent Job', type='Parent Job', user=self.users[0])
childJob = self.jobModel.createJob(title='Child Job', type='Child Job', user=self.users[0])
self.jobModel.setParentJob(childJob, parentJob)
# After setParentJob is called, the parent job should be set correctly
self.assertEqual(childJob['parentId'], parentJob['_id'])
def testParentCannotBeEqualToChild(self):
childJob = self.jobModel.createJob(title='Child Job', type='Child Job', user=self.users[0])
# Cannot set a job as its own parent
with self.assertRaises(ValidationException):
self.jobModel.setParentJob(childJob, childJob)
def testParentIdCannotBeOverridden(self):
parentJob = self.jobModel.createJob(
title='Parent Job', type='Parent Job', user=self.users[0])
anotherParentJob = self.jobModel.createJob(
title='Another Parent Job', type='Parent Job', user=self.users[0])
childJob = self.jobModel.createJob(
title='Child Job', type='Child Job', user=self.users[0], parentJob=parentJob)
with self.assertRaises(ValidationException):
# If parent job is set, cannot be overridden
self.jobModel.setParentJob(childJob, anotherParentJob)
def testListChildJobs(self):
parentJob = self.jobModel.createJob(
title='Parent Job', type='Parent Job', user=self.users[0])
childJob = self.jobModel.createJob(
title='Child Job', type='Child Job', user=self.users[0], parentJob=parentJob)
self.jobModel.createJob(
title='Another Child Job', type='Child Job', user=self.users[0], parentJob=parentJob)
# Should return a list with 2 jobs
self.assertEqual(len(list(self.jobModel.listChildJobs(parentJob))), 2)
# Should return an empty list
self.assertEqual(len(list(self.jobModel.listChildJobs(childJob))), 0)
def testListChildJobsRest(self):
parentJob = self.jobModel.createJob(
title='Parent Job', type='Parent Job', user=self.users[0])
childJob = self.jobModel.createJob(
title='Child Job', type='Child Job', user=self.users[0], parentJob=parentJob)
self.jobModel.createJob(
title='Another Child Job', type='Child Job', user=self.users[0], parentJob=parentJob)
resp = self.request('/job', user=self.users[0],
params={'parentId': str(parentJob['_id'])})
resp2 = self.request('/job', user=self.users[0],
params={'parentId': str(childJob['_id'])})
self.assertStatusOk(resp)
self.assertStatusOk(resp2)
# Should return a list with 2 jobs
self.assertEqual(len(resp.json), 2)
# Should return an empty list
self.assertEqual(len(resp2.json), 0)
def testCreateJobRest(self):
resp = self.request('/job', method='POST',
user=self.users[0],
params={'title': 'job', 'type': 'job'})
# If user does not have the necessary token status is 403
self.assertStatus(resp, 403)
token = Token().createToken(scope=REST_CREATE_JOB_TOKEN_SCOPE)
resp2 = self.request(
'/job', method='POST', token=token, params={'title': 'job', 'type': 'job'})
# If user has the necessary token status is 200
self.assertStatusOk(resp2)
def testJobStateTransitions(self):
job = self.jobModel.createJob(
title='user 0 job', type='t1', user=self.users[0], public=False)
# We can't move straight to SUCCESS
with self.assertRaises(ValidationException):
job = self.jobModel.updateJob(job, status=JobStatus.SUCCESS)
self.jobModel.updateJob(job, status=JobStatus.QUEUED)
self.jobModel.updateJob(job, status=JobStatus.RUNNING)
self.jobModel.updateJob(job, status=JobStatus.ERROR)
# We shouldn't be able to move backwards
with self.assertRaises(ValidationException):
self.jobModel.updateJob(job, status=JobStatus.QUEUED)
with self.assertRaises(ValidationException):
self.jobModel.updateJob(job, status=JobStatus.RUNNING)
with self.assertRaises(ValidationException):
self.jobModel.updateJob(job, status=JobStatus.INACTIVE)
def testJobSaveEventModification(self):
def customSave(event):
kwargs = json_util.loads(event.info['kwargs'])
kwargs['key2'] = 'newvalue'
event.info['kwargs'] = json_util.dumps(kwargs)
job = self.jobModel.createJob(title='A job', type='t', user=self.users[1], public=True)
job['kwargs'] = {'key1': 'value1', 'key2': 'value2'}
with events.bound('model.job.save', 'test', customSave):
job = self.jobModel.save(job)
self.assertEqual(job['kwargs']['key2'], 'newvalue')
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from tkinter import *
from tkinter.ttk import *
import logging
import sys

import suapp.jandw
import suapp.simple_json as simple_json
from suapp.logdecorator import *
# Extra widgets
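# MultiListbox glues several Listbox columns together so they behave like one
# multi-column table: row selection, insert/delete and scrolling are mirrored
# across every column, and a single vertical Scrollbar drives them all.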
class MultiListbox(Frame):
def __init__(self, master, lists):
Frame.__init__(self, master)
self.lists = []
for l, w in lists:
frame = Frame(self)
frame.pack(side=LEFT, expand=YES, fill=BOTH)
Label(frame, text=l, borderwidth=1, relief=RAISED).pack(fill=X)
lb = Listbox(
frame,
width=w,
borderwidth=0,
selectborderwidth=0,
relief=FLAT,
exportselection=FALSE,
)
lb.pack(expand=YES, fill=BOTH)
self.lists.append(lb)
lb.bind("<B1-Motion>", lambda e, s=self: s._select(e.y))
lb.bind("<Button-1>", lambda e, s=self: s._select(e.y))
lb.bind("<Leave>", lambda e: "break")
lb.bind("<B2-Motion>", lambda e, s=self: s._b2motion(e.x, e.y))
lb.bind("<Button-2>", lambda e, s=self: s._button2(e.x, e.y))
frame = Frame(self)
frame.pack(side=LEFT, fill=Y)
Label(frame, borderwidth=1, relief=RAISED).pack(fill=X)
sb = Scrollbar(frame, orient=VERTICAL, command=self._scroll)
sb.pack(expand=YES, fill=Y)
self.lists[0]["yscrollcommand"] = sb.set
def _select(self, y):
row = self.lists[0].nearest(y)
self.selection_clear(0, END)
self.selection_set(row)
return "break"
def _button2(self, x, y):
for l in self.lists:
l.scan_mark(x, y)
return "break"
def _b2motion(self, x, y):
for l in self.lists:
l.scan_dragto(x, y)
return "break"
def _scroll(self, *args):
for l in self.lists:
l.yview(*args)  # apply() no longer exists in Python 3
def curselection(self):
return self.lists[0].curselection()
def delete(self, first, last=None):
for l in self.lists:
l.delete(first, last)
def get(self, first, last=None):
result = []
for l in self.lists:
result.append(l.get(first, last))
if last:
# Transpose the per-column results into per-row tuples (Python 3 replacement
# for the old apply(map, [None] + result) idiom; the columns stay equal length).
return list(zip(*result))
return result
def index(self, index):
return self.lists[0].index(index)
def insert(self, index, *elements):
for e in elements:
i = 0
for l in self.lists:
l.insert(index, e[i])
i = i + 1
def size(self):
return self.lists[0].size()
def see(self, index):
for l in self.lists:
l.see(index)
def selection_anchor(self, index):
for l in self.lists:
l.selection_anchor(index)
def selection_clear(self, first, last=None):
for l in self.lists:
l.selection_clear(first, last)
def selection_includes(self, index):
return self.lists[0].selection_includes(index)
def selection_set(self, first, last=None):
for l in self.lists:
l.selection_set(first, last)
# END of extra widgets
class ToplevelWooster(Toplevel, suapp.jandw.Wooster):
closed = False
@loguse
def preClose(self):
pass
# Overriding this disables the [X]
@loguse
def destroy(self):
"""
When you click the [X] (or the parent window gets destroyed)
"""
self.closed = True
# Calling parent destroy() to actually destroy it.
self.preClose()
super().destroy()
@loguse
def close(self):
"""
Close as Wooster
"""
self.destroy()
ipsum = """Lorem ipsum dolor sit amet, consectetur adipiscing elit. \
Suspendisse pretium sapien sit amet magna viverra id faucibus nibh condimentum. \
Fusce dui magna, venenatis vel elementum eu, suscipit at quam. Nullam sed felis lorem, vel pretium leo. \
Cras at neque orci. \
Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. \
Pellentesque ullamcorper quam eu ligula mattis tempor. \
Aliquam erat volutpat. Vivamus feugiat aliquam purus, \
euismod blandit lacus scelerisque vel. \
Phasellus sodales dignissim eros nec accumsan. \
Aenean est lectus, placerat sed ornare ut, congue nec nulla. \
Phasellus at sapien purus. \
Maecenas et sapien at ipsum fringilla tempus.
Donec non quam mauris. \
Vivamus bibendum ante id velit rhoncus molestie. \
Nullam bibendum suscipit elit sed aliquam. \
Maecenas felis magna, laoreet ut volutpat eget, varius at lorem. \
Quisque nisl dolor, aliquet et vestibulum nec, ullamcorper quis libero. \
Aliquam erat volutpat. \
Aenean mauris augue, varius sit amet iaculis quis, rhoncus at eros. \
Phasellus luctus congue cursus. \
Praesent lobortis vestibulum cursus. \
Nulla euismod nisi ac diam semper molestie. \
Curabitur quis leo at velit ultrices consectetur in in tellus. \
Nunc auctor erat ac justo porttitor consectetur. \
Cras vestibulum elit nec nulla consectetur euismod. \
Curabitur sodales vehicula lacus quis iaculis. \
Nulla laoreet eros eget ipsum commodo aliquet.
Cras gravida egestas nunc a hendrerit. \
Quisque consequat arcu ac dolor bibendum a porttitor augue malesuada. \
Etiam suscipit augue ac eros lobortis auctor. \
Vivamus urna sem, ultrices non ultrices in, sodales id justo. \
Aenean gravida bibendum risus, a pharetra arcu rhoncus quis. \
Maecenas interdum fringilla dapibus. \
Sed non diam massa, a sodales eros. \
Cras interdum nibh leo. \
Praesent non urna turpis. \
Pellentesque posuere massa vel ligula porta sed egestas arcu lacinia. \
Curabitur rutrum ultrices vestibulum. \
Mauris posuere, arcu id tempor volutpat, lacus nisl euismod felis, non facilisis purus sem ac sem. \
Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. \
Mauris posuere ornare elit non congue. \
Praesent pretium, nisl eget pulvinar pulvinar, nibh purus sollicitudin tellus, eget malesuada lacus nulla vel neque.
In non elit nisi. \
Phasellus porta vestibulum erat non condimentum. \
Sed sed augue neque, sed venenatis eros. \
In et nunc neque. \
Mauris mi velit, dignissim id vehicula at, semper vitae est. \
Ut porta lacus eget ipsum faucibus vel luctus metus scelerisque. \
Duis ipsum nulla, rutrum sit amet ultricies et, sodales vitae sem. \
Donec suscipit aliquam diam, id mollis nibh gravida vel. \
Quisque in blandit lectus. \
Fusce interdum nulla turpis, sit amet venenatis metus. \
Praesent ac tristique tortor.
Suspendisse ut magna sed nulla cursus rutrum. \
Suspendisse in lacus dui, non bibendum tellus. Nulla facilisi. \
Morbi urna justo, volutpat eu mattis id, facilisis sed sapien. \
Vestibulum vitae sapien orci. \
Proin aliquet euismod neque, vel sodales nulla hendrerit vitae. \
Sed auctor euismod adipiscing. \
Curabitur feugiat dui quis lorem gravida a mattis eros vehicula. \
Aenean eget ullamcorper nunc. \
Mauris sagittis eros aliquam dolor varius sit amet eleifend dolor placerat. \
Vestibulum nec dui metus. \
Nullam nec ante in lectus faucibus condimentum. \
Donec ac enim purus, et bibendum erat. \
Aliquam vestibulum tempus consectetur."""
class TableWindow(ToplevelWooster):
name = "TABLE"
@loguse
def __init__(self, table, master=None):
Toplevel.__init__(self, master, class_="Table")
self.data = table
self.title("Table %s" % ("TODO"))
# Don't make it smaller than this
self.minsize(200, 100)
# So it doesn't get in the task bar as a separate window but as a child of the master/parent
self.transient(master)
self.grid()
@loguse
def goTo(self, key):
self.jeeves.drone(
self, "RECORD", self.jeeves.MODE_OPEN, {"key": key, "table": self.data}
)
@loguse
def goToFunction(self, key):
"""
Needed to create this intermediate function because 'key' is rebound on each loop iteration, so without it every button would go to the last 'key'
"""
return lambda: self.goTo(key)
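# Late-binding illustration (hypothetical): [f() for f in [lambda: k for k in range(3)]]
# yields [2, 2, 2] because every lambda reads the final value of k; routing the key
# through this factory freezes the value per button instead.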
@loguse
def createWidgets(self):
row = 0
# if type(self.data) == type({}):
self.listbox = MultiListbox(
self, [["ID", 20], ["Represantation", 50]]
) # Listbox(self)
try:
for key in self.data:
# TODO: build in a maximum so we take maximum advantage of iterators
label = Button(self, text="%s" % (key), command=self.goToFunction(key))
label.grid(column=0, row=row, sticky=E + W)
text = Entry(self)
text.insert(0, "%s" % (self.data[key]))
text.config(state=DISABLED)
text.grid(column=1, row=row, sticky=E + W)
self.listbox.insert(END, ["%s" % (key), "%s" % (self.data[key])])
row += 1
except Exception as err:
logging.getLogger(self.__module__).error(
": TableWindow[%r].createWidgets : Not iterable? %s" % (self, err)
)
raise
self.listbox.grid(column=0, columnspan=2, row=row, sticky=E + W)
row += 1
self.buttonS = Button(self, text="Close", command=self.close)
self.buttonS.grid(column=0, row=row, sticky=E + W)
self.update()
@loguse
def inflow(self, jeeves, drone):
self.jeeves = jeeves
# TODO: Need to apply the filter in drone.dataobject on self.data
self.createWidgets()
class Table(suapp.jandw.Wooster):
@loguse
def inflow(self, jeeves, drone):
# TODO: should use the data API for this
population = {
"(131GAOC)298": {
"ID": "(131GAOC)298",
"band": "BGC/BR23/2011/298",
"father": "(AC62)131",
"mother": "GAOC",
"line": "GA25/(GAOC*)",
},
"(AC62)131": {
"ID": "(AC62)131",
"band": "BGC/BR23/2010/131",
"father": "AC",
"mother": "(GOVAYF)62",
"line": "GA25/AC*",
},
"AC": {"ID": "AC", "line": "GA25/AC"},
"(GOVAYF)62": {
"ID": "(GOVAYF)62",
"band": "BGC/BR23/2008/62",
"father": "GOc",
"mother": "VAYF",
"line": "FS2/GO*HH731/VAYF",
},
"GOc": {"ID": "GOc", "line": "FS2/GO"},
"VAYF": {"ID": "VAYF", "line": "HH731/VAYF"},
"GAOC": {"ID": "GAOC", "line": "GA25/GOAC"},
}
table = population
if "table" in drone.dataobject:
if "tables" in drone.dataobject:
if drone.dataobject["table"] in drone.dataobject["tables"]:
# print("Table: %s: %s" % (drone.dataobject['table'], drone.dataobject['tables'][drone.dataobject['table']]))
table = drone.dataobject["tables"][drone.dataobject["table"]]
if isinstance(drone.fromvertex, Frame):
logging.getLogger(self.__module__).debug(
": TableFactory[%r].inflow : Using parent" % (self)
)
# TODO: if it has been closed/destroyed then give the parent of fromvertex instead?
window = TableWindow(table, drone.fromvertex)
else:
logging.getLogger(self.__module__).debug(
": TableFactory[%r].inflow : Not using parent" % (self)
)
window = TableWindow(table)
window.inflow(jeeves, drone)
window.lift()
window.focus_set()
class RecordWindow(ToplevelWooster):
@loguse
def __init__(self, master=None):
Toplevel.__init__(self, master, class_="About")
self.title("Record")
# Don't make it smaller than this.
self.minsize(50, 50)
# So it doesn't get in the task bar as a separate window but as a child of the master/parent.
self.transient(master)
self.grid()
@loguse
def __edit(self, key):
print("Key: %s" % (key)) # DELME
# If the value is composite (map), open a RecordWindow modal
# If the value is singular, open a EditWindow modal
return
@loguse
def __editFunction(self, key):
return lambda: self.__edit(key)
@loguse
def __style(self):
s = Style()
styles = s.theme_names()
i = styles.index(s.theme_use())
x = styles[0]
try:
x = styles[i + 1]
except IndexError:
# Already at the last style, so wrap around to the first one.
pass
s.theme_use(x)
return "SWITCHING TO STYLE %s" % (x)
@loguse
def createWidgets(self):
row = 0
try:
for key in self.table.configuration["fields"]:
if "auto_increment" in self.table.configuration["fields"][key]:
if self.table.configuration["fields"][key]["auto_increment"]:
continue
label = Label(self, text="%s: " % (key))
label.grid(column=0, row=row, sticky=E)
text = Entry(self) # , textvariable=self.data[key])
if key in self.data:
if "toVisual" in self.table.configuration["fields"][key]:
text.insert(
0,
"%s"
% self.table.configuration["fields"][key]["toVisual"](
self.data[key]
),
)
else:
text.insert(0, "%s" % (self.data[key]))
text.config(state=DISABLED)
text.grid(column=1, row=row, sticky=E + W)
button = Button(
self, text="\u2026", width=2, command=self.__editFunction(key)
) # Alternatives: black left pointing index \u261a ; triangular bullet \u2023 ; horizontal ellipsis \u2026
button.grid(column=2, row=row, sticky=E)
self.columnconfigure(2, weight=0)
row += 1
except Exception as err:
logging.getLogger(self.__module__).error(
": RecordWindow[%r].createWidgets : Not iterable? %s" % (self, err)
)
raise
self.buttonS = Button(self, text="Save", command=self.close) # u25b6
self.buttonS.grid(column=0, row=row)
self.buttonC = Button(self, text="Cancel", command=self.__style) # u25b6
self.buttonC.grid(column=1, row=row, sticky=W)
self.update()
@loguse
def inflow(self, jeeves, drone):
self.jeeves = jeeves
self.table = drone.dataobject.get("table", None)
self.data = drone.dataobject.get("object", drone.dataobject.get("key", None))
self.createWidgets()
class Record(suapp.jandw.Wooster):
"""
Note: it will open a new window. If you want to avoid doubles open, use subclass UniqueRecordFactory
"""
@loguse
def inflow(self, jeeves, drone):
if isinstance(drone.fromvertex, Frame):
logging.getLogger(self.__module__).debug(
": RecordFactory[%r].inflow : Using parent" % (self)
)
# TODO: if it has been closed/destroyed then give the parent of fromvertex instead?
RecordWindow(drone.fromvertex).inflow(jeeves, drone)
else:
logging.getLogger(self.__module__).debug(
": RecordFactory[%r].inflow : Not using parent" % (self)
)
RecordWindow().inflow(jeeves, drone)
class UniqueRecord(Record):
windowreferences = {}
@loguse
def inflow(self, jeeves, drone):
# TODO replace the id and object!
table = drone.dataobject["table"]
if "key" in drone.dataobject:
key = drone.dataobject["key"]
elif "object" in drone.dataobject:
object = drone.dataobject["object"]
key = table.getKey(object)
# We looked it up, so we might just as well put it in.
drone.dataobject["key"] = key
logging.getLogger(self.__module__).debug(
": UniqueRecordFactory[%r].inflow : windowreferences: %s in %s"
% (self, key, self.windowreferences)
)
if key in self.windowreferences:
logging.getLogger(self.__module__).debug(
": UniqueRecordFactory[%r].inflow : Reusing for %s" % (self, key)
)
if self.windowreferences[key].closed:
logging.getLogger(self.__module__).debug(
": UniqueRecordFactory[%r].inflow : Oops that one is already closed so not reusing it."
% (self)
)
del self.windowreferences[key]
if key not in self.windowreferences:
if isinstance(drone.fromvertex, Frame):
logging.getLogger(self.__module__).debug(
": UniqueRecordFactory[%r].inflow : Using parent" % (self)
)
# TODO: if it has been closed/destroyed then give the parent of fromvertex instead?
self.windowreferences[key] = RecordWindow(drone.fromvertex)
else:
logging.getLogger(self.__module__).debug(
": UniqueRecordFactory[%r].inflow : Not using parent" % (self)
)
self.windowreferences[key] = RecordWindow()
self.windowreferences[key].inflow(jeeves, drone)
self.windowreferences[key].lift()
self.windowreferences[key].focus_set()
class AboutWindow(ToplevelWooster):
@loguse
def __init__(self, master=None):
Toplevel.__init__(self, master, class_="About")
self.title("About")
# Don't make it smaller than this
self.minsize(300, 200)
# So it doesn't get in the task bar as a separate window but as a child of the master/parent
self.transient(master)
self.grid()
self.createWidgets()
@loguse
def createWidgets(self):
self.text = Text(self, wrap=WORD)
self.text.config(state=DISABLED)
self.text.grid()
self.button = Button(self, text="Close", command=self.close)
self.button.grid()
self.update()
@loguse
def inflow(self, jeeves, drone):
self.jeeves = jeeves
# Looking for txt file
text = []
file_name = self.jeeves.app.configuration["self"].rsplit(".", 1)[0]
file_name += ".txt"
try:
with open(file_name) as fh:
for line in fh:
text.append(line)
except OSError:
# IOError is just an alias of OSError on Python 3, so one handler suffices.
logging.getLogger(self.__module__).warning(
"Could not open about file %s.", file_name, exc_info=sys.exc_info()
)
if not text:
text = ["ERROR: Could not open file %s." % (file_name)]
self.text.config(state=NORMAL)
self.text.delete("1.0", END)
self.text.insert(END, "\n".join(text))
self.text.config(state=DISABLED)
self.update()
class About(suapp.jandw.Wooster):
"""
Perhaps I should replace the FACTORY with just a FUNCTION (though it doesn't make a lot of difference in python)
"""
@loguse
def __init__(self):
self.window = None
@loguse
def inflow(self, jeeves, drone):
if self.window:
if self.window.closed:
self.window = None
if not self.window:
if isinstance(drone.fromvertex, Frame):
logging.getLogger(self.__module__).debug(
": About[%r].inflow : Using parent" % (self)
)
# TODO: if it has been closed/destroyed then give the parent of fromvertex instead?
self.window = AboutWindow(drone.fromvertex)
else:
logging.getLogger(self.__module__).debug(
": About[%r].inflow : Not using parent" % (self)
)
self.window = AboutWindow()
self.window.inflow(jeeves, drone)
self.window.focus_set()
class Application(Frame, suapp.jandw.Wooster):
name = "APP"
dataobject = {"name": "SuApp"}
testdata = {"ID": "(150112)164", "ring": "BGC/BR23/10/164"}
tables = {}
@loguse
def __init__(self, master=None):
Frame.__init__(self, master, class_="Application")
# Default title, could be overriden in inflow
self._root().title("SuApp")
self._root().minsize(400, 300)
self.grid()
# Moved to inflow self.createWidgets()
def openTableWindow(self, tablename):
"""
Needed to create this intermediate function because 'tablename' is rebound on each loop iteration, so without it every menu entry would open the last table
"""
return lambda: self.__testTable(tablename)
@loguse
def createWidgets(self):
top = self.winfo_toplevel()
self.menuBar = Menu(top, tearoff=0)
top["menu"] = self.menuBar
self.menuFile = Menu(self.menuBar, tearoff=0)
self.menuBar.add_cascade(label="File", menu=self.menuFile)
self.menuFile.add_command(label="Quit", command=self.close)
self.menuRecord = Menu(self.menuBar, tearoff=0)
self.menuBar.add_cascade(label="Record", menu=self.menuRecord)
self.menuRecord.add_command(
label="Test record (\u20ac)", command=self.__testRecord
)
self.menuTable = Menu(self.menuBar, tearoff=0)
self.menuBar.add_cascade(label="Table", menu=self.menuTable)
for table in self.tables:
self.menuTable.add_command(
label="%s" % (table), command=self.openTableWindow(table)
)
self.menuHelp = Menu(self.menuBar, tearoff=0)
self.menuBar.add_cascade(label="Help", menu=self.menuHelp)
self.menuHelp.add_command(label="About", command=self.__about)
self.menuHelp.add_command(label="Configuration", command=self.__configuration)
@loguse
def __testTable(self, tablename):
self.jeeves.drone(
self,
"TABLE",
self.jeeves.MODE_OPEN,
{"tables": self.tables, "table": tablename},
)
@loguse
def __testRecord(self):
self.jeeves.drone(
self,
"RECORD",
self.jeeves.MODE_OPEN,
{"table": self.tables["organism"], "object": self.testdata},
)
@loguse
def __about(self):
self.jeeves.drone(self, "ABOUT", self.jeeves.MODE_MODAL, None)
@loguse
def __configuration(self):
self.jeeves.drone(self, "CONFIGURATION", self.jeeves.MODE_MODAL, None)
@loguse
def inflow(self, jeeves, drone):
if drone.dataobject:
self.dataobject = drone.dataobject
if "name" not in self.dataobject:
self.dataobject["name"] = "SuApp"
self._root().title(self.dataobject["name"])
if "tables" in self.dataobject:
logging.getLogger(self.__module__).debug(
": Application[%r].inflow() : Setting tables." % (self)
)
self.tables = self.dataobject["tables"]
self.jeeves = jeeves
self.createWidgets() # Perhaps this needs to be in mainloop() so we can do a refresh?
self.mainloop()
@loguse
def lock(self):
pass
@loguse
def unlock(self):
pass
@loguse
def destroy(self):
"""
When you click the [X]
"""
self.preClose()
super().destroy()
@loguse
def preClose(self):
pass
@loguse
def close(self):
"""
Close as Wooster
"""
self.destroy()
self.quit()
class ConfigurationWindow(ToplevelWooster):
@loguse
def __init__(self, master=None):
Toplevel.__init__(self, master, class_="Configuration")
self.title("Configuration")
# Don't make it smaller than this
self.minsize(300, 200)
# So it doesn't get in the task bar as a separate window but as a child of the master/parent
self.transient(master)
self.grid()
self.createWidgets()
@loguse
def createWidgets(self):
self.text = Text(self, wrap=WORD)
self.text.config(state=DISABLED)
self.text.grid()
self.button = Button(self, text="Close", command=self.close)
self.button.grid()
self.update()
@loguse
def inflow(self, jeeves, drone):
self.jeeves = jeeves
self.text.config(state=NORMAL)
self.text.delete(1.0, END)
print(type(self.jeeves.app.configuration))
title = "Configuration:"
self.text.insert(
END,
"%s\n\n%s"
% (
title,
simple_json.dumps(dict(self.jeeves.app.configuration), indent=" "),
),
)
self.text.tag_add("title", "1.0", "1.%s" % len(title))
self.text.tag_config("title", background="black", foreground="yellow")
self.text.config(state=DISABLED)
self.update()
class Configuration(suapp.jandw.Wooster):
name = "CONFIGURATION"
"""
Perhaps I should replace the FACTORY with just a FUNCTION (though it doesn't make a lot of difference in python)
"""
@loguse
def __init__(self):
self.window = None
@loguse
def inflow(self, jeeves, drone):
if self.window:
if self.window.closed:
self.window = None
if not self.window:
if isinstance(drone.fromvertex, Frame):
logging.getLogger(self.__module__).debug(
": Configuration[%r].inflow : Using parent" % (self)
)
# TODO: if it has been closed/destroyed then give the parent of fromvertex instead?
self.window = ConfigurationWindow(drone.fromvertex)
else:
logging.getLogger(self.__module__).debug(
": Configuration[%r].inflow : Not using parent" % (self)
)
self.window = ConfigurationWindow()
self.window.inflow(jeeves, drone)
self.window.focus_set()
class View(suapp.jandw.Wooster):
"""
Generic view page for showing.
"""
@loguse("@") # Not logging the return value.
def inflow(self, jeeves, drone):
"""
Entry point for the view.
"""
self.jeeves = jeeves
# Getting the flow (usually uppercase) and ref (lowercase) names.
flow_name = drone.name
ref_name = flow_name.lower()
name = ref_name.capitalize()
# Getting the view definition.
definition = jeeves.views.get(flow_name, {})
# Getting the session, params and preparing the scope.
session = drone.dataobject.get("session", {})
# Setting default database paging parameters for the query.
query_params = {"pagenum": 1, "pagesize": 5}
# Getting the http request params.
for param in drone.dataobject["params"]:
query_params[param] = drone.dataobject["params"][param][0]
scope = {} # NOTUSED
# scope.update(jeeves.ormscope) # jeeves.ormscope is always empty.
# JS parameters
js_params = {}
if drone.dataobject:
js_params[
"query"
] = "query/%(query)s?pagenum=%(pagenum)s&pagesize=%(pagesize)s"
result = []
js_params["service_url"] = jeeves.app.configuration["httpd"]["service_url"]
""""
# Title
title = definition.get('name', name)
if logging.getLogger(self.__module__).isEnabledFor(logging.DEBUG):
html.append('<!-- DEBUG title = %s -->' % (title))
def_tabs = definition.get('tabs', {0: {'title': ''}})
if logging.getLogger(self.__module__).isEnabledFor(logging.DEBUG):
html.append('<!-- DEBUG def_tabs = %s -->' % (def_tabs))
tabs = collections.OrderedDict()
tab_count = 0
if 'query' in def_tabs:
if logging.getLogger(self.__module__).isEnabledFor(logging.DEBUG):
html.append('<!-- DEBUG query = %s -->' % (def_tabs['query']))
tab_title = def_tabs.get('title', name)
tab_objects = self.jeeves.do_query(def_tabs['query'], params=query_params)
parameters = self.jeeves.pre_query(def_tabs['query'], params=query_params)[1]
parameters['query'] = def_tabs['query']
js_query_params = with_expanded_values(js_params, params=parameters)
js_query_params['view'] = "testview" # TODO
html.append(View.raw_js % (js_query_params))
# TESTVIEW
html.append('<table id="testview">')
html.append('</table>')
if logging.getLogger(self.__module__).isEnabledFor(logging.DEBUG):
html.append('<!-- DEBUG tab_objects = %s -->' % (tab_objects))
for tab in tab_objects:
if tab_title[0] == ".":
tabs[tab_count] = (getattr(tab, tab_title[1:]), tab)
else:
tabs[tab_count] = (tab_title, tab)
tab_count += 1
else:
# Loop over all integer keys and get out the titles.
for i in def_tabs:
# TODO: is this second element in the tuple correct?
tabs[i] = (def_tabs[i]['title'], def_tabs[i])
if logging.getLogger(self.__module__).isEnabledFor(logging.DEBUG):
html.append('<!-- DEBUG tabs: %s -->' % (tabs))
# Tab headers
html.append('<ul class="nav nav-tabs">')
for i in sorted(tabs): # CHECKME: DO WE NEED SORTED HERE?
if i is 0:
html.append('\t<li class="active"><a data-toggle="tab" href="#tab%s">%s</a></li>' % (i, tabs[i][0]))
else:
html.append('\t<li><a data-toggle="tab" href="#tab%s">%s</a></li>' % (i, tabs[i][0]))
html.append('</ul>')
# Tabs
html.append('<div class="tab-content">')
for i in sorted(tabs):
if logging.getLogger(self.__module__).isEnabledFor(logging.DEBUG):
html.append('<!-- DEBUG tab: %s = %s -->' % (i, tabs[i]))
if i is 0:
html.append('\t<div id="tab%s" class="tab-pane fade in active">' % (i))
else:
html.append('\t<div id="tab%s" class="tab-pane fade">' % (i))
# Sections
sections = tabs[i][1].get('sections', definition.get('sections', {0: {'title': ''}}))
if logging.getLogger(self.__module__).isEnabledFor(logging.DEBUG):
html.append('<!-- DEBUG sections: %s -->' % (sections))
for s in sorted(sections.keys(), key=str):
if logging.getLogger(self.__module__).isEnabledFor(logging.DEBUG):
html.append('<!-- DEBUG section: %s -->' % (s))
if not str(s).isdigit():
continue
section_title = sections[s].get('title', '')
html.append('\t\t<div class="panel panel-default">') # panel-primary <> panel-default ?
if section_title != tabs[i][0]:
html.append('\t\t\t<div class="panel-heading">%s</div>' % (section_title))
html.append('\t\t\t<div class="panel-body" id="section%s_%s">' % (i, s))
# Lines
lines = sections[s].get('lines', tabs.get('lines', definition.get('sections', {0: {'title': ''}})))
# DEBUGGING
if logging.getLogger(self.__module__).isEnabledFor(logging.DEBUG):
html.append('<!-- DEBUG lines = %s -->' % (lines))
line_objects = []
if 'query' in lines:
line_objects = self.jeeves.do_query(lines['query'], params=query_params)
query_template, parameters = self.jeeves.pre_query(lines['query'], params=query_params)
parameters['query'] = lines['query']
js_query_params = with_expanded_values(js_params, params=parameters)
js_query_params['view'] = "section%s_%s" % (i, s)
if 'elements' in lines:
for e in sorted(lines['elements']):
value = lines['elements'][e].get('value', '#')
element_type = lines['elements'][e].get('type', 'label').lower()
outmessage = lines['elements'][e].get('outmessage', '')
if value[0] == ".":
value = '\' + data["objects"][elementid]["' + value[1:] + '"] + \''
html_element = " "
if element_type == "button":
# Button
html_element = View._button_as_button(
value=value,
outmessage=outmessage
)
else:
# Label
pass
js_query_params['html'] = html_element
html.append(View.raw_js % (js_query_params))
else:
for line_object in line_objects:
if logging.getLogger(self.__module__).isEnabledFor(logging.DEBUG):
html.append('<!-- DEBUG line_object = %s (%s in %s) -->' % (line_object, type(line_object), line_object.__module__))
line_elements = []
# Line elements
if 'elements' in lines:
for e in sorted(lines['elements']):
if logging.getLogger(self.__module__).isEnabledFor(logging.DEBUG):
html.append('<!-- DEBUG element = %s -->' % (e))
value = lines['elements'][e].get('value', '#')
element_type = lines['elements'][e].get('type', 'label').lower()
outmessage = lines['elements'][e].get('outmessage', '')
if value[0] == ".":
value = getattr(line_object, value[1:])
if element_type == "button":
html.append("\t\t\t\t" +
View.button(
value=value,
outmessage=outmessage,
module=line_object.__module__,
table=line_object.__class__.__name__.split("_")[-1],
key=line_object._pk_
)
)
else:
html.append("\t\t\t\t" + View.label(value=value))
for l in sorted(lines.keys(), key=str):
if str(l).isdigit():
# Line elements
if 'elements' in lines[l]:
for e in sorted(lines[l]['elements']):
if logging.getLogger(self.__module__).isEnabledFor(logging.DEBUG):
html.append('<!-- DEBUG element = %s -->' % (e))
value = lines[l]['elements'][e].get('value', '#')
l_type = lines[l]['elements'][e].get('type', 'button').lower()
outmessage = lines[l]['elements'][e].get('outmessage', '')
if value[0] == ".":
value = getattr(line_object, value[1:])
if element_type == "button":
html.append("\t\t\t" +
View.button(
value=value,
outmessage=outmessage,
module=line_object.__module__,
table=line_object.__class__.__name__.split("_")[-1],
key=line_object._pk_
)
)
else:
html.append("\t\t\t" + View.label(value=value))
html.append('\t\t\t</div>')
html.append('\t\t</div>')
html.append('\t</div>')
html.append('</div>')
return (title, "\n".join(html))
"""
if __name__ == "__main__":
logging.getLogger(__name__).setLevel(logging.DEBUG) # DEBUG/INFO
flow = suapp.jandw.Jeeves()
flow.flow = {
"": {
"START": suapp.jandw.Drone("START", Application()),
"RECORD": suapp.jandw.Drone("RECORD", UniqueRecord()),
},
"APP": {
"ABOUT": suapp.jandw.Drone("ABOUT", About()),
"TABLE": suapp.jandw.Drone("TABLE", Table()),
},
}
flow.start()
|
|
import runStatus
import logSetup
import logging
import gc
import time
import os
import multiprocessing
import signal
import cProfile
import traceback
import threading
import sys
import queue
# from pympler.tracker import SummaryTracker, summary, muppy
# import tracemalloc
import sqlalchemy.exc
from sqlalchemy.sql import text
from sqlalchemy.sql import func
if '__pypy__' in sys.builtin_module_names:
import psycopg2cffi as psycopg2
else:
import psycopg2
is_pypy = '__pypy__' in sys.builtin_module_names
if __name__ == "__main__":
logSetup.initLogging()
import config
import runStatus
# import Misc.install_vmprof
import WebMirror.Engine
import WebMirror.rules
import common.util.urlFuncs as urlFuncs
import common.database as db
import WebMirror.JobDispatcher
import WebMirror.UrlUpserter
import RawArchiver.RawJobDispatcher
import RawArchiver.RawRunner
import WebMirror.Runner
URL_UPSERTER_QUEUE_SIZE = 25000
MAX_IN_FLIGHT_JOBS = 250
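# URL_UPSERTER_QUEUE_SIZE caps the aggregator's inbound multiprocessing.Queue;
# MAX_IN_FLIGHT_JOBS is used (doubled) to size the placeholder response queue
# in the raw-archive run mode below. Both read as tuning knobs rather than
# hard protocol limits.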
class MultiJobManager(object):
def __init__(self, max_tasks, target, target_args=None, target_kwargs=None):
self.max_tasks = max_tasks
self.target = target
self.target_args = target_args if target_args else ()
self.target_kwargs = target_kwargs if target_kwargs else {}
self.procno = 0
self.log = logging.getLogger("Main.Job.Launcher")
self.tasklist = {}
for x in range(max_tasks):
self.tasklist[x] = None
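# tasklist maps a fixed worker-slot index to its Process (None until the slot
# is first started); check_run_jobs() respawns any slot whose process has died
# and joins the exited process it replaced.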
def check_run_jobs(self):
print(("Tasklist: ", self.tasklist))
living = sum([task.is_alive() for task in self.tasklist.values() if task])
dead = []
for x in range(self.max_tasks):
self.log.info("Checking runstate of %s -> %s", x, self.tasklist[x] and self.tasklist[x].is_alive())
if not self.tasklist[x] or not self.tasklist[x].is_alive():
self.log.info("Thread %s appears to not be alive!", x)
self.log.warning("Insufficent living child threads! Creating another thread with number %s", self.procno)
self.log.info("Target func: %s", self.target)
if self.tasklist[x]:
dead.append(self.tasklist[x])
args = (self.procno, )
kwargs = self.target_kwargs
kwargs['total_worker_count'] = self.max_tasks
kwargs['worker_num'] = x
if self.target_args:
args += self.target_args
with logSetup.stdout_lock:
proc = multiprocessing.Process(target=self.target, args=args, kwargs=kwargs)
# proc = threading.Thread(target=self.target, args=(self.procno, ) + self.target_args, kwargs=self.target_kwargs)
proc.start()
while not ( # Wait until
proc.is_alive() # the process is running
or proc.exitcode is not None): # or has exited
time.sleep(0.25)
self.log.info("Waiting for process to start!")
self.tasklist[x] = proc
self.procno += 1
cleaned = 0
for dead_task in dead:
dead_task.join()
cleaned += 1
if cleaned > 0:
self.log.warning("Run manager cleared out %s exited task instances.", cleaned)
return len(self.tasklist)
def join_jobs(self, flushqueues):
self.log.info("Run manager waiting on tasks to exit. Runstate = %s", runStatus.run_state.value)
while 1:
living = sum([task and task.is_alive() for task in self.tasklist.values()])
self.log.info("Living processes: '%s'", living)
for task in self.tasklist.values():
task.join(3.0/(living+1))
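# Drain the shared queues while waiting: a child process cannot exit until its
# queue feeder thread has flushed, so leaving items enqueued here can deadlock
# the join above.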
for job_queue in flushqueues:
try:
while 1:
job_queue.get_nowait()
except queue.Empty:
pass
living = sum([task and task.is_alive() for task in self.tasklist.values()])
if living == 0:
break
class Crawler(object):
def __init__(self,
main_thread_count,
raw_thread_count,
lowrate,
):
self.lowrate = lowrate
self.process_lookup = {}
self.log = logging.getLogger("Main.Text.Manager")
WebMirror.rules.load_rules()
self.log.info("Scraper executing with %s main processes, %s raw scraper threads.", main_thread_count, raw_thread_count)
self.main_thread_count = main_thread_count
self.raw_thread_count = raw_thread_count
def start_aggregator(self):
agg_queue = multiprocessing.Queue(maxsize=URL_UPSERTER_QUEUE_SIZE)
with logSetup.stdout_lock:
self.main_job_agg = multiprocessing.Process(target=WebMirror.UrlUpserter.UpdateAggregator.launch_agg, args=(agg_queue, ))
self.main_job_agg.start()
return agg_queue
def join_aggregator(self):
self.log.info("Asking Aggregator process to stop.")
runStatus.agg_run_state.value = 0
if hasattr(self, 'main_job_agg'):
while 1:
try:
self.main_job_agg.join(timeout=1)
break
except multiprocessing.TimeoutError:
print("Failed to join main_job_agg")
self.log.info("Aggregator joined.")
def start_main_job_fetcher(self):
self.main_job_fetcher = WebMirror.JobDispatcher.RpcJobManagerWrapper(lowrate=self.lowrate)
return self.main_job_fetcher.get_queues()
def start_raw_job_fetcher(self):
self.raw_job_fetcher = RawArchiver.RawJobDispatcher.RawJobFetcher()
return self.raw_job_fetcher.get_queue()
def join_job_fetcher(self):
if hasattr(self, 'main_job_fetcher'):
self.log.info("Asking main job source task to halt.")
self.main_job_fetcher.join_proc()
if hasattr(self, 'raw_job_fetcher'):
self.log.info("Asking raw job source task to halt.")
self.raw_job_fetcher.join_proc()
self.log.info("Job source halted.")
def launchProcessesFromQueue(self, processes, job_in_queue):
pass
def run_raw(self):
assert self.main_thread_count >= 1
assert self.raw_thread_count >= 1
# Dummy queues to shut up the teardown garbage
new_url_aggreator_queue = multiprocessing.Queue(maxsize=MAX_IN_FLIGHT_JOBS * 2)
raw_new_job_queue = self.start_raw_job_fetcher()
raw_kwargs = {
'response_queue' : new_url_aggreator_queue,
'new_job_queue' : raw_new_job_queue,
'cookie_lock' : runStatus.cookie_lock,
}
rawManager = MultiJobManager(max_tasks=self.raw_thread_count, target=RawArchiver.RawRunner.RawRunInstance.run, target_kwargs=raw_kwargs)
managers = [rawManager]
drain_queues = [raw_new_job_queue]
flush_queues = [new_url_aggreator_queue, raw_new_job_queue]
self.status_call = self.raw_job_fetcher.get_status
self.running_call = self.raw_job_fetcher.is_running
self._runloop(managers, drain_queues)
self._teardown(managers, drain_queues, flush_queues)
def run(self):
assert self.main_thread_count >= 1
assert self.raw_thread_count >= 1
new_url_aggreator_queue = self.start_aggregator()
main_new_job_queue = self.start_main_job_fetcher()
# Misc.install_vmprof.install_vmprof("main_thread")
# # cls, num, response_queue, new_job_queue, cookie_lock
main_kwargs = {
'response_queue' : new_url_aggreator_queue,
'new_job_queue' : main_new_job_queue,
'cookie_lock' : runStatus.cookie_lock,
}
mainManager = MultiJobManager(max_tasks=self.main_thread_count, target=WebMirror.Runner.RunInstance.run, target_kwargs=main_kwargs)
managers = [mainManager]
drain_queues = [main_new_job_queue]
flush_queues = [new_url_aggreator_queue, main_new_job_queue]
self.status_call = self.main_job_fetcher.get_status
self.running_call = self.main_job_fetcher.is_running
self._runloop(managers, drain_queues)
self._teardown(managers, drain_queues, flush_queues)
def _runloop(self, managers, drain_queues):
cnt = 10
while runStatus.run_state.value:
try:
time.sleep(1)
cnt += 1
if cnt >= 10:
cnt = 0
living = sum([manager.check_run_jobs() for manager in managers])
clok_locked = runStatus.cookie_lock.acquire(block=False)
if clok_locked:
runStatus.cookie_lock.release()
self.log.info("Living processes: %s (Cookie lock acquired: %s, queue sizes: %s, exiting: %s)",
living, not clok_locked, [q.qsize() for q in drain_queues], runStatus.run_state.value == 0)
self.log.info("Job Queue Fillers: %s ", self.status_call())
if not self.running_call():
self.log.error("Job fetcher is dead. Aborting!")
runStatus.run_state.value = 0
if is_pypy:
collected = gc.collect()
self.log.info("Collected %s object with garbage collector", collected)
except KeyboardInterrupt:
self.log.info("Control C caught. Stopping scraper.")
runStatus.run_state.value = 0
break
except Exception:
print("Wat?")
traceback.print_exc()
with open("error %s.txt" % time.time(), "w") as fp:
fp.write("Manager crashed?\n")
fp.write(traceback.format_exc())
break
def _teardown(self, managers, drain_queues, flush_queues):
# Stop the job fetcher, and then let the active jobs
# flush down.
self.join_job_fetcher()
runStatus.run_state.value = 0
self.log.info("Crawler allowing ctrl+c to propagate.")
time.sleep(1)
runStatus.run_state.value = 0
time.sleep(1)
for manager in managers:
manager.join_jobs(drain_queues)
self.log.info("All processes halted.")
self.log.info("Flusing queues")
for job_queue in flush_queues:
try:
while 1:
job_queue.get_nowait()
except queue.Empty:
pass
self.join_aggregator()
|
|
from django.contrib.auth.models import AnonymousUser
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.urlresolvers import reverse
from django.http import QueryDict
from django.test.client import Client, RequestFactory
from django_facebook import exceptions as facebook_exceptions, \
settings as facebook_settings, signals
from django_facebook.api import get_facebook_graph, FacebookUserConverter, \
get_persistent_graph
from django_facebook.auth_backends import FacebookBackend
from django_facebook.connect import _register_user, connect_user, \
CONNECT_ACTIONS
from django_facebook.middleware import FacebookCanvasMiddleWare
from django_facebook.test_utils.mocks import RequestMock
from django_facebook.test_utils.testcases import FacebookTest, LiveFacebookTest
from django_facebook.utils import cleanup_oauth_url, get_profile_model, \
ScriptRedirect, get_user_model, get_user_attribute, try_get_profile, \
get_instance_for_attribute, update_user_attributes, get_registration_backend
from functools import partial
from mock import Mock, patch
from open_facebook.api import FacebookConnection, FacebookAuthorization, \
OpenFacebook
from open_facebook.exceptions import FacebookSSLError, FacebookURLError
import logging
import mock
from django.utils import six
from django_facebook.models import OpenGraphShare
from django.contrib.contenttypes.models import ContentType
from open_facebook.exceptions import FacebookUnreachable, OAuthException
logger = logging.getLogger(__name__)
__doctests__ = ['django_facebook.api']
class BaseDecoratorTest(FacebookTest):
def setUp(self):
FacebookTest.setUp(self)
from django_facebook.decorators import facebook_required
self.decorator = facebook_required
self.decorator_name = 'FacebookRequired'
def test_naming(self):
self.assertEqual(self.decorator.__name__, self.decorator_name)
def test_wrapping(self):
'''
Verify that the decorator wraps the original function
'''
@self.decorator
def myfunc(request):
'''docs'''
pass
self.assertEqual(myfunc.__doc__, 'docs')
self.assertEqual(myfunc.__name__, 'myfunc')
@self.decorator()
def myfunc2(request):
'''docs2'''
pass
self.assertEqual(myfunc2.__doc__, 'docs2')
self.assertEqual(myfunc2.__name__, 'myfunc2')
class DecoratorTest(BaseDecoratorTest):
'''
Verify that the lazy and facebook_required decorators work as expected
Facebook required decorator
If you have the permissions proceed
Else show the login screen
If you allow, proceed
If you click cancel ...
Facebook required lazy
Proceed
Upon OAuthException, go to login screen
If you allow proceed
If you click cancel ...
'''
def setUp(self):
BaseDecoratorTest.setUp(self)
self.url = reverse('facebook_decorator_example')
target_url = r'''https://www.facebook.com/dialog/oauth?scope=email%2Cuser_about_me%2Cuser_birt
hday%2Cuser_website&redirect_uri=http%3A%2F%2Ftestserver%2Ffacebook%2Fdecorator_
example%2F%3Fattempt%3D1&client_id=215464901804004
'''.replace(' ', '').replace('\n', '')
self.target_url = target_url
from django_facebook.decorators import facebook_required
self.decorator = facebook_required
def test_decorator_not_authenticated(self):
'''
We should redirect to Facebook oauth dialog
'''
response = self.client.get(self.url, follow=True)
if six.PY3:
self.assertEqual(response.redirect_chain[0][1], 302)
else:
self.assertRedirects(
response, self.target_url, target_status_code=404)
def test_decorator_authenticated(self):
'''
Here we fake that we have permissions
This should enter the view and in this test return "authorized"
'''
self.mock_authenticated()
response = self.client.get(self.url, follow=True)
if type(response.content) is six.binary_type:
self.assertEqual(response.content.decode(), 'authorized')
else:
self.assertEqual(response.content, 'authorized')
def test_decorator_denied(self):
'''
Here the user denies our app. Facebook adds this to the url
attempt=1&error_reason=user_denied&error=access_denied&error_description=The+user+denied+your+request.
'''
query_dict_string = 'attempt=1&error_reason=user_denied&error=access_denied&error_description=The+user+denied+your+request.'
get = QueryDict(query_dict_string, True)
denied_url = '%s?%s' % (self.url, get.urlencode())
response = self.client.get(denied_url, follow=True)
if type(response.content) is six.binary_type:
self.assertEqual(response.content.decode(), 'user denied or error')
else:
self.assertEqual(response.content, 'user denied or error')
class ScopedDecoratorTest(DecoratorTest):
'''
Tests the facebook_required decorator with a custom permission scope
'''
def setUp(self):
DecoratorTest.setUp(self)
self.url = reverse('facebook_decorator_example_scope')
target_url = r'https://www.facebook.com/dialog/oauth?scope=publish_actions%2Cuser_status&redirect_uri=http%3A%2F%2Ftestserver%2Ffacebook%2Fdecorator_example_scope%2F%3Fattempt%3D1&client_id=215464901804004'
self.target_url = target_url
def test_type_error(self):
self.mock_authenticated()
@self.decorator
def myview(request, graph):
def inner(a, b):
pass
inner(1, 2, c='nono')
to_fail = partial(myview, self.request)
try:
to_fail()
except TypeError as e:
right_error = "inner() got an unexpected keyword argument 'c'"
self.assertEqual(str(e), right_error)
class LazyDecoratorTest(DecoratorTest):
'''
Tests the more complicated but faster lazy decorator
'''
def setUp(self):
DecoratorTest.setUp(self)
self.url = reverse('facebook_lazy_decorator_example')
target_url = r'''https://www.facebook.com/dialog/oauth?scope=email%2Cuser_about_me%2Cuser_birt
hday%2Cuser_website&redirect_uri=http%3A%2F%2Ftestserver%2Ffacebook%2Flazy_decorator_
example%2F%3Fattempt%3D1&client_id=215464901804004
'''.replace(' ', '').replace('\n', '')
self.target_url = target_url
from django_facebook.decorators import facebook_required_lazy
self.decorator = facebook_required_lazy
self.decorator_name = 'FacebookRequiredLazy'
class GraphAccessTest(FacebookTest):
def test_get_persistent(self):
graph = get_persistent_graph(self.request)
# fake that we are authenticated and have a facebook graph
with patch.object(self.request, 'facebook'):
self.request.user = get_user_model().objects.all()[:1][0]
graph = get_persistent_graph(self.request)
class ConnectViewTest(FacebookTest):
def setUp(self):
FacebookTest.setUp(self)
self.base_url = base_url = 'http://testserver'
self.absolute_default_url = base_url + \
facebook_settings.FACEBOOK_LOGIN_DEFAULT_REDIRECT
self.url = reverse('facebook_connect')
self.absolute_url = base_url + reverse('facebook_connect')
self.example_url = reverse('facebook_example')
self.absolute_example_url = base_url + reverse('facebook_example')
def test_connect_redirect(self):
'''
The redirect flow for Facebook works as follows
- request the decorated url, /facebook/connect/
- the decorator (facebook_required) redirects the user to the oauth url
- after accepting the auth dialog, Facebook redirects us to the next url
'''
# STEP 1, verify that we redirect to facebook with the correct details
response = self.client.post(
self.url, next=self.example_url, follow=True)
redirect_url = response.redirect_chain[0][0]
oauth_url = 'https://www.facebook.com/dialog/oauth?scope=email%2Cuser_about_me%2Cuser_birthday%2Cuser_website&redirect_uri=http%3A%2F%2Ftestserver%2Ffacebook%2Fconnect%2F%3Fattempt%3D1&client_id=215464901804004'
if six.PY3:
self.assertEqual(response.redirect_chain[0][1], 302)
else:
self.assertEqual(redirect_url, oauth_url)
def test_connect_redirect_authenticated(self):
# Meanwhile at Facebook they redirect the request
# STEP 2 Authenticated, verify that the connect view redirects to the
# example
self.mock_authenticated()
accepted_url = self.url + \
'?attempt=1&client_id=215464901804004&next=bla®ister_next=%s' % self.example_url
response = self.client.get(accepted_url, follow=True)
redirect_url = response.redirect_chain[0][0]
self.assertEqual(redirect_url, self.absolute_example_url)
# Verify that login_next works
accepted_url = self.url + \
'?attempt=1&client_id=215464901804004&next=bla&login_next=%s' % self.example_url
response = self.client.get(accepted_url, follow=True)
redirect_url = response.redirect_chain[0][0]
self.assertEqual(redirect_url, self.absolute_example_url)
def test_connect_redirect_default(self):
# Now try without next
self.mock_authenticated()
accepted_url = self.url + '?attempt=1&client_id=215464901804004'
response = self.client.get(accepted_url, follow=True)
redirect_url = response.redirect_chain[0][0]
self.assertEqual(redirect_url, self.absolute_default_url)
def test_connect_redirect_not_authenticated(self):
# Meanwhile at Facebook they redirect the request
# STEP 2 Not Authenticated, verify that the connect view redirects to
# the example
accepted_url = self.url + \
'?attempt=1&client_id=215464901804004&next=%s' % self.example_url
response = self.client.get(accepted_url, follow=True)
redirect_url = response.redirect_chain[0][0]
error_url = self.absolute_example_url + '?fb_error_or_cancel=1'
self.assertEqual(redirect_url, error_url)
# Verify that error next also works
accepted_url = self.url + \
'?attempt=1&client_id=215464901804004&next=bla&error_next=%s' % self.example_url
response = self.client.get(accepted_url, follow=True)
redirect_url = response.redirect_chain[0][0]
error_url = self.absolute_example_url + '?fb_error_or_cancel=1'
self.assertEqual(redirect_url, error_url)
def test_connect(self):
'''
Test if we can do logins
django_facebook.connect.connect_user
'''
user = get_user_model().objects.all()[:1][0]
url = self.url
example_url = reverse('facebook_example')
# test registration flow
with patch('django_facebook.views.connect_user', return_value=(CONNECT_ACTIONS.REGISTER, user)) as wrapped_connect:
post_data = dict(
access_token='short_username',
next='%s?register=1' % example_url,
)
response = self.client.post(url, post_data, follow=True)
self.assertEqual(wrapped_connect.call_count, 1)
self.assertIn('register', response.redirect_chain[0][0])
self.assertEqual(response.status_code, 200)
# user register next instead of next
with patch('django_facebook.views.connect_user', return_value=(CONNECT_ACTIONS.REGISTER, user)) as wrapped_connect:
post_data = dict(
access_token='short_username',
register_next='%s?register=1' % example_url
)
response = self.client.post(url, post_data, follow=True)
self.assertEqual(wrapped_connect.call_count, 1)
self.assertIn('register', response.redirect_chain[0][0])
self.assertEqual(response.status_code, 200)
# test login
with patch('django_facebook.views.connect_user', return_value=(CONNECT_ACTIONS.LOGIN, user)) as wrapped_connect:
post_data = dict(
access_token='short_username',
next='%s?loggggg=1' % example_url,
)
response = self.client.post(url, post_data, follow=True)
self.assertEqual(wrapped_connect.call_count, 1)
self.assertIn('?loggggg=1', response.redirect_chain[0][0])
self.assertEqual(response.status_code, 200)
# test connect
with patch('django_facebook.views.connect_user', return_value=(CONNECT_ACTIONS.CONNECT, user)) as wrapped_connect:
post_data = dict(
access_token='short_username',
next='%s?loggggg=1' % example_url
)
response = self.client.post(url, post_data, follow=True)
self.assertEqual(wrapped_connect.call_count, 1)
assert '?loggggg=1' in response.redirect_chain[0][0]
self.assertEqual(response.status_code, 200)
# test connect
from django_facebook import exceptions as facebook_exceptions
profile_error = facebook_exceptions.IncompleteProfileError()
profile_error.form = None
with patch('django_facebook.views.connect_user', return_value=(CONNECT_ACTIONS.REGISTER, user), side_effect=profile_error) as wrapped_connect:
post_data = dict(access_token='short_username',
next='%s?loggggg=1' % example_url)
response = self.client.post(url, post_data, follow=True)
self.assertEqual(wrapped_connect.call_count, 1)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.context)
template = self.get_response_template(response)
backend = get_registration_backend()
assert template.name in backend.get_registration_template()
def test_slow_connect(self):
'''
Test if we can do logins
django_facebook.connect.connect_user
'''
url = reverse('facebook_connect')
example_url = reverse('facebook_example')
# test super slow Facebook
errors = [FacebookSSLError(), FacebookURLError(
'<urlopen error _ssl.c:489: The handshake operation timed out>')]
for error in errors:
with patch('django_facebook.views.get_instance_for') as converter:
instance = converter.return_value
instance.is_authenticated = Mock(side_effect=error)
post_data = dict(
access_token='short_username',
next='%s?loggggg=1' % example_url
)
response = self.client.post(url, post_data, follow=True)
self.assertEqual(instance.is_authenticated.call_count, 1)
self.assertTrue(response.context)
assert '?loggggg=1' in response.redirect_chain[0][0]
def get_response_template(self, response):
if hasattr(response, 'template'):
templates = [response.template]
else:
templates = response.templates
template = templates[0]
return template
class TestUserTest(LiveFacebookTest):
def test_create_test_user(self):
# Also, somehow unittest.skip doesn't seem to work with Travis CI?
return 'Skipping since you might have created test users manually, lets not delete them :)'
# start by clearing out our test users (maybe this isn't safe to use in testing)
# if other people create test users manually this could be annoying
app_access_token = FacebookAuthorization.get_cached_app_access_token()
FacebookAuthorization.delete_test_users(app_access_token)
# the permissions for which we want a test user
permissions = ['email', 'publish_actions']
# gets the test user object
test_user = FacebookAuthorization.get_or_create_test_user(
app_access_token, permissions)
graph = test_user.graph()
me = graph.me()
assert me
class ExtendTokenTest(LiveFacebookTest):
def test_extend_token(self):
return 'this doesnt work in travis, but locally its fine... weird'
app_access_token = FacebookAuthorization.get_cached_app_access_token()
test_user = FacebookAuthorization.get_or_create_test_user(
app_access_token)
access_token = test_user.access_token
results = FacebookAuthorization.extend_access_token(access_token)
if 'access_token' not in results:
raise ValueError('we didnt get a fresh token')
class OpenGraphShareTest(FacebookTest):
def setUp(self):
FacebookTest.setUp(self)
user_url = 'http://www.fashiolista.com/style/neni/'
kwargs = dict(item=user_url)
user = get_user_model().objects.all()[:1][0]
profile = try_get_profile(user)
user_or_profile = get_instance_for_attribute(
user, profile, 'facebook_open_graph')
user_or_profile.facebook_open_graph = True
user_or_profile.save()
some_content_type = ContentType.objects.all()[:1][0]
share = OpenGraphShare.objects.create(
user_id=user.id,
facebook_user_id=13123123,
action_domain='fashiolista:follow',
content_type=some_content_type,
object_id=user.id,
)
share.set_share_dict(kwargs)
share.save()
self.share = share
self.share_details = user, profile, share
def test_follow_og_share(self):
user_url = 'http://www.fashiolista.com/style/neni/'
kwargs = dict(item=user_url)
user = get_user_model().objects.all()[:1][0]
from django.contrib.contenttypes.models import ContentType
some_content_type = ContentType.objects.all()[:1][0]
share = OpenGraphShare.objects.create(
user_id=user.id,
facebook_user_id=13123123,
action_domain='fashiolista:follow',
content_type=some_content_type,
object_id=user.id,
)
share.set_share_dict(kwargs)
share.save()
share.send()
def test_follow_og_share_error(self):
'''
A normal OpenFacebook exception shouldn't require a new token.
An OAuthException, however, should set new_token_required to True,
but only if has_permissions(['publish_actions']) is indeed failing
'''
# utility function for testing purposes
def test_send(error, expected_error_message, expected_new_token, has_permissions=False):
user, profile, share = self.share_details
update_user_attributes(
user, profile, dict(new_token_required=False), save=True)
with mock.patch('open_facebook.api.OpenFacebook') as mocked:
instance = mocked.return_value
instance.set = Mock(side_effect=error)
instance.has_permissions = Mock(return_value=has_permissions)
instance.access_token = get_user_attribute(
user, profile, 'access_token')
share.send(graph=instance)
self.assertEqual(share.error_message, expected_error_message)
self.assertFalse(share.completed_at)
user = get_user_model().objects.get(id=user.id)
if profile:
profile = get_profile_model().objects.get(id=profile.id)
new_token_required = get_user_attribute(
user, profile, 'new_token_required')
self.assertEqual(new_token_required, expected_new_token)
# test with a basic exception, this should reset the new_token_required
test_send(
error=FacebookUnreachable('broken'),
expected_error_message='FacebookUnreachable(\'broken\',)',
expected_new_token=False,
has_permissions=False,
)
# now try with an oAuthException and no permissions
# this should set new_token_required to true
test_send(
error=OAuthException('permissions'),
expected_error_message="OAuthException('permissions',)",
expected_new_token=True,
has_permissions=False,
)
# now an oAuthException, but we already have the permissions
# this means we shouldnt set new_token_required to True
test_send(
error=OAuthException('permissions'),
expected_error_message="OAuthException('permissions',)",
expected_new_token=False,
has_permissions=True,
)
class UserConnectTest(FacebookTest):
'''
Tests the connect user functionality
'''
def test_persistent_graph(self):
request = RequestMock().get('/')
request.session = {}
request.user = AnonymousUser()
graph = get_facebook_graph(access_token='short_username')
FacebookUserConverter(graph)
action, user = connect_user(self.request, facebook_graph=graph)
self.assertEqual(action, CONNECT_ACTIONS.REGISTER)
def test_gender_matching(self):
request = RequestMock().get('/')
request.session = {}
request.user = AnonymousUser()
graph = get_persistent_graph(request, access_token='paul')
converter = FacebookUserConverter(graph)
base_data = converter.facebook_profile_data()
self.assertEqual(base_data['gender'], 'male')
data = converter.facebook_registration_data()
self.assertEqual(data['gender'], 'm')
action, user = connect_user(self.request, facebook_graph=graph)
profile = try_get_profile(user)
gender = get_user_attribute(user, profile, 'gender')
self.assertEqual(gender, 'm')
def test_update_access_token(self):
request = RequestMock().get('/')
request.session = {}
request.user = AnonymousUser()
graph = get_persistent_graph(request, access_token='paul')
action, user = connect_user(self.request, facebook_graph=graph)
first_user_id = user.id
# new token required should start out as False
profile = try_get_profile(user)
new_token_required = get_user_attribute(
user, profile, 'new_token_required')
self.assertEqual(new_token_required, False)
# we manually set it to true
update_user_attributes(
user, profile, dict(new_token_required=True), save=True)
if profile:
profile = get_profile_model().objects.get(id=profile.id)
user = get_user_model().objects.get(id=user.id)
new_token_required = get_user_attribute(
user, profile, 'new_token_required')
self.assertEqual(new_token_required, True)
# another update should however set it back to False
request.facebook = None
graph = get_facebook_graph(request, access_token='paul2')
logger.info('and the token is %s', graph.access_token)
action, user = connect_user(self.request, facebook_graph=graph)
user = get_user_model().objects.get(id=user.id)
self.assertEqual(user.id, first_user_id)
if profile:
profile = get_profile_model().objects.get(id=profile.id)
user = get_user_model().objects.get(id=user.id)
new_token_required = get_user_attribute(
user, profile, 'new_token_required')
self.assertEqual(new_token_required, False)
def test_long_username(self):
request = RequestMock().get('/')
request.session = {}
request.user = AnonymousUser()
graph = get_persistent_graph(request, access_token='long_username')
converter = FacebookUserConverter(graph)
base_data = converter.facebook_registration_data()
action, user = connect_user(self.request, facebook_graph=graph)
self.assertEqual(len(base_data['username']), 30)
self.assertEqual(len(user.username), 30)
self.assertEqual(len(user.first_name), 30)
self.assertEqual(len(user.last_name), 30)
def test_full_connect(self):
# going for a register, connect and login
graph = get_facebook_graph(access_token='short_username')
FacebookUserConverter(graph)
action, user = connect_user(self.request, facebook_graph=graph)
self.assertEqual(action, CONNECT_ACTIONS.REGISTER)
# and now we do a login, not a connect
action, user = connect_user(self.request, facebook_graph=graph)
self.assertEqual(action, CONNECT_ACTIONS.LOGIN)
self.request.GET._mutable = True
self.request.GET['connect_facebook'] = 1
action, user = connect_user(
self.request, facebook_graph=graph, connect_facebook=True)
self.assertEqual(action, CONNECT_ACTIONS.CONNECT)
self.request.user = AnonymousUser()
action, user = connect_user(
self.request, facebook_graph=graph, connect_facebook=True)
self.assertEqual(action, CONNECT_ACTIONS.LOGIN)
def test_parallel_register(self):
'''
Adding some testing for the case when one person tries to register
multiple times in the same second
'''
graph = get_facebook_graph(access_token='short_username')
FacebookUserConverter(graph)
action, user = connect_user(self.request, facebook_graph=graph)
self.assertEqual(action, CONNECT_ACTIONS.REGISTER)
self.request.user.is_authenticated = lambda: False
with patch('django_facebook.connect.authenticate') as patched:
return_sequence = [user, None]
def side(*args, **kwargs):
value = return_sequence.pop()
return value
patched.side_effect = side
with patch('django_facebook.connect._register_user') as patched_register:
patched_register.side_effect = facebook_exceptions.AlreadyRegistered(
'testing parallel registers')
action, user = connect_user(self.request, facebook_graph=graph)
self.assertEqual(action, CONNECT_ACTIONS.LOGIN)
def test_utf8(self):
graph = get_facebook_graph(access_token='unicode_string')
facebook = FacebookUserConverter(graph)
action, user = connect_user(self.request, facebook_graph=graph)
def test_invalid_token(self):
self.assertRaises(AssertionError,
connect_user, self.request, access_token='invalid')
def test_no_email_registration(self):
from django_facebook import exceptions as facebook_exceptions
self.assertRaises(facebook_exceptions.IncompleteProfileError,
connect_user, self.request, access_token='no_email')
def test_current_user(self):
facebook = get_facebook_graph(access_token='tschellenbach')
action, user = connect_user(self.request, facebook_graph=facebook)
self.assertEqual(action, CONNECT_ACTIONS.LOGIN)
def test_fb_update_required(self):
def pre_update(sender, user, profile, facebook_data, **kwargs):
user.pre_update_signal = True
Profile = get_profile_model()
user_model = get_user_model()
signals.facebook_pre_update.connect(pre_update, sender=user_model)
facebook = get_facebook_graph(access_token='tschellenbach')
facebook_settings.FACEBOOK_FORCE_PROFILE_UPDATE_ON_LOGIN = True
action, user = connect_user(self.request, facebook_graph=facebook)
self.assertEqual(action, CONNECT_ACTIONS.LOGIN)
self.assertTrue(hasattr(user, 'pre_update_signal'))
facebook_settings.FACEBOOK_FORCE_PROFILE_UPDATE_ON_LOGIN = False
action, user = connect_user(self.request, facebook_graph=facebook)
self.assertEqual(action, CONNECT_ACTIONS.LOGIN)
self.assertFalse(hasattr(user, 'pre_update_signal'))
def test_new_user(self):
facebook = get_facebook_graph(access_token='new_user')
action, user = connect_user(self.request, facebook_graph=facebook)
def test_short_username(self):
facebook = get_facebook_graph(access_token='short_username')
action, user = connect_user(self.request, facebook_graph=facebook)
self.assertTrue(len(user.username) > 4)
self.assertEqual(action, CONNECT_ACTIONS.REGISTER)
def test_gender(self):
graph = get_facebook_graph(access_token='new_user')
facebook = FacebookUserConverter(graph)
data = facebook.facebook_registration_data()
self.assertEqual(data['gender'], 'm')
def test_double_username(self):
'''
This used to give an error with duplicate usernames
with different capitalization
'''
facebook = get_facebook_graph(access_token='short_username')
action, user = connect_user(self.request, facebook_graph=facebook)
user.username = 'Thierry_schellenbach'
user.save()
self.request.user = AnonymousUser()
facebook = get_facebook_graph(access_token='same_username')
action, new_user = connect_user(self.request, facebook_graph=facebook)
self.assertNotEqual(user.username, new_user.username)
self.assertNotEqual(user.id, new_user.id)
def test_registration_form(self):
'''
Django_facebook should use user supplied registration form if given
'''
from django.conf import settings
if settings.MODE == 'userena':
return
test_form = 'django_facebook.test_utils.forms.SignupForm'
old_setting = facebook_settings.FACEBOOK_REGISTRATION_FORM
facebook_settings.FACEBOOK_REGISTRATION_FORM = test_form
try:
facebook = get_facebook_graph(access_token='short_username')
action, user = connect_user(self.request, facebook_graph=facebook)
# The test form always sets username to test form
self.assertEqual(user.username, 'Test form')
finally:
facebook_settings.FACEBOOK_REGISTRATION_FORM = old_setting
class SimpleRegisterViewTest(FacebookTest):
'''
Even the simplest views will eventually break if they are not tested
'''
def test_registration(self):
pw = 'tester1234'
data = dict(username='testertester', email='tester@testertester.com',
password1=pw, password2=pw)
data['register_next'] = '/?a=bbbbcbbbb'
response = self.client.post('/accounts/register/', data, follow=True)
assert response.redirect_chain, 'we are expecting a redirect!'
for url, status in response.redirect_chain:
if 'bbbbcbbbb' in url:
break
else:
raise ValueError('bbbbcbbbb isnt in %s' % response.redirect_chain)
class AuthBackend(FacebookTest):
def test_auth_backend(self):
# the auth backend
backend = FacebookBackend()
facebook = get_facebook_graph(access_token='new_user')
action, user = connect_user(self.request, facebook_graph=facebook)
facebook_email = user.email
profile = try_get_profile(user)
user_or_profile = get_instance_for_attribute(
user, profile, 'facebook_id')
facebook_id = user_or_profile.facebook_id
auth_user = backend.authenticate(facebook_email=facebook_email)
logger.info('%s %s %s', auth_user.email, user.email, facebook_email)
self.assertEqual(auth_user, user)
auth_user = backend.authenticate(facebook_id=facebook_id)
self.assertEqual(auth_user, user)
auth_user = backend.authenticate(facebook_id=facebook_id,
facebook_email=facebook_email)
self.assertEqual(auth_user, user)
auth_user = backend.authenticate()
self.assertIsNone(auth_user)
class ErrorMappingTest(FacebookTest):
def test_mapping(self):
from open_facebook import exceptions as open_facebook_exceptions
raise_something = partial(FacebookConnection.raise_error, 0,
"(#200) The user hasn't authorized the "
"application to perform this action")
self.assertRaises(open_facebook_exceptions.PermissionException,
raise_something)
class OAuthUrlTest(FacebookTest):
def _test_equal(self, url, output):
converted = cleanup_oauth_url(url)
self.assertEqual(converted, output)
def test_url(self):
url = 'http://www.google.com/'
output = 'http://www.google.com/'
self._test_equal(url, output)
url = 'http://www.google.com/?code=a'
output = 'http://www.google.com/'
self._test_equal(url, output)
url = 'http://www.google.com/?code=a&b=c&d=c'
output = 'http://www.google.com/?b=c&d=c'
self._test_equal(url, output)
class SignalTest(FacebookTest):
'''
Tests that signals fire properly
'''
def test_user_registered_signal(self):
# Ensure user registered, pre update and post update signals fire
def user_registered(sender, user, facebook_data, **kwargs):
user.registered_signal = True
def pre_update(sender, user, profile, facebook_data, **kwargs):
user.pre_update_signal = True
def post_update(sender, user, profile, facebook_data, **kwargs):
user.post_update_signal = True
Profile = get_profile_model()
user_model = get_user_model()
signals.facebook_user_registered.connect(
user_registered, sender=user_model)
signals.facebook_pre_update.connect(pre_update, sender=user_model)
signals.facebook_post_update.connect(post_update, sender=user_model)
graph = get_facebook_graph(access_token='short_username')
facebook = FacebookUserConverter(graph)
user = _register_user(self.request, facebook)
self.assertEqual(hasattr(user, 'registered_signal'), True)
self.assertEqual(hasattr(user, 'pre_update_signal'), True)
self.assertEqual(hasattr(user, 'post_update_signal'), True)
def fake_connect(request, access_token, graph):
return ('action', 'user')
class FacebookCanvasMiddlewareTest(FacebookTest):
def setUp(self):
super(FacebookCanvasMiddlewareTest, self).setUp()
self.factory = RequestFactory()
self.middleware = FacebookCanvasMiddleWare()
self.session_middleware = SessionMiddleware()
def get_canvas_url(self, data={}):
request = self.factory.post('/', data)
request.META['HTTP_REFERER'] = 'https://apps.facebook.com/canvas/'
self.session_middleware.process_request(request)
return request
def test_referer(self):
# test empty referer
request = self.factory.get('/')
self.assertIsNone(self.middleware.process_request(request))
# test referer not facebook
request = self.factory.get('/')
request.META['HTTP_REFERER'] = 'https://localhost:8000/'
self.assertIsNone(self.middleware.process_request(request))
request = self.get_canvas_url()
response = self.middleware.process_request(request)
self.assertIsInstance(response, ScriptRedirect)
def test_user_denied(self):
request = self.factory.get(
'/?error_reason=user_denied&error=access_denied&error_description=The+user+denied+your+request.')
request.META['HTTP_REFERER'] = 'https://apps.facebook.com/canvas/'
response = self.middleware.process_request(request)
self.assertIsInstance(response, ScriptRedirect)
@patch.object(FacebookAuthorization, 'parse_signed_data')
def test_non_auth_user(self, mocked_method=FacebookAuthorization.parse_signed_data):
mocked_method.return_value = {}
data = {'signed_request':
'dXairHLF8dfUKaL7ZFXaKmTsAglg0EkyHesTLnPcPAE.eyJhbGdvcml0aG0iOiJITUFDLVNIQTI1NiIsImlzc3VlZF9hdCI6MTM1ODA2MTU1MSwidXNlciI6eyJjb3VudHJ5IjoiYnIiLCJsb2NhbGUiOiJlbl9VUyIsImFnZSI6eyJtaW4iOjIxfX19'}
request = self.get_canvas_url(data=data)
response = self.middleware.process_request(request)
self.assertTrue(mocked_method.called)
self.assertIsInstance(response, ScriptRedirect)
@patch('django_facebook.middleware.connect_user', fake_connect)
@patch.object(OpenFacebook, 'permissions')
@patch.object(FacebookAuthorization, 'parse_signed_data')
def test_auth_user(
self, mocked_method_1=FacebookAuthorization.parse_signed_data,
mocked_method_2=OpenFacebook.permissions):
data = {'signed_request':
'd7JQQIfxHgEzLIqJMeU9J5IlLg7shzPJ8DFRF55L52w.eyJhbGdvcml0aG0iOiJITUFDLVNIQTI1NiIsImV4cGlyZXMiOjEzNTgwNzQ4MDAsImlzc3VlZF9hdCI6MTM1ODA2ODU1MCwib2F1dGhfdG9rZW4iOiJBQUFGdk02MWpkT0FCQVBhWkNzR1pDM0dEVFZtdDJCWkFQVlpDc0F0aGNmdXBYUnhMN1cwUHBaQm53OEUwTzBBRVNYNjVaQ0JHdjZpOFRBWGhnMEpzbER5UmtmZUlnYnNHUmV2eHQxblFGZ0hNcFNpeTNWRTB3ZCIsInVzZXIiOnsiY291bnRyeSI6ImJyIiwibG9jYWxlIjoiZW5fVVMiLCJhZ2UiOnsibWluIjoyMX19LCJ1c2VyX2lkIjoiMTAwMDA1MDEyNDY2Nzg1In0'}
request = self.get_canvas_url(data=data)
request.user = AnonymousUser()
mocked_method_1.return_value = {'user_id': '123456',
'oauth_token': 'qwertyuiop'}
mocked_method_2.return_value = facebook_settings.FACEBOOK_DEFAULT_SCOPE
self.assertIsNone(self.middleware.process_request(request))
self.assertTrue(mocked_method_1.called)
|
|
"""
Copyright 2020, CCL Forensics
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys
import struct
import datetime
import types
import typing
import re
__version__ = "0.1"
__description__ = "Partial reimplementation of the V8 Javascript Object Serialization"
__contact__ = "Alex Caithness"
# TODO: We need to address cyclic references, which are permissible. Probably take the same approach as in ccl_bplist
# and subclass the collection types to resolve references JIT
# See: https://github.com/v8/v8/blob/master/src/objects/value-serializer.cc
__DEBUG = False
def log(msg, debug_only=True):
if not debug_only or __DEBUG:
caller_name = sys._getframe(1).f_code.co_name
caller_line = sys._getframe(1).f_code.co_firstlineno
print(f"{caller_name} ({caller_line}):\t{msg}")
def read_le_varint(stream: typing.BinaryIO) -> typing.Optional[typing.Tuple[int, bytes]]:
# this only outputs unsigned
i = 0
result = 0
underlying_bytes = []
while i < 10: # 64 bit max possible?
raw = stream.read(1)
if len(raw) < 1:
return None
tmp, = raw
underlying_bytes.append(tmp)
result |= ((tmp & 0x7f) << (i * 7))
if (tmp & 0x80) == 0:
break
i += 1
return result, bytes(underlying_bytes)
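# Illustrative note (not in the original source): this is standard little-endian
# base-128 varint decoding, so for example io.BytesIO(b"\xe5\x8e\x26") decodes to
# (624485, b"\xe5\x8e\x26"), since 0x65 + (0x0e << 7) + (0x26 << 14) == 624485.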
class _Undefined:
def __bool__(self):
return False
def __eq__(self, other):
if isinstance(other, _Undefined):
return True
return False
def __repr__(self):
return "<Undefined>"
def __str__(self):
return "<Undefined>"
class Constants:
# Constants
kLatestVersion = 13
# version:uint32_t (if at beginning of data, sets version > 0)
token_kVersion = b"\xFF"
# ignore
token_kPadding = b"\0"
# refTableSize:uint32_t (previously used for sanity checks; safe to ignore)
token_kVerifyObjectCount = b"?"
# Oddballs (no data).
token_kTheHole = b"-"
token_kUndefined = b"_"
token_kNull = b"0"
token_kTrue = b"T"
token_kFalse = b"F"
# Number represented as 32-bit integer, ZigZag-encoded
# (like sint32 in protobuf)
token_kInt32 = b"I"
# Number represented as 32-bit unsigned integer, varint-encoded
# (like uint32 in protobuf)
token_kUint32 = b"U"
# Number represented as a 64-bit double.
# Host byte order is used (N.B. this makes the format non-portable).
token_kDouble = b"N"
# BigInt. Bitfield:uint32_t, then raw digits storage.
token_kBigInt = b"Z"
# byteLength:uint32_t, then raw data
token_kUtf8String = b"S"
token_kOneByteString = b"\""
token_kTwoByteString = b"c"
# Reference to a serialized object. objectID:uint32_t
token_kObjectReference = b"^"
# Beginning of a JS object.
token_kBeginJSObject = b"o"
# End of a JS object. numProperties:uint32_t
token_kEndJSObject = b"{"
# Beginning of a sparse JS array. length:uint32_t
# Elements and properties are written as token_key/value pairs, like objects.
token_kBeginSparseJSArray = b"a"
# End of a sparse JS array. numProperties:uint32_t length:uint32_t
token_kEndSparseJSArray = b"@"
# Beginning of a dense JS array. length:uint32_t
# |length| elements, followed by properties as token_key/value pairs
token_kBeginDenseJSArray = b"A"
# End of a dense JS array. numProperties:uint32_t length:uint32_t
token_kEndDenseJSArray = b"$"
# Date. millisSinceEpoch:double
token_kDate = b"D"
# Boolean object. No data.
token_kTrueObject = b"y"
token_kFalseObject = b"x"
# Number object. value:double
token_kNumberObject = b"n"
# BigInt object. Bitfield:uint32_t, then raw digits storage.
token_kBigIntObject = b"z"
# String object, UTF-8 encoding. byteLength:uint32_t, then raw data.
token_kStringObject = b"s"
# Regular expression, UTF-8 encoding. byteLength:uint32_t, raw data
# flags:uint32_t.
token_kRegExp = b"R"
# Beginning of a JS map.
token_kBeginJSMap = b";"
# End of a JS map. length:uint32_t.
token_kEndJSMap = b":"
# Beginning of a JS set.
token_kBeginJSSet = b"'"
# End of a JS set. length:uint32_t.
token_kEndJSSet = b","
# Array buffer. byteLength:uint32_t, then raw data.
token_kArrayBuffer = b"B"
# Array buffer (transferred). transferID:uint32_t
token_kArrayBufferTransfer = b"t"
# View into an array buffer.
# subtag:ArrayBufferViewTag, byteOffset:uint32_t, byteLength:uint32_t
# For typed arrays, byteOffset and byteLength must be divisible by the size
# of the element.
# Note: token_kArrayBufferView is special, and should have an ArrayBuffer (or an
# ObjectReference to one) serialized just before it. This is a quirk arising
# from the previous stack-based implementation.
token_kArrayBufferView = b"V"
# Shared array buffer. transferID:uint32_t
token_kSharedArrayBuffer = b"u"
# A wasm module object transfer. next value is its index.
token_kWasmModuleTransfer = b"w"
# The delegate is responsible for processing all following data.
# This "escapes" to whatever wire format the delegate chooses.
token_kHostObject = b"\\"
# A transferred WebAssembly.Memory object. maximumPages:int32_t, then by
# SharedArrayBuffer tag and its data.
token_kWasmMemoryTransfer = b"m"
# A list of (subtag: ErrorTag, [subtag dependent data]). See ErrorTag for
# details.
token_kError = b"r"
# The following tags are reserved because they were in use in Chromium before
# the token_kHostObject tag was introduced in format version 13, at
# v8 refs/heads/master@{#43466}
# chromium/src refs/heads/master@{#453568}
#
# They must not be reused without a version check to prevent old values from
# starting to deserialize incorrectly. For simplicity, it's recommended to
# avoid them altogether.
#
# This is the set of tags that existed in SerializationTag.h at that time and
# still exist at the time of this writing (i.e., excluding those that were
# removed on the Chromium side because there should be no real user data
# containing them).
#
# It might be possible to also free up other tags which were never persisted
# (e.g. because they were used only for transfer) in the future.
token_kLegacyReservedMessagePort = b"M"
token_kLegacyReservedBlob = b"b"
token_kLegacyReservedBlobIndex = b"i"
token_kLegacyReservedFile = b"f"
token_kLegacyReservedFileIndex = b"e"
token_kLegacyReservedDOMFileSystem = b"d"
token_kLegacyReservedFileList = b"l"
token_kLegacyReservedFileListIndex = b"L"
token_kLegacyReservedImageData = b"#"
token_kLegacyReservedImageBitmap = b"g"
token_kLegacyReservedImageBitmapTransfer = b"G"
token_kLegacyReservedOffscreenCanvas = b"H"
token_kLegacyReservedCryptoKey = b"token_k"
token_kLegacyReservedRTCCertificate = b"token_k"
class ArrayBufferViewTag:
tag_kInt8Array = "b"
tag_kUint8Array = "B"
tag_kUint8ClampedArray = "C"
tag_kInt16Array = "w"
tag_kUint16Array = "W"
tag_kInt32Array = "d"
tag_kUint32Array = "D"
tag_kFloat32Array = "f"
tag_kFloat64Array = "F"
tag_kBigInt64Array = "q"
tag_kBigUint64Array = "Q"
tag_kDataView = "?"
STRUCT_LOOKUP = types.MappingProxyType({
tag_kInt8Array: "b",
tag_kUint8Array: "B",
tag_kUint8ClampedArray: "B",
tag_kInt16Array: "h",
tag_kUint16Array: "H",
tag_kInt32Array: "i",
tag_kUint32Array: "I",
tag_kFloat32Array: "f",
tag_kFloat64Array: "d",
tag_kBigInt64Array: "q",
tag_kBigUint64Array: "Q",
tag_kDataView: "c"
})
class Deserializer:
Undefined = _Undefined()
__ODDBALLS = {
Constants.token_kUndefined: Undefined,
Constants.token_kTheHole: Undefined,
Constants.token_kNull: None,
Constants.token_kTrue: True,
Constants.token_kFalse: False,
}
__WRAPPED_PRIMITIVES = {
Constants.token_kTrueObject,
Constants.token_kFalseObject,
Constants.token_kNumberObject,
Constants.token_kBigIntObject,
Constants.token_kStringObject
}
def __init__(self, stream: typing.BinaryIO, host_object_delegate: typing.Callable,
*, is_little_endian=True, is_64bit=True):
self._f = stream
self._host_object_delegate = host_object_delegate
self._endian = "<" if is_little_endian else ">"
self._pointer_size = 8 if is_64bit else 4
self._next_id = 0
self._objects = []
self.version = self._read_header()
def _read_raw(self, length: int) -> bytes:
start = self._f.tell()
raw = self._f.read(length)
if len(raw) != length:
raise ValueError(f"Could not read all data at offset {start}; wanted {length}; got {len(raw)}")
return raw
def _read_le_varint(self) -> typing.Optional[typing.Tuple[int, bytes]]:
return read_le_varint(self._f)
def _read_zigzag(self) -> int:
unsigned = self._read_le_varint()[0]
if unsigned & 1:
# protobuf/V8-style ZigZag decode: odd values map to negatives, e.g. 1 -> -1, 3 -> -2
return -(unsigned >> 1) - 1
else:
return unsigned >> 1
def _read_double(self) -> float:
return struct.unpack(f"{self._endian}d", self._read_raw(8))[0]
# def _read_uint32(self) -> int:
# return self._read_le_varint()
# def _read_uint64(self) -> int:
# return self._read_le_varint()
def _read_bigint(self) -> int:
size_flag = self._read_le_varint()[0]
is_neg = size_flag & 0x01
size = size_flag >> 4
raw = self._read_raw(size * self._pointer_size)
value = int.from_bytes(raw, "big" if self._endian == ">" else "little", signed=False)
if is_neg:
value = -value
return value
def _read_utf8_string(self) -> str:
length = self._read_le_varint()[0]
return self._read_raw(length).decode("utf8")
def _read_one_byte_string(self) -> typing.AnyStr:
length = self._read_le_varint()[0]
# I think this can be used to store raw 8-bit data, so return ascii if we can, otherwise bytes
raw = self._read_raw(length) # .decode("ascii")
try:
result = raw.decode("ascii")
except UnicodeDecodeError:
result = raw
return result
def _read_two_byte_string(self) -> str:
length = self._read_le_varint()[0]
return self._read_raw(length).decode("utf-16-le") # le?
def _read_string(self) -> str:
if self.version < 12:
return self._read_utf8_string()
value = self._read_object()
assert isinstance(value, str)
return value
def _read_object_by_reference(self) -> typing.Any:
ref_id = self._read_le_varint()[0]
return self._objects[ref_id]
def _read_tag(self) -> bytes:
while True:
t = self._f.read(1)
if t != Constants.token_kPadding:
return t
def _peek_tag(self) -> bytes:
start = self._f.tell()
tag = self._read_tag()
self._f.seek(start, 0)
return tag
def _read_date(self) -> datetime.datetime:
x = self._read_double()
result = datetime.datetime(1970, 1, 1) + datetime.timedelta(milliseconds=x)
self._objects.append(result)
return result
def _read_js_regex(self) -> typing.Pattern:
log(f"Reading js regex properties at {self._f.tell()}")
pattern = self._read_string()
flags = self._read_le_varint()
# TODO: Flags?
regex = re.compile(pattern)
self._objects.append(regex)
return regex
def _read_js_object_properties(self, end_tag) -> typing.Iterable[typing.Tuple[typing.Any, typing.Any]]:
log(f"Reading object properties at {self._f.tell()} with end tag: {end_tag}")
while True:
if self._peek_tag() == end_tag:
log(f"Object end at offset {self._f.tell()}")
break
key = self._read_object()
value = self._read_object()
yield key, value
assert self._read_tag() == end_tag
def _read_js_object(self) -> dict:
log(f"Reading js object properties at {self._f.tell()}")
result = {}
self._objects.append(result)
for key, value in self._read_js_object_properties(Constants.token_kEndJSObject):
result[key] = value
property_count = self._read_le_varint()[0]
log(f"Actual property count: {len(result)}; stated property count: {property_count}")
if len(result) != property_count:
raise ValueError("Property count mismatch")
return result
def _read_js_sparse_array(self) -> list:
log(f"Reading js sparse array properties at {self._f.tell()}")
# TODO: implement a sparse list so that this isn't so horribly inefficient
length = self._read_le_varint()[0]
result = [None for _ in range(length)]
self._objects.append(result)
sparse_object = self._read_js_object_properties(Constants.token_kEndSparseJSArray)
prop_count = 0
for key, value in sparse_object:
i = int(key)
result[i] = value
prop_count += 1
expected_num_properties = self._read_le_varint()[0]
log(f"Actual property count: {prop_count}; stated property count: {expected_num_properties}")
if prop_count != expected_num_properties:
raise ValueError("Property count mismatch")
expected_length = self._read_le_varint()[0] # TODO: should this be checked?
return result
def _read_js_dense_array(self) -> list:
log(f"Reading js dense array properties at {self._f.tell()}")
length = self._read_le_varint()[0]
result = [None for _ in range(length)]
self._objects.append(result)
for i in range(length):
result[i] = self._read_object()
# And then there's a sparse bit maybe?
sparse_object = self._read_js_object_properties(Constants.token_kEndDenseJSArray)
prop_count = 0
for key, value in sparse_object:
i = int(key)
result[i] = value
prop_count += 1
expected_num_properties = self._read_le_varint()[0]
log(f"Actual property count: {prop_count}; stated property count: {expected_num_properties}")
if prop_count != expected_num_properties:
raise ValueError("Property count mismatch")
expected_length = self._read_le_varint()[0] # TODO: should this be checked?
return result
def _read_js_map(self) -> dict:
log(f"Reading js map at {self._f.tell()}")
result = {}
self._objects.append(result)
while True:
if self._peek_tag() == Constants.token_kEndJSMap:
log(f"End of map at {self._f.tell()}")
break
key = self._read_object()
value = self._read_object()
result[key] = value
assert self._read_tag() == Constants.token_kEndJSMap
expected_length = self._read_le_varint()[0]
log(f"Actual map item count: {len(result) * 2}; stated map item count: {expected_length}")
if expected_length != len(result) * 2:
raise ValueError("Map count mismatch")
return result
def _read_js_set(self) -> set:
log(f"Reading js set properties at {self._f.tell()}")
result = set()
self._objects.append(result)
while True:
if self._peek_tag() == Constants.token_kEndJSSet:
log(f"End of set at {self._f.tell()}")
break
result.add(self._read_object())
assert self._read_tag() == Constants.token_kEndJSSet
expected_length = self._read_le_varint()[0]
log(f"Actual set item count: {len(result)}; stated set item count: {expected_length}")
if expected_length != len(result):
raise ValueError("Set count mismatch")
return result
def _read_js_arraybuffer(self) -> bytes:
length = self._read_le_varint()[0]
raw = self._read_raw(length)
self._objects.append(raw)
return raw
def _wrap_js_array_buffer_view(self, raw: bytes) -> tuple:
if not isinstance(raw, bytes):
raise TypeError("Only bytes should be passed to be wrapped in a buffer view")
log(f"Wrapping in ArrayBufferView at offset {self._f.tell()}")
tag = chr(self._read_le_varint()[0])
byte_offset = self._read_le_varint()[0]
byte_length = self._read_le_varint()[0]
if byte_offset + byte_length > len(raw):
raise ValueError("Not enough data in the raw data to hold the defined data")
log(f"ArrayBufferView: tag: {tag}; byte_offset: {byte_offset}; byte_length: {byte_length}")
fmt = ArrayBufferViewTag.STRUCT_LOOKUP[tag]
element_length = struct.calcsize(fmt)
if byte_length % element_length != 0:
raise ValueError(f"ArrayBufferView doesn't fit nicely: byte_length: {byte_length}; "
f"element_length: {element_length}")
element_count = byte_length // element_length
return struct.unpack(f"{self._endian}{element_count}{fmt}", raw[byte_offset: byte_offset + byte_length])
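# Illustrative note (assumed example, not in the original source): for an 8-byte
# ArrayBuffer viewed as a Uint32Array (tag "D") with byte_offset 0 and byte_length 8,
# fmt is "I", element_length is 4, and the result is the 2-tuple produced by
# struct.unpack("<2I", raw) for a little-endian stream.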
def _read_host_object(self) -> typing.Any:
result = self._host_object_delegate(self._f)
self._objects.append(result)
return result
def _not_implemented(self):
raise NotImplementedError("Todo")
def _read_object_internal(self) -> typing.Tuple[bytes, typing.Any]:
tag = self._read_tag()
log(f"Offset: {self._f.tell()}; Tag: {tag}")
if tag in Deserializer.__ODDBALLS:
return tag, Deserializer.__ODDBALLS[tag]
func = {
Constants.token_kTrueObject: lambda: Deserializer.__ODDBALLS[Constants.token_kTrue],
Constants.token_kFalseObject: lambda: Deserializer.__ODDBALLS[Constants.token_kFalse],
Constants.token_kNumberObject: self._read_double,
Constants.token_kUint32: self._read_le_varint,
Constants.token_kInt32: self._read_zigzag,
Constants.token_kDouble: self._read_double,
Constants.token_kDate: self._read_date,
Constants.token_kBigInt: self._read_bigint,
Constants.token_kBigIntObject: self._read_bigint,
Constants.token_kUtf8String: self._read_utf8_string,
Constants.token_kOneByteString: self._read_one_byte_string,
Constants.token_kTwoByteString: self._read_two_byte_string,
Constants.token_kStringObject: self._read_string,
Constants.token_kRegExp: self._read_js_regex,
Constants.token_kObjectReference: self._read_object_by_reference,
Constants.token_kBeginJSObject: self._read_js_object,
Constants.token_kBeginSparseJSArray: self._read_js_sparse_array,
Constants.token_kBeginDenseJSArray: self._read_js_dense_array,
Constants.token_kBeginJSMap: self._read_js_map,
Constants.token_kBeginJSSet: self._read_js_set,
Constants.token_kArrayBuffer: self._read_js_arraybuffer,
Constants.token_kSharedArrayBuffer: self._not_implemented, # and probably never, as it can't be pulled from the data I think?
Constants.token_kArrayBufferTransfer: self._not_implemented,
Constants.token_kError: self._not_implemented,
Constants.token_kWasmModuleTransfer: self._not_implemented,
Constants.token_kWasmMemoryTransfer: self._not_implemented,
Constants.token_kHostObject: self._read_host_object,
}.get(tag)
if func is None:
raise ValueError(f"Unknown tag {tag}")
value = func()
if tag in Deserializer.__WRAPPED_PRIMITIVES:
self._objects.append(value)
return tag, value
def _read_object(self) -> typing.Any:
log(f"Read object at offset: {self._f.tell()}")
tag, o = self._read_object_internal()
if self._peek_tag() == Constants.token_kArrayBufferView:
assert self._read_tag() == Constants.token_kArrayBufferView
o = self._wrap_js_array_buffer_view(o)
return o
def _read_header(self) -> int:
tag = self._read_tag()
if tag != Constants.token_kVersion:
raise ValueError("Didn't get version tag in the header")
version = self._read_le_varint()[0]
return version
def read(self) -> typing.Any:
return self._read_object()
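# Minimal usage sketch (not part of the original module); a version-13 stream
# containing only the kTrue oddball deserializes to True:
#
#   import io
#   d = Deserializer(io.BytesIO(b"\xff\x0dT"), host_object_delegate=None)
#   assert d.read() is True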
|
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generator helpers.
Produces pieces of generated code.
"""
from clif.python import astutils
from clif.python import postconv
from clif.python import slots
VERSION = '0.3' # CLIF generated API version. Purely informative.
I = ' '
def TopologicalSortSimple(ideps):
"""Simple topological sort working on sequence of integer indices."""
# Returns permutation indices (list of integers).
# Using variable names `cons` for dependent, `prod` for dependency
# (consumer, producer) to increase readability.
# cons is implied by the index into ideps.
# prod is the element of ideps (integer or None).
# This implies that each cons can only have one or no prod.
# Example: ideps = [2, None, 1]
# Read as:
# 0 depends on 2
# 1 has no dependency
# 2 depends on 1
# Expected output permutation: [1, 2, 0]
# The output permutation guarantees that prod appears before cons.
# Recursive implementation, subject to maximum recursion limit
# (sys.getrecursionlimit(), usually 1000).
permutation = []
permutation_set = set()
def FollowDeps(root, cons):
"""Recursively follows dependencies."""
if cons in permutation_set:
return
prod = ideps[cons]
if prod is not None:
if prod < 0:
raise ValueError(
'Negative value in ideps: ideps[%s] = %s' % (cons, prod))
if prod >= len(ideps):
raise ValueError(
'Value in ideps exceeds its length: ideps[%s] = %s >= %s'
% (cons, prod, len(ideps)))
if prod == cons:
raise ValueError(
'Trivial cyclic dependency in ideps: ideps[%s] = %s'
% (cons, prod))
if prod == root:
raise ValueError(
'Cyclic dependency in ideps: following dependencies from'
' %s leads back to %s.' % (root, root))
FollowDeps(root, prod)
permutation.append(cons)
permutation_set.add(cons)
for cons in range(len(ideps)):
FollowDeps(cons, cons)
assert len(permutation) == len(ideps)
return permutation
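# Worked example (not in the original source), matching the comment above:
#   TopologicalSortSimple([2, None, 1]) == [1, 2, 0]
# i.e. 1 has no dependency, 2 depends on 1, and 0 depends on 2, so each producer
# is emitted before its consumer.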
def WriteTo(channel, lines):
for s in lines:
channel.write(s)
channel.write('\n')
def Headlines(src_file, hdr_files=(), sys_hdr_files=(), open_ns=None):
"""Generate header comment and #includes.
Args:
src_file: str - full name of the source file (C++ header)
hdr_files: [str] - additional c++ headers to #include "str"
If the first name is PYTHON, #include <Python.h>.
If str == PYOBJ, forward declare PyObject.
sys_hdr_files: set(str) - additional c++ headers to #include <str>
open_ns: str - emit namespace open_ns if not empty.
Yields:
source code lines
"""
yield '/' * 70
yield '// This file was automatically generated by PyCLIF.'
yield '// Version %s' % VERSION
yield '/' * 70
if src_file:
yield '// source: %s' % src_file
yield ''
python_h = False
if hdr_files[:1] == ['PYTHON']:
python_h = True
yield '#include <Python.h>'
del hdr_files[0]
for h in sys_hdr_files:
if h:
yield '#include <%s>' % h
for h in hdr_files:
if h == 'PYOBJ' and not python_h:
yield ''
yield '// Forward "declare" PyObject (instead of #include <Python.h>)'
yield 'struct _object; typedef _object PyObject;'
elif h:
yield '#include "%s"' % h
if open_ns:
yield ''
yield OpenNs(open_ns)
def OpenNs(namespace):
namespace = (namespace or 'clif').strip(':')
return ' '.join('namespace %s {' % ns for ns in namespace.split('::'))
def CloseNs(namespace):
namespace = (namespace or 'clif').strip(':')
return '} '*(1+namespace.count('::'))+' // namespace '+namespace
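# Illustrative note (not in the original source):
#   OpenNs('a::b')  -> 'namespace a { namespace b {'
#   CloseNs('a::b') -> '} }  // namespace a::b'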
def TypeConverters(type_namespace, types, *gen_cvt_args):
"""Generate type converters for types in type_namespace."""
type_namespace = type_namespace or 'clif'
yield ''
yield OpenNs(type_namespace)
if type_namespace != 'clif':
yield 'using namespace ::clif;'
yield 'using ::clif::Clif_PyObjAs;'
yield 'using ::clif::Clif_PyObjFrom;'
for t in types:
for s in t.GenConverters(*gen_cvt_args):
yield s
yield ''
yield CloseNs(type_namespace)
def _DefLine(pyname, cname, meth, doc):
if 'KEYWORD' in meth or 'NOARGS' in meth:
cname = '(PyCFunction)'+cname
if doc is None:
doc = 'nullptr'
else:
doc = '"%s"' % doc
return '{"%s", %s, %s, %s}' % (pyname, cname, meth, doc)
def _DefTable(ctype, cname, lines):
yield 'static %s %s[] = {' % (ctype, cname)
for p in lines:
yield I+_DefLine(*p)+','
yield I+'{}'
yield '};'
class _MethodDef(object):
name = 'MethodsStaticAlloc'
def __call__(self, methods):
yield ''
for s in _DefTable('PyMethodDef', self.name, methods):
yield s
MethodDef = _MethodDef() # pylint: disable=invalid-name
class _GetSetDef(object):
# pylint: disable=missing-class-docstring
name = 'Properties'
def __call__(self, properties, enable_instance_dict):
props = properties
if enable_instance_dict:
props = [
('__dict__',
'pyclif_instance_dict_get',
'pyclif_instance_dict_set',
None)] + props
for s in _DefTable('PyGetSetDef', 'Properties', props):
yield s
GetSetDef = _GetSetDef() # pylint: disable=invalid-name
def _TypesInitInDependencyOrder(types_init, raise_if_reordering=False):
"""Yields type_init items in dependency order: base classes before derived."""
cppname_indices = {}
for index, (cppname, _, _, _) in enumerate(types_init):
cppname_indices[cppname] = index
assert len(cppname_indices) == len(types_init)
ideps = []
for cppname, _, wrapped_base, _ in types_init:
if wrapped_base is not None and wrapped_base not in cppname_indices:
# INDIRECT DETECTION. Considering current development plans, this code
# generator is not worth more effort detecting the issue in a more direct
# way. This is still far better than crashing with a KeyError, or failing
# at compile time.
raise NameError(
'A .clif file is missing a Python-style `from ... import` for a'
' base class declared in another header (go/pyclif#pyimport):'
' wrapped_derived=%s, wrapped_base=%s' % (cppname, wrapped_base))
ideps.append(
None if wrapped_base is None else
cppname_indices[wrapped_base])
permutation = TopologicalSortSimple(ideps)
if raise_if_reordering: # For development / debugging.
if list(sorted(permutation)) != permutation:
msg = [
'Derived class appearing before base in .clif file: %s'
% str(permutation)]
for cppname, _, wrapped_base, _ in types_init:
msg.append(' %s -> %s' % (cppname, wrapped_base))
raise RuntimeError('\n'.join(msg))
for index in permutation:
yield types_init[index]
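# Illustrative note (hypothetical names, not in the original source): given
#   types_init = [('pyDerived', None, 'pyBase', None),
#                 ('pyBase', None, None, None)]
# the wrapped_base of 'pyDerived' refers to 'pyBase', so this generator yields the
# 'pyBase' entry before the 'pyDerived' entry.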
def ReadyFunction(types_init):
"""Generate Ready() function to call PyType_Ready for wrapped types."""
yield ''
yield 'bool Ready() {'
have_modname = False
pybases = set()
last_pybase = ''
for cppname, base, wrapped_base, _ in _TypesInitInDependencyOrder(types_init):
yield I+'%s =' % cppname
yield I+'%s::_build_heap_type();' % cppname.rsplit('::', 1)[0]
if base:
fq_name, toplevel_fq_name = base
# |base| is a fully qualified Python name.
# The caller ensures we have only one Python base per each class.
if base == last_pybase:
yield I+'Py_INCREF(base_cls);'
else:
type_prefix = '' if pybases else 'PyObject* '
if toplevel_fq_name:
yield I+('%sbase_cls = ImportFQName("%s", "%s");' %
(type_prefix, fq_name, toplevel_fq_name))
else:
yield I+('%sbase_cls = ImportFQName("%s");' %
(type_prefix, fq_name))
if base not in pybases:
yield I+'if (base_cls == nullptr) return false;'
yield I+'if (!PyObject_TypeCheck(base_cls, &PyType_Type)) {'
yield I+I+'Py_DECREF(base_cls);'
yield I+I+(
'PyErr_SetString(PyExc_TypeError, "Base class %s is not a '
'new style class inheriting from object.");' % fq_name)
yield I+I+'return false;'
yield I+'}'
yield I+cppname + '->tp_base = %s(base_cls);' % _Cast('PyTypeObject')
if base not in pybases:
yield I+'// Check that base_cls is a *statically* allocated PyType.'
yield I+'if (%s->tp_base->tp_alloc == PyType_GenericAlloc) {' % cppname
yield I+I+'Py_DECREF(base_cls);'
yield I+I+('PyErr_SetString(PyExc_TypeError, "Base class %s is a'
' dynamic (Python defined) class.");' % fq_name)
yield I+I+'return false;'
yield I+'}'
last_pybase = base
pybases.add(base)
elif wrapped_base:
# base is Python wrapper type in a C++ class namespace defined locally.
yield I+'Py_INCREF(%s);' % wrapped_base
yield I+'%s->tp_base = %s;' % (cppname, wrapped_base)
yield I+'if (PyType_Ready(%s) < 0) return false;' % cppname
if not have_modname:
yield I+'PyObject *modname = PyUnicode_FromString(ThisModuleName);'
yield I+'if (modname == nullptr) return false;'
have_modname = True
yield I+('PyObject_SetAttrString((PyObject *) %s, "__module__", modname);'
% cppname)
yield I+'Py_INCREF(%s); // For PyModule_AddObject to steal.' % cppname
yield I+'return true;'
yield '}'
def InitFunction(doc, meth_ref, init, dict_):
"""Generate a function to create the module and initialize it."""
yield ''
yield 'static struct PyModuleDef Module = {'
yield I+'PyModuleDef_HEAD_INIT,'
yield I+'ThisModuleName,'
yield I+'"%s", // module doc' % doc
yield I+'-1, // module keeps state in global variables'
yield I+meth_ref+','
yield I+'nullptr, // m_slots a.k.a. m_reload'
yield I+'nullptr, // m_traverse'
yield I+'ClearImportCache // m_clear'
yield '};'
yield ''
yield 'PyObject* Init() {'
yield I+'PyObject* module = PyModule_Create(&Module);'
yield I+'if (!module) return nullptr;'
init_needs_err = False
for s in init:
assert ' return' not in s, 'use "goto err;" to handle errors'
if ' err;' in s: init_needs_err = True
yield I+s
for pair in dict_:
yield I+'if (PyModule_AddObject(module, "%s", %s) < 0) goto err;' % pair
yield I+'return module;'
if init_needs_err or dict_:
yield 'err:'
yield I+'Py_DECREF(module);'
yield I+'return nullptr;'
yield '}'
def PyModInitFunction(init_name='', modname='', ns=''):
"""Generate extension module init function."""
assert (init_name or modname) and not (init_name and modname) # xor
name = init_name or ('PyInit_' + modname)
yield ''
yield 'PyMODINIT_FUNC %s(void) {' % name
yield I+'if (!%s::Ready()) return nullptr;' % ns
yield I+'return %s::Init();' % ns
yield '}'
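# Hedged illustration (not part of the original generator): the module init
# function emitted for a made-up module "example" wrapped in C++ namespace
# "example_ns" (I is the module's indent constant).
def _ExamplePyModInit():
  lines = list(PyModInitFunction(modname='example', ns='example_ns'))
  # lines == ['',
  #           'PyMODINIT_FUNC PyInit_example(void) {',
  #           I+'if (!example_ns::Ready()) return nullptr;',
  #           I+'return example_ns::Init();',
  #           '}']
  return lines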
def WrapperClassDef(name, ctype, cname, is_iter, has_iter, iter_ns,
enable_instance_dict):
"""Generate wrapper class."""
assert not (has_iter and is_iter)
yield ''
yield 'struct %s {' % name
yield I+'PyObject_HEAD'
if is_iter:
assert not enable_instance_dict
yield I+'iterator iter;'
else:
yield I+'::clif::Instance<%s> cpp;' % ctype
yield I+'PyObject* instance_dict = nullptr;'
yield I+'PyObject* weakrefs = nullptr;'
yield '};'
if has_iter:
yield ''
yield 'namespace %s {' % iter_ns
yield 'typedef ::clif::Iterator<%s, %s> iterator;' % (cname, has_iter)
yield '}'
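# Hedged illustration (not part of the original generator): the wrapper struct
# emitted for a plain (non-iterator) class; '::example::Thing' is a made-up
# C++ type name.
def _ExampleWrapperStruct():
  lines = list(WrapperClassDef('wrapper', '::example::Thing', '::example::Thing',
                               is_iter=False, has_iter=False, iter_ns=None,
                               enable_instance_dict=False))
  # lines == ['',
  #           'struct wrapper {',
  #           I+'PyObject_HEAD',
  #           I+'::clif::Instance<::example::Thing> cpp;',
  #           I+'PyObject* instance_dict = nullptr;',
  #           I+'PyObject* weakrefs = nullptr;',
  #           '};']
  return lines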
def VirtualOverriderClass(name, pyname, cname, cfqname, isabstract, idfunc,
pcfunc, vfuncs):
"""Generate a derived redirector class."""
yield ''
# Unfortunately the multiple-inheritance order here matters, probably caused
# by one or more improper `reinterpret_cast`s.
yield 'struct %s : %s, PyObjRef {' % (name, cname)
yield I+'using %s;' % cfqname
for f in vfuncs:
for s in _VirtualFunctionCall(
idfunc(f.name.cpp_name), f, pyname, isabstract, pcfunc):
yield s
yield '};'
def TypeObject(ht_qualname, tracked_slot_groups,
tp_slots, pyname, ctor, wname, fqclassname,
abstract, iterator, trivial_dtor, subst_cpp_ptr,
enable_instance_dict, cpp_has_ext_def_ctor):
"""Generate PyTypeObject methods and table.
Args:
ht_qualname: str - e.g. Struct or Outer.Inner
tracked_slot_groups: dict - from gen.GenSlots() call
tp_slots: dict - values for PyTypeObject slots
pyname: str - Python class name
ctor: str - (WRAPped/DEFault/None) type of generated ctor
wname: str - C++ wrapper class name
fqclassname: str - FQ C++ class (being wrapped) name
abstract: bool - wrapped C++ class is abstract
iterator: str - C++ iterator object if wrapping an __iter__ class else None
trivial_dtor: bool - if C++ destructor is trivial, no need to allow threads
subst_cpp_ptr: str - C++ "replacement" class (being wrapped) if any
enable_instance_dict: bool - add __dict__ to instance
cpp_has_ext_def_ctor: bool - if the C++ class has extended ctor
Yields:
Source code for PyTypeObject and tp_alloc / tp_init / tp_free methods.
"""
# NOTE: tracked_slot_groups['tp_slots'] and tp_group are similar but
# NOT identical. tp_group has additional customizations.
if ctor:
yield ''
yield '// %s __init__' % pyname
yield 'static int _ctor(PyObject* self, PyObject* args, PyObject* kw);'
if not iterator:
yield ''
yield '// %s __new__' % pyname
yield 'static PyObject* _new(PyTypeObject* type, Py_ssize_t nitems);'
tp_slots['tp_alloc'] = '_new'
tp_slots['tp_new'] = 'PyType_GenericNew'
yield ''
yield '// %s __del__' % pyname
# Use dtor for dynamic types (derived) to wind down malloc'ed C++ obj, so
# the C++ dtors are run.
tp_slots['tp_dealloc'] = '_dtor'
yield 'static void _dtor(PyObject* self) {'
if not iterator:
yield I+'if (%s(self)->weakrefs) {' % _Cast(wname)
yield I+I+'PyObject_ClearWeakRefs(self);'
yield I+'}'
if iterator or not trivial_dtor:
yield I+'Py_BEGIN_ALLOW_THREADS'
if iterator:
yield I+iterator+'.~iterator();'
else:
# Using ~Instance() leads to AddressSanitizer: heap-use-after-free.
yield I+'%s(self)->cpp.Destruct();' % _Cast(wname)
if iterator or not trivial_dtor:
yield I+'Py_END_ALLOW_THREADS'
if not iterator and enable_instance_dict:
yield I+'Py_CLEAR(%s(self)->instance_dict);' % _Cast(wname)
yield I+'Py_TYPE(self)->tp_free(self);'
yield '}'
if not iterator:
# Use delete for static types (not derived), allocated with _new.
tp_slots['tp_free'] = '_del'
yield ''
yield 'static void _del(void* self) {'
yield I+'delete %s(self);' % _Cast(wname)
yield '}'
tp_slots['tp_init'] = '_ctor' if ctor else 'Clif_PyType_Inconstructible'
tp_slots['tp_basicsize'] = 'sizeof(%s)' % wname
tp_slots['tp_itemsize'] = tp_slots['tp_version_tag'] = '0'
tp_slots['tp_dictoffset'] = tp_slots['tp_weaklistoffset'] = '0'
tp_slots['tp_flags'] = ' | '.join(tp_slots['tp_flags'])
if not tp_slots.get('tp_doc'):
tp_slots['tp_doc'] = '"CLIF wrapper for %s"' % fqclassname
wtype = '%s_Type' % wname
yield ''
yield 'PyTypeObject* %s = nullptr;' % wtype
yield ''
yield 'static PyTypeObject* _build_heap_type() {'
# http://third_party/pybind11/include/pybind11/detail/class.h?l=571&rcl=276599738
# was used as a reference for the code generated here.
yield I+'PyHeapTypeObject *heap_type ='
yield I+I+I+'(PyHeapTypeObject *) PyType_Type.tp_alloc(&PyType_Type, 0);'
yield I+'if (!heap_type)'
yield I+I+'return nullptr;'
# ht_qualname requires Python >= 3.3 (always true for PyCLIF).
yield I+'heap_type->ht_qualname = (PyObject *) PyUnicode_FromString('
yield I+I+I+'"%s");' % ht_qualname
# Following the approach of pybind11 (ignoring the Python docs).
yield I+'Py_INCREF(heap_type->ht_qualname);'
yield I+'heap_type->ht_name = heap_type->ht_qualname;'
yield I+'PyTypeObject *ty = &heap_type->ht_type;'
yield I+'ty->tp_as_number = &heap_type->as_number;'
yield I+'ty->tp_as_sequence = &heap_type->as_sequence;'
yield I+'ty->tp_as_mapping = &heap_type->as_mapping;'
yield '#if PY_VERSION_HEX >= 0x03050000'
yield I+'ty->tp_as_async = &heap_type->as_async;'
yield '#endif'
for s in slots.GenTypeSlotsHeaptype(tracked_slot_groups, tp_slots):
yield s
if not iterator:
if enable_instance_dict:
yield (I+'pyclif_instance_dict_enable(ty, offsetof(%s, instance_dict));'
% wname)
yield I+'ty->tp_weaklistoffset = offsetof(wrapper, weakrefs);'
yield I+'return ty;'
yield '}'
if ctor:
yield ''
yield 'static int _ctor(PyObject* self, PyObject* args, PyObject* kw) {'
if abstract:
yield I+'if (Py_TYPE(self) == %s) {' % wtype
yield I+I+'return Clif_PyType_Inconstructible(self, args, kw);'
yield I+'}'
cpp = '%s(self)->cpp' % _Cast(wname)
if ctor == 'DEF':
# Skip __init__ if it's a METH_NOARGS.
yield I+('if ((args && PyTuple_GET_SIZE(args) != 0) ||'
' (kw && PyDict_Size(kw) != 0)) {')
yield I+I+('PyErr_SetString(PyExc_TypeError, "%s takes no arguments");' %
pyname)
yield I+I+'return -1;'
yield I+'}'
# We have been lucky so far because NULL initialization of the clif::Instance
# object is equivalent to constructing it with the default constructor.
# (NULL initialization happens in PyType_GenericAlloc.)
# We don't have a place to call placement new. __init__ (and so _ctor) can
# be called many times and we have no way to ensure the previous object is
# destructed properly (it may be NULL-initialized or already constructed).
yield I+'%s = ::clif::MakeShared<%s>();' % (cpp,
subst_cpp_ptr or fqclassname)
if subst_cpp_ptr:
yield I+'%s->::clif::PyObjRef::Init(self);' % cpp
yield I+'return 0;'
else: # ctor is WRAP (holds 'wrapper name')
if cpp_has_ext_def_ctor:
yield I+('if ((args && PyTuple_GET_SIZE(args) != 0) ||'
' (kw && PyDict_Size(kw) != 0)) {')
yield I+I+(
'PyErr_SetString(PyExc_TypeError, "%s takes no arguments");' %
pyname)
yield I+I+'return -1;'
yield I+'}'
yield I+'PyObject* init = %s(self);' % ctor
else:
yield I+'PyObject* init = %s(self, args, kw);' % ctor
if subst_cpp_ptr:
yield I+'if (!init) return -1;'
yield I+'Py_DECREF(init);'
yield I+'%s->::clif::PyObjRef::Init(self);' % cpp
yield I+'return 0;'
else:
yield I+'Py_XDECREF(init);'
yield I+'return init? 0: -1;'
yield '}'
if not iterator:
yield ''
yield 'static PyObject* _new(PyTypeObject* type, Py_ssize_t nitems) {'
yield I+'DCHECK(nitems == 0);'
yield I+'%s* wobj = new %s;' % (wname, wname)
if enable_instance_dict:
yield I+'wobj->instance_dict = nullptr;'
yield I+'PyObject* self = %s(wobj);' % _Cast()
yield I+'return PyObject_Init(self, %s);' % wtype
yield '}'
def _CreateInputParameter(func_name, ast_param, arg, args):
"""Returns tuple of (bool, str) and appends to args."""
# First return value is bool check_nullptr.
# Second return value is a string to create C++ stack var named arg.
# Side effect: appends the getter expression for |arg| to args.
ptype = ast_param.type
ctype = ptype.cpp_type
smartptr = (ctype.startswith('::std::unique_ptr') or
ctype.startswith('::std::shared_ptr'))
# std::function special case
if not ctype:
assert ptype.callable, 'Non-callable param has empty cpp_type'
if len(ptype.callable.returns) > 1:
raise ValueError('Callbacks may not have any output parameters, '
'%s param %s has %d' % (func_name, ast_param.name.native,
len(ptype.callable.returns)-1))
args.append('std::move(%s)' % arg)
return (
False,
'std::function<%s> %s;' % (
astutils.StdFuncParamStr(ptype.callable), arg))
# T*
if ptype.cpp_raw_pointer:
if ptype.cpp_toptr_conversion:
args.append(arg)
return (False, '%s %s;' % (ctype, arg))
t = ctype[:-1]
if ctype.endswith('*'):
if ptype.cpp_abstract:
if ptype.cpp_touniqptr_conversion:
args.append(arg+'.get()')
return (False, '::std::unique_ptr<%s> %s;' % (t, arg))
elif ptype.cpp_has_public_dtor:
# Create a copy on stack and pass its address.
if ptype.cpp_has_def_ctor:
args.append('&'+arg)
return (False, '%s %s;' % (t, arg))
else:
args.append('&%s.value()' % arg)
return (False, '::absl::optional<%s> %s;' % (t, arg))
raise TypeError("Can't convert %s to %s" % (ptype.lang_type, ctype))
if ((smartptr or ptype.cpp_abstract) and
not ptype.cpp_touniqptr_conversion and
not (ctype.startswith('::std::unique_ptr') and
ast_param.default_value == 'default')):
raise TypeError('Can\'t create "%s" variable (C++ type %s) in function %s'
', no valid conversion defined'
% (ast_param.name.native, ctype, func_name))
# unique_ptr<T>, shared_ptr<T>
if smartptr:
args.append('std::move(%s)' % arg)
return (False, '%s %s;' % (ctype, arg))
# T, [const] T&
if ptype.cpp_toptr_conversion:
args.append('*'+arg)
return (True, '%s* %s;' % (ctype, arg))
if ptype.cpp_abstract: # for AbstractType &
args.append('*'+arg)
return (False, 'std::unique_ptr<%s> %s;' % (ctype, arg))
# Create a copy on the stack (even for T&; most cases should have a to-T* conversion).
if ptype.cpp_has_def_ctor:
args.append('std::move(%s)' % arg)
return (False, '%s %s;' % (ctype, arg))
else:
args.append(arg+'.value()')
return (False, '::absl::optional<%s> %s;' % (ctype, arg))
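# Hedged illustration (not part of the original generator): for a parameter of
# C++ type '::std::shared_ptr<Foo>' the branches above declare the stack
# variable as '::std::shared_ptr<Foo> arg;' and append 'std::move(arg)' to
# args; for a plain 'Foo' with a to-Foo* conversion they declare 'Foo* arg;'
# and append '*arg', with check_nullptr set to True.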
def FunctionCall(pyname, wrapper, doc, catch, call, postcall_init,
typepostconversion, func_ast, lineno, prepend_self=None):
"""Generate PyCFunction wrapper from AST.FuncDecl func_ast.
Args:
pyname: str - Python function name (may be special: ends with @)
wrapper: str - generated function name
doc: str - C++ signature
catch: bool - catch C++ exceptions
call: str | [str] - C++ command(s) to call the wrapped function
(without "(params);" part).
postcall_init: str - C++ command; to (re)set ret0.
typepostconversion: dict(pytype, index) to convert to pytype
func_ast: AST.FuncDecl protobuf
lineno: int - .clif line number where func_ast defined
prepend_self: AST.Param - Use self as 1st parameter.
Yields:
Source code for wrapped function.
Raises:
ValueError: for non-supported default arguments
"""
ctxmgr = pyname.endswith('@')
if ctxmgr:
ctxmgr = pyname
assert ctxmgr in ('__enter__@', '__exit__@'), (
'Invalid context manager name ' + pyname)
pyname = pyname.rstrip('@')
nret = len(func_ast.returns)
return_type = astutils.FuncReturnType(func_ast) # Can't use cpp_exact_type.
# return_type is mangled to an FQN and drops &; sadly it also drops const.
void_return_type = 'void' == return_type
# Has extra func parameters for output values.
xouts = nret > (0 if void_return_type else 1)
params = [] # C++ parameter names.
nargs = len(func_ast.params)
is_ternaryfunc_slot = pyname == '__call__'
yield ''
if func_ast.classmethod:
yield '// @classmethod ' + doc
arg0 = 'cls' # Extra protection that generated code does not use 'self'.
else:
yield '// ' + doc
arg0 = 'self'
needs_kw = nargs or is_ternaryfunc_slot
yield 'static PyObject* %s(PyObject* %s%s) {' % (
wrapper, arg0, ', PyObject* args, PyObject* kw' if needs_kw else '')
if is_ternaryfunc_slot and not nargs:
yield I+('if (!ensure_no_args_and_kw_args("%s", args, kw)) return nullptr;'
% pyname)
if prepend_self:
unused_check_nullptr, out = _CreateInputParameter(
pyname+' line %d' % lineno, prepend_self, 'arg0', params)
yield I+out
yield I+'if (!Clif_PyObjAs(self, &arg0)) return nullptr;'
minargs = sum(1 for p in func_ast.params if not p.default_value)
if nargs:
yield I+'PyObject* a[%d]%s;' % (nargs, '' if minargs == nargs else '{}')
yield I+'const char* names[] = {'
for p in func_ast.params:
yield I+I+I+'"%s",' % p.name.native
yield I+I+I+'nullptr'
yield I+'};'
yield I+('if (!PyArg_ParseTupleAndKeywords(args, kw, "%s:%s", '
'const_cast<char**>(names), %s)) '
'return nullptr;' % ('O'*nargs if minargs == nargs else
'O'*minargs+'|'+'O'*(nargs-minargs), pyname,
', '.join('&a[%d]'%i for i in range(nargs))))
if minargs < nargs and not xouts:
yield I+'int nargs; // Find how many args actually passed in.'
yield I+'for (nargs = %d; nargs > %d; --nargs) {' % (nargs, minargs)
yield I+I+'if (a[nargs-1] != nullptr) break;'
yield I+'}'
# Convert input parameters from Python.
for i, p in enumerate(func_ast.params):
n = i+1
arg = 'arg%d' % n
check_nullptr, out = _CreateInputParameter(
pyname+' line %d' % lineno, p, arg, params)
yield I+out
return_arg_err = (
'return ArgError("{func_name}", names[{i}], "{ctype}", a[{i}]);'
).format(i=i, func_name=pyname, ctype=astutils.Type(p))
cvt = ('if (!Clif_PyObjAs(a[{i}], &{cvar}{postconv})) {return_arg_err}'
).format(i=i, cvar=arg, return_arg_err=return_arg_err,
# Add post conversion parameter for std::function.
postconv='' if p.type.cpp_type else ', {%s}' % ', '.join(
postconv.Initializer(t.type, typepostconversion)
for t in p.type.callable.params))
def YieldCheckNullptr(ii):
# pylint: disable=cell-var-from-loop
if check_nullptr:
yield ii+'if (%s == nullptr) {' % arg
yield ii+I+return_arg_err
yield ii+'}'
if i < minargs:
# Non-default parameter.
yield I+cvt
for s in YieldCheckNullptr(I):
yield s
else:
if xouts:
_I = '' # pylint: disable=invalid-name
else:
_I = I # pylint: disable=invalid-name
yield I+'if (nargs > %d) {' % i
# Check if we're passed kw args while skipping some defaulted C++ args.
# In that case we must substitute the missing default args with default_value.
if (p.default_value == 'default' # Matcher could not find the default.
or 'inf' in p.default_value): # W/A for b/29437257
if xouts:
raise ValueError("Can't supply the default for C++ function"
' argument. Drop =default in def %s(%s).'
% (pyname, p.name.native))
if n < nargs:
if p.type.cpp_type.startswith('::std::unique_ptr'):
yield I+I+'if (!a[%d]) { /* default-constructed smartptr */ }' % i
yield I+I+'else '+cvt
else:
yield I+I+('if (!a[{i}]) return DefaultArgMissedError('
'"{}", names[{i}]);'.format(pyname, i=i))
yield I+I+cvt
else:
yield I+I+cvt
for s in YieldCheckNullptr(I+I):
yield s
elif (p.default_value and
params[-1].startswith('&') and p.type.cpp_raw_pointer):
# Special case for a pointer to an integral type param (like int*).
raise ValueError('A default for integral type pointer argument is '
' not supported. Drop =default in def %s(%s).'
% (pyname, p.name.native))
else:
# C-cast takes care of the case where |arg| is an enum value, while
# the matcher would return an integral literal. Using static_cast
# would be ideal, but its argument should be an expression, which a
# struct value like {1, 2, 3} is not.
yield _I+I+'if (!a[%d]) %s = (%s)%s;' % (i, arg, astutils.Type(p),
p.default_value)
yield _I+I+'else '+cvt
for s in YieldCheckNullptr(_I+I):
yield s
if not xouts:
yield I+'}'
# Create input parameters for extra return values.
for n, p in enumerate(func_ast.returns):
if n or void_return_type:
yield I+'%s ret%d{};' % (astutils.Type(p), n)
params.append('&ret%d' % n)
yield I+'// Call actual C++ method.'
if isinstance(call, list):
for s in call[:-1]:
yield I+s
call = call[-1]
if not func_ast.py_keep_gil:
if nargs:
yield I+'Py_INCREF(args);'
yield I+'Py_XINCREF(kw);'
yield I+'PyThreadState* _save;'
yield I+'Py_UNBLOCK_THREADS'
optional_ret0 = False
convert_ref_to_ptr = False
if (minargs < nargs or catch) and not void_return_type:
if catch and return_type.rstrip().endswith('&'):
convert_ref_to_ptr = True
idx = return_type.rindex('&')
return_type = return_type[:idx] + '*'
if func_ast.returns[0].type.cpp_has_def_ctor:
yield I+return_type+' ret0;'
else:
# Using optional<> requires T to have T(x) and T::operator=(x) available.
# While we only need t = x, implementing that would be a pain we skip for now.
yield I+'::absl::optional<%s> ret0;' % return_type
optional_ret0 = True
if catch:
for s in _GenExceptionTry():
yield s
if minargs < nargs and not xouts:
if not void_return_type:
call = 'ret0 = '+call
yield I+'switch (nargs) {'
for n in range(minargs, nargs+1):
yield I+'case %d:' % n
if func_ast.is_extend_method and func_ast.constructor:
call_with_params = call % (func_ast.name.cpp_name,
astutils.TupleStr(params[:n]))
else:
num_params = n
# extended methods need to include `self` as the first parameter, but
# extended constructors do not.
if func_ast.is_extend_method:
num_params += 1
call_with_params = call + astutils.TupleStr(params[:num_params])
yield I+I+'%s; break;' % call_with_params
yield I+'}'
else:
if func_ast.is_extend_method and func_ast.constructor:
call = call % (func_ast.name.cpp_name, astutils.TupleStr(params))
else:
call += astutils.TupleStr(params)
_I = I if catch else '' # pylint: disable=invalid-name
if void_return_type:
yield _I+I+call+';'
elif catch:
if convert_ref_to_ptr:
yield _I+I+'ret0 = &'+call+';'
else:
yield _I+I+'ret0 = '+call+';'
else:
yield _I+I+return_type+' ret0 = '+call+';'
if catch:
for s in _GenExceptionCatch():
yield s
if postcall_init:
if void_return_type:
yield I+postcall_init
else:
yield I+'ret0'+postcall_init
if not func_ast.py_keep_gil:
yield I+'Py_BLOCK_THREADS'
if nargs:
yield I+'Py_DECREF(args);'
yield I+'Py_XDECREF(kw);'
if catch:
for s in _GenExceptionRaise():
yield s
if func_ast.postproc == '->self':
func_ast.postproc = ''
return_self = True
assert nret == 0, '-> self must have no other output parameters'
else:
return_self = False
ret = '*ret' if convert_ref_to_ptr else 'ret'
# If ctxmgr, force return self on enter, None on exit.
if nret > 1 or (func_ast.postproc or ctxmgr) and nret:
yield I+'// Convert return values to Python.'
yield I+'PyObject* p, * result_tuple = PyTuple_New(%d);' % nret
yield I+'if (result_tuple == nullptr) return nullptr;'
for i in range(nret):
yield I+'if ((p=Clif_PyObjFrom(std::move(%s%d), %s)) == nullptr) {' % (
ret, i,
postconv.Initializer(
func_ast.returns[i].type,
typepostconversion,
marked_non_raising=func_ast.marked_non_raising))
yield I+I+'Py_DECREF(result_tuple);'
yield I+I+'return nullptr;'
yield I+'}'
yield I+'PyTuple_SET_ITEM(result_tuple, %d, p);' % i
if func_ast.postproc:
yield I+'PyObject* pyproc = ImportFQName("%s");' % func_ast.postproc
yield I+'if (pyproc == nullptr) {'
yield I+I+'Py_DECREF(result_tuple);'
yield I+I+'return nullptr;'
yield I+'}'
yield I+'p = PyObject_CallObject(pyproc, result_tuple);'
yield I+'Py_DECREF(pyproc);'
yield I+'Py_CLEAR(result_tuple);'
if ctxmgr:
yield I+'if (p == nullptr) return nullptr;'
yield I+'Py_DECREF(p); // Not needed by the context manager.'
else:
yield I+'result_tuple = p;'
if ctxmgr == '__enter__@':
yield I+'Py_XDECREF(result_tuple);'
yield I+'Py_INCREF(self);'
yield I+'return self;'
elif ctxmgr == '__exit__@':
yield I+'Py_XDECREF(result_tuple);'
yield I+'Py_RETURN_NONE;'
else:
yield I+'return result_tuple;'
elif nret:
yield I+'return Clif_PyObjFrom(std::move(%s0%s), %s);' % (
ret, ('.value()' if optional_ret0 else ''),
postconv.Initializer(
func_ast.returns[0].type,
typepostconversion,
marked_non_raising=func_ast.marked_non_raising))
elif return_self or ctxmgr == '__enter__@':
yield I+'Py_INCREF(self);'
yield I+'return self;'
else:
yield I+'Py_RETURN_NONE;'
yield '}'
def _GenExceptionTry():
yield I+'PyObject* err_type = nullptr;'
yield I+'std::string err_msg{"C++ exception"};'
yield I+'try {'
def _GenExceptionCatch():
yield I+'} catch(const std::exception& e) {'
yield I+I+'err_type = PyExc_RuntimeError;'
yield I+I+'err_msg += std::string(": ") + e.what();'
yield I+'} catch (...) {'
yield I+I+'err_type = PyExc_RuntimeError;'
yield I+'}'
def _GenExceptionRaise():
yield I+'if (err_type) {'
yield I+I+'PyErr_SetString(err_type, err_msg.c_str());'
yield I+I+'return nullptr;'
yield I+'}'
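# Hedged illustration (not part of the original generator): the three helpers
# above compose into the C++ scaffolding that FunctionCall emits around a
# wrapped call when `catch` is true; WrappedCall() is a placeholder name.
def _ExampleExceptionScaffolding():
  lines = list(_GenExceptionTry())
  lines.append(I+I+'ret0 = WrappedCall();  // the real call goes here')
  lines.extend(_GenExceptionCatch())
  lines.extend(_GenExceptionRaise())
  return lines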
def _VirtualFunctionCall(fname, f, pyname, abstract, postconvinit):
"""Generate virtual redirector call wrapper from AST.FuncDecl f."""
name = f.name.cpp_name
ret = astutils.FuncReturnType(f, true_cpp_type=True)
arg = astutils.FuncParamStr(f, 'a', true_cpp_type=True)
mod = ['']
if f.cpp_const_method: mod.append('const')
if f.cpp_noexcept: mod.append('noexcept')
yield ''
yield I+'%s %s%s%s override {' % (ret, fname, arg, ' '.join(mod))
params = astutils.TupleStr('std::move(a%i)' % i for i in range(
len(f.params) + len(f.returns) - (ret != 'void')))
yield I+I+'SafeAttr impl(self(), "%s");' % f.name.native
yield I+I+'if (impl.get()) {'
ret_st = 'return ' if ret != 'void' else ''
yield I+I+I+'%s::clif::callback::Func<%s>(impl.get(), {%s})%s;' % (
ret_st, ', '.join(
[ret] +
list(astutils.ExactTypeOrType(a) for a in f.params) +
list(astutils.FuncReturns(f))),
', '.join(postconvinit(a.type) for a in f.params), params)
yield I+I+'} else {'
if abstract:
# This is only called from C++. Since f does not tell us whether it is pure
# virtual, we can't always generate the call, so we always fail in an abstract class.
yield I+I+I+('Py_FatalError("@virtual method %s.%s has no Python '
'implementation.");' % (pyname, f.name.native))
# In Python 2 Py_FatalError is not marked __attribute__((__noreturn__)),
# so to avoid -Wreturn-type warning add extra abort(). It does not hurt ;)
yield I+I+I+'abort();'
else:
yield I+I+I+ret_st + name + params + ';'
yield I+I+'}'
yield I+'}'
def CastAsCapsule(wrapped_cpp, pointer_name, wrapper):
yield ''
yield '// Implicit cast this as %s*' % pointer_name
yield 'static PyObject* %s(PyObject* self) {' % wrapper
yield I+'%s* p = ::clif::python::Get(%s);' % (pointer_name, wrapped_cpp)
yield I+'if (p == nullptr) return nullptr;'
yield I+('return PyCapsule_New(p, "%s", nullptr);') % pointer_name
yield '}'
class _NewIter(object):
"""Generate the new_iter function."""
name = 'new_iter'
def __call__(self, wrapped_iter, ns, wrapper, wrapper_type):
yield ''
yield 'PyObject* new_iter(PyObject* self) {'
yield I+'if (!ThisPtr(self)) return nullptr;'
yield I+'%s* it = PyObject_New(%s, %s);' % (wrapper, wrapper, wrapper_type)
yield I+'if (!it) return nullptr;'
yield I+'using std::equal_to; // Often a default template argument.'
yield I+'new(&it->iter) %siterator(MakeStdShared(%s));' % (ns, wrapped_iter)
yield I+'return %s(it);' % _Cast()
yield '}'
NewIter = _NewIter() # pylint: disable=invalid-name
class _IterNext(object):
"""Generate the iternext function."""
name = 'iternext'
def __call__(self, wrapped_iter, is_async, postconversion):
"""Generate tp_iternext method implementation."""
yield ''
yield 'PyObject* iternext(PyObject* self) {'
if is_async:
yield I+'PyThreadState* _save;'
yield I+'Py_UNBLOCK_THREADS'
yield I+'auto* v = %s.Next();' % wrapped_iter
if is_async:
yield I+'Py_BLOCK_THREADS'
yield I+'return v? Clif_PyObjFrom(*v, %s): nullptr;' % postconversion
yield '}'
IterNext = _IterNext() # pylint: disable=invalid-name
def FromFunctionDef(ctype, wdef, wname, flags, doc):
"""PyCFunc definition."""
assert ctype.startswith('std::function<'), repr(ctype)
return 'static PyMethodDef %s = %s;' % (wdef, _DefLine('', wname, flags, doc))
def VarGetter(name, cfunc, error, cvar, pc, is_extend=False):
"""Generate var getter."""
xdata = '' if cfunc else ', void* xdata'
yield ''
yield 'static PyObject* %s(PyObject* self%s) {' % (name, xdata)
if error and not is_extend:
yield I+error+'return nullptr;'
yield I+'return Clif_PyObjFrom(%s, %s);' % (cvar, pc)
yield '}'
def VarSetter(name, cfunc, error, cvar, v, csetter, as_str, is_extend=False):
"""Generate var setter.
Args:
name: setter function name
cfunc: (True/False) generate setter as a CFunction
error: C++ condition to return error if any
cvar: C var name to set new value to directly
v: VAR AST
csetter: C++ call expression to set var (without '(newvalue)') if any
as_str: Python str -> C str function (different for Py2/3)
is_extend: True for @extend properties in the .clif file.
Yields:
Source code for setter function.
"""
yield ''
if cfunc:
yield 'static PyObject* %s(PyObject* self, PyObject* value) {' % name
ret_error = 'return nullptr;'
ret_ok = 'Py_RETURN_NONE;'
else:
yield ('static int %s(PyObject* self, PyObject* value, void* xdata) {'
% name)
ret_error = 'return -1;'
ret_ok = 'return 0;'
yield I+'if (value == nullptr) {'
yield I+I+('PyErr_SetString(PyExc_TypeError, "Cannot delete the'
' %s attribute");' % v.name.native)
yield I+I+ret_error
yield I+'}'
if csetter:
# Workaround BUG "v.type.cpp_type not updated by Matcher", so get p[0].
yield I+'%s cval;' % v.cpp_set.params[0].type.cpp_type
yield I+'if (Clif_PyObjAs(value, &cval)) {'
if error:
yield I+I+error+ret_error
if is_extend:
yield I+I+csetter + '(*cpp, cval);'
else:
yield I+I+csetter + '(cval);'
yield I+I+ret_ok
yield I+'}'
if not csetter:
if error:
yield I+error+ret_error
yield I+'if (Clif_PyObjAs(value, &%s)) ' % cvar + ret_ok
yield I+'PyObject* s = PyObject_Repr(value);'
yield I+('PyErr_Format(PyExc_ValueError, "%s is not valid for {}:{}", s? {}'
'(s): "input");').format(v.name.native, v.type.lang_type, as_str)
yield I+'Py_XDECREF(s);'
yield I+ret_error
yield '}'
def _Cast(t='PyObject'):
assert not t.endswith('*')
return 'reinterpret_cast<%s*>' % t
|
|
# Copyright 2015-2018 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import sys
import types
from .azure_common import BaseTest, DEFAULT_SUBSCRIPTION_ID
from c7n_azure.tags import TagHelper
from c7n_azure.utils import (AppInsightsHelper, ManagedGroupHelper, Math, PortsRangeHelper,
ResourceIdParser, StringUtils, custodian_azure_send_override,
get_keyvault_secret, get_service_tag_ip_space, is_resource_group_id,
is_resource_group)
from mock import patch, Mock
from c7n.config import Bag
import pytest
try:
from importlib import reload
except Exception:
pass # Python 2.7 has reload built-in
RESOURCE_ID = (
"/subscriptions/%s/resourceGroups/"
"rgtest/providers/Microsoft.Compute/virtualMachines/nametest" % DEFAULT_SUBSCRIPTION_ID)
RESOURCE_ID_CHILD = (
"/subscriptions/%s/resourceGroups/"
"rgtest/providers/Microsoft.Sql/servers/testserver/"
"databases/testdb" % DEFAULT_SUBSCRIPTION_ID)
GUID = '00000000-0000-0000-0000-000000000000'
class UtilsTest(BaseTest):
def setUp(self):
super(UtilsTest, self).setUp()
def test_get_subscription_id(self):
self.assertEqual(ResourceIdParser.get_subscription_id(RESOURCE_ID), DEFAULT_SUBSCRIPTION_ID)
def test_get_namespace(self):
self.assertEqual(ResourceIdParser.get_namespace(RESOURCE_ID), "Microsoft.Compute")
self.assertEqual(ResourceIdParser.get_namespace(RESOURCE_ID_CHILD), "Microsoft.Sql")
def test_get_resource_group(self):
self.assertEqual(ResourceIdParser.get_resource_group(RESOURCE_ID), "rgtest")
def test_get_resource_type(self):
self.assertEqual(ResourceIdParser.get_resource_type(RESOURCE_ID), "virtualMachines")
self.assertEqual(ResourceIdParser.get_resource_type(RESOURCE_ID_CHILD), "servers/databases")
def test_get_full_type(self):
self.assertEqual(ResourceIdParser.get_full_type(RESOURCE_ID),
"Microsoft.Compute/virtualMachines")
def test_resource_name(self):
self.assertEqual(ResourceIdParser.get_resource_name(RESOURCE_ID), "nametest")
def test_math_mean(self):
self.assertEqual(Math.mean([4, 5, None, 3]), 4)
self.assertEqual(Math.mean([None]), 0)
self.assertEqual(Math.mean([3, 4]), 3.5)
def test_math_sum(self):
self.assertEqual(Math.sum([4, 5, None, 3]), 12)
self.assertEqual(Math.sum([None]), 0)
self.assertEqual(Math.sum([3.5, 4]), 7.5)
def test_string_utils_equal(self):
# Case insensitive matches
self.assertTrue(StringUtils.equal("FOO", "foo"))
self.assertTrue(StringUtils.equal("fOo", "FoO"))
self.assertTrue(StringUtils.equal("ABCDEFGH", "abcdefgh"))
self.assertFalse(StringUtils.equal("Foo", "Bar"))
# Case sensitive matches
self.assertFalse(StringUtils.equal("Foo", "foo", False))
self.assertTrue(StringUtils.equal("foo", "foo", False))
self.assertTrue(StringUtils.equal("fOo", "fOo", False))
self.assertFalse(StringUtils.equal("Foo", "Bar"))
# Strip whitespace matches
self.assertTrue(StringUtils.equal(" Foo ", "foo"))
self.assertTrue(StringUtils.equal("Foo", " foo "))
self.assertTrue(StringUtils.equal(" Foo ", "Foo", False))
self.assertTrue(StringUtils.equal("Foo", " Foo ", False))
# Returns false for non string types
self.assertFalse(StringUtils.equal(1, "foo"))
self.assertFalse(StringUtils.equal("foo", 1))
self.assertFalse(StringUtils.equal(True, False))
def test_get_tag_value(self):
resource = {'tags': {'tag1': 'value1', 'tAg2': 'VaLuE2', 'TAG3': 'VALUE3'}}
self.assertEqual(TagHelper.get_tag_value(resource, 'tag1', True), 'value1')
self.assertEqual(TagHelper.get_tag_value(resource, 'tag2', True), 'VaLuE2')
self.assertEqual(TagHelper.get_tag_value(resource, 'tag3', True), 'VALUE3')
def test_get_ports(self):
self.assertEqual(PortsRangeHelper.get_ports_set_from_string("5, 4-5, 9"), {4, 5, 9})
rule = {'properties': {'destinationPortRange': '10-12'}}
self.assertEqual(PortsRangeHelper.get_ports_set_from_rule(rule), {10, 11, 12})
rule = {'properties': {'destinationPortRanges': ['80', '10-12']}}
self.assertEqual(PortsRangeHelper.get_ports_set_from_rule(rule), {10, 11, 12, 80})
def test_validate_ports_string(self):
self.assertEqual(PortsRangeHelper.validate_ports_string('80'), True)
self.assertEqual(PortsRangeHelper.validate_ports_string('22-26'), True)
self.assertEqual(PortsRangeHelper.validate_ports_string('80,22'), True)
self.assertEqual(PortsRangeHelper.validate_ports_string('80,22-26'), True)
self.assertEqual(PortsRangeHelper.validate_ports_string('80,22-26,30-34'), True)
self.assertEqual(PortsRangeHelper.validate_ports_string('65537'), False)
self.assertEqual(PortsRangeHelper.validate_ports_string('-1'), False)
self.assertEqual(PortsRangeHelper.validate_ports_string('10-8'), False)
self.assertEqual(PortsRangeHelper.validate_ports_string('80,30,25-65538'), False)
self.assertEqual(PortsRangeHelper.validate_ports_string('65536-65537'), False)
def test_get_ports_strings_from_list(self):
self.assertEqual(PortsRangeHelper.get_ports_strings_from_list([]),
[])
self.assertEqual(PortsRangeHelper.get_ports_strings_from_list([10, 11]),
['10-11'])
self.assertEqual(PortsRangeHelper.get_ports_strings_from_list([10, 12, 13, 14]),
['10', '12-14'])
self.assertEqual(PortsRangeHelper.get_ports_strings_from_list([10, 12, 13, 14, 20, 21, 22]),
['10', '12-14', '20-22'])
def test_build_ports_dict(self):
securityRules = [
{'properties': {'destinationPortRange': '80-84',
'priority': 100,
'direction': 'Outbound',
'access': 'Allow',
'protocol': 'TCP'}},
{'properties': {'destinationPortRange': '85-89',
'priority': 110,
'direction': 'Outbound',
'access': 'Allow',
'protocol': 'UDP'}},
{'properties': {'destinationPortRange': '80-84',
'priority': 120,
'direction': 'Inbound',
'access': 'Deny',
'protocol': 'TCP'}},
{'properties': {'destinationPortRange': '85-89',
'priority': 130,
'direction': 'Inbound',
'access': 'Deny',
'protocol': 'UDP'}},
{'properties': {'destinationPortRange': '80-89',
'priority': 140,
'direction': 'Inbound',
'access': 'Allow',
'protocol': '*'}}]
nsg = {'properties': {'securityRules': securityRules}}
self.assertEqual(PortsRangeHelper.build_ports_dict(nsg, 'Inbound', 'TCP'),
{k: k > 84 for k in range(80, 90)})
self.assertEqual(PortsRangeHelper.build_ports_dict(nsg, 'Inbound', 'UDP'),
{k: k < 85 for k in range(80, 90)})
self.assertEqual(PortsRangeHelper.build_ports_dict(nsg, 'Inbound', '*'),
{k: False for k in range(80, 90)})
self.assertEqual(PortsRangeHelper.build_ports_dict(nsg, 'Outbound', 'TCP'),
{k: True for k in range(80, 85)})
self.assertEqual(PortsRangeHelper.build_ports_dict(nsg, 'Outbound', 'UDP'),
{k: True for k in range(85, 90)})
self.assertEqual(PortsRangeHelper.build_ports_dict(nsg, 'Outbound', '*'),
{k: True for k in range(80, 90)})
def test_snake_to_camel(self):
self.assertEqual(StringUtils.snake_to_camel(""), "")
self.assertEqual(StringUtils.snake_to_camel("test"), "test")
self.assertEqual(StringUtils.snake_to_camel("test_abc"), "testAbc")
self.assertEqual(StringUtils.snake_to_camel("test_abc_def"), "testAbcDef")
def test_naming_hash(self):
source = 'Lorem ipsum dolor sit amet'
source2 = 'amet sit dolor ipsum Lorem'
self.assertEqual(StringUtils.naming_hash(source), '16aba539')
self.assertEqual(StringUtils.naming_hash(source, 10), '16aba5393a')
self.assertNotEqual(StringUtils.naming_hash(source), StringUtils.naming_hash(source2))
@patch('azure.mgmt.applicationinsights.operations.ComponentsOperations.get',
return_value=type(str('result_data'), (), {'instrumentation_key': GUID}))
def test_app_insights_get_instrumentation_key(self, mock_handler_run):
self.assertEqual(AppInsightsHelper.get_instrumentation_key('azure://' + GUID), GUID)
self.assertEqual(AppInsightsHelper.get_instrumentation_key('azure://resourceGroup/name'),
GUID)
mock_handler_run.assert_called_once_with('resourceGroup', 'name')
@patch('c7n_azure.utils.send_logger.debug')
def test_custodian_azure_send_override_200(self, logger):
mock = Mock()
mock.send = types.MethodType(custodian_azure_send_override, mock)
response_dict = {
'headers': {'x-ms-ratelimit-remaining-subscription-reads': '12000'},
'status_code': 200
}
mock.orig_send.return_value = type(str('response'), (), response_dict)
mock.send('')
self.assertEqual(mock.orig_send.call_count, 1)
self.assertEqual(logger.call_count, 2)
@patch('c7n_azure.utils.send_logger.debug')
@patch('c7n_azure.utils.send_logger.warning')
def test_custodian_azure_send_override_429(self, logger_debug, logger_warning):
mock = Mock()
mock.send = types.MethodType(custodian_azure_send_override, mock)
response_dict = {
'headers': {'Retry-After': 0},
'status_code': 429
}
mock.orig_send.return_value = type(str('response'), (), response_dict)
mock.send('')
self.assertEqual(mock.orig_send.call_count, 3)
self.assertEqual(logger_debug.call_count, 3)
self.assertEqual(logger_warning.call_count, 3)
@patch('c7n_azure.utils.send_logger.error')
def test_custodian_azure_send_override_429_long_retry(self, logger):
mock = Mock()
mock.send = types.MethodType(custodian_azure_send_override, mock)
response_dict = {
'headers': {'Retry-After': 60},
'status_code': 429
}
mock.orig_send.return_value = type(str('response'), (), response_dict)
mock.send('')
self.assertEqual(mock.orig_send.call_count, 1)
self.assertEqual(logger.call_count, 1)
managed_group_return_value = Bag({
'properties': {
'name': 'dev',
'type': '/providers/Micrsoft.Management/managementGroups',
'children': [
Bag({'name': DEFAULT_SUBSCRIPTION_ID,
'type': '/subscriptions'}),
Bag({'name': 'east',
'type': '/providers/Microsoft.Management/managementGroups',
'children': [{
'type': '/subscriptions',
'name': GUID}]})
],
}
})
managed_group_return_value['serialize'] = lambda self=managed_group_return_value: self
@patch((
'azure.mgmt.managementgroups.operations'
'.management_groups_operations.ManagementGroupsOperations.get'),
return_value=managed_group_return_value)
def test_managed_group_helper(self, _1):
sub_ids = ManagedGroupHelper.get_subscriptions_list('test-group', "")
self.assertEqual(sub_ids, [DEFAULT_SUBSCRIPTION_ID, GUID])
@patch('msrestazure.azure_active_directory.MSIAuthentication')
def test_get_keyvault_secret(self, _1):
mock = Mock()
mock.value = '{"client_id": "client", "client_secret": "secret"}'
with patch('azure.common.credentials.ServicePrincipalCredentials.__init__',
return_value=None), \
patch('azure.keyvault.v7_0.KeyVaultClient.get_secret', return_value=mock):
reload(sys.modules['c7n_azure.utils'])
result = get_keyvault_secret(None, 'https://testkv.vault.net/secrets/testsecret/123412')
self.assertEqual(result, mock.value)
# Test relies on substitute data in Azure Common, not designed for live data
@pytest.mark.skiplive
def test_get_service_tag_ip_space(self):
# Get with region
result = get_service_tag_ip_space('ApiManagement', 'WestUS')
self.assertEqual(3, len(result))
self.assertEqual({"13.64.39.16/32",
"40.112.242.148/31",
"40.112.243.240/28"}, set(result))
# Get without region
result = get_service_tag_ip_space('ApiManagement')
self.assertEqual(5, len(result))
self.assertEqual({"13.69.64.76/31",
"13.69.66.144/28",
"23.101.67.140/32",
"51.145.179.78/32",
"137.117.160.56/32"}, set(result))
# Invalid tag
result = get_service_tag_ip_space('foo')
self.assertEqual(0, len(result))
def test_is_resource_group_id(self):
self.assertTrue(is_resource_group_id('/subscriptions/GUID/resourceGroups/rg'))
self.assertTrue(is_resource_group_id('/subscriptions/GUID/resourceGroups/rg/'))
self.assertTrue(is_resource_group_id('/Subscriptions/GUID/resourcegroups/rg'))
self.assertFalse(is_resource_group_id('/subscriptions/GUID/rg/'))
self.assertFalse(is_resource_group_id('subscriptions/GUID/rg/'))
self.assertFalse(is_resource_group_id('/GUID/rg/'))
self.assertFalse(is_resource_group_id('/subscriptions/GUID/rg/providers/vm/vm'))
self.assertFalse(is_resource_group_id('/subscriptions/GUID/rg/providers'))
self.assertFalse(is_resource_group_id('/subscriptions/GUID/rg/p'))
def test_is_resource_group(self):
self.assertTrue(is_resource_group({'type': 'resourceGroups'}))
self.assertFalse(is_resource_group({'type': 'virtualMachines'}))
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 17 16:59:50 2015
@author: moph8ia2
"""
import random
import numpy as np
import os
import sys
import time
def anyarg(listing,test):
for elem in listing:
if elem == test:
return True
return False
def naivesum(a):
b = np.float64(0.0)
for i in xrange(a.size):
b = b + a[i]
return b
def bignaivesum(a):
b = np.float128(0.0)
for i in xrange(a.size):
b = b + a[i]
return b
def sortsum(a):
sortnum = np.sort(a, axis=-1)
for i in xrange(sortnum.size):
if ( i == 0):
b = sortnum[i]
else:
b = b + sortnum[i]
return b
def bigsortsum(a):
sortnum = np.sort(a, axis=-1)
b = np.float128(0.0)
for i in xrange(sortnum.size):
b = b + sortnum[i]
return b
def kahansum(a):
b = np.float64(0.0)
c = np.float64(0.0)
y = np.float64(0.0)
t = np.float64(0.0)
for i in xrange(a.size):
y = a[i] - c
t = b + y
c = (t - b) - y
b = t
return b
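# Note (added for clarity): the compensation term c captures the low-order
# bits that are rounded away each time an element is added to the running
# total b, so the worst-case error stays roughly constant instead of growing
# with the number of terms as it can in naivesum above.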
def splitList(a):
b = a[0:a.size//2]
c = a[a.size//2:]
return(b,c)
def pairsum(a):
s = np.float128(0.0)
if a.size <= 100:
for i in a:
s = s + i
else:
b,c = splitList(a)
s = pairsum(b) + pairsum(c)
return s
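# Note (added for clarity): pairsum recursively splits the array and adds the
# two halves, so rounding errors accumulate over O(log n) additions per element
# rather than O(n) as in naivesum; below 100 elements it falls back to a simple
# float128 loop to limit recursion overhead.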
def bigkahansum(a):
b = np.float128(0.0)
c = np.float128(0.0)
y = np.float128(0.0)
t = np.float128(0.0)
for i in xrange(a.size):
y = a[i] - c
t = b + y
c = (t - b) - y
b = t
return b
def histpairsum(a,n):
b = np.zeros(n, dtype=np.float64)
c1 = 0
c2 = 0
c3 = 0
for i in a:
if i < 1:
c1 = c1 + 1
for i in xrange(a.size):
if a[i] < 1:
b[c2] = a[i]
c2 = c2 + 1
else:
b[c1+c3] = a[i]
c3 = c3 + 1
totsum = np.sum(b)
return totsum
def intsum(a,b):
c = np.float128(0.0)
d = np.float128(0.0)
c = pairsum(a)
d = pairsum(b)
# for i in xrange(a.size):
# c = c + a[i]
# d = d + b[i]
d = d / 1e12
d = d + c
return d
def fixsum(a,b):
c = np.int64(0)
d = np.int64(0)
e = np.float128(0.0)
for i in xrange(a.size):
c = c + a[i]
d = d + b[i]
e = np.float128(d)
e = e / 1e20
e = e + np.float128(c)
return c
#First set the random number generator
random.seed(os.urandom(64))
#Then set the number of iterations
#n = 10000000
n = 100000
#Then set the primary random number array
numbers = np.empty([n], dtype=np.float64)
#Then set the exponential container
expnumbers = np.empty([n], dtype=np.float64)
bigexpnumbers = np.empty([n], dtype=np.float128)
#Then start the generation loop
for i in xrange(n):
numbers[i] = random.uniform(-20,20)
#get the floored version of each number
floored = np.floor(numbers)
#Now get the exponentials
expnumbers = np.exp(numbers)
floor_expnum = np.floor(expnumbers)
int_expnum = np.int64(floor_expnum)
float_remainder_expnum = expnumbers - floor_expnum
int_re_expnum = np.int64(float_remainder_expnum*1e12)
fix_rexpnum = np.int64(expnumbers*1e20)
#Now lets do some sums
#In all cases calculate the naive summation
sortnum = np.sort(expnumbers)
bigout = np.sum(sortnum,dtype=np.float128)
nout = naivesum(expnumbers)
nbigout = bignaivesum(expnumbers)
ndelta = nbigout - nout
print "naive delta: %.10f" %(ndelta)
if ( anyarg(sys.argv[1:], "kahan")):
start = time.clock()
for i in xrange(1):
kout = kahansum(expnumbers)
end = time.clock()
dtime = end - start
print "time elapsed: %.10f" %(dtime)
kbigout = bigkahansum(expnumbers)
kdelta = bigout - kout
kbigdelta = bigout - kbigout
print "kdelta: %.10f" %(kdelta)
print "kbigdelta: %.10f" %(kbigdelta)
if ( anyarg(sys.argv[1:], "sort") ):
start = time.clock()
for i in xrange(1):
sout = sortsum(expnumbers)
end = time.clock()
dtime = end - start
sbigout = bigsortsum(expnumbers)
sdelta = bigout - sout
sbigdelta = bigout - sbigout
print "time elapsed: %.10f" %(dtime)
print "sdelta: %.10f" %(sdelta)
print "sbigdelta: %.10f" %(sbigdelta)
if ( anyarg(sys.argv[1:], "pairwise")):
start = time.clock()
for i in xrange(1):
pout = pairsum(expnumbers)
end = time.clock()
dtime = end - start
pbigout = np.sum(expnumbers,dtype=np.float128)
pdelta = bigout - pout
pbigdelta = bigout - pbigout
print "time elapsed: %.10f" %(dtime)
print "pdelta: %.10f" %(pdelta)
print "pbigdelta: %.10f" %(pbigdelta)
if ( anyarg(sys.argv[1:], "sortpair")):
sortnum = np.sort(expnumbers, axis=-1)
spout = np.sum(sortnum)
spbigout = np.sum(sortnum,dtype=np.float128)
spdelta = bigout - spout
spbigdelta = bigout - spbigout
print "spdelta: %.10f" %(spdelta)
print "spbigdelta: %.10f" %(spbigdelta)
if ( anyarg(sys.argv[1:], "histpair")):
start = time.clock()
hpout = histpairsum(expnumbers,n)
end = time.clock()
dtime = end - start
print "time elapsed: %.10f" %(dtime)
hpdelta = bigout - hpout
print "hpdelta: %.10f" %(hpdelta)
if ( anyarg(sys.argv[1:], "intsum")):
start = time.clock()
for i in xrange(1):
pout = intsum(int_expnum, int_re_expnum)
end = time.clock()
dtime = end - start
pdelta = bigout - pout
print "time elapsed: %.10f" %(dtime)
print "intsum delta: %.10f" %(pdelta)
if ( anyarg(sys.argv[1:], "fixsum")):
start = time.clock()
for i in xrange(1):
pout = fixsum(int_expnum, fix_rexpnum)
end = time.clock()
dtime = end - start
pdelta = bigout - pout
print "time elapsed: %.10f" %(dtime)
print "fixsum delta: %.10f" %(pdelta)
#Now lets print
#big deltas
|
|
"""
Images Pipeline
See documentation in topics/media-pipeline.rst
"""
import functools
import hashlib
import six
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from PIL import Image
from scrapy.utils.misc import md5sum
from scrapy.utils.python import to_bytes
from scrapy.http import Request
from scrapy.settings import Settings
from scrapy.exceptions import DropItem
#TODO: from scrapy.pipelines.media import MediaPipeline
from scrapy.pipelines.files import FileException, FilesPipeline
class NoimagesDrop(DropItem):
"""Product with no images exception"""
class ImageException(FileException):
"""General image error exception"""
class ImagesPipeline(FilesPipeline):
"""Abstract pipeline that implement the image thumbnail generation logic
"""
MEDIA_NAME = 'image'
# Uppercase attributes kept for backward compatibility with code that subclasses
# ImagesPipeline. They may be overridden by settings.
MIN_WIDTH = 0
MIN_HEIGHT = 0
EXPIRES = 90
THUMBS = {}
DEFAULT_IMAGES_URLS_FIELD = 'image_urls'
DEFAULT_IMAGES_RESULT_FIELD = 'images'
def __init__(self, store_uri, download_func=None, settings=None):
super(ImagesPipeline, self).__init__(store_uri, settings=settings,
download_func=download_func)
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
resolve = functools.partial(self._key_for_pipe,
base_class_name="ImagesPipeline",
settings=settings)
self.expires = settings.getint(
resolve("IMAGES_EXPIRES"), self.EXPIRES
)
if not hasattr(self, "IMAGES_RESULT_FIELD"):
self.IMAGES_RESULT_FIELD = self.DEFAULT_IMAGES_RESULT_FIELD
if not hasattr(self, "IMAGES_URLS_FIELD"):
self.IMAGES_URLS_FIELD = self.DEFAULT_IMAGES_URLS_FIELD
self.images_urls_field = settings.get(
resolve('IMAGES_URLS_FIELD'),
self.IMAGES_URLS_FIELD
)
self.images_result_field = settings.get(
resolve('IMAGES_RESULT_FIELD'),
self.IMAGES_RESULT_FIELD
)
self.min_width = settings.getint(
resolve('IMAGES_MIN_WIDTH'), self.MIN_WIDTH
)
self.min_height = settings.getint(
resolve('IMAGES_MIN_HEIGHT'), self.MIN_HEIGHT
)
self.thumbs = settings.get(
resolve('IMAGES_THUMBS'), self.THUMBS
)
@classmethod
def from_settings(cls, settings):
s3store = cls.STORE_SCHEMES['s3']
s3store.AWS_ACCESS_KEY_ID = settings['AWS_ACCESS_KEY_ID']
s3store.AWS_SECRET_ACCESS_KEY = settings['AWS_SECRET_ACCESS_KEY']
s3store.POLICY = settings['IMAGES_STORE_S3_ACL']
store_uri = settings['IMAGES_STORE']
return cls(store_uri, settings=settings)
def file_downloaded(self, response, request, info):
return self.image_downloaded(response, request, info)
def image_downloaded(self, response, request, info):
checksum = None
for path, image, buf in self.get_images(response, request, info):
if checksum is None:
buf.seek(0)
checksum = md5sum(buf)
width, height = image.size
self.store.persist_file(
path, buf, info,
meta={'width': width, 'height': height},
headers={'Content-Type': 'image/jpeg'})
return checksum
def get_images(self, response, request, info):
path = self.file_path(request, response=response, info=info)
orig_image = Image.open(BytesIO(response.body))
width, height = orig_image.size
if width < self.min_width or height < self.min_height:
raise ImageException("Image too small (%dx%d < %dx%d)" %
(width, height, self.min_width, self.min_height))
image, buf = self.convert_image(orig_image)
yield path, image, buf
for thumb_id, size in six.iteritems(self.thumbs):
thumb_path = self.thumb_path(request, thumb_id, response=response, info=info)
thumb_image, thumb_buf = self.convert_image(image, size)
yield thumb_path, thumb_image, thumb_buf
def convert_image(self, image, size=None):
if image.format == 'PNG' and image.mode == 'RGBA':
background = Image.new('RGBA', image.size, (255, 255, 255))
background.paste(image, image)
image = background.convert('RGB')
elif image.mode != 'RGB':
image = image.convert('RGB')
if size:
image = image.copy()
image.thumbnail(size, Image.ANTIALIAS)
buf = BytesIO()
image.save(buf, 'JPEG')
return image, buf
def get_media_requests(self, item, info):
return [Request(x) for x in item.get(self.images_urls_field, [])]
def item_completed(self, results, item, info):
if isinstance(item, dict) or self.images_result_field in item.fields:
item[self.images_result_field] = [x for ok, x in results if ok]
return item
def file_path(self, request, response=None, info=None):
## start of deprecation warning block (can be removed in the future)
def _warn():
from scrapy.exceptions import ScrapyDeprecationWarning
import warnings
warnings.warn('ImagesPipeline.image_key(url) and file_key(url) methods are deprecated, '
'please use file_path(request, response=None, info=None) instead',
category=ScrapyDeprecationWarning, stacklevel=1)
# check if called from image_key or file_key with url as first argument
if not isinstance(request, Request):
_warn()
url = request
else:
url = request.url
# detect if file_key() or image_key() methods have been overridden
if not hasattr(self.file_key, '_base'):
_warn()
return self.file_key(url)
elif not hasattr(self.image_key, '_base'):
_warn()
return self.image_key(url)
## end of deprecation warning block
image_guid = hashlib.sha1(to_bytes(url)).hexdigest() # change to request.url after deprecation
return 'full/%s.jpg' % (image_guid)
def thumb_path(self, request, thumb_id, response=None, info=None):
## start of deprecation warning block (can be removed in the future)
def _warn():
from scrapy.exceptions import ScrapyDeprecationWarning
import warnings
warnings.warn('ImagesPipeline.thumb_key(url) method is deprecated, please use '
'thumb_path(request, thumb_id, response=None, info=None) instead',
category=ScrapyDeprecationWarning, stacklevel=1)
# check if called from thumb_key with url as first argument
if not isinstance(request, Request):
_warn()
url = request
else:
url = request.url
# detect if thumb_key() method has been overridden
if not hasattr(self.thumb_key, '_base'):
_warn()
return self.thumb_key(url, thumb_id)
## end of deprecation warning block
thumb_guid = hashlib.sha1(to_bytes(url)).hexdigest() # change to request.url after deprecation
return 'thumbs/%s/%s.jpg' % (thumb_id, thumb_guid)
# deprecated
def file_key(self, url):
return self.image_key(url)
file_key._base = True
# deprecated
def image_key(self, url):
return self.file_path(url)
image_key._base = True
# deprecated
def thumb_key(self, url, thumb_id):
return self.thumb_path(url, thumb_id)
thumb_key._base = True
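# Hedged usage sketch (not part of this module): a project's settings.py might
# enable and tune this pipeline roughly as below. The IMAGES_* names are the
# ones resolved above; the pipeline path, store path, and sizes are
# illustrative only.
#
#   ITEM_PIPELINES = {'scrapy.pipelines.images.ImagesPipeline': 1}
#   IMAGES_STORE = '/path/to/images'
#   IMAGES_MIN_WIDTH = 110
#   IMAGES_MIN_HEIGHT = 110
#   IMAGES_THUMBS = {'small': (50, 50), 'big': (270, 270)}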
|
|
#!/usr/bin/env python
# $Rev$
# $URL$
# Imported from //depot/prj/plan9topam/master/code/plan9topam.py#4 on
# 2009-06-15.
"""Command line tool to convert from Plan 9 image format to PNG format.
Plan 9 image format description:
http://plan9.bell-labs.com/magic/man2html/6/image
"""
# http://www.python.org/doc/2.3.5/lib/module-itertools.html
import itertools
# http://www.python.org/doc/2.3.5/lib/module-re.html
import re
# http://www.python.org/doc/2.3.5/lib/module-sys.html
import sys
class Error(Exception):
"""Some sort of Plan 9 image error."""
def block(s, n):
return zip(* [iter(s)] * n)
def convert(f, output=sys.stdout):
"""Convert Plan 9 file to PNG format. Works with either uncompressed
or compressed files.
"""
r = f.read(11)
if r == 'compressed\n':
png(output, *decompress(f))
else:
png(output, *glue(f, r))
def glue(f, r):
"""Return (metadata, stream) pair where `r` is the initial portion of
the metadata that has already been read from the stream `f`.
"""
r = r + f.read(60 - len(r))
return (r, f)
def meta(r):
"""Convert 60 character string `r`, the metadata from an image file.
Returns a 5-element sequence (*chan*, *minx*, *miny*, *limx*, *limy*);
note that it is actually returned as a list, not a tuple.
As per http://plan9.bell-labs.com/magic/man2html/6/image the metadata
comprises 5 words separated by blanks. As it happens each word starts
at an index that is a multiple of 12, but this routine does not care
about that."""
r = r.split()
# :todo: raise FormatError
assert len(r) == 5
r = [r[0]] + map(int, r[1:])
return r
def bitdepthof(pixel):
"""Return the bitdepth for a Plan9 pixel format string."""
maxd = 0
for c in re.findall(r'[a-z]\d*', pixel):
if c[0] != 'x':
maxd = max(maxd, int(c[1:]))
return maxd
def maxvalof(pixel):
"""Return the netpbm MAXVAL for a Plan9 pixel format string."""
bitdepth = bitdepthof(pixel)
return (2 ** bitdepth) - 1
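def _example_pixel_depths():
    """Hedged illustration (not part of the original tool): Plan 9 channel
    strings such as 'r8g8b8' are 8 bits per channel, and padding channels
    ('x...') are ignored when computing the depth."""
    assert bitdepthof('r8g8b8') == 8
    assert bitdepthof('x8r8g8b8') == 8
    assert maxvalof('m8') == 255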
def pixmeta(metadata, f):
"""Convert (uncompressed) Plan 9 image file to pair of (*metadata*,
*pixels*). This is intended to be used by PyPNG format. *metadata*
is the metadata returned in a dictionary, *pixels* is an iterator that
yields each row in boxed row flat pixel format.
`f`, the input file, should be cued up to the start of the image data.
"""
chan, minx, miny, limx, limy = metadata
rows = limy - miny
width = limx - minx
nchans = len(re.findall('[a-wyz]', chan))
alpha = 'a' in chan
# Iverson's convention for the win!
ncolour = nchans - alpha
greyscale = ncolour == 1
bitdepth = bitdepthof(chan)
maxval = 2**bitdepth - 1
# PNG style metadata
meta = dict(size=(width, rows), bitdepth=bitdepthof(chan),
greyscale=greyscale, alpha=alpha, planes=nchans)
return itertools.imap(
lambda x: itertools.chain(*x),
block(unpack(f, rows, width, chan, maxval), width)), meta
def png(out, metadata, f):
"""Convert to PNG format.
`metadata` should be a Plan9 5-tuple;
`f` the input file (see :meth:`pixmeta`).
"""
import png
pixels, meta = pixmeta(metadata, f)
p = png.Writer(**meta)
p.write(out, pixels)
def unpack(f, rows, width, pixel, maxval):
"""Unpack `f` into pixels.
Assumes the pixel format is such that
the depth is either a multiple or a divisor of 8.
`f` is assumed to be an iterator that returns blocks of input such
that each block contains a whole number of pixels. An iterator is
returned that yields each pixel as an n-tuple. `pixel` describes the
pixel format using the Plan9 syntax ("k8", "r8g8b8", and so on).
"""
def mask(w):
"""An integer, to be used as a mask, with bottom `w` bits set to 1."""
return (1 << w) - 1
def deblock(f, depth, width):
"""A "packer" used to convert multiple bytes into single pixels.
`depth` is the pixel depth in bits (>= 8), `width` is the row width in
pixels.
"""
w = depth // 8
i = 0
for block in f:
for i in range(len(block) // w):
p = block[w * i: w * (i + 1)]
i += w
# Convert p to little-endian integer, x
x = 0
s = 1 # scale
for j in p:
x += s * ord(j)
s <<= 8
yield x
def bitfunge(f, depth, width):
"""A "packer" used to convert single bytes into multiple pixels.
Depth is the pixel depth (< 8), width is the row width in pixels.
"""
for block in f:
col = 0
for i in block:
x = ord(i)
for j in range(8 / depth):
yield x >> (8 - depth)
col += 1
if col == width:
# A row-end forces a new byte even if
# we haven't consumed all of the current byte.
# Effectively rows are bit-padded to make
# a whole number of bytes.
col = 0
break
x <<= depth
# number of bits in each channel
chan = map(int, re.findall(r'\d+', pixel))
# type of each channel
type = re.findall('[a-z]', pixel)
depth = sum(chan)
# According to the value of depth pick a "packer" that either gathers
# multiple bytes into a single pixel (for depth >= 8) or split bytes
# into several pixels (for depth < 8)
if depth >= 8:
assert depth % 8 == 0
packer = deblock
else:
assert 8 % depth == 0
packer = bitfunge
for x in packer(f, depth, width):
# x is the pixel as an unsigned integer
o = []
# This is a bit yucky. Extract each channel from the _most_
# significant part of x.
for j in range(len(chan)):
v = (x >> (depth - chan[j])) & mask(chan[j])
x <<= chan[j]
if type[j] != 'x':
# scale to maxval
v = v * float(maxval) / mask(chan[j])
v = int(v + 0.5)
o.append(v)
yield o
def decompress(f):
"""Decompress a Plan 9 image file. Assumes f is already cued past the
initial 'compressed\n' string.
"""
r = meta(f.read(60))
return r, decomprest(f, r[4])
def decomprest(f, rows):
"""Iterator that decompresses the rest of a file once the metadata
have been consumed."""
row = 0
while row < rows:
row, o = deblock(f)
yield o
def deblock(f):
"""Decompress a single block from a compressed Plan 9 image file.
Each block starts with 2 decimal strings of 12 bytes each. Yields a
sequence of (row, data) pairs where row is the total number of rows
processed according to the file format and data is the decompressed
data for a set of rows."""
row = int(f.read(12))
size = int(f.read(12))
if not (0 <= size <= 6000):
raise Error('block has invalid size; not a Plan 9 image file?')
# Since each block is at most 6000 bytes we may as well read it all in
# one go.
d = f.read(size)
i = 0
o = []
while i < size:
x = ord(d[i])
i += 1
if x & 0x80:
x = (x & 0x7f) + 1
lit = d[i: i + x]
i += x
o.extend(lit)
continue
# x's high-order bit is 0
length = (x >> 2) + 3
# Offset is made from bottom 2 bits of x and all 8 bits of next
# byte. http://plan9.bell-labs.com/magic/man2html/6/image doesn't
# say whether x's 2 bits are most significant or least significant.
# But it is clear from inspecting a random file,
# http://plan9.bell-labs.com/sources/plan9/sys/games/lib/sokoban/images/cargo.bit
# that x's 2 bits are most significant.
offset = (x & 3) << 8
offset |= ord(d[i])
i += 1
# Note: complement operator neatly maps (0 to 1023) to (-1 to
# -1024). Adding len(o) gives a (non-negative) offset into o from
# which to start indexing.
offset = ~offset + len(o)
if offset < 0:
raise Error('byte offset indexes off the beginning of '
'the output buffer; not a Plan 9 image file?')
for j in range(length):
o.append(o[offset + j])
return row, ''.join(o)
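# Worked example (added for illustration): a control byte x = 0x05 has its high
# bit clear, so length = (0x05 >> 2) + 3 = 4 and the top offset bits are
# (0x05 & 3) << 8 = 256; if the next byte is 0x02 the raw offset is 258, and
# ~258 + len(o) points 259 bytes back from the current end of the output.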
def main(argv=None):
if argv is None:
argv = sys.argv
if len(sys.argv) <= 1:
return convert(sys.stdin)
else:
return convert(open(argv[1], 'rb'))
if __name__ == '__main__':
sys.exit(main())
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FileBlobIndex'
db.create_table(
'sentry_fileblobindex', (
(
'id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(
primary_key=True
)
), (
'file', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.File']
)
), (
'blob', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.FileBlob']
)
), (
'offset',
self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')()
),
)
)
db.send_create_signal('sentry', ['FileBlobIndex'])
# Adding unique constraint on 'FileBlobIndex', fields ['file', 'blob', 'offset']
db.create_unique('sentry_fileblobindex', ['file_id', 'blob_id', 'offset'])
def backwards(self, orm):
# Removing unique constraint on 'FileBlobIndex', fields ['file', 'blob', 'offset']
db.delete_unique('sentry_fileblobindex', ['file_id', 'blob_id', 'offset'])
# Deleting model 'FileBlobIndex'
db.delete_table('sentry_fileblobindex')
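# Rough sketch (not part of this migration, names assumed) of the Django model
# that forwards() materialises, using Sentry's bounded/flexible field classes
# as frozen in the `models` dict below:
#
#   class FileBlobIndex(Model):
#       file = FlexibleForeignKey('sentry.File')
#       blob = FlexibleForeignKey('sentry.FileBlob')
#       offset = BoundedPositiveIntegerField()
#
#       class Meta:
#           unique_together = (('file', 'blob', 'offset'),)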
models = {
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Event']",
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_expires': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2015, 12, 29, 0, 0)',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'upstream_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.broadcastseen': {
'Meta': {
'unique_together': "(('broadcast', 'user'),)",
'object_name': 'BroadcastSeen'
},
'broadcast': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Broadcast']"
}
),
'date_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.event': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'event_set'",
'null': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.eventuser': {
'Meta': {
'unique_together':
"(('project', 'ident'), ('project', 'hash'))",
'object_name':
'EventUser',
'index_together':
"(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'username':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
})
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'legacy_blob'",
'null': 'True',
'to': "orm['sentry.FileBlob']"
}
),
'blobs': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.FileBlob']",
'through': "orm['sentry.FileBlobIndex']",
'symmetrical': 'False'
}
),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.fileblob': {
'Meta': {
'object_name': 'FileBlob'
},
'checksum':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
)
},
'sentry.fileblobindex': {
'Meta': {
'unique_together': "(('file', 'blob', 'offset'),)",
'object_name': 'FileBlobIndex'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.FileBlob']"
}
),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.group': {
'Meta': {
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupresolution': {
'Meta': {
'object_name': 'GroupResolution'
},
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.groupsnooze': {
'Meta': {
'object_name': 'GroupSnooze'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.helppage': {
'Meta': {
'object_name': 'HelpPage'
},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_visible': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'key': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True'
}
),
'priority':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'counter': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'type': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationoption': {
'Meta': {
'unique_together': "(('organization', 'key'),)",
'object_name': 'OrganizationOption',
'db_table': "'sentry_organizationoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'first_event': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project', 'version'),)",
'object_name': 'Release'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'name': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'db_column': "'first_name'",
'blank': 'True'
}
),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'), ('project', 'date_added'))"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
}
}
complete_apps = ['sentry']
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import logging
import os
import queue
import shutil
import subprocess
import tempfile
import threading
import time
import traceback
from builtins import object
from typing import TYPE_CHECKING
from typing import List
from typing import Optional
import grpc
from google.protobuf import text_format # type: ignore # not in typeshed
from apache_beam.metrics import monitoring_infos
from apache_beam.portability.api import beam_artifact_api_pb2
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.portability.api import beam_job_api_pb2_grpc
from apache_beam.portability.api import beam_provision_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.portability import abstract_job_service
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import fn_api_runner
from apache_beam.utils.thread_pool_executor import UnboundedThreadPoolExecutor
if TYPE_CHECKING:
from google.protobuf import struct_pb2 # pylint: disable=ungrouped-imports
from apache_beam.portability.api import beam_runner_api_pb2
_LOGGER = logging.getLogger(__name__)
def _iter_queue(q):
while True:
yield q.get(block=True)
class LocalJobServicer(abstract_job_service.AbstractJobServiceServicer):
"""Manages one or more pipelines, possibly concurrently.
Experimental: No backward compatibility guaranteed.
Servicer for the Beam Job API.
This JobService uses a basic local implementation of the runner to run the job.
This JobService is not capable of managing jobs on remote clusters.
By default, this JobService executes the job in process but still uses GRPC
to communicate pipeline and worker state. It can also be configured to use
inline calls rather than GRPC (for speed) or launch completely separate
subprocesses for the runner and worker(s).
"""
def __init__(self, staging_dir=None):
super(LocalJobServicer, self).__init__()
self._cleanup_staging_dir = staging_dir is None
self._staging_dir = staging_dir or tempfile.mkdtemp()
self._artifact_service = artifact_service.BeamFilesystemArtifactService(
self._staging_dir)
self._artifact_staging_endpoint = None # type: Optional[endpoints_pb2.ApiServiceDescriptor]
def create_beam_job(self,
preparation_id, # type: str
job_name, # type: str
pipeline, # type: beam_runner_api_pb2.Pipeline
options # type: struct_pb2.Struct
):
# type: (...) -> BeamJob
# TODO(angoenka): Pass an appropriate staging_session_token. The token can
# be obtained in PutArtifactResponse from JobService
if not self._artifact_staging_endpoint:
# The front-end didn't try to stage anything, but the worker may
# request what's here so we should at least store an empty manifest.
self._artifact_service.CommitManifest(
beam_artifact_api_pb2.CommitManifestRequest(
staging_session_token=preparation_id,
manifest=beam_artifact_api_pb2.Manifest()))
provision_info = fn_api_runner.ExtendedProvisionInfo(
beam_provision_api_pb2.ProvisionInfo(
job_id=preparation_id,
job_name=job_name,
pipeline_options=options,
retrieval_token=self._artifact_service.retrieval_token(
preparation_id)),
self._staging_dir)
return BeamJob(
preparation_id,
pipeline,
options,
provision_info,
self._artifact_staging_endpoint)
def get_bind_address(self):
"""Return the address used to open the port on the gRPC server.
This is often, but not always, the same as the service address. For
example, to make the service accessible to external machines, override this
to return '[::]' and override `get_service_address()` to return a publicly
accessible host name.
"""
return self.get_service_address()
def get_service_address(self):
"""Return the host name at which this server will be accessible.
In particular, this is provided to the client upon connection as the
artifact staging endpoint.
"""
return 'localhost'
def start_grpc_server(self, port=0):
self._server = grpc.server(UnboundedThreadPoolExecutor())
port = self._server.add_insecure_port(
'%s:%d' % (self.get_bind_address(), port))
beam_job_api_pb2_grpc.add_JobServiceServicer_to_server(self, self._server)
beam_artifact_api_pb2_grpc.add_ArtifactStagingServiceServicer_to_server(
self._artifact_service, self._server)
hostname = self.get_service_address()
self._artifact_staging_endpoint = endpoints_pb2.ApiServiceDescriptor(
url='%s:%d' % (hostname, port))
self._server.start()
_LOGGER.info('Grpc server started at %s on port %d' % (hostname, port))
return port
def stop(self, timeout=1):
self._server.stop(timeout)
if os.path.exists(self._staging_dir) and self._cleanup_staging_dir:
shutil.rmtree(self._staging_dir, ignore_errors=True)
def GetJobMetrics(self, request, context=None):
if request.job_id not in self._jobs:
raise LookupError("Job {} does not exist".format(request.job_id))
result = self._jobs[request.job_id].result
monitoring_info_list = []
for mi in result._monitoring_infos_by_stage.values():
monitoring_info_list.extend(mi)
# Filter out system metrics
user_monitoring_info_list = [
x for x in monitoring_info_list
if monitoring_infos._is_user_monitoring_info(x) or
monitoring_infos._is_user_distribution_monitoring_info(x)
]
return beam_job_api_pb2.GetJobMetricsResponse(
metrics=beam_job_api_pb2.MetricResults(
committed=user_monitoring_info_list))
class SubprocessSdkWorker(object):
"""Manages a SDK worker implemented as a subprocess communicating over grpc.
"""
def __init__(self,
worker_command_line, # type: bytes
control_address,
worker_id=None
):
self._worker_command_line = worker_command_line
self._control_address = control_address
self._worker_id = worker_id
def run(self):
logging_server = grpc.server(UnboundedThreadPoolExecutor())
logging_port = logging_server.add_insecure_port('[::]:0')
logging_server.start()
logging_servicer = BeamFnLoggingServicer()
beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server(
logging_servicer, logging_server)
logging_descriptor = text_format.MessageToString(
endpoints_pb2.ApiServiceDescriptor(url='localhost:%s' % logging_port))
control_descriptor = text_format.MessageToString(
endpoints_pb2.ApiServiceDescriptor(url=self._control_address))
env_dict = dict(
os.environ,
CONTROL_API_SERVICE_DESCRIPTOR=control_descriptor,
LOGGING_API_SERVICE_DESCRIPTOR=logging_descriptor
)
# only add worker_id when it is set.
if self._worker_id:
env_dict['WORKER_ID'] = self._worker_id
with fn_api_runner.SUBPROCESS_LOCK:
p = subprocess.Popen(
self._worker_command_line,
shell=True,
env=env_dict)
try:
p.wait()
if p.returncode:
raise RuntimeError(
'Worker subprocess exited with return code %s' % p.returncode)
finally:
if p.poll() is None:
p.kill()
logging_server.stop(0)
class BeamJob(abstract_job_service.AbstractBeamJob):
"""This class handles running and managing a single pipeline.
The current state of the pipeline is available as self.state.
"""
def __init__(self,
job_id, # type: str
pipeline,
options,
provision_info, # type: fn_api_runner.ExtendedProvisionInfo
artifact_staging_endpoint # type: Optional[endpoints_pb2.ApiServiceDescriptor]
):
super(BeamJob, self).__init__(
job_id, provision_info.provision_info.job_name, pipeline, options)
self._provision_info = provision_info
self._artifact_staging_endpoint = artifact_staging_endpoint
self._state_queues = [] # type: List[queue.Queue]
self._log_queues = [] # type: List[queue.Queue]
self.daemon = True
self.result = None
def set_state(self, new_state):
"""Set the latest state as an int enum and notify consumers"""
timestamp = super(BeamJob, self).set_state(new_state)
if timestamp is not None:
# Inform consumers of the new state.
for queue in self._state_queues:
queue.put((new_state, timestamp))
def prepare(self):
pass
def artifact_staging_endpoint(self):
return self._artifact_staging_endpoint
def run(self):
self.set_state(beam_job_api_pb2.JobState.STARTING)
self._run_thread = threading.Thread(target=self._run_job)
self._run_thread.start()
def _run_job(self):
self.set_state(beam_job_api_pb2.JobState.RUNNING)
with JobLogHandler(self._log_queues):
try:
result = fn_api_runner.FnApiRunner(
provision_info=self._provision_info).run_via_runner_api(
self._pipeline_proto)
_LOGGER.info('Successfully completed job.')
self.set_state(beam_job_api_pb2.JobState.DONE)
self.result = result
except: # pylint: disable=bare-except
_LOGGER.exception('Error running pipeline.')
_LOGGER.exception(traceback.format_exc())
self.set_state(beam_job_api_pb2.JobState.FAILED)
raise
def cancel(self):
if not self.is_terminal_state(self.state):
self.set_state(beam_job_api_pb2.JobState.CANCELLING)
# TODO(robertwb): Actually cancel...
self.set_state(beam_job_api_pb2.JobState.CANCELLED)
def get_state_stream(self):
# Register for any new state changes.
state_queue = queue.Queue()
self._state_queues.append(state_queue)
for state, timestamp in self.with_state_history(_iter_queue(state_queue)):
yield state, timestamp
if self.is_terminal_state(state):
break
def get_message_stream(self):
# Register for any new messages.
log_queue = queue.Queue()
self._log_queues.append(log_queue)
self._state_queues.append(log_queue)
for msg in self.with_state_history(_iter_queue(log_queue)):
if isinstance(msg, tuple):
assert len(msg) == 2 and isinstance(msg[0], int)
current_state = msg[0]
yield msg
if self.is_terminal_state(current_state):
break
else:
yield msg
class BeamFnLoggingServicer(beam_fn_api_pb2_grpc.BeamFnLoggingServicer):
def Logging(self, log_bundles, context=None):
for log_bundle in log_bundles:
for log_entry in log_bundle.log_entries:
_LOGGER.info('Worker: %s', str(log_entry).replace('\n', ' '))
return iter([])
class JobLogHandler(logging.Handler):
"""Captures logs to be returned via the Beam Job API.
Enabled via the with statement."""
# Mapping from logging levels to LogEntry levels.
LOG_LEVEL_MAP = {
logging.FATAL: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
logging.CRITICAL: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
logging.ERROR: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
logging.WARNING: beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING,
logging.INFO: beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC,
logging.DEBUG: beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG,
}
def __init__(self, log_queues):
super(JobLogHandler, self).__init__()
self._last_id = 0
self._logged_thread = None
self._log_queues = log_queues
def __enter__(self):
# Remember the current thread to demultiplex the logs of concurrently
# running pipelines (as Python log handlers are global).
self._logged_thread = threading.current_thread()
logging.getLogger().addHandler(self)
def __exit__(self, *args):
self._logged_thread = None
self.close()
def _next_id(self):
self._last_id += 1
return str(self._last_id)
def emit(self, record):
if self._logged_thread is threading.current_thread():
msg = beam_job_api_pb2.JobMessage(
message_id=self._next_id(),
time=time.strftime('%Y-%m-%d %H:%M:%S.',
time.localtime(record.created)),
importance=self.LOG_LEVEL_MAP[record.levelno],
message_text=self.format(record))
# Inform all message consumers.
for queue in self._log_queues:
queue.put(msg)
|
|
"""
ADDED CONSTRAINT: density > 10^4
(in practice, density > the dust-derived mean density or 10^3, whichever is
higher; see the mindens floor below)
For each of the fitted spectra from individual_spectra.py, use the fitted ratio
(and appropriate ancillary data, e.g. h2 column) to derive the best fit
temperature, etc.
Store them in an output table and plot by calling
execfile(paths.pcpath('parameter_comparisons.py'))
"""
import paths
import os
import matplotlib
#matplotlib.rc_file(paths.pcpath('pubfiguresrc'))
import pylab as pl
import numpy as np
from scipy import stats
from astropy import log
from astropy import units as u
from astropy import constants
from astropy import coordinates
from paths import analysispath, tpath
from pyspeckit_fitting import (texgrid303, taugrid303, texgrid321, taugrid321,
texgrid322, taugrid322, hdr)
# use the local constrain_parameters; h2co_modeling's version is... not exactly up to date.
from constrain_parameters import paraH2COmodel
from h2co_modeling import grid_fitter
from astropy import table
import despotic_heating as heating
pl.rcParams['font.size'] = 16.0
pl.close(4)
# mf means modelfitter
mf = paraH2COmodel()
fittable = table.Table.read(tpath("fitted_line_parameters.ipac"),
format='ascii.ipac')
fittable.add_columns([table.Column(name=name, dtype='float', length=len(fittable))
for name in ['temperature_chi2','tmin1sig_chi2','tmax1sig_chi2',
'expected_temperature',
'column_chi2','cmin1sig_chi2','cmax1sig_chi2',
'expected_column',
'density_chi2','dmin1sig_chi2','dmax1sig_chi2',
'expected_density',
'logh2column','elogh2column',
'logabundance','elogabundance',
'tkin_turb', 'reff_pc',
]])
if not os.path.exists(paths.fpath('param_fits')):
os.makedirs(paths.fpath('param_fits'))
nlevs = 4
#levels = [stats.norm.cdf(ii)-stats.norm.cdf(-ii)
# for ii in range(1,nlevs)]
ndof = 3
levels = ([0]+
[stats.chi2.ppf(stats.norm.cdf(ii)-stats.norm.cdf(-ii),
ndof)
for ii in range(1,nlevs)])
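# For ndof = 3 these delta-chi^2 thresholds are approximately
# [0, 3.53, 8.02, 14.2], i.e. the 1-, 2-, and 3-sigma joint confidence
# contours for three fitted parameters.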
density_label = r'Density $n(\mathrm{H}_2)$ [log cm$^{-3}$]'
column_label = r'Column p-H$_2$CO [log cm$^{-2}$/(km s$^{-1}$ pc)]'
density_label_short = r"$n(\mathrm{H}_2) (\mathrm{cm}^{-3})$"
column_label_short = r"$N(\mathrm{p-H}_2\mathrm{CO}) (\mathrm{cm}^{-2})$"
temperature_label = 'Temperature (K)'
prevname = ''
num = 0
for row in fittable:
if row['Source_Name'] == prevname:
num += 1
else:
num = 0
prevname = row['Source_Name']
log.info("Fitting {0}_{1}".format(row['Source_Name'],num))
logh2column = np.log10(row['higalcolumndens'])
elogh2column = 1.0
linewidth = row['spline_width']
elinewidth = row['espline_width']
par1 = row['spline_ampH2CO']
epar1 = row['espline_ampH2CO']
par2 = row['spline_ampH2CO']*row['spline_h2coratio321303']
epar2 = row['spline_ampH2CO']*row['espline_h2coratio321303']
#match,indbest,chi2b = grid_fitter.grid_2p_getmatch(par1, epar1, tline303,
# par2, epar2, tline321)
ratio = row['spline_h2coratio321303']
eratio = row['espline_h2coratio321303']
ratio2 = row['spline_h2coratio322321']
eratio2 = row['espline_h2coratio322321']
if ratio == 0 or np.isnan(ratio):
print("Skipped {0} because it had no fit ratio".format(row['Source_Name']))
continue
# We can impose a "loose" abundance constraint
# Given that we know the H2 density, and the line width is ~5-10 km/s,
# abundance = column / pc / density
# We'll say abundance = 1.2e-9 with error 0.6e-9
# Or, log(abundance) = log(1.2e-9) +/- 1
logabundance = np.log10(1.2e-9)
elogabundance = 1.0
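# Illustrative example (values assumed, not from the data): with
# logh2column = 22.5 and logabundance = log10(1.2e-9) ~ -8.9, the abundance
# prior pulls the fitted p-H2CO column toward ~10^13.6 cm^-2/(km/s)/pc,
# with a 1 dex uncertainty.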
# Constraint from density
r_deg = (row['area']/np.pi)**0.5 * u.deg
reff = (r_deg*(8.5*u.kpc)).to(u.pc, u.dimensionless_angles())
mass = ((10**logh2column*u.cm**-2)*np.pi*reff**2*2.8*constants.m_p).to(u.M_sun)
# reff = sqrt(area/pi), i.e. the geometric mean of the semi-major and
# semi-minor axes of the ellipse.  This is not a bad assumption... we
# COULD use the HWHM instead here, though.
density = (mass/(4/3.*np.pi*reff**3)/constants.m_p/2.8).to(u.cm**-3)
mindens = np.log10(density.value)
if mindens < 3:
mindens = 3
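# Worked example (values assumed, not from the data): for logh2column = 22.5
# and reff = 1 pc, n ~ (3/4) * N(H2) / reff ~ 0.75 * 10**22.5 / 3.086e18
# ~ 7.7e3 cm^-3, so mindens ~ 3.9 and the 10**3 cm^-3 floor is not applied.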
# Combined abundance + total column constraint
# N(H2CO) * dv * X = N(H2)
# We are effectively ignoring errors in the linewidth here:
# (noop - see chi2_h2)
# Even though the lines are subject to filling-factor uncertainty, we can
# set a *minimum* brightness in the models. Given that we observe a line
# brightness T_A, the true brightness is T_B = T_A/ff, where ff<1 by
# definition
# We therefore *increase* the chi^2 value wherever the model is fainter
# than the line, enforcing a soft lower limit
mf.set_constraints(ratio321303=ratio, eratio321303=eratio,
ratio321322=ratio2, eratio321322=eratio2,
logh2column=logh2column, elogh2column=elogh2column,
logabundance=logabundance, elogabundance=elogabundance,
taline303=par1, etaline303=epar1,
taline321=par2, etaline321=epar2,
mindens=mindens,
linewidth=10) # for consistency with dendro
chi2r = mf.chi2_r321303
chi2r2 = mf.chi2_r321322
chi2_h2 = mf.chi2_h2
chi2X = mf.chi2_X
chi2_1 = mf.chi2_ff1
chi2_2 = mf.chi2_ff2
chi2_ff = chi2_1+chi2_2
chi2_dens = mf.chi2_dens
chi2b = chi2r + chi2_ff + chi2X + chi2_h2 + chi2_dens
match = chi2b < 1
indbest,match = grid_fitter.getmatch(chi2b, match)
sh = match.shape
(zz,yy,xx) = np.unravel_index(indbest, sh)
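# Judging from the extents used in the panels below, chi2b is gridded as
# (temperature, density, column), so zz, yy and xx pick out the best-fit
# temperature, density and column indices respectively.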
fig1 = pl.figure(1, figsize=(12,12))
fig1.clf()
vmin = np.max([mf.tline303.min(), 0.1])
vmax = np.min([mf.tline303.max(), par1+10])
ax1 = pl.subplot(3,3,1)
im1 = pl.imshow(mf.tline303[zz,:,:], cmap=pl.cm.gray_r, interpolation='spline36',
norm=pl.matplotlib.colors.LogNorm(),
extent=mf.crange+mf.drange, vmin=vmin, vmax=vmax)
pl.contour(mf.carr, mf.darr, chi2b[zz,:,:], levels=chi2b.min()+levels)
# only label middle row
#pl.ylabel(column_label)
pl.xlabel(column_label_short)
ax1.set_xticks(np.arange(11,16))
ax2 = pl.subplot(3,3,2)
im2 = pl.imshow(mf.tline303[:,yy,:], cmap=pl.cm.gray_r, interpolation='spline36',
norm=pl.matplotlib.colors.LogNorm(),
aspect=np.diff(mf.crange)/np.diff(mf.trange),
extent=mf.crange+mf.trange, vmin=vmin, vmax=vmax)
pl.contour(mf.carr, mf.tarr, chi2b[:,yy,:], levels=chi2b.min()+levels)
pl.xlabel(column_label_short)
ax2.set_xticks(np.arange(11,16))
pl.ylabel(temperature_label)
#ax2.set_title("p-H$_2$CO $3_{0,3}-2_{0,2}$")
ax3 = pl.subplot(3,3,3)
im3 = pl.imshow(mf.tline303[:,:,xx], cmap=pl.cm.gray_r, interpolation='spline36',
norm=pl.matplotlib.colors.LogNorm(),
aspect=np.diff(mf.drange)/np.diff(mf.trange),
extent=mf.drange+mf.trange, vmin=vmin, vmax=vmax)
pl.contour(mf.darr, mf.tarr, chi2b[:,:,xx], levels=chi2b.min()+levels)
pl.xlabel(density_label_short)
ax3.xaxis.set_ticks(np.arange(mf.darr.min(), mf.darr.max()))
pl.ylabel(temperature_label)
cax = fig1.add_axes([0.91,0.68,0.02,0.22])
cb = fig1.colorbar(mappable=im3, cax=cax, ax=ax2)
cb.set_label("$T_B$ (p-H$_2$CO $3_{0,3}-2_{0,2}$)")
vmin = np.max([mf.tline321.min(), 0.1])
vmax = np.min([mf.tline321.max(), par2+10])
ax4 = pl.subplot(3,3,4)
pl.imshow(mf.tline321[zz,:,:], cmap=pl.cm.gray_r, interpolation='spline36',
norm=pl.matplotlib.colors.LogNorm(),
extent=mf.crange+mf.drange, vmin=vmin, vmax=vmax)
pl.contour(mf.carr, mf.darr, chi2b[zz,:,:], levels=chi2b.min()+levels)
pl.xlabel(column_label_short)
ax4.set_xticks(np.arange(11,16))
pl.ylabel(density_label_short)
ax5 = pl.subplot(3,3,5)
im5 = pl.imshow(mf.tline321[:,yy,:], cmap=pl.cm.gray_r, interpolation='spline36',
norm=pl.matplotlib.colors.LogNorm(),
aspect=np.diff(mf.crange)/np.diff(mf.trange),
extent=mf.crange+mf.trange, vmin=vmin, vmax=vmax)
pl.contour(mf.carr, mf.tarr, chi2b[:,yy,:], levels=chi2b.min()+levels)
pl.xlabel(column_label_short)
ax5.set_xticks(np.arange(11,16))
pl.ylabel(temperature_label)
#ax5.set_title("p-H$_2$CO $3_{2,1}-2_{2,0}$")
ax6 = pl.subplot(3,3,6)
im6 = pl.imshow(mf.tline321[:,:,xx], cmap=pl.cm.gray_r, interpolation='spline36',
norm=pl.matplotlib.colors.LogNorm(),
aspect=np.diff(mf.drange)/np.diff(mf.trange),
extent=mf.drange+mf.trange, vmin=vmin, vmax=vmax)
pl.contour(mf.darr, mf.tarr, chi2b[:,:,xx], levels=chi2b.min()+levels)
pl.xlabel(density_label_short)
ax6.xaxis.set_ticks(np.arange(mf.darr.min(), mf.darr.max()))
pl.ylabel(temperature_label)
cax = fig1.add_axes([0.91,0.40,0.02,0.22])
cb = fig1.colorbar(mappable=im6, cax=cax, ax=ax5)
cb.set_label("$T_B$ (p-H$_2$CO $3_{2,1}-2_{2,0}$)")
vminr = 0.05
vmaxr = 0.7
ax7 = pl.subplot(3,3,7)
im7 = ax7.imshow(mf.modelratio1[zz,:,:], cmap=pl.cm.gray_r,
interpolation='spline36',
#norm=pl.matplotlib.colors.LogNorm(),
extent=mf.crange+mf.drange, vmin=vminr, vmax=vmaxr)
pl.contour(mf.carr, mf.darr, chi2b[zz,:,:], levels=chi2b.min()+levels)
# only label middle row
#pl.ylabel(column_label_short)
pl.xlabel(column_label_short)
ax7.set_xticks(np.arange(11,16))
ax8 = pl.subplot(3,3,8)
im8 = ax8.imshow(mf.modelratio1[:,yy,:], cmap=pl.cm.gray_r,
interpolation='spline36',
#norm=pl.matplotlib.colors.LogNorm(),
aspect=np.diff(mf.crange)/np.diff(mf.trange),
extent=mf.crange+mf.trange, vmin=vminr, vmax=vmaxr)
pl.contour(mf.carr, mf.tarr, chi2b[:,yy,:], levels=chi2b.min()+levels)
pl.xlabel(column_label_short)
ax8.set_xticks(np.arange(11,16))
pl.ylabel(temperature_label)
#ax2.set_title("p-H$_2$CO $3_{0,3}-2_{0,2}$")
ax9 = pl.subplot(3,3,9)
im9 = ax9.imshow(mf.modelratio1[:,:,xx], cmap=pl.cm.gray_r,
interpolation='spline36',
#norm=pl.matplotlib.colors.LogNorm(),
aspect=np.diff(mf.drange)/np.diff(mf.trange),
extent=mf.drange+mf.trange, vmin=vminr, vmax=vmaxr)
pl.contour(mf.darr, mf.tarr, chi2b[:,:,xx], levels=chi2b.min()+levels)
pl.xlabel(density_label_short)
ax9.xaxis.set_ticks(np.arange(mf.darr.min(), mf.darr.max()))
pl.ylabel(temperature_label)
cax3 = fig1.add_axes([0.91,0.1,0.02,0.22])
cb = fig1.colorbar(mappable=im8, cax=cax3, ax=ax8)
cb.ax.hlines(cb.norm((ratio+eratio, ratio-eratio)), 0, 1, color='r', linestyle='-', alpha=0.5)
cb.ax.hlines(cb.norm((ratio)), 0, 1, color='b', linestyle=':', linewidth=1, alpha=0.5)
cb.set_label("$3_{2,1}-2_{2,0}$ / $3_{0,3}-2_{0,2}$")
pl.suptitle(row['Source_Name'])
pl.subplots_adjust(wspace=0.33, hspace=0.25, left=0.1)
pl.savefig(paths.fpath('param_fits/{name}_{num}_h2coratio.pdf'.format(name=row['Source_Name'],
num=num)), bbox_inches='tight')
fig2 = pl.figure(2, figsize=(12,12))
fig2.clf()
ax1 = pl.subplot(3,3,1)
yi, xi = np.indices(mf.tline303.shape[1:])
inds = [chi2b.argmin(axis=0), yi, xi]
# The background from taking the min-chi^2 along each axis is too ugly and
# hard to explain: revert to using a *slice* for a background but a chi^2
# *projection* for the contours
inds = [zz, slice(None), slice(None)]
pl.imshow(mf.tline303[inds], cmap=pl.cm.gray_r, interpolation='spline36',
norm=pl.matplotlib.colors.LogNorm(),
extent=mf.crange+mf.drange, vmin=vmin, vmax=vmax)
pl.contour(mf.carr, mf.darr, chi2b.min(axis=0), levels=chi2b.min()+levels)
pl.xlabel(column_label_short)
ax1.set_xticks(np.arange(11,16))
#pl.ylabel(density_label_short)
ax2 = pl.subplot(3,3,2)
zi, xi = np.indices([mf.tline303.shape[0], mf.tline303.shape[2],])
inds = [zi, chi2b.argmin(axis=1), xi]
inds = [slice(None), yy, slice(None)]
pl.imshow(mf.tline303[inds], cmap=pl.cm.gray_r, interpolation='spline36',
norm=pl.matplotlib.colors.LogNorm(),
aspect=np.diff(mf.crange)/np.diff(mf.trange),
extent=mf.crange+mf.trange, vmin=vmin, vmax=vmax)
pl.contour(mf.carr, mf.tarr, chi2b.min(axis=1), levels=chi2b.min()+levels)
pl.xlabel(column_label_short)
ax2.set_xticks(np.arange(11,16))
pl.ylabel(temperature_label)
ax3 = pl.subplot(3,3,3)
zi, yi = np.indices([mf.tline303.shape[0], mf.tline303.shape[2],])
inds = [zi, yi, chi2b.argmin(axis=2)]
inds = [slice(None), slice(None), xx]
pl.imshow(mf.tline303[inds], cmap=pl.cm.gray_r, interpolation='spline36',
norm=pl.matplotlib.colors.LogNorm(),
aspect=np.diff(mf.drange)/np.diff(mf.trange),
extent=mf.drange+mf.trange, vmin=vmin, vmax=vmax)
pl.contour(mf.darr, mf.tarr, chi2b.min(axis=2), levels=chi2b.min()+levels)
pl.xlabel(density_label_short)
ax3.xaxis.set_ticks(np.arange(mf.darr.min(), mf.darr.max()))
pl.ylabel(temperature_label)
#cax = fig2.add_axes([0.91,0.55,0.02,0.35])
#cb = fig2.colorbar(mappable=im3, cax=cax, ax=ax2)
#cb.set_label("$T_B$ (p-H$_2$CO $3_{0,3}-2_{0,2}$)")
cax = fig2.add_axes([0.91,0.68,0.02,0.22])
cb = fig2.colorbar(mappable=im3, cax=cax, ax=ax2)
cb.set_label("$T_B$ (p-H$_2$CO $3_{0,3}-2_{0,2}$)")
ax4 = pl.subplot(3,3,4)
yi, xi = np.indices(mf.tline303.shape[1:])
inds = [chi2b.argmin(axis=0), yi, xi]
inds = [zz, slice(None), slice(None)]
pl.imshow(mf.tline321[inds], cmap=pl.cm.gray_r, interpolation='spline36',
norm=pl.matplotlib.colors.LogNorm(),
extent=mf.crange+mf.drange, vmin=vmin, vmax=vmax)
pl.contour(mf.carr, mf.darr, chi2b.min(axis=0), levels=chi2b.min()+levels)
pl.xlabel(column_label_short)
ax4.set_xticks(np.arange(11,16))
pl.ylabel(density_label_short)
ax5 = pl.subplot(3,3,5)
zi, xi = np.indices([mf.tline303.shape[0], mf.tline303.shape[2],])
inds = [zi, chi2b.argmin(axis=1), xi]
inds = [slice(None), yy, slice(None)]
pl.imshow(mf.tline321[inds], cmap=pl.cm.gray_r, interpolation='spline36',
norm=pl.matplotlib.colors.LogNorm(),
aspect=np.diff(mf.crange)/np.diff(mf.trange),
extent=mf.crange+mf.trange, vmin=vmin, vmax=vmax)
pl.contour(mf.carr, mf.tarr, chi2b.min(axis=1), levels=chi2b.min()+levels)
pl.xlabel(column_label_short)
ax5.set_xticks(np.arange(11,16))
pl.ylabel(temperature_label)
ax6 = pl.subplot(3,3,6)
zi, yi = np.indices([mf.tline303.shape[0], mf.tline303.shape[2],])
inds = [zi, yi, chi2b.argmin(axis=2)]
inds = [slice(None), slice(None), xx]
im6 = pl.imshow(mf.tline321[inds], cmap=pl.cm.gray_r, interpolation='spline36',
norm=pl.matplotlib.colors.LogNorm(),
aspect=np.diff(mf.drange)/np.diff(mf.trange),
extent=mf.drange+mf.trange, vmin=vmin, vmax=vmax)
pl.contour(mf.darr, mf.tarr, chi2b.min(axis=2), levels=chi2b.min()+levels)
pl.xlabel(density_label_short)
ax6.xaxis.set_ticks(np.arange(mf.darr.min(), mf.darr.max()))
pl.ylabel(temperature_label)
#cax = fig2.add_axes([0.91,0.1,0.02,0.35])
#cb = fig2.colorbar(mappable=im6, cax=cax, ax=ax5)
#cb.set_label("$T_B$ (p-H$_2$CO $3_{2,1}-2_{2,0}$)")
cax = fig2.add_axes([0.91,0.40,0.02,0.22])
cb = fig2.colorbar(mappable=im6, cax=cax, ax=ax5)
cb.set_label("$T_B$ (p-H$_2$CO $3_{2,1}-2_{2,0}$)")
vminr = 0.05
vmaxr = 0.7
ax7 = pl.subplot(3,3,7)
im7 = ax7.imshow(mf.modelratio1[zz,:,:], cmap=pl.cm.gray_r,
interpolation='spline36',
#norm=pl.matplotlib.colors.LogNorm(),
extent=mf.crange+mf.drange, vmin=vminr, vmax=vmaxr)
pl.contour(mf.carr, mf.darr, chi2b.min(axis=0), levels=chi2b.min()+levels)
# only label middle row
#pl.ylabel(column_label_short)
pl.xlabel(column_label_short)
ax7.set_xticks(np.arange(11,16))
ax8 = pl.subplot(3,3,8)
im8 = ax8.imshow(mf.modelratio1[:,yy,:], cmap=pl.cm.gray_r,
interpolation='spline36',
#norm=pl.matplotlib.colors.LogNorm(),
aspect=np.diff(mf.crange)/np.diff(mf.trange),
extent=mf.crange+mf.trange, vmin=vminr, vmax=vmaxr)
pl.contour(mf.carr, mf.tarr, chi2b.min(axis=1), levels=chi2b.min()+levels)
pl.xlabel(column_label_short)
ax8.set_xticks(np.arange(11,16))
pl.ylabel(temperature_label)
#ax2.set_title("p-H$_2$CO $3_{0,3}-2_{0,2}$")
ax9 = pl.subplot(3,3,9)
im9 = ax9.imshow(mf.modelratio1[:,:,xx], cmap=pl.cm.gray_r,
interpolation='spline36',
#norm=pl.matplotlib.colors.LogNorm(),
aspect=np.diff(mf.drange)/np.diff(mf.trange),
extent=mf.drange+mf.trange, vmin=vminr, vmax=vmaxr)
pl.contour(mf.darr, mf.tarr, chi2b.min(axis=2), levels=chi2b.min()+levels)
pl.xlabel(density_label_short)
ax9.xaxis.set_ticks(np.arange(mf.darr.min(), mf.darr.max()))
pl.ylabel(temperature_label)
cax3 = fig2.add_axes([0.91,0.1,0.02,0.22])
cb = fig2.colorbar(mappable=im8, cax=cax3, ax=ax8)
cb.ax.hlines(cb.norm((ratio+eratio, ratio-eratio)), 0, 1, color='r', linestyle='-', alpha=0.5)
cb.ax.hlines(cb.norm((ratio)), 0, 1, color='b', linestyle=':', linewidth=1, alpha=0.5)
cb.set_label("$3_{2,1}-2_{2,0}$ / $3_{0,3}-2_{0,2}$")
pl.suptitle(row['Source_Name'])
pl.subplots_adjust(wspace=0.33, left=0.1, hspace=0.25)
pl.savefig(paths.fpath('param_fits/{name}_{num}_h2coratio_minaxis.pdf'.format(name=row['Source_Name'],
num=num)), bbox_inches='tight')
# IGNORE 321/322: it is generally not well constrained anyway
mf.chi2_r321322 = 0
mf.compute_chi2_fromcomponents()
for par1,par2 in (('dens','col'),('dens','tem'),('col','tem')):
ptype = '{0}_{1}'.format(par1,par2)
fig3 = pl.figure(3)
fig3.clf()
mf.parplot(par1=par1, par2=par2)
outf = paths.fpath('param_fits/{name}_{ptype}_{num}_parameter_constraints.pdf'.format(name=row['Source_Name'],
ptype=ptype,
num=num))
pl.savefig(outf, bbox_inches='tight')
pl.figure(4, figsize=(12,16))
# levels[0] = 0.68
mf.parplot1d_all(levels=[0.68])
pl.subplots_adjust(hspace=0.45)
outf = paths.fpath('param_fits/{name}_oneD_{num}_parameter_constraints.pdf'.format(name=row['Source_Name'],
num=num))
pl.savefig(outf, bbox_inches='tight')
row_data = mf.get_parconstraints()
for key,value in row_data.iteritems():
row[key] = value
width = row['width']*u.km/u.s
row['reff_pc'] = reff.to(u.pc).value
row['tkin_turb'] = heating.tkin_all(density=10**row['density_chi2']*u.cm**-3,
sigma=width,
lengthscale=reff,
gradient=width/reff,
tdust=row['higaldusttem']*u.K,
crir=0./u.s)
#if row_data['temperature_chi2'] == 10:
# import ipdb; ipdb.set_trace()
log.info("Completed source loop.")
fittable.write(tpath('fitted_line_parameters_Chi2Constraints.ipac'),
format='ascii.ipac')
log.info("Wrote table file. Continuing to parameter plots.")
execfile(paths.pcpath('parameter_comparisons.py'))
pl.show()
|
|
from django.contrib.auth import get_user_model
from django.core.exceptions import ImproperlyConfigured
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIRequestFactory
from oauth_api.models import get_application_model, AuthorizationCode, AccessToken
from oauth_api.tests.utils import TestCaseUtils
from oauth_api.tests.views import RESPONSE_DATA, ResourceNoScopesView
Application = get_application_model()
User = get_user_model()
class BaseTest(TestCaseUtils):
@classmethod
def setUpTestData(cls):
cls.test_user = User.objects.create_user('test_user', 'test_user@example.com', '1234')
cls.dev_user = User.objects.create_user('dev_user', 'dev_user@example.com', '1234')
cls.application = Application(
name='Test Application',
redirect_uris='http://localhost http://example.com',
user=cls.dev_user,
client_type=Application.CLIENT_CONFIDENTIAL,
authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE,
)
cls.application.save()
cls.factory = APIRequestFactory()
class TestScopes(BaseTest):
def test_scopes_in_authorization_code(self):
"""
Test scopes are properly saved in authorization codes
"""
self.client.login(username='test_user', password='1234')
authorization_code = self.get_authorization_code(scopes='write scope1')
ac = AuthorizationCode.objects.get(code=authorization_code)
self.assertEqual(ac.scope, 'write scope1')
def test_scopes_in_access_token(self):
"""
Test scopes are properly saved in access tokens
"""
self.client.login(username='test_user', password='1234')
authorization_code = self.get_authorization_code(scopes='write scope1')
token_request = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': 'http://localhost',
}
self.client.credentials(HTTP_AUTHORIZATION=self.get_basic_auth(self.application.client_id,
self.application.client_secret))
response = self.client.post(reverse('oauth_api:token'), token_request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue('access_token' in response.data)
access_token = response.data['access_token']
token = AccessToken.objects.get(token=access_token)
self.assertEqual(token.scope, 'write scope1')
class TestScopesResourceViews(BaseTest):
def test_required_scopes_valid(self):
"""
Test access to resource protected by required_scope
"""
self.client.login(username='test_user', password='1234')
authorization_code = self.get_authorization_code(scopes='read write')
access_token = self.get_access_token(authorization_code)
self.client.credentials(HTTP_AUTHORIZATION='Bearer %s' % access_token)
response = self.client.get(reverse('resource-view'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, RESPONSE_DATA)
def test_required_scopes_invalid(self):
"""
Test access to resource protected by required_scope with incorrect scopes
"""
self.client.login(username='test_user', password='1234')
authorization_code = self.get_authorization_code(scopes='write')
access_token = self.get_access_token(authorization_code)
self.client.credentials(HTTP_AUTHORIZATION='Bearer %s' % access_token)
response = self.client.get(reverse('resource-view'))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_read_scopes_valid(self):
"""
Test access to resource protected by read_scopes
"""
self.client.login(username='test_user', password='1234')
authorization_code = self.get_authorization_code(scopes='read')
access_token = self.get_access_token(authorization_code)
self.client.credentials(HTTP_AUTHORIZATION='Bearer %s' % access_token)
response = self.client.get(reverse('resource-read-view'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, RESPONSE_DATA)
def test_read_scopes_invalid(self):
"""
Test access to resource protected by read_scopes with invalid scope
"""
self.client.login(username='test_user', password='1234')
authorization_code = self.get_authorization_code(scopes='scope1')
access_token = self.get_access_token(authorization_code)
self.client.credentials(HTTP_AUTHORIZATION='Bearer %s' % access_token)
response = self.client.get(reverse('resource-read-view'))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_write_scopes_valid(self):
"""
Test access to resource protected by write_scopes
"""
self.client.login(username='test_user', password='1234')
authorization_code = self.get_authorization_code(scopes='write')
access_token = self.get_access_token(authorization_code)
self.client.credentials(HTTP_AUTHORIZATION='Bearer %s' % access_token)
response = self.client.post(reverse('resource-write-view'))
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data, RESPONSE_DATA)
def test_write_scopes_invalid(self):
"""
Test access to resource protected by write_scopes with invalid scope
"""
self.client.login(username='test_user', password='1234')
authorization_code = self.get_authorization_code(scopes='scope1')
access_token = self.get_access_token(authorization_code)
self.client.credentials(HTTP_AUTHORIZATION='Bearer %s' % access_token)
response = self.client.post(reverse('resource-write-view'))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_readwrite_scopes_valid_read(self):
"""
Test access to resource protected by read_scopes and write_scopes with valid read scope
"""
self.client.login(username='test_user', password='1234')
authorization_code = self.get_authorization_code(scopes='read')
access_token = self.get_access_token(authorization_code)
self.client.credentials(HTTP_AUTHORIZATION='Bearer %s' % access_token)
response = self.client.get(reverse('resource-readwrite-view'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, RESPONSE_DATA)
def test_readwrite_scopes_invalid_read(self):
"""
Test access to resource protected by read_scopes and write_scopes with invalid read scope
"""
self.client.login(username='test_user', password='1234')
authorization_code = self.get_authorization_code(scopes='scope1')
access_token = self.get_access_token(authorization_code)
self.client.credentials(HTTP_AUTHORIZATION='Bearer %s' % access_token)
response = self.client.get(reverse('resource-readwrite-view'))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_readwrite_scopes_valid_write(self):
"""
Test access to resource protected by read_scopes and write_scopes with valid write scope
"""
self.client.login(username='test_user', password='1234')
authorization_code = self.get_authorization_code(scopes='write')
access_token = self.get_access_token(authorization_code)
self.client.credentials(HTTP_AUTHORIZATION='Bearer %s' % access_token)
response = self.client.post(reverse('resource-readwrite-view'))
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data, RESPONSE_DATA)
def test_readwrite_scopes_invalid_write(self):
"""
Test access to resource protected by read_scopes and write_scopes with invalid write scope
"""
self.client.login(username='test_user', password='1234')
authorization_code = self.get_authorization_code(scopes='scope1')
access_token = self.get_access_token(authorization_code)
self.client.credentials(HTTP_AUTHORIZATION='Bearer %s' % access_token)
response = self.client.post(reverse('resource-readwrite-view'))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_mixed_scopes_invalid_required(self):
"""
Test access to resource protected by required_scopes, read_scopes and write_scopes with invalid required
"""
self.client.login(username='test_user', password='1234')
authorization_code = self.get_authorization_code(scopes='read write')
access_token = self.get_access_token(authorization_code)
self.client.credentials(HTTP_AUTHORIZATION='Bearer %s' % access_token)
response = self.client.get(reverse('resource-mixed-view'))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_mixed_scopes_valid_read(self):
"""
Test access to resource protected by required_scopes, read_scopes and write_scopes with valid read scope
"""
self.client.login(username='test_user', password='1234')
authorization_code = self.get_authorization_code(scopes='scope1 read')
access_token = self.get_access_token(authorization_code)
self.client.credentials(HTTP_AUTHORIZATION='Bearer %s' % access_token)
response = self.client.get(reverse('resource-mixed-view'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, RESPONSE_DATA)
def test_mixed_scopes_invalid_read(self):
"""
Test access to resource protected by required_scopes, read_scopes and write_scopes with invalid read scope
"""
self.client.login(username='test_user', password='1234')
authorization_code = self.get_authorization_code(scopes='scope1 write')
access_token = self.get_access_token(authorization_code)
self.client.credentials(HTTP_AUTHORIZATION='Bearer %s' % access_token)
response = self.client.get(reverse('resource-mixed-view'))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_mixed_scopes_valid_write(self):
"""
Test access to resource protected by required_scopes, read_scopes and write_scopes with valid write scope
"""
self.client.login(username='test_user', password='1234')
authorization_code = self.get_authorization_code(scopes='scope1 write')
access_token = self.get_access_token(authorization_code)
self.client.credentials(HTTP_AUTHORIZATION='Bearer %s' % access_token)
response = self.client.post(reverse('resource-mixed-view'))
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data, RESPONSE_DATA)
def test_mixed_scopes_invalid_write(self):
"""
Test access to resource protected by required_scopes, read_scopes and write_scopes with invalid write scope
"""
self.client.login(username='test_user', password='1234')
authorization_code = self.get_authorization_code(scopes='scope1 read')
access_token = self.get_access_token(authorization_code)
self.client.credentials(HTTP_AUTHORIZATION='Bearer %s' % access_token)
response = self.client.post(reverse('resource-mixed-view'))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_improperly_configured(self):
"""
Test access to resource without any required scopes defined
"""
self.client.login(username='test_user', password='1234')
authorization_code = self.get_authorization_code(scopes='read')
access_token = self.get_access_token(authorization_code)
headers = {
'HTTP_AUTHORIZATION': 'Bearer %s' % access_token,
}
request = self.factory.get('/fake', **headers)
view = ResourceNoScopesView.as_view()
self.assertRaises(ImproperlyConfigured, view, request)
|
|
"""
Functions
---------
.. autosummary::
:toctree: generated/
line_search_armijo
line_search_wolfe1
line_search_wolfe2
scalar_search_wolfe1
scalar_search_wolfe2
"""
from __future__ import division, print_function, absolute_import
from scipy.optimize import minpack2
import numpy as np
from scipy.lib.six.moves import xrange
__all__ = ['line_search_wolfe1', 'line_search_wolfe2',
'scalar_search_wolfe1', 'scalar_search_wolfe2',
'line_search_armijo']
#------------------------------------------------------------------------------
# Minpack's Wolfe line and scalar searches
#------------------------------------------------------------------------------
def line_search_wolfe1(f, fprime, xk, pk, gfk=None,
old_fval=None, old_old_fval=None,
args=(), c1=1e-4, c2=0.9, amax=50, amin=1e-8,
xtol=1e-14):
"""
As `scalar_search_wolfe1` but do a line search in direction `pk`.
Parameters
----------
f : callable
Function `f(x)`
fprime : callable
Gradient of `f`
xk : array_like
Current point
pk : array_like
Search direction
gfk : array_like, optional
Gradient of `f` at point `xk`
old_fval : float, optional
Value of `f` at point `xk`
old_old_fval : float, optional
Value of `f` at point preceding `xk`
The rest of the parameters are the same as for `scalar_search_wolfe1`.
Returns
-------
stp, f_count, g_count, fval, old_fval
As in `line_search_wolfe1`
gval : array
Gradient of `f` at the final point
"""
if gfk is None:
gfk = fprime(xk)
if isinstance(fprime, tuple):
eps = fprime[1]
fprime = fprime[0]
newargs = (f, eps) + args
gradient = False
else:
newargs = args
gradient = True
gval = [gfk]
gc = [0]
fc = [0]
def phi(s):
fc[0] += 1
return f(xk + s*pk, *args)
def derphi(s):
gval[0] = fprime(xk + s*pk, *newargs)
if gradient:
gc[0] += 1
else:
fc[0] += len(xk) + 1
return np.dot(gval[0], pk)
derphi0 = np.dot(gfk, pk)
stp, fval, old_fval = scalar_search_wolfe1(
phi, derphi, old_fval, old_old_fval, derphi0,
c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol)
return stp, fc[0], gc[0], fval, old_fval, gval[0]
def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None,
c1=1e-4, c2=0.9,
amax=50, amin=1e-8, xtol=1e-14):
"""
Scalar function search for alpha that satisfies strong Wolfe conditions
The search direction is assumed to be a descent direction (i.e. `derphi(0) < 0`); only `alpha > 0` is considered.
Parameters
----------
phi : callable phi(alpha)
Function at point `alpha`
derphi : callable dphi(alpha)
Derivative `d phi(alpha)/d alpha`. Returns a scalar.
phi0 : float, optional
Value of `f` at 0
old_phi0 : float, optional
Value of `f` at the previous point
derphi0 : float, optional
Value `derphi` at 0
amax : float, optional
Maximum step size
c1, c2 : float, optional
Wolfe parameters
Returns
-------
alpha : float
Step size, or None if no suitable step was found
phi : float
Value of `phi` at the new point `alpha`
phi0 : float
Value of `phi` at `alpha=0`
Notes
-----
Uses routine DCSRCH from MINPACK.
"""
if phi0 is None:
phi0 = phi(0.)
if derphi0 is None:
derphi0 = derphi(0.)
if old_phi0 is not None:
alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)
if alpha1 < 0:
alpha1 = 1.0
else:
alpha1 = 1.0
phi1 = phi0
derphi1 = derphi0
isave = np.zeros((2,), np.intc)
dsave = np.zeros((13,), float)
task = b'START'
maxiter = 30
for i in xrange(maxiter):
stp, phi1, derphi1, task = minpack2.dcsrch(alpha1, phi1, derphi1,
c1, c2, xtol, task,
amin, amax, isave, dsave)
if task[:2] == b'FG':
alpha1 = stp
phi1 = phi(stp)
derphi1 = derphi(stp)
else:
break
else:
# maxiter reached, the line search did not converge
stp = None
if task[:5] == b'ERROR' or task[:4] == b'WARN':
stp = None # failed
return stp, phi1, phi0
line_search = line_search_wolfe1
#------------------------------------------------------------------------------
# Pure-Python Wolfe line and scalar searches
#------------------------------------------------------------------------------
def line_search_wolfe2(f, myfprime, xk, pk, gfk=None, old_fval=None,
old_old_fval=None, args=(), c1=1e-4, c2=0.9, amax=50):
"""Find alpha that satisfies strong Wolfe conditions.
Parameters
----------
f : callable f(x,*args)
Objective function.
myfprime : callable f'(x,*args)
Objective function gradient.
xk : ndarray
Starting point.
pk : ndarray
Search direction.
gfk : ndarray, optional
Gradient value for x=xk (xk being the current parameter
estimate). Will be recomputed if omitted.
old_fval : float, optional
Function value for x=xk. Will be recomputed if omitted.
old_old_fval : float, optional
Function value for the point preceding x=xk
args : tuple, optional
Additional arguments passed to objective function.
c1 : float, optional
Parameter for Armijo condition rule.
c2 : float, optional
Parameter for curvature condition rule.
Returns
-------
alpha0 : float
Alpha for which ``x_new = x0 + alpha * pk``.
fc : int
Number of function evaluations made.
gc : int
Number of gradient evaluations made.
Notes
-----
Uses the line search algorithm to enforce strong Wolfe
conditions. See Wright and Nocedal, 'Numerical Optimization',
1999, pg. 59-60.
For the zoom phase it uses an algorithm by [...].
"""
fc = [0]
gc = [0]
gval = [None]
def phi(alpha):
fc[0] += 1
return f(xk + alpha * pk, *args)
if isinstance(myfprime, tuple):
def derphi(alpha):
fc[0] += len(xk) + 1
eps = myfprime[1]
fprime = myfprime[0]
newargs = (f, eps) + args
gval[0] = fprime(xk + alpha * pk, *newargs) # store for later use
return np.dot(gval[0], pk)
else:
fprime = myfprime
def derphi(alpha):
gc[0] += 1
gval[0] = fprime(xk + alpha * pk, *args) # store for later use
return np.dot(gval[0], pk)
if gfk is None:
gfk = fprime(xk)
derphi0 = np.dot(gfk, pk)
alpha_star, phi_star, old_fval, derphi_star = \
scalar_search_wolfe2(phi, derphi, old_fval, old_old_fval,
derphi0, c1, c2, amax)
if derphi_star is not None:
# derphi_star is a number (derphi), so use the most recently
# calculated gradient used in computing it (derphi = gfk*pk);
# this is the gradient at the next step, so there is no need to
# compute it again in the outer loop.
derphi_star = gval[0]
return alpha_star, fc[0], gc[0], phi_star, old_fval, derphi_star
def scalar_search_wolfe2(phi, derphi=None, phi0=None,
old_phi0=None, derphi0=None,
c1=1e-4, c2=0.9, amax=50):
"""Find alpha that satisfies strong Wolfe conditions.
The search direction is assumed to be a descent direction (i.e. `derphi0 < 0`); only `alpha > 0` is considered.
Parameters
----------
phi : callable f(x,*args)
Objective scalar function.
derphi : callable f'(x,*args), optional
Objective function derivative (can be None)
phi0 : float, optional
Value of phi at s=0
old_phi0 : float, optional
Value of phi at previous point
derphi0 : float, optional
Value of derphi at s=0
args : tuple
Additional arguments passed to objective function.
c1 : float
Parameter for Armijo condition rule.
c2 : float
Parameter for curvature condition rule.
Returns
-------
alpha_star : float
Best alpha
phi_star
phi at alpha_star
phi0
phi at 0
derphi_star
derphi at alpha_star
Notes
-----
Uses the line search algorithm to enforce strong Wolfe
conditions. See Wright and Nocedal, 'Numerical Optimization',
1999, pg. 59-60.
For the zoom phase it uses an algorithm by [...].
"""
if phi0 is None:
phi0 = phi(0.)
if derphi0 is None and derphi is not None:
derphi0 = derphi(0.)
alpha0 = 0
if old_phi0 is not None:
alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)
else:
alpha1 = 1.0
if alpha1 < 0:
alpha1 = 1.0
if alpha1 == 0:
# This shouldn't happen. Perhaps the increment has slipped below
# machine precision? For now, set the return variables, skip the
# useless while loop, and raise warnflag=2 due to possible imprecision.
alpha_star = None
phi_star = phi0
phi0 = old_phi0
derphi_star = None
phi_a1 = phi(alpha1)
#derphi_a1 = derphi(alpha1) evaluated below
phi_a0 = phi0
derphi_a0 = derphi0
i = 1
maxiter = 10
for i in xrange(maxiter):
if alpha1 == 0:
break
if (phi_a1 > phi0 + c1 * alpha1 * derphi0) or \
((phi_a1 >= phi_a0) and (i > 1)):
alpha_star, phi_star, derphi_star = \
_zoom(alpha0, alpha1, phi_a0,
phi_a1, derphi_a0, phi, derphi,
phi0, derphi0, c1, c2)
break
derphi_a1 = derphi(alpha1)
if (abs(derphi_a1) <= -c2*derphi0):
alpha_star = alpha1
phi_star = phi_a1
derphi_star = derphi_a1
break
if (derphi_a1 >= 0):
alpha_star, phi_star, derphi_star = \
_zoom(alpha1, alpha0, phi_a1,
phi_a0, derphi_a1, phi, derphi,
phi0, derphi0, c1, c2)
break
alpha2 = 2 * alpha1 # increase by factor of two on each iteration
i = i + 1
alpha0 = alpha1
alpha1 = alpha2
phi_a0 = phi_a1
phi_a1 = phi(alpha1)
derphi_a0 = derphi_a1
else:
# stopping test maxiter reached
alpha_star = alpha1
phi_star = phi_a1
derphi_star = None
return alpha_star, phi_star, phi0, derphi_star
def _cubicmin(a, fa, fpa, b, fb, c, fc):
"""
Finds the minimizer for a cubic polynomial that goes through the
points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa.
If no minimizer can be found return None
"""
# f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D
C = fpa
db = b - a
dc = c - a
if (db == 0) or (dc == 0) or (b == c):
return None
denom = (db * dc) ** 2 * (db - dc)
d1 = np.empty((2, 2))
d1[0, 0] = dc ** 2
d1[0, 1] = -db ** 2
d1[1, 0] = -dc ** 3
d1[1, 1] = db ** 3
[A, B] = np.dot(d1, np.asarray([fb - fa - C * db,
fc - fa - C * dc]).flatten())
A /= denom
B /= denom
radical = B * B - 3 * A * C
if radical < 0:
return None
if A == 0:
return None
xmin = a + (-B + np.sqrt(radical)) / (3 * A)
return xmin
def _quadmin(a, fa, fpa, b, fb):
"""
Finds the minimizer for a quadratic polynomial that goes through
the points (a,fa), (b,fb) with derivative at a of fpa,
"""
# f(x) = B*(x-a)^2 + C*(x-a) + D
D = fa
C = fpa
db = b - a * 1.0
if db == 0:
return None
B = (fb - D - C * db) / (db * db)
if B <= 0:
return None
xmin = a - C / (2.0 * B)
return xmin
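# Quick illustration (added note, not part of the original module): for
# f(x) = (x - 1)**2 the quadratic interpolant through a=0 (fa=1, fpa=-2) and
# b=2 (fb=1) recovers the true minimizer exactly, i.e.
# _quadmin(0., 1., -2., 2., 1.) == 1.0.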
def _zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo,
phi, derphi, phi0, derphi0, c1, c2):
"""
Part of the optimization algorithm in `scalar_search_wolfe2`.
"""
maxiter = 10
i = 0
delta1 = 0.2 # cubic interpolant check
delta2 = 0.1 # quadratic interpolant check
phi_rec = phi0
a_rec = 0
while True:
# interpolate to find a trial step length between a_lo and
# a_hi. We need to choose an interpolation here. Use cubic
# interpolation, and then if the result is within delta *
# dalpha or outside of the interval bounded by a_lo or a_hi,
# then use quadratic interpolation; if the result is still too
# close, then use bisection.
dalpha = a_hi - a_lo
if dalpha < 0:
a, b = a_hi, a_lo
else:
a, b = a_lo, a_hi
# minimizer of cubic interpolant
# (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi)
#
# if the result is too close to the end points (or out of the
# interval) then use quadratic interpolation with phi_lo,
# derphi_lo and phi_hi; if the result is still too close to the
# end points (or out of the interval) then use bisection
if (i > 0):
cchk = delta1 * dalpha
a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi,
a_rec, phi_rec)
if (i == 0) or (a_j is None) or (a_j > b - cchk) or (a_j < a + cchk):
qchk = delta2 * dalpha
a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi)
if (a_j is None) or (a_j > b-qchk) or (a_j < a+qchk):
a_j = a_lo + 0.5*dalpha
# Check new value of a_j
phi_aj = phi(a_j)
if (phi_aj > phi0 + c1*a_j*derphi0) or (phi_aj >= phi_lo):
phi_rec = phi_hi
a_rec = a_hi
a_hi = a_j
phi_hi = phi_aj
else:
derphi_aj = derphi(a_j)
if abs(derphi_aj) <= -c2*derphi0:
a_star = a_j
val_star = phi_aj
valprime_star = derphi_aj
break
if derphi_aj*(a_hi - a_lo) >= 0:
phi_rec = phi_hi
a_rec = a_hi
a_hi = a_lo
phi_hi = phi_lo
else:
phi_rec = phi_lo
a_rec = a_lo
a_lo = a_j
phi_lo = phi_aj
derphi_lo = derphi_aj
i += 1
if (i > maxiter):
a_star = a_j
val_star = phi_aj
valprime_star = None
break
return a_star, val_star, valprime_star
#------------------------------------------------------------------------------
# Armijo line and scalar searches
#------------------------------------------------------------------------------
def line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):
"""Minimize over alpha, the function ``f(xk+alpha pk)``.
Parameters
----------
f : callable
Function to be minimized.
xk : array_like
Current point.
pk : array_like
Search direction.
gfk : array_like
Gradient of `f` at point `xk`.
old_fval : float
Value of `f` at point `xk`.
args : tuple, optional
Optional arguments.
c1 : float, optional
Value to control stopping criterion.
alpha0 : scalar, optional
Value of `alpha` at start of the optimization.
Returns
-------
alpha
f_count
f_val_at_alpha
Notes
-----
Uses the interpolation algorithm (Armijo backtracking) as suggested by
Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57
"""
xk = np.atleast_1d(xk)
fc = [0]
def phi(alpha1):
fc[0] += 1
return f(xk + alpha1*pk, *args)
if old_fval is None:
phi0 = phi(0.)
else:
phi0 = old_fval # compute f(xk) -- done in past loop
derphi0 = np.dot(gfk, pk)
alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1,
alpha0=alpha0)
return alpha, fc[0], phi1
def line_search_BFGS(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):
"""
Compatibility wrapper for `line_search_armijo`
"""
r = line_search_armijo(f, xk, pk, gfk, old_fval, args=args, c1=c1,
alpha0=alpha0)
return r[0], r[1], 0, r[2]
def scalar_search_armijo(phi, phi0, derphi0, c1=1e-4, alpha0=1, amin=0):
"""Minimize over alpha, the function ``phi(alpha)``.
Uses the interpolation algorithm (Armijo backtracking) as suggested by
Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57
The search direction is assumed to be a descent direction (`derphi0 < 0`); only `alpha > 0` is considered.
Returns
-------
alpha
phi1
"""
phi_a0 = phi(alpha0)
if phi_a0 <= phi0 + c1*alpha0*derphi0:
return alpha0, phi_a0
# Otherwise compute the minimizer of a quadratic interpolant:
alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0)
phi_a1 = phi(alpha1)
if (phi_a1 <= phi0 + c1*alpha1*derphi0):
return alpha1, phi_a1
# Otherwise loop with cubic interpolation until we find an alpha which
# satisfies the first Wolfe condition (since we are backtracking, we will
# assume that the value of alpha is not too small and satisfies the second
# condition).
while alpha1 > amin: # we are assuming alpha>0 is a descent direction
factor = alpha0**2 * alpha1**2 * (alpha1-alpha0)
a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \
alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0)
a = a / factor
b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \
alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0)
b = b / factor
alpha2 = (-b + np.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a)
phi_a2 = phi(alpha2)
if (phi_a2 <= phi0 + c1*alpha2*derphi0):
return alpha2, phi_a2
if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96:
alpha2 = alpha1 / 2.0
alpha0 = alpha1
alpha1 = alpha2
phi_a0 = phi_a1
phi_a1 = phi_a2
# Failed to find a suitable step length
return None, phi_a1
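# ---------------------------------------------------------------------------
# Illustrative usage (a minimal sketch added for clarity, not part of the
# original module): run the three searches on the quadratic f(x) = x.x with
# gradient 2x, starting from x0 = [1, 1] along the steepest-descent direction.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    def f(x):
        return np.dot(x, x)

    def fprime(x):
        return 2 * x

    x0 = np.array([1.0, 1.0])
    pk = -fprime(x0)  # descent direction, so np.dot(fprime(x0), pk) < 0

    stp, fc, gc, fval, old_fval, gval = line_search_wolfe1(f, fprime, x0, pk)
    print("wolfe1: step=%s f=%s" % (stp, fval))

    alpha, fc, gc, fval, old_fval, dval = line_search_wolfe2(f, fprime, x0, pk)
    print("wolfe2: step=%s f=%s" % (alpha, fval))

    alpha, fc, fval = line_search_armijo(f, x0, pk, fprime(x0), f(x0))
    print("armijo: step=%s f=%s" % (alpha, fval))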
|
|
import pytest
from mock import Mock
import six
from thefuck.utils import default_settings, \
memoize, get_closest, get_all_executables, replace_argument, \
get_all_matched_commands, is_app, for_app, cache, compatibility_call
from tests.utils import Command
@pytest.mark.parametrize('override, old, new', [
({'key': 'val'}, {}, {'key': 'val'}),
({'key': 'new-val'}, {'key': 'val'}, {'key': 'val'}),
({'key': 'new-val', 'unset': 'unset'}, {'key': 'val'}, {'key': 'val', 'unset': 'unset'})])
def test_default_settings(settings, override, old, new):
settings.clear()
settings.update(old)
fn = lambda _: _
default_settings(override)(fn)(None)
assert settings == new
def test_memoize():
fn = Mock(__name__='fn')
memoized = memoize(fn)
memoized()
memoized()
fn.assert_called_once_with()
@pytest.mark.usefixtures('no_memoize')
def test_no_memoize():
fn = Mock(__name__='fn')
memoized = memoize(fn)
memoized()
memoized()
assert fn.call_count == 2
class TestGetClosest(object):
def test_when_can_match(self):
assert 'branch' == get_closest('brnch', ['branch', 'status'])
def test_when_cant_match(self):
assert 'status' == get_closest('st', ['status', 'reset'])
def test_without_fallback(self):
assert get_closest('st', ['status', 'reset'],
fallback_to_first=False) is None
@pytest.fixture
def get_aliases(mocker):
mocker.patch('thefuck.shells.shell.get_aliases',
return_value=['vim', 'apt-get', 'fsck', 'fuck'])
@pytest.mark.usefixtures('no_memoize', 'get_aliases')
def test_get_all_executables():
all_callables = get_all_executables()
assert 'vim' in all_callables
assert 'fsck' in all_callables
assert 'fuck' not in all_callables
@pytest.mark.parametrize('args, result', [
(('apt-get instol vim', 'instol', 'install'), 'apt-get install vim'),
(('git brnch', 'brnch', 'branch'), 'git branch')])
def test_replace_argument(args, result):
assert replace_argument(*args) == result
@pytest.mark.parametrize('stderr, result', [
(("git: 'cone' is not a git command. See 'git --help'.\n"
'\n'
'Did you mean one of these?\n'
'\tclone'), ['clone']),
(("git: 're' is not a git command. See 'git --help'.\n"
'\n'
'Did you mean one of these?\n'
'\trebase\n'
'\treset\n'
'\tgrep\n'
'\trm'), ['rebase', 'reset', 'grep', 'rm']),
(('tsuru: "target" is not a tsuru command. See "tsuru help".\n'
'\n'
'Did you mean one of these?\n'
'\tservice-add\n'
'\tservice-bind\n'
'\tservice-doc\n'
'\tservice-info\n'
'\tservice-list\n'
'\tservice-remove\n'
'\tservice-status\n'
'\tservice-unbind'), ['service-add', 'service-bind', 'service-doc',
'service-info', 'service-list', 'service-remove',
'service-status', 'service-unbind'])])
def test_get_all_matched_commands(stderr, result):
assert list(get_all_matched_commands(stderr)) == result
@pytest.mark.usefixtures('no_memoize')
@pytest.mark.parametrize('script, names, result', [
('git diff', ['git', 'hub'], True),
('hub diff', ['git', 'hub'], True),
('hg diff', ['git', 'hub'], False)])
def test_is_app(script, names, result):
assert is_app(Command(script), *names) == result
@pytest.mark.usefixtures('no_memoize')
@pytest.mark.parametrize('script, names, result', [
('git diff', ['git', 'hub'], True),
('hub diff', ['git', 'hub'], True),
('hg diff', ['git', 'hub'], False)])
def test_for_app(script, names, result):
@for_app(*names)
def match(command):
return True
assert match(Command(script)) == result
class TestCache(object):
@pytest.fixture(autouse=True)
def enable_cache(self, monkeypatch):
monkeypatch.setattr('thefuck.utils.cache.disabled', False)
@pytest.fixture
def shelve(self, mocker):
value = {}
class _Shelve(object):
def __init__(self, path):
pass
def __setitem__(self, k, v):
value[k] = v
def __getitem__(self, k):
return value[k]
def get(self, k, v=None):
return value.get(k, v)
def close(self):
return
mocker.patch('thefuck.utils.shelve.open', new_callable=lambda: _Shelve)
return value
@pytest.fixture(autouse=True)
def mtime(self, mocker):
mocker.patch('thefuck.utils.os.path.getmtime', return_value=0)
@pytest.fixture
def fn(self):
@cache('~/.bashrc')
def fn():
return 'test'
return fn
@pytest.fixture
def key(self):
if six.PY2:
return 'tests.test_utils.<function fn '
else:
return 'tests.test_utils.<function TestCache.fn.<locals>.fn '
def test_with_blank_cache(self, shelve, fn, key):
assert shelve == {}
assert fn() == 'test'
assert shelve == {key: {'etag': '0', 'value': 'test'}}
def test_with_filled_cache(self, shelve, fn, key):
cache_value = {key: {'etag': '0', 'value': 'new-value'}}
shelve.update(cache_value)
assert fn() == 'new-value'
assert shelve == cache_value
def test_when_etag_changed(self, shelve, fn, key):
shelve.update({key: {'etag': '-1', 'value': 'old-value'}})
assert fn() == 'test'
assert shelve == {key: {'etag': '0', 'value': 'test'}}
class TestCompatibilityCall(object):
def test_match(self):
def match(command):
assert command == Command()
return True
assert compatibility_call(match, Command())
def test_old_match(self, settings):
def match(command, _settings):
assert command == Command()
assert settings == _settings
return True
assert compatibility_call(match, Command())
def test_get_new_command(self):
def get_new_command(command):
assert command == Command()
return True
assert compatibility_call(get_new_command, Command())
def test_old_get_new_command(self, settings):
def get_new_command(command, _settings):
assert command == Command()
assert settings == _settings
return True
assert compatibility_call(get_new_command, Command())
def test_side_effect(self):
def side_effect(command, new_command):
assert command == Command() == new_command
return True
assert compatibility_call(side_effect, Command(), Command())
def test_old_side_effect(self, settings):
def side_effect(command, new_command, _settings):
assert command == Command() == new_command
assert settings == _settings
return True
assert compatibility_call(side_effect, Command(), Command())
|
|
#!/usr/bin/env python
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import os
import platform
import subprocess
import sys
from distutils.command.build import build
import pkg_resources
from setuptools import find_packages, setup
from setuptools.command.install import install
from setuptools.command.test import test
base_dir = os.path.dirname(__file__)
src_dir = os.path.join(base_dir, "src")
# When executing the setup.py, we need to be able to import ourselves, this
# means that we need to add the src/ directory to the sys.path.
sys.path.insert(0, src_dir)
about = {}
with open(os.path.join(src_dir, "cryptography", "__about__.py")) as f:
exec(f.read(), about)
VECTORS_DEPENDENCY = "cryptography_vectors=={0}".format(about['__version__'])
requirements = [
"idna>=2.0",
"pyasn1>=0.1.8",
"six>=1.4.1",
"setuptools>=11.3",
]
setup_requirements = []
if sys.version_info < (3, 4):
requirements.append("enum34")
if sys.version_info < (3, 3):
requirements.append("ipaddress")
if platform.python_implementation() == "PyPy":
if sys.pypy_version_info < (2, 6):
raise RuntimeError(
"cryptography 1.0 is not compatible with PyPy < 2.6. Please "
"upgrade PyPy to use this library."
)
else:
requirements.append("cffi>=1.4.1")
setup_requirements.append("cffi>=1.4.1")
test_requirements = [
"pytest",
"pretend",
"iso8601",
"pyasn1_modules",
]
if sys.version_info[:2] > (2, 6):
test_requirements.append("hypothesis>=1.11.4")
# If there are no vectors locally, that probably means we are in a tarball and
# need to go and get the matching vectors package from PyPI
if not os.path.exists(os.path.join(base_dir, "vectors/setup.py")):
test_requirements.append(VECTORS_DEPENDENCY)
def cc_is_available():
return sys.platform == "darwin" and list(map(
int, platform.mac_ver()[0].split("."))) >= [10, 8, 0]
backends = [
"openssl = cryptography.hazmat.backends.openssl:backend"
]
if cc_is_available():
backends.append(
"commoncrypto = cryptography.hazmat.backends.commoncrypto:backend",
)
class PyTest(test):
def finalize_options(self):
test.finalize_options(self)
self.test_args = []
self.test_suite = True
# This means there's a vectors/ folder with the package in here.
# cd into it, install the vectors package and then refresh sys.path
if VECTORS_DEPENDENCY not in test_requirements:
subprocess.check_call(
[sys.executable, "setup.py", "install"], cwd="vectors"
)
pkg_resources.get_distribution("cryptography_vectors").activate()
def run_tests(self):
# Import here because in module scope the eggs are not loaded.
import pytest
test_args = [os.path.join(base_dir, "tests")]
errno = pytest.main(test_args)
sys.exit(errno)
def keywords_with_side_effects(argv):
"""
Get a dictionary with setup keywords that (can) have side effects.
:param argv: A list of strings with command line arguments.
:returns: A dictionary with keyword arguments for the ``setup()`` function.
This setup.py script uses the setuptools 'setup_requires' feature because
this is required by the cffi package to compile extension modules. The
purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi
build process as a result of setup.py invocations that don't need the cffi
module to be built (setup.py serves the dual purpose of exposing package
metadata).
All of the options listed by ``python setup.py --help`` that print
information should be recognized here. The commands ``clean``,
``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.
Any combination of these options and commands is also supported.
This function was originally based on the `setup.py script`_ of SciPy (see
also the discussion in `pip issue #25`_).
.. _pip issue #25: https://github.com/pypa/pip/issues/25
.. _setup.py script: https://github.com/scipy/scipy/blob/master/setup.py
"""
no_setup_requires_arguments = (
'-h', '--help',
'-n', '--dry-run',
'-q', '--quiet',
'-v', '--verbose',
'-V', '--version',
'--author',
'--author-email',
'--classifiers',
'--contact',
'--contact-email',
'--description',
'--egg-base',
'--fullname',
'--help-commands',
'--keywords',
'--licence',
'--license',
'--long-description',
'--maintainer',
'--maintainer-email',
'--name',
'--no-user-cfg',
'--obsoletes',
'--platforms',
'--provides',
'--requires',
'--url',
'clean',
'egg_info',
'register',
'sdist',
'upload',
)
def is_short_option(argument):
"""Check whether a command line argument is a short option."""
return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'
def expand_short_options(argument):
"""Expand combined short options into canonical short options."""
return ('-' + char for char in argument[1:])
def argument_without_setup_requirements(argv, i):
"""Check whether a command line argument needs setup requirements."""
if argv[i] in no_setup_requires_arguments:
# Simple case: An argument which is either an option or a command
# which doesn't need setup requirements.
return True
elif (is_short_option(argv[i]) and
all(option in no_setup_requires_arguments
for option in expand_short_options(argv[i]))):
# Not so simple case: Combined short options none of which need
# setup requirements.
return True
elif argv[i - 1:i] == ['--egg-base']:
# Tricky case: --egg-base takes an argument which should not make
# us use setup_requires (defeating the purpose of this code).
return True
else:
return False
if all(argument_without_setup_requirements(argv, i)
for i in range(1, len(argv))):
return {
"cmdclass": {
"build": DummyBuild,
"install": DummyInstall,
"test": DummyPyTest,
}
}
else:
cffi_modules = [
"src/_cffi_src/build_openssl.py:ffi",
"src/_cffi_src/build_constant_time.py:ffi",
"src/_cffi_src/build_padding.py:ffi",
]
if cc_is_available():
cffi_modules.append("src/_cffi_src/build_commoncrypto.py:ffi")
return {
"setup_requires": setup_requirements,
"cmdclass": {
"test": PyTest,
},
"cffi_modules": cffi_modules
}
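# Illustrative note (added for clarity, not part of the original script): a
# side-effect-free invocation such as ``python setup.py --version`` makes
# keywords_with_side_effects() return only the Dummy* cmdclass mapping, so the
# cffi build machinery is never pulled in; ``python setup.py build`` instead
# returns the setup_requires/cffi_modules keywords that trigger it.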
setup_requires_error = ("Requested setup command that needs 'setup_requires' "
"while command line arguments implied a side effect "
"free command or option.")
class DummyBuild(build):
"""
This class makes it very obvious when ``keywords_with_side_effects()`` has
incorrectly interpreted the command line arguments to ``setup.py build`` as
one of the 'side effect free' commands or options.
"""
def run(self):
raise RuntimeError(setup_requires_error)
class DummyInstall(install):
"""
This class makes it very obvious when ``keywords_with_side_effects()`` has
incorrectly interpreted the command line arguments to ``setup.py install``
as one of the 'side effect free' commands or options.
"""
def run(self):
raise RuntimeError(setup_requires_error)
class DummyPyTest(test):
"""
This class makes it very obvious when ``keywords_with_side_effects()`` has
incorrectly interpreted the command line arguments to ``setup.py test`` as
one of the 'side effect free' commands or options.
"""
def run_tests(self):
raise RuntimeError(setup_requires_error)
with open(os.path.join(base_dir, "README.rst")) as f:
long_description = f.read()
setup(
name=about["__title__"],
version=about["__version__"],
description=about["__summary__"],
long_description=long_description,
license=about["__license__"],
url=about["__uri__"],
author=about["__author__"],
author_email=about["__email__"],
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Operating System :: POSIX :: BSD",
"Operating System :: POSIX :: Linux",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Security :: Cryptography",
],
package_dir={"": "src"},
packages=find_packages(where="src", exclude=["_cffi_src", "_cffi_src.*"]),
include_package_data=True,
install_requires=requirements,
tests_require=test_requirements,
extras_require={
"test": test_requirements,
"docstest": [
"doc8",
"pyenchant",
"readme_renderer",
"sphinx",
"sphinx_rtd_theme",
"sphinxcontrib-spelling",
],
"pep8test": [
"flake8",
"flake8-import-order",
"pep8-naming",
],
},
# for cffi
zip_safe=False,
ext_package="cryptography.hazmat.bindings",
entry_points={
"cryptography.backends": backends,
},
**keywords_with_side_effects(sys.argv)
)
|
|
from collections import namedtuple
import numpy as np
from . import distributions
__all__ = ['_find_repeats', 'linregress', 'theilslopes']
LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept',
'rvalue', 'pvalue',
'stderr'))
def linregress(x, y=None):
"""
Calculate a linear least-squares regression for two sets of measurements.
Parameters
----------
x, y : array_like
Two sets of measurements. Both arrays should have the same length.
If only x is given (and y=None), then it must be a two-dimensional
array where one dimension has length 2. The two sets of measurements
are then found by splitting the array along the length-2 dimension.
Returns
-------
slope : float
slope of the regression line
intercept : float
intercept of the regression line
rvalue : float
correlation coefficient
pvalue : float
two-sided p-value for a hypothesis test whose null hypothesis is
that the slope is zero, using Wald Test with t-distribution of
the test statistic.
stderr : float
Standard error of the estimated gradient.
See also
--------
:func:`scipy.optimize.curve_fit` : Use non-linear
least squares to fit a function to data.
:func:`scipy.optimize.leastsq` : Minimize the sum of
squares of a set of equations.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> np.random.seed(12345678)
>>> x = np.random.random(10)
>>> y = np.random.random(10)
>>> slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
To get coefficient of determination (r_squared)
>>> print("r-squared:", r_value**2)
r-squared: 0.080402268539
Plot the data along with the fitted line
>>> plt.plot(x, y, 'o', label='original data')
>>> plt.plot(x, intercept + slope*x, 'r', label='fitted line')
>>> plt.legend()
>>> plt.show()
"""
TINY = 1.0e-20
if y is None: # x is a (2, N) or (N, 2) shaped array_like
x = np.asarray(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
msg = ("If only `x` is given as input, it has to be of shape "
"(2, N) or (N, 2), provided shape was %s" % str(x.shape))
raise ValueError(msg)
else:
x = np.asarray(x)
y = np.asarray(y)
if x.size == 0 or y.size == 0:
raise ValueError("Inputs must not be empty.")
n = len(x)
xmean = np.mean(x, None)
ymean = np.mean(y, None)
# average sum of squares:
ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat
r_num = ssxym
r_den = np.sqrt(ssxm * ssym)
if r_den == 0.0:
r = 0.0
else:
r = r_num / r_den
# test for numerical error propagation
if r > 1.0:
r = 1.0
elif r < -1.0:
r = -1.0
df = n - 2
slope = r_num / ssxm
intercept = ymean - slope*xmean
if n == 2:
# handle case when only two points are passed in
if y[0] == y[1]:
prob = 1.0
else:
prob = 0.0
sterrest = 0.0
else:
t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
prob = 2 * distributions.t.sf(np.abs(t), df)
sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)
return LinregressResult(slope, intercept, r, prob, sterrest)
def theilslopes(y, x=None, alpha=0.95):
r"""
Computes the Theil-Sen estimator for a set of points (x, y).
`theilslopes` implements a method for robust linear regression. It
computes the slope as the median of all slopes between paired values.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
alpha : float, optional
Confidence degree between 0 and 1. Default is 95% confidence.
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
interpreted as "find the 90% confidence interval".
Returns
-------
medslope : float
Theil slope.
medintercept : float
Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
lo_slope : float
Lower bound of the confidence interval on `medslope`.
up_slope : float
Upper bound of the confidence interval on `medslope`.
Notes
-----
The implementation of `theilslopes` follows [1]_. The intercept is
not defined in [1]_, and here it is defined as ``median(y) -
medslope*median(x)``, which is given in [3]_. Other definitions of
the intercept exist in the literature. A confidence interval for
the intercept is not given as this question is not addressed in
[1]_.
References
----------
.. [1] P.K. Sen, "Estimates of the regression coefficient based on Kendall's tau",
J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
.. [2] H. Theil, "A rank-invariant method of linear and polynomial
regression analysis I, II and III", Nederl. Akad. Wetensch., Proc.
53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
.. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed.,
John Wiley and Sons, New York, pp. 493.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, num=150)
>>> y = x + np.random.normal(size=x.size)
>>> y[11:15] += 10 # add outliers
>>> y[-5:] -= 7
Compute the slope, intercept and 90% confidence interval. For comparison,
also compute the least-squares fit with `linregress`:
>>> res = stats.theilslopes(y, x, 0.90)
>>> lsq_res = stats.linregress(x, y)
Plot the results. The Theil-Sen regression line is shown in red, with the
dashed red lines illustrating the confidence interval of the slope (note
that the dashed red lines are not the confidence interval of the regression
as the confidence interval of the intercept is not included). The green
line shows the least-squares fit for comparison.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, y, 'b.')
>>> ax.plot(x, res[1] + res[0] * x, 'r-')
>>> ax.plot(x, res[1] + res[2] * x, 'r--')
>>> ax.plot(x, res[1] + res[3] * x, 'r--')
>>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
>>> plt.show()
"""
# We copy both x and y so we can use _find_repeats.
y = np.array(y).flatten()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.array(x, dtype=float).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x)))
# Compute sorted slopes only when deltax > 0
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes = deltay[deltax > 0] / deltax[deltax > 0]
slopes.sort()
medslope = np.median(slopes)
medinter = np.median(y) - medslope * np.median(x)
# Now compute confidence intervals
if alpha > 0.5:
alpha = 1. - alpha
z = distributions.norm.ppf(alpha / 2.)
# This implements (2.6) from Sen (1968)
_, nxreps = _find_repeats(x)
_, nyreps = _find_repeats(y)
nt = len(slopes) # N in Sen (1968)
ny = len(y) # n in Sen (1968)
# Equation 2.6 in Sen (1968):
sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
np.sum(k * (k-1) * (2*k + 5) for k in nxreps) -
np.sum(k * (k-1) * (2*k + 5) for k in nyreps))
# Find the confidence interval indices in `slopes`
sigma = np.sqrt(sigsq)
Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
delta = slopes[[Rl, Ru]]
return medslope, medinter, delta[0], delta[1]
def _find_repeats(arr):
# This function assumes it may clobber its input.
if len(arr) == 0:
return np.array(0, np.float64), np.array(0, np.intp)
# XXX This cast was previously needed for the Fortran implementation,
# should we ditch it?
arr = np.asarray(arr, np.float64).ravel()
arr.sort()
# Taken from NumPy 1.9's np.unique.
change = np.concatenate(([True], arr[1:] != arr[:-1]))
unique = arr[change]
change_idx = np.concatenate(np.nonzero(change) + ([arr.size],))
freq = np.diff(change_idx)
atleast2 = freq > 1
return unique[atleast2], freq[atleast2]
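# Illustrative sketch (not part of the original module): what `_find_repeats`
# returns and how its frequencies feed the tie-correction term of Sen's
# equation (2.6) used in `theilslopes` above. Assumes NumPy is imported as
# `np`, as elsewhere in this module.
def _demo_find_repeats():
    x = np.array([1.0, 2.0, 2.0, 3.0, 3.0, 3.0, 4.0])
    repeated_values, counts = _find_repeats(x.copy())
    # repeated_values -> array([2., 3.]); counts -> array([2, 3])
    # Each tie group of size k contributes k*(k-1)*(2*k+5) to the correction.
    correction = sum(k * (k - 1) * (2 * k + 5) for k in counts)
    return repeated_values, counts, correction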
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from looker_sdk import models
from looker_deployer.utils import deploy_logging
from looker_deployer.utils.get_client import get_client
logger = deploy_logging.get_logger(__name__)
class MultipleAssetsFoundError(Exception):
"""Exception raised if multiple assets are found"""
def __init__(self, asset_name, message="Found multiple entries for asset. Please remove duplicates"):
self.asset_name = asset_name
self.message = message
super().__init__(self.message)
def __str__(self):
return f"{self.asset_name} -> {self.message}"
class TargetContentNotFound(Exception):
"""Exception raised if content is not found in target instance"""
def __init__(self, missing_dashes, missing_looks, message="Content not found in target instance."):
self.missing_dashes = missing_dashes
self.missing_looks = missing_looks
self.message = message
super().__init__(self.message)
def __str__(self):
return f"{self.message} -> dashes: {self.missing_dashes}, looks: {self.missing_looks}"
def match_dashboard_id(source_dashboard_id, source_sdk, target_sdk):
source = source_sdk.dashboard(str(source_dashboard_id))
logger.debug("Attempting dashboard match", extra={"title": source.title, "slug": source.slug, "id": source.id})
target_dash = target_sdk.search_dashboards(slug=source.slug)
if len(target_dash) > 1:
raise MultipleAssetsFoundError(source.title)
assert len(target_dash) == 1, f"Could not find dashboard {source.title} in target env. Has it been deployed?"
target_id = target_dash[0].id
logger.debug("Found dashboard", extra={"id": target_id})
return target_id
def match_look_id(source_look_id, source_sdk, target_sdk):
source = source_sdk.look(source_look_id)
logger.debug("Attempting look match", extra={"title": source.title, "id": source.id})
target_look = target_sdk.search_looks(title=source.title)
if len(target_look) > 1:
raise MultipleAssetsFoundError(source.title)
assert len(target_look) == 1, f"Could not find look {source.title} in target env. Has it been deployed?"
target_id = target_look[0].id
logger.debug("Found look", extra={"id": target_id})
return target_id
def return_board(board_name, source_sdk):
logger.debug("Searching boards", extra={"title": board_name})
board_list = source_sdk.search_homepages(title=board_name)
if len(board_list) > 1:
raise MultipleAssetsFoundError(board_name)
assert len(board_list) == 1, "Could not find board! Double check available titles and try again."
logger.debug("Found board", extra={"board": board_list})
return board_list[0]
def create_or_update_board(source_board_object, target_sdk, title_override=None):
# Determine if board already exists in target environment
search_title = title_override or source_board_object.title
search_res = target_sdk.search_homepages(title=search_title)
assert len(search_res) < 2, "More than one board found! Refine your search or remove duplicate names."
try:
assert len(search_res) == 1
# If board does not exist then create
except AssertionError:
logger.info(
"No pre-existing board found. Creating new board in target environment",
extra={"title": search_title}
)
new_board = models.WriteHomepage(
title=source_board_object.title,
description=source_board_object.description
)
resp = target_sdk.create_homepage(new_board)
logger.info("Board created", extra={"id": resp.id})
return resp.id
# If board already exists, clear out sections and update
logger.info(
"Found board in target instance. Updating and rebuilding content",
extra={"title": search_title}
)
target_board = search_res[0]
# Clear out existing sections
section_list = [i.id for i in target_board.homepage_sections]
logger.debug("Found sections to clear", extra={"section_list": section_list})
for section in section_list:
logger.debug("Clearing section for refresh", extra={"section_id": section})
target_sdk.delete_homepage_section(section)
# Update
update_board = models.WriteHomepage(
title=source_board_object.title,
description=source_board_object.description
)
resp = target_sdk.update_homepage(target_board.id, update_board)
logger.info("Board updated", extra={"id": resp.id})
return resp.id
def create_board_section(source_board_section_object, target_board_id, target_sdk):
new_board_section = models.WriteHomepageSection(
title=source_board_section_object.title,
description=source_board_section_object.description,
homepage_id=target_board_id
)
logger.info("Creating Section", extra={"board_id": target_board_id, "section_title": new_board_section.title})
resp = target_sdk.create_homepage_section(new_board_section)
logger.info("Section created", extra={"section_id": resp.id})
return resp.id
def create_board_item(source_board_item_object, target_board_section_id, source_sdk, target_sdk):
dashboard_id = None
look_id = None
if source_board_item_object.dashboard_id:
dashboard_id = match_dashboard_id(source_board_item_object.dashboard_id, source_sdk, target_sdk)
if source_board_item_object.look_id:
look_id = match_look_id(source_board_item_object.look_id, source_sdk, target_sdk)
new_board_item = models.WriteHomepageItem()
new_board_item.__dict__.update(source_board_item_object.__dict__)
new_board_item.dashboard_id = dashboard_id
new_board_item.look_id = look_id
new_board_item.homepage_section_id = target_board_section_id
logger.info(
"Creating item",
extra={
"section_id": new_board_item.homepage_section_id,
"dashboard_id": new_board_item.dashboard_id,
"look_id": new_board_item.look_id,
"url": new_board_item.url
}
)
resp = target_sdk.create_homepage_item(new_board_item)
logger.info("Item created", extra={"id": resp.id})
return resp
def audit_board_content(board_object, source_sdk, target_sdk):
dash_list = []
look_list = []
missing_dashes = []
missing_looks = []
for i in board_object.homepage_sections:
for j in i.homepage_items:
if j.dashboard_id:
dash_list.append(j.dashboard_id)
if j.look_id:
look_list.append(j.look_id)
for dash in dash_list:
try:
match_dashboard_id(dash, source_sdk, target_sdk)
except AssertionError:
dash_title = source_sdk.dashboard(str(dash)).title
missing_dashes.append({"dash_id": dash, "dash_title": dash_title})
for look in look_list:
try:
match_look_id(look, source_sdk, target_sdk)
except AssertionError:
look_title = source_sdk.look(look).title
missing_looks.append({"look_id": look, "look_title": look_title})
return (missing_dashes, missing_looks)
def send_boards(board_name, source_sdk, target_sdk, title_override=None, allow_partial=False):
source_board = return_board(board_name, source_sdk)
missing_dashes, missing_looks = audit_board_content(source_board, source_sdk, target_sdk)
if not allow_partial and (missing_dashes or missing_looks):
logger.error(
"Missing Content. Make sure it's deployed or rerun with allow-partial flag.",
extra={"missing_dashboards": missing_dashes, "missing_looks": missing_looks}
)
raise TargetContentNotFound(missing_dashes, missing_looks)
elif missing_dashes or missing_looks:
logger.warning(
"Missing content warning.",
extra={"missing_dashboards": missing_dashes, "missing_looks": missing_looks}
)
else:
logger.info("All content accounted for!")
target_board_id = create_or_update_board(source_board, target_sdk, title_override)
for section in source_board.homepage_sections:
target_section_id = create_board_section(section, target_board_id, target_sdk)
for item in section.homepage_items:
try:
create_board_item(item, target_section_id, source_sdk, target_sdk)
except AssertionError:
if allow_partial:
logger.warning("Could not find content!", extra={"item": item.title})
pass
else:
raise
def main(args):
if args.debug:
logger.setLevel(logging.DEBUG)
source_sdk = get_client(args.ini, args.source)
for t in args.target:
target_sdk = get_client(args.ini, t)
send_boards(args.board, source_sdk, target_sdk, args.title_change, args.allow_partial)
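# Illustrative sketch (not part of the original module): one possible argparse
# wiring that produces the attributes main() reads (debug, ini, source,
# target, board, title_change, allow_partial). The flag names below are
# assumptions for illustration; the real looker_deployer CLI defines its own
# parser elsewhere.
def _build_demo_parser():
    import argparse
    parser = argparse.ArgumentParser(description="Deploy a board between Looker instances")
    parser.add_argument("--board", required=True, help="Title of the board to deploy")
    parser.add_argument("--source", required=True, help="Source instance section in the ini file")
    parser.add_argument("--target", nargs="+", required=True, help="One or more target instance sections")
    parser.add_argument("--ini", default="looker.ini", help="Path to the Looker SDK ini file")
    parser.add_argument("--title-change", dest="title_change", default=None, help="Optional new title on the target")
    parser.add_argument("--allow-partial", dest="allow_partial", action="store_true", help="Deploy even if some content is missing")
    parser.add_argument("--debug", action="store_true", help="Enable debug logging")
    return parser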
|
|
# from binaryninja import *
import os
import webbrowser
import time
import sys
from pathlib import Path
from urllib.request import pathname2url
from binaryninja.interaction import get_save_filename_input, show_message_box, TextLineField, ChoiceField, SaveFileNameField, get_form_input
from binaryninja.settings import Settings
from binaryninja.enums import MessageBoxButtonSet, MessageBoxIcon, MessageBoxButtonResult, InstructionTextTokenType, BranchType, DisassemblyOption, FunctionGraphType
from binaryninja.function import DisassemblySettings
from binaryninja.plugin import PluginCommand
colors = {'green': [162, 217, 175], 'red': [222, 143, 151], 'blue': [128, 198, 233], 'cyan': [142, 230, 237], 'lightCyan': [
176, 221, 228], 'orange': [237, 189, 129], 'yellow': [237, 223, 179], 'magenta': [218, 196, 209], 'none': [74, 74, 74],
'disabled': [144, 144, 144]}
escape_table = {
    "'": "&#39;",
    ">": "&#62;",
    "<": "&#60;",
    '"': "&#34;",
    ' ': "&#160;"
}
def escape(toescape):
# handle extended unicode
toescape = toescape.encode('ascii', 'xmlcharrefreplace')
# still escape the basics
if sys.version_info[0] == 3:
return ''.join(escape_table.get(chr(i), chr(i)) for i in toescape)
else:
return ''.join(escape_table.get(i, i) for i in toescape)
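# Illustrative sketch (not part of the original plugin): expected output of
# escape() on Python 3, where spaces, angle brackets and quotes become numeric
# character references so instruction text embeds safely in the SVG markup.
def _demo_escape():
    rendered = escape("mov eax, <dword> 'x'")
    # -> "mov&#160;eax,&#160;&#60;dword&#62;&#160;&#39;x&#39;"
    return rendered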
def save_svg(bv, function):
sym = bv.get_symbol_at(function.start)
if sym:
offset = sym.name
else:
offset = "%x" % function.start
path = Path(os.path.dirname(bv.file.filename))
origname = os.path.basename(bv.file.filename)
filename = path / f'binaryninja-{origname}-{offset}.html'
functionChoice = TextLineField("Blank to accept default")
# TODO: implement linear disassembly settings and output
modeChoices = ["Graph"]
modeChoiceField = ChoiceField("Mode", modeChoices)
if Settings().get_bool('ui.debugMode'):
formChoices = ["Assembly", "Lifted IL", "LLIL", "LLIL SSA", "Mapped Medium", "Mapped Medium SSA", "MLIL", "MLIL SSA", "HLIL", "HLIL SSA"]
formChoiceField = ChoiceField("Form", formChoices)
else:
formChoices = ["Assembly", "LLIL", "MLIL", "HLIL"]
formChoiceField = ChoiceField("Form", formChoices)
showOpcodes = ChoiceField("Show Opcodes", ["Yes", "No"])
showAddresses = ChoiceField("Show Addresses", ["Yes", "No"])
saveFileChoices = SaveFileNameField("Output file", 'HTML files (*.html)', str(filename))
if not get_form_input([f'Current Function: {offset}', functionChoice, formChoiceField, modeChoiceField, showOpcodes, showAddresses, saveFileChoices], "SVG Export") or saveFileChoices.result is None:
return
if saveFileChoices.result == '':
outputfile = filename
else:
outputfile = saveFileChoices.result
content = render_svg(function, offset, modeChoices[modeChoiceField.result], formChoices[formChoiceField.result], showOpcodes.result == 0, showAddresses.result == 0, origname)
output = open(outputfile, 'w')
output.write(content)
output.close()
result = show_message_box("Open SVG", "Would you like to view the exported SVG?",
buttons=MessageBoxButtonSet.YesNoButtonSet, icon=MessageBoxIcon.QuestionIcon)
if result == MessageBoxButtonResult.YesButton:
# might need more testing, latest py3 on windows seems.... broken with these APIs relative to other platforms
if sys.platform == 'win32':
webbrowser.open(outputfile)
else:
webbrowser.open('file://' + str(outputfile))
def instruction_data_flow(function, address):
# TODO: Extract data flow information
length = function.view.get_instruction_length(address)
func_bytes = function.view.read(address, length)
if sys.version_info[0] == 3:
hex = func_bytes.hex()
else:
hex = func_bytes.encode('hex')
padded = ' '.join([hex[i:i + 2] for i in range(0, len(hex), 2)])
return 'Opcode: {bytes}'.format(bytes=padded)
def render_svg(function, offset, mode, form, showOpcodes, showAddresses, origname):
settings = DisassemblySettings()
if showOpcodes:
settings.set_option(DisassemblyOption.ShowOpcode, True)
if showAddresses:
settings.set_option(DisassemblyOption.ShowAddress, True)
if form == "LLIL":
graph_type = FunctionGraphType.LowLevelILFunctionGraph
elif form == "LLIL SSA":
graph_type = FunctionGraphType.LowLevelILSSAFormFunctionGraph
elif form == "Lifted IL":
graph_type = FunctionGraphType.LiftedILFunctionGraph
elif form == "Mapped Medium":
graph_type = FunctionGraphType.MappedMediumLevelILFunctionGraph
elif form == "Mapped Medium SSA":
graph_type = FunctionGraphType.MappedMediumLevelILSSAFormFunctionGraph
elif form == "MLIL":
graph_type = FunctionGraphType.MediumLevelILFunctionGraph
elif form == "MLIL SSA":
graph_type = FunctionGraphType.MediumLevelILSSAFormFunctionGraph
elif form == "HLIL":
graph_type = FunctionGraphType.HighLevelILFunctionGraph
elif form == "HLIL SSA":
graph_type = FunctionGraphType.HighLevelILSSAFormFunctionGraph
else:
graph_type = FunctionGraphType.NormalFunctionGraph
graph = function.create_graph(graph_type=graph_type, settings=settings)
graph.layout_and_wait()
heightconst = 15
ratio = 0.48
widthconst = heightconst * ratio
output = '''<html>
<head>
<style type="text/css">
@import url(https://fonts.googleapis.com/css?family=Source+Code+Pro);
body {
background-color: rgb(42, 42, 42);
color: rgb(220, 220, 220);
font-family: "Source Code Pro", "Lucida Console", "Consolas", monospace;
}
a, a:visited {
color: rgb(200, 200, 200);
font-weight: bold;
}
svg {
background-color: rgb(42, 42, 42);
display: block;
margin: 0 auto;
}
.basicblock {
stroke: rgb(224, 224, 224);
}
.edge {
fill: none;
stroke-width: 1px;
}
.back_edge {
fill: none;
stroke-width: 2px;
}
.UnconditionalBranch, .IndirectBranch {
stroke: rgb(128, 198, 233);
color: rgb(128, 198, 233);
}
.FalseBranch {
stroke: rgb(222, 143, 151);
color: rgb(222, 143, 151);
}
.TrueBranch {
stroke: rgb(162, 217, 175);
color: rgb(162, 217, 175);
}
.arrow {
stroke-width: 1;
fill: currentColor;
}
text {
font-family: "Source Code Pro", "Lucida Console", "Consolas", monospace;
font-size: 9pt;
fill: rgb(224, 224, 224);
}
.CodeSymbolToken {
fill: rgb(128, 198, 223);
}
.DataSymbolToken {
fill: rgb(142, 230, 237);
}
.TextToken, .InstructionToken, .BeginMemoryOperandToken, .EndMemoryOperandToken {
fill: rgb(224, 224, 224);
}
.CodeRelativeAddressToken, .PossibleAddressToken, .IntegerToken, .AddressDisplayToken {
fill: rgb(162, 217, 175);
}
.RegisterToken {
fill: rgb(237, 223, 179);
}
.AnnotationToken {
fill: rgb(218, 196, 209);
}
.IndirectImportToken, .ImportToken, .ExternalSymbolToken {
fill: rgb(237, 189, 129);
}
.LocalVariableToken, .StackVariableToken {
fill: rgb(193, 220, 199);
}
.OpcodeToken {
fill: rgb(144, 144, 144);
}
</style>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.12.2/jquery.min.js"></script>
</head>
'''
output += '''<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="{width}" height="{height}">
<defs>
<marker id="arrow-TrueBranch" class="arrow TrueBranch" viewBox="0 0 10 10" refX="10" refY="5" markerUnits="strokeWidth" markerWidth="8" markerHeight="6" orient="auto">
<path d="M 0 0 L 10 5 L 0 10 z" />
</marker>
<marker id="arrow-FalseBranch" class="arrow FalseBranch" viewBox="0 0 10 10" refX="10" refY="5" markerUnits="strokeWidth" markerWidth="8" markerHeight="6" orient="auto">
<path d="M 0 0 L 10 5 L 0 10 z" />
</marker>
<marker id="arrow-UnconditionalBranch" class="arrow UnconditionalBranch" viewBox="0 0 10 10" refX="10" refY="5" markerUnits="strokeWidth" markerWidth="8" markerHeight="6" orient="auto">
<path d="M 0 0 L 10 5 L 0 10 z" />
</marker>
<marker id="arrow-IndirectBranch" class="arrow IndirectBranch" viewBox="0 0 10 10" refX="10" refY="5" markerUnits="strokeWidth" markerWidth="8" markerHeight="6" orient="auto">
<path d="M 0 0 L 10 5 L 0 10 z" />
</marker>
</defs>
'''.format(width=graph.width * widthconst + 20, height=graph.height * heightconst + 20)
output += ''' <g id="functiongraph0" class="functiongraph">
<title>Function Graph 0</title>
'''
edges = ''
for i, block in enumerate(graph):
# Calculate basic block location and coordinates
x = ((block.x) * widthconst)
y = ((block.y) * heightconst)
width = ((block.width) * widthconst)
height = ((block.height) * heightconst)
# Render block
output += ' <g id="basicblock{i}">\n'.format(i=i)
output += ' <title>Basic Block {i}</title>\n'.format(i=i)
rgb = colors['none']
try:
bb = block.basic_block
if hasattr(bb.highlight, 'color'):
color_code = bb.highlight.color
color_str = bb.highlight._standard_color_to_str(color_code)
if color_str in colors:
rgb = colors[color_str]
else:
rgb = [bb.highlight.red, bb.highlight.green, bb.highlight.blue]
except:
pass
output += ' <rect class="basicblock" x="{x}" y="{y}" fill-opacity="0.4" height="{height}" width="{width}" fill="rgb({r},{g},{b})"/>\n'.format(
x=x, y=y, width=width + 16, height=height + 12, r=rgb[0], g=rgb[1], b=rgb[2])
# Render instructions, unfortunately tspans don't allow copying/pasting more
# than one line at a time, need SVG 1.2 textarea tags for that it looks like
output += ' <text x="{x}" y="{y}">\n'.format(
x=x, y=y + (i + 1) * heightconst)
for i, line in enumerate(block.lines):
output += ' <tspan id="instr-{address}" x="{x}" y="{y}">'.format(
x=x + 6, y=y + 6 + (i + 0.7) * heightconst, address=hex(line.address)[:-1])
hover = instruction_data_flow(function, line.address)
output += '<title>{hover}</title>'.format(hover=hover)
for token in line.tokens:
# TODO: add hover for hex, function, and reg tokens
output += '<tspan class="{tokentype}">{text}</tspan>'.format(
text=escape(token.text), tokentype=InstructionTextTokenType(token.type).name)
output += '</tspan>\n'
output += ' </text>\n'
output += ' </g>\n'
# Edges are rendered in a seperate chunk so they have priority over the
# basic blocks or else they'd render below them
for edge in block.outgoing_edges:
points = ""
x, y = edge.points[0]
points += str(x * widthconst) + "," + \
str(y * heightconst + 12) + " "
for x, y in edge.points[1:-1]:
points += str(x * widthconst) + "," + \
str(y * heightconst) + " "
x, y = edge.points[-1]
points += str(x * widthconst) + "," + \
str(y * heightconst + 0) + " "
if edge.back_edge:
edges += ' <polyline class="back_edge {type}" points="{points}" marker-end="url(#arrow-{type})"/>\n'.format(
type=BranchType(edge.type).name, points=points)
else:
edges += ' <polyline class="edge {type}" points="{points}" marker-end="url(#arrow-{type})"/>\n'.format(
type=BranchType(edge.type).name, points=points)
output += ' ' + edges + '\n'
output += ' </g>\n'
output += '</svg>'
output += '<p>This CFG generated by <a href="https://binary.ninja/">Binary Ninja</a> from {filename} on {timestring} showing {function} as {form}.</p>'.format(
filename=origname, timestring=time.strftime("%c"), function=offset, form=form)
output += '</html>'
return output
PluginCommand.register_for_function(
"Export to SVG", "Exports an SVG of the current function", save_svg)
|
|
#!/usr/bin/env python
# Copyright (c) 2012, AT&T Labs, Yun Mao <yunmao@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""pylint error checking."""
from __future__ import print_function
import json
import re
import sys
from pylint import lint
from pylint.reporters import text
from six.moves import cStringIO as StringIO # noqa
# These variables will be useful if we will need to skip some pylint checks
ignore_codes = []
ignore_messages = []
ignore_modules = []
KNOWN_PYLINT_EXCEPTIONS_FILE = "tools/pylint_exceptions"
class LintOutput(object):
_cached_filename = None
_cached_content = None
def __init__(self, filename, lineno, line_content, code, message,
lintoutput):
self.filename = filename
self.lineno = lineno
self.line_content = line_content
self.code = code
self.message = message
self.lintoutput = lintoutput
@classmethod
def from_line(cls, line):
m = re.search(r"(\S+):(\d+): \[(\S+)(, \S+)?] (.*)", line)
matched = m.groups()
filename, lineno, code, message = (matched[0], int(matched[1]),
matched[2], matched[-1])
if cls._cached_filename != filename:
with open(filename) as f:
cls._cached_content = list(f.readlines())
cls._cached_filename = filename
line_content = cls._cached_content[lineno - 1].rstrip()
return cls(filename, lineno, line_content, code, message,
line.rstrip())
@classmethod
def from_msg_to_dict(cls, msg):
"""From the output of pylint msg, to a dict.
Each key is a unique error identifier, value is a list of LintOutput
"""
result = {}
for line in msg.splitlines():
obj = cls.from_line(line)
if obj.is_ignored():
continue
key = obj.key()
if key not in result:
result[key] = []
result[key].append(obj)
return result
def is_ignored(self):
if self.code in ignore_codes:
return True
if any(self.filename.startswith(name) for name in ignore_modules):
return True
if any(msg in self.message for msg in ignore_messages):
return True
return False
def key(self):
if self.code in ["E1101", "E1103"]:
# These two types of errors are like Foo class has no member bar.
# We discard the source code so that the error will be ignored
# next time another Foo.bar is encountered.
return self.message, ""
return self.message, self.line_content.strip()
def json(self):
return json.dumps(self.__dict__)
def review_str(self):
return ("File %(filename)s\nLine %(lineno)d:%(line_content)s\n"
"%(code)s: %(message)s" % {
"filename": self.filename,
"lineno": self.lineno,
"line_content": self.line_content,
"code": self.code,
"message": self.message,
})
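# Illustrative sketch (not part of the original tool): feeding a single
# pylint-style line through LintOutput.from_line. The temporary module and
# the message text below are fabricated purely for demonstration.
def _demo_from_line():
    import os
    import tempfile
    with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
        f.write("class Foo(object):\n    pass\nFoo().bar\n")
        path = f.name
    sample = ("%s:3: [E1101(no-member), Foo.bar] "
              "Instance of 'Foo' has no 'bar' member" % path)
    entry = LintOutput.from_line(sample)
    summary = (entry.key(), entry.review_str())
    os.remove(path)
    return summary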
class ErrorKeys(object):
@classmethod
def print_json(cls, errors, output=sys.stdout):
print("# automatically generated by tools/lintstack.py", file=output)
for i in sorted(errors.keys()):
print(json.dumps(i), file=output)
@classmethod
def from_file(cls, filename):
keys = set()
for line in open(filename):
if line and line[0] != "#":
d = json.loads(line)
keys.add(tuple(d))
return keys
def run_pylint():
    buff = StringIO()
    reporter = text.TextReporter(output=buff)
    args = ["--msg-template={path}:{line}: [{msg_id}({symbol}), {obj}] {msg}",
            "-E",
            "ceilometer"]
    lint.Run(args, reporter=reporter, exit=False)
val = buff.getvalue()
buff.close()
return val
def generate_error_keys(msg=None):
print("Generating", KNOWN_PYLINT_EXCEPTIONS_FILE)
if msg is None:
msg = run_pylint()
errors = LintOutput.from_msg_to_dict(msg)
with open(KNOWN_PYLINT_EXCEPTIONS_FILE, "w") as f:
ErrorKeys.print_json(errors, output=f)
def validate(newmsg=None):
print("Loading", KNOWN_PYLINT_EXCEPTIONS_FILE)
known = ErrorKeys.from_file(KNOWN_PYLINT_EXCEPTIONS_FILE)
if newmsg is None:
print("Running pylint. Be patient...")
newmsg = run_pylint()
errors = LintOutput.from_msg_to_dict(newmsg)
print("Unique errors reported by pylint: was %d, now %d."
% (len(known), len(errors)))
passed = True
for err_key, err_list in errors.items():
for err in err_list:
if err_key not in known:
print(err.lintoutput)
print()
passed = False
if passed:
print("Congrats! pylint check passed.")
redundant = known - set(errors.keys())
if redundant:
print("Extra credit: some known pylint exceptions disappeared.")
for i in sorted(redundant):
print(json.dumps(i))
print("Consider regenerating the exception file if you will.")
else:
print("Please fix the errors above. If you believe they are false"
" positives, run 'tools/lintstack.py generate' to overwrite.")
sys.exit(1)
def usage():
print("""Usage: tools/lintstack.py [generate|validate]
To generate pylint_exceptions file: tools/lintstack.py generate
To validate the current commit: tools/lintstack.py
""")
def main():
option = "validate"
if len(sys.argv) > 1:
option = sys.argv[1]
if option == "generate":
generate_error_keys()
elif option == "validate":
validate()
else:
usage()
if __name__ == "__main__":
main()
|
|
##########################################################################
#
# Copyright (c) 2013-2015, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2015, Nvizible Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import os
import imath
import IECore
import Gaffer
import GafferTest
import GafferImage
import GafferImageTest
class CropTest( GafferImageTest.ImageTestCase ) :
imageFileUndersizeDataWindow = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/blueWithDataWindow.100x100.exr" )
imageFileOversizeDataWindow = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/checkerWithNegWindows.200x150.exr" )
representativeDeepImagePath = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/representativeDeepImage.exr" )
def testDefaultState( self ) :
crop = GafferImage.Crop()
self.assertEqual( crop["areaSource"].getValue(), GafferImage.Crop.AreaSource.Area )
self.assertTrue( crop["area"].getValue().isEmpty() )
self.assertEqual( crop["affectDataWindow"].getValue(), True )
self.assertEqual( crop["affectDisplayWindow"].getValue(), True )
def testPassThrough( self ) :
i = GafferImage.ImageReader()
i["fileName"].setValue( self.imageFileUndersizeDataWindow )
crop = GafferImage.Crop()
crop["in"].setInput(i["out"])
crop["areaSource"].setValue( GafferImage.Crop.AreaSource.Area )
crop["area"].setValue( imath.Box2i( imath.V2i( 40 ), imath.V2i( 50 ) ) )
crop["affectDataWindow"].setValue( True )
crop["affectDisplayWindow"].setValue( True )
crop["resetOrigin"].setValue( False )
self.assertEqual(i['out'].channelDataHash( "R", imath.V2i( 0 ) ), crop['out'].channelDataHash( "R", imath.V2i( 0 ) ) )
self.assertEqual(i['out'].channelDataHash( "G", imath.V2i( 0 ) ), crop['out'].channelDataHash( "G", imath.V2i( 0 ) ) )
self.assertEqual(i['out'].channelDataHash( "B", imath.V2i( 0 ) ), crop['out'].channelDataHash( "B", imath.V2i( 0 ) ) )
self.assertEqual( i["out"]["metadata"].hash(), crop["out"]["metadata"].hash() )
self.assertEqual( i["out"]["channelNames"].hash(), crop["out"]["channelNames"].hash() )
self.assertNotEqual( i["out"]["format"].hash(), crop["out"]["format"].hash() )
self.assertNotEqual( i["out"]["dataWindow"].hash(), crop["out"]["dataWindow"].hash() )
self.assertEqual( i["out"]["metadata"].getValue(), crop["out"]["metadata"].getValue() )
self.assertEqual( i["out"]["channelNames"].getValue(), crop["out"]["channelNames"].getValue() )
self.assertNotEqual( i["out"]["format"].getValue(), crop["out"]["format"].getValue() )
self.assertNotEqual( i["out"]["dataWindow"].getValue(), crop["out"]["dataWindow"].getValue() )
def testEnableBehaviour( self ) :
crop = GafferImage.Crop()
self.assertTrue( crop.enabledPlug().isSame( crop["enabled"] ) )
self.assertTrue( crop.correspondingInput( crop["out"] ).isSame( crop["in"] ) )
self.assertEqual( crop.correspondingInput( crop["in"] ), None )
self.assertEqual( crop.correspondingInput( crop["enabled"] ), None )
def testAreaFormat( self ) :
constant = GafferImage.Constant()
constant['format'].setValue( GafferImage.Format( 1024, 576 ) )
crop1 = GafferImage.Crop()
crop1['in'].setInput( constant['out'] )
crop1['areaSource'].setValue( GafferImage.Crop.AreaSource.Format )
crop1['format'].setValue( GafferImage.Format( 2048, 1152 ) )
crop2 = GafferImage.Crop()
crop2['in'].setInput( constant['out'] )
crop2['areaSource'].setValue( GafferImage.Crop.AreaSource.Area )
crop2['area'].setValue( imath.Box2i( imath.V2i( 0, 0 ), imath.V2i( 2048, 1152 ) ) )
self.assertEqual( crop1['out']['dataWindow'].getValue(), crop2['out']['dataWindow'].getValue() )
crop1['formatCenter'].setValue( True )
crop2['area'].setValue( imath.Box2i( imath.V2i( -512, -288 ), imath.V2i( 1536, 864 ) ) )
crop2['resetOrigin'].setValue( True )
self.assertEqual( crop1['out']['dataWindow'].getValue(), crop2['out']['dataWindow'].getValue() )
def testAffectDataWindow( self ) :
i = GafferImage.ImageReader()
i["fileName"].setValue( self.imageFileUndersizeDataWindow )
crop = GafferImage.Crop()
crop["in"].setInput(i["out"])
crop["areaSource"].setValue( GafferImage.Crop.AreaSource.Area )
crop["area"].setValue( imath.Box2i( imath.V2i( 40 ), imath.V2i( 50 ) ) )
crop["affectDataWindow"].setValue( True )
crop["affectDisplayWindow"].setValue( False )
self.assertEqual( crop["out"]["dataWindow"].getValue(), imath.Box2i( imath.V2i( 40 ), imath.V2i( 50 ) ) )
self.assertEqual( i["out"]["format"].getValue(), crop["out"]["format"].getValue() )
def testAffectDisplayWindow( self ) :
i = GafferImage.ImageReader()
i["fileName"].setValue( self.imageFileUndersizeDataWindow )
crop = GafferImage.Crop()
crop["in"].setInput(i["out"])
crop["areaSource"].setValue( GafferImage.Crop.AreaSource.Area )
crop["area"].setValue( imath.Box2i( imath.V2i( 40 ), imath.V2i( 50 ) ) )
crop["affectDataWindow"].setValue( False )
crop["affectDisplayWindow"].setValue( True )
crop["resetOrigin"].setValue( False )
self.assertEqual( crop["out"]["format"].getValue().getDisplayWindow(), imath.Box2i( imath.V2i( 40 ), imath.V2i( 50 ) ) )
self.assertEqual( i["out"]["dataWindow"].getValue(), crop["out"]["dataWindow"].getValue() )
crop["resetOrigin"].setValue( True )
self.assertEqual( crop["out"]["format"].getValue().getDisplayWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 10 ) ) )
self.assertEqual( crop["out"]["dataWindow"].getValue(), imath.Box2i( imath.V2i( -10 ), imath.V2i( 40 ) ) )
def testIntersectDataWindow( self ) :
i = GafferImage.ImageReader()
i["fileName"].setValue( self.imageFileUndersizeDataWindow )
crop = GafferImage.Crop()
crop["in"].setInput(i["out"])
crop["areaSource"].setValue( GafferImage.Crop.AreaSource.Area )
crop["area"].setValue( imath.Box2i( imath.V2i( 0 ), imath.V2i( 50 ) ) )
crop["affectDataWindow"].setValue( True )
crop["affectDisplayWindow"].setValue( False )
self.assertEqual( crop["out"]["dataWindow"].getValue(), imath.Box2i( imath.V2i( 30 ), imath.V2i( 50 ) ) )
def testDataWindowToDisplayWindow( self ) :
i = GafferImage.ImageReader()
i["fileName"].setValue( self.imageFileUndersizeDataWindow )
crop = GafferImage.Crop()
crop["in"].setInput(i["out"])
crop["areaSource"].setValue( GafferImage.Crop.AreaSource.DataWindow )
crop["affectDataWindow"].setValue( False )
crop["affectDisplayWindow"].setValue( True )
crop["resetOrigin"].setValue( False )
self.assertEqual( i["out"]["dataWindow"].getValue(), crop["out"]["format"].getValue().getDisplayWindow() )
self.assertEqual( crop["out"]["dataWindow"].getValue(), i["out"]["dataWindow"].getValue() )
crop["resetOrigin"].setValue( True )
self.assertEqual( crop["out"]["format"].getValue().getDisplayWindow(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 50 ) ) )
self.assertEqual( crop["out"]["dataWindow"].getValue(), imath.Box2i( imath.V2i( 0 ), imath.V2i( 50 ) ) )
def testDisplayWindowToDataWindow( self ) :
i = GafferImage.ImageReader()
i["fileName"].setValue( self.imageFileOversizeDataWindow )
crop = GafferImage.Crop()
crop["in"].setInput(i["out"])
crop["areaSource"].setValue( GafferImage.Crop.AreaSource.DisplayWindow )
crop["affectDataWindow"].setValue( True )
crop["affectDisplayWindow"].setValue( False )
self.assertEqual( i["out"]["format"].getValue().getDisplayWindow(), crop["out"]["dataWindow"].getValue() )
def testAffects( self ) :
c = GafferImage.Crop()
self.assertEqual(
set( c.affects( c["affectDisplayWindow"] ) ),
{ c["out"]["format"], c["__offset"]["x"], c["__offset"]["y"] }
)
self.assertEqual(
set( c.affects( c["affectDataWindow"] ) ),
{ c["__cropDataWindow"] }
)
self.assertTrue( c["__cropDataWindow"] in set( c.affects( c["in"]["dataWindow"] ) ) )
self.assertTrue( c["out"]["format"] in set( c.affects( c["in"]["format"] ) ) )
def testResetOrigin( self ) :
constant = GafferImage.Constant()
constant["format"].setValue( GafferImage.Format( 100, 200, 1 ) )
crop = GafferImage.Crop()
crop["in"].setInput( constant["out"] )
self.assertEqual( crop["affectDisplayWindow"].getValue(), True )
self.assertEqual( crop["affectDataWindow"].getValue(), True )
self.assertEqual( crop["resetOrigin"].getValue(), True )
area = imath.Box2i( imath.V2i( 50 ), imath.V2i( 100, 190 ) )
crop["area"].setValue( area )
self.assertEqual( crop["out"]["format"].getValue().getDisplayWindow(), imath.Box2i( imath.V2i( 0 ), area.size() ) )
self.assertEqual( crop["out"]["dataWindow"].getValue(), imath.Box2i( imath.V2i( 0 ), area.size() ) )
crop["resetOrigin"].setValue( False )
self.assertEqual( crop["out"]["format"].getValue().getDisplayWindow(), area )
self.assertEqual( crop["out"]["dataWindow"].getValue(), area )
# If we're not affecting the display window, then the reset origin flag
# should be ignored.
crop["resetOrigin"].setValue( True )
crop["affectDisplayWindow"].setValue( False )
self.assertEqual( crop["out"]["format"].getValue(), crop["in"]["format"].getValue() )
self.assertEqual( crop["out"]["dataWindow"].getValue(), area )
# But if we are affecting the display window, and we are resetting the origin,
# the data window should be offset even if affectDataWindow is off.
crop["affectDisplayWindow"].setValue( True )
crop["affectDataWindow"].setValue( False )
self.assertEqual( crop["out"]["format"].getValue().getDisplayWindow(), imath.Box2i( imath.V2i( 0 ), area.size() ) )
self.assertEqual( crop["out"]["dataWindow"].getValue(), imath.Box2i( imath.V2i( -50 ), imath.V2i( 50, 150 ) ) )
def testEmptyInput( self ) :
crop = GafferImage.Crop()
crop["area"]["min"].setValue( imath.V2i( 20 ) )
self.assertTrue( GafferImage.BufferAlgo.empty( crop["out"]["dataWindow"].getValue() ) )
def testDeep( self ) :
representativeDeep = GafferImage.ImageReader()
representativeDeep["fileName"].setValue( self.representativeDeepImagePath )
deepCrop = GafferImage.Crop()
deepCrop["in"].setInput( representativeDeep["out"] )
postFlatten = GafferImage.DeepToFlat()
postFlatten["in"].setInput( deepCrop["out"] )
preFlatten = GafferImage.DeepToFlat()
preFlatten["in"].setInput( representativeDeep["out"] )
flatCrop = GafferImage.Crop()
flatCrop["in"].setInput( preFlatten["out"] )
dataWindow = representativeDeep["out"].dataWindow()
for affectDisplay in [ True, False ]:
for area in [
imath.Box2i( imath.V2i( 0, 0 ), imath.V2i( 150, 100 ) ),
imath.Box2i( imath.V2i( -10, -13 ), imath.V2i( 157, 103 ) ),
imath.Box2i( imath.V2i( 10, 13 ), imath.V2i( 143, 77 ) ),
imath.Box2i( imath.V2i( 37, 65 ), imath.V2i( 101, 67 ) ),
imath.Box2i( imath.V2i( 0, 0 ), imath.V2i( 149, 99 ) )
] :
deepCrop["area"].setValue( area )
flatCrop["area"].setValue( area )
self.assertImagesEqual( postFlatten["out"], flatCrop["out"] )
def testFormatAffectsOutput( self ) :
crop = GafferImage.Crop()
cs = GafferTest.CapturingSlot( crop.plugDirtiedSignal() )
crop["format"].setValue( GafferImage.Format( 100, 200 ) )
self.assertIn( crop["out"]["dataWindow"], { x[0] for x in cs } )
if __name__ == "__main__":
unittest.main()
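# Illustrative sketch (not part of the original test file): the basic Crop
# usage the tests above exercise, outside of unittest. Values follow
# testResetOrigin; this is a sketch, not an additional test.
def _demoCropUsage() :
	constant = GafferImage.Constant()
	constant["format"].setValue( GafferImage.Format( 100, 200, 1 ) )
	crop = GafferImage.Crop()
	crop["in"].setInput( constant["out"] )
	crop["area"].setValue( imath.Box2i( imath.V2i( 50 ), imath.V2i( 100, 190 ) ) )
	# With resetOrigin on (the default), the output display and data windows
	# both start at the origin and take the size of the crop area.
	return crop["out"]["format"].getValue().getDisplayWindow(), crop["out"]["dataWindow"].getValue()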
|
|
#!/usr/bin/env python
import unittest
from testtools import *
import sys
arg = None
def sa_call(method_name, params=[], user_name='alice'):
if arg in ['-v', '--verbose']:
verbose = True
else:
verbose = False
return api_call(method_name, 'sa/2', params=params, user_name=user_name, verbose=verbose)
def _remove_key(dct, val):
copy = dct.copy()
del copy[val]
return copy
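# Illustrative sketch (not part of the original suite): _remove_key returns a
# copy with one key dropped, leaving the original mapping untouched.
def _demo_remove_key():
    create_data = {'SLICE_NAME': 'DEMO', 'SLICE_DESCRIPTION': 'demo slice'}
    lookup_data = _remove_key(create_data, 'SLICE_DESCRIPTION')
    assert lookup_data == {'SLICE_NAME': 'DEMO'}
    assert 'SLICE_DESCRIPTION' in create_data  # original is not modified
    return lookup_data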
class TestGSAv2(unittest.TestCase):
NOT_IMPLEMENTED = 100
@classmethod
def setUpClass(klass):
# try to get custom fields before we start the tests
klass.sup_fields = []
try:
code, value, output = sa_call('get_version')
klass.sup_fields = value['FIELDS']
klass.has_key_service = ('KEY' in value['SERVICES'])
except Exception as e:
warn(["Error while trying to setup supplementary fields before starting tests (%s)" % (repr(e),)])
pass
def test_get_version(self):
"""
Test 'get_version' method.
Check result for various valid/required fields.
"""
code, value, output = sa_call('get_version')
self.assertEqual(code, 0) # no error
self.assertIsInstance(value, dict)
self.assertIn('VERSION', value)
self.assertIn('SERVICES', value)
self.assertIsInstance(value['SERVICES'], list)
self.assertIn('SLICE', value['SERVICES'])
for service_name in value['SERVICES']:
self.assertIn(service_name, ['SLICE','SLICE_MEMBER', 'SLIVER_INFO', 'PROJECT', 'PROJECT_MEMBER'])
self.assertIn('CREDENTIAL_TYPES', value)
creds = value['CREDENTIAL_TYPES']
self.assertIsInstance(creds, list)
self.assertTrue(len(creds) > 0)
for cred in creds:
self.assertIsInstance(cred['type'], str)
self.assertIsInstance(cred['version'], int)
if 'FIELDS' in value:
self.assertIsInstance(value['FIELDS'], dict)
for fk, fv in value['FIELDS'].iteritems():
self.assertIsInstance(fk, str)
self.assertIsInstance(fv, dict)
self.assertIn("TYPE", fv)
self.assertIn(fv["TYPE"], ["URN", "UID", "STRING", "DATETIME", "ESAIL", "KEY", "BOOLEAN", "CREDENTIAL", "CERTIFICATE"])
if "CREATE" in fv:
self.assertIn(fv["CREATE"], ["REQUIRED", "ALLOWED", "NOT ALLOWED"])
if "SATCH" in fv:
self.assertIsInstance(fv["SATCH"], bool)
if "UPDATE" in fv:
self.assertIsInstance(fv["UPDATE"], bool)
if "PROTECT" in fv:
self.assertIn(fv["PROTECT"], ["PUBLIC", "PRIVATE", "IDENTIFYING"])
else:
warn("No supplementary fields to test with.")
def test_malformed_field(self):
"""
        Test type checking by passing a malformed field ('SLICE_NAME' as a boolean)
during creation.
"""
create_data = {'SLICE_NAME':True, 'SLICE_DESCRIPTION' : 'My Malformed Slice', 'SLICE_PROJECT_URN' : 'urn:publicid:IDN+this_sa+project+myproject'}
self._test_create(create_data, 'SLICE', 'SLICE_URN', 3)
lookup_data = {'SLICE_PROJECT_URN': 'urn:publicid:IDN+this_sa+project+myproject'}
self.assertEqual(self._test_lookup(lookup_data,None,'SLICE',0), {})
def test_invalid_slice_name(self):
"""
This test is intended to test the validity of the slice_name upon creation.
"""
        invalid_characters = ['_', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '+']
        invalid_sliceNames = ['invalid%sslicename' % c for c in invalid_characters] + ['-invalidslicename']
for invalid_sliceName in invalid_sliceNames:
create_data = {'SLICE_NAME': invalid_sliceName,
'SLICE_DESCRIPTION': 'My Malformed Slice',
'SLICE_PROJECT_URN': 'urn:publicid:IDN+this_sa+project+myproject'}
self._test_create(create_data, 'SLICE', 'SLICE_URN',3)
#Asserting to make sure the invalid slice name was not created
lookup_data = {'SLICE_PROJECT_URN': 'urn:publicid:IDN+this_sa+project+myproject'}
self.assertEqual(self._test_lookup(lookup_data,None,'SLICE',0), {})
def test_create_unauthorized_field(self):
"""
        Test creation rules by passing an unauthorized field ('SLICE_EXPIRED') during creation.
"""
create_data = {'SLICE_EXPIRED' : True, 'SLICE_NAME':'UNAUTHORIZED_CREATION',
'SLICE_DESCRIPTION' : 'My Unauthorized Slice',
'SLICE_PROJECT_URN' : 'urn:publicid:IDN+this_sa+project+myproject'}
self._test_create(create_data, 'SLICE', 'SLICE_URN', 3)
#Asserting to make sure the unauthorized_field was not created
lookup_data = {'SLICE_PROJECT_URN': 'urn:publicid:IDN+this_sa+project+myproject'}
self.assertEqual(self._test_lookup(lookup_data,None,'SLICE',0), {})
def test_update_unauthorized_field(self):
"""
        Test update rules by passing an unauthorized field ('PROJECT_NAME') during update.
"""
create_data = {'PROJECT_EXPIRATION':'2014-03-21T11:35:57Z', 'PROJECT_NAME': 'TEST_PROJECT', 'PROJECT_DESCRIPTION':'My test project'}
urn = self._test_create(create_data, 'PROJECT', 'PROJECT_URN', 0)
update_data = {'PROJECT_NAME' : 'UNAUTHORIZED_UPDATE'}
self._test_update(urn, update_data, 'PROJECT', 'PROJECT_URN', 3)
self._test_delete(urn, 'PROJECT', 'PROJECT_URN', 0)
def test_update_invalid_expiry(self):
"""
Test update rules by passing an invalid expiry date during update.
Note: We are only testing projects here because otherwise we would end up with slices left over (we can not remove slices).
"""
create_data = {
'PROJECT_NAME' : 'TEST-PROJECT',
'PROJECT_DESCRIPTION' : 'Time_Expiry'}
urn = self._test_create(create_data, 'PROJECT', 'PROJECT_URN', 0)
update_data = {'PROJECT_EXPIRATION' : '2013-07-29T13:15:30Z'}
self._test_update(urn, update_data, 'PROJECT', 'PROJECT_URN', 3)
self._test_delete(urn, 'PROJECT', 'PROJECT_URN', 0)
def test_lookup_multiple_slice_urns(self):
"""
Test whether it is possible to specify multiple slices and look those slices concurrently
"""
create_data_1 = {
'SLICE_NAME' : 'TEST-SLICE-1',
'SLICE_DESCRIPTION' : 'Time_Expiry'}
create_data_2 = {
'SLICE_NAME' : 'TEST-SLICE-2',
'SLICE_DESCRIPTION' : 'Time_Expiry'}
urn1 = self._test_create(create_data_1, 'SLICE', 'SLICE_URN', 0)
urn2 = self._test_create(create_data_2, 'SLICE', 'SLICE_URN', 0)
lookup_data={'SLICE_URN':[str(urn1),str(urn2)]}
self._test_lookup(lookup_data, None, 'SLICE', 0, 2)
def test_get_credentials(self):
"""
Test to see whether the get_credentials method is working or not
"""
create_data= {
'PROJECT_NAME' : 'TEST-SLICE-CREDENTIALS',
'PROJECT_DESCRIPTION' : 'TEST_CREDENTIALS'}
urn = self._test_create(create_data, 'PROJECT', 'PROJECT_URN', 0)
self._test_get_credentials(urn, TestGSAv2.NOT_IMPLEMENTED)
def test_slice(self):
"""
Test object type 'SLICE' methods: create, lookup, update.
        Slice deletion method is explicitly blocked by the API specification: 'No
SA should support slice deletion since there is no authoritative way to
know that there aren't live slivers associated with that slice.' In this
case, we check that the method returns an error.
There is a situation when the test data has been used once, as the key
will already exist (and cannot be deleted). In this case, we check if
the slice already exists. If it does, then we expect a duplicate error
when creating a new 'SLICE'. If it is not already present, we should
expect regular object creation.
When checking if the object already exists, we need to remove the same
field that is later used in the update operation, as it *may* not match
otherwise. This is because it *could* have been updated in the case that
the object already exists.
Similarly, if the object creation fails because the key already exists,
we need to get the URN from the previous 'lookup' call which should have
returned a result.
"""
create_data = {'SLICE_NAME':'AUTHORIZED-CREATION', 'SLICE_DESCRIPTION' : 'My Clean Slice', 'SLICE_PROJECT_URN' : 'urn:publicid:IDN+this_sa+project+myproject'}
lookup_data = _remove_key(create_data, 'SLICE_DESCRIPTION')
presence_check = self._test_lookup(lookup_data, None, 'SLICE', 0)
        if len(presence_check) == 1:
create_code = 5
else:
create_code = 0
urn = self._test_create(create_data, 'SLICE', 'SLICE_URN', create_code)
update_data = {'SLICE_DESCRIPTION' : 'Update Slice'}
if urn is None:
urn, _ = presence_check.popitem()
self._test_update(urn, update_data, 'SLICE', 'SLICE_URN', 0)
self._test_delete(urn, 'SLICE', 'SLICE_URN', 100)
def test_sliver_info(self):
"""
Test object type 'SLIVER_INFO' methods: create, lookup, update and delete.
"""
create_data = { 'SLIVER_INFO_SLICE_URN' : 'urn:publicid:IDN+this.sa+slice+TESTSLICE', 'SLIVER_INFO_URN' : 'urn:publicid:IDN+this.sa+slice+TESTSLICE',
'SLIVER_INFO_AGGREGATE_URN' : 'urn:publicid:IDN+this.sa+slice+TESTSLICE', 'SLIVER_INFO_CREATOR_URN' : 'urn:publicid:IDN+this.sa+slice+TESTSLICE',
'SLIVER_INFO_EXPIRATION' : '2014-03-21T11:35:57Z', 'SLIVER_INFO_CREATION' : '2014-03-21T11:35:57Z'}
urn = self._test_create(create_data, 'SLIVER_INFO', 'SLIVER_INFO_URN', 0)
update_data = {'SLIVER_INFO_EXPIRATION' : '2014-04-21T11:35:57Z'}
self._test_update(urn, update_data, 'SLIVER_INFO', 'SLIVER_INFO_URN', 0)
self._test_delete(urn, 'SLIVER_INFO', 'SLIVER_INFO_URN', 0)
def test_project(self):
"""
Test object type 'PROJECT' methods: create, lookup, update and delete.
"""
create_data = {'PROJECT_EXPIRATION':'2014-03-21T11:35:57Z', 'PROJECT_NAME': 'TEST_PROJECT', 'PROJECT_DESCRIPTION':'My test project'}
urn = self._test_create(create_data, 'PROJECT', 'PROJECT_URN', 0)
update_data = {'PROJECT_DESCRIPTION' : 'M. Broadbent Test Project'}
self._test_update(urn, update_data, 'PROJECT', 'PROJECT_URN', 0)
self._test_delete(urn, 'PROJECT', 'PROJECT_URN', 0)
def _test_create(self, fields, object_type, expected_urn, expected_code):
"""
Helper method to test object creation.
"""
code, value, output = sa_call('create', [object_type, self._credential_list("admin"), {'fields' : fields}], user_name="admin")
self.assertEqual(code, expected_code)
        if code == 0:
self.assertIsInstance(value, dict)
for field_key, field_value in fields.iteritems():
self.assertEqual(value.get(field_key), field_value)
self.assertIn(expected_urn, value)
urn = value.get(expected_urn)
self.assertIsInstance(urn, str)
return urn
def _test_update(self, urn, fields, object_type, expected_urn, expected_code):
"""
Helper method to test object update.
"""
code, value, output = sa_call('update', [object_type, urn, self._credential_list("admin"), {'fields' : fields}], user_name="admin")
self.assertEqual(code, expected_code)
        if code == 0:
self.assertIsNone(value)
result = self._test_lookup({expected_urn : urn}, None, object_type, 0, 1)
for field_key, field_value in fields.iteritems():
self.assertEqual(result[urn].get(field_key), field_value)
def _test_lookup(self, match, _filter, object_type, expected_code, expected_length=None):
"""
Helper method to test object lookup.
"""
options = {}
if match:
options['match'] = match
if _filter:
options['filter'] = _filter
code, value, output = sa_call('lookup', [object_type, self._credential_list("admin"), options], user_name="admin")
self.assertEqual(code, expected_code)
if expected_length:
self.assertEqual(len(value), expected_length)
return value
def _test_delete(self, urn, object_type, expected_urn, expected_code):
"""
Helper method to test object deletion.
"""
code, value, output = sa_call('delete', [object_type, urn, self._credential_list("admin"), {}], user_name="admin")
self.assertEqual(code, expected_code)
self.assertIsNone(value)
self._test_lookup({expected_urn : urn}, None, object_type, 0)
def _test_get_credentials(self, urn, expected_code):
code, value, output = sa_call('get_credentials', [urn, self._credential_list("admin"), {}], user_name="admin")
self.assertEqual(code, expected_code)
def test_malformed_membership(self):
"""
Test type checking by passing incorrect (project) parameters in a slice
membership call.
"""
add_data = {'members_to_add' : [{'PROJECT_MEMBER' : 'test_urn', 'PROJECT_ROLE' : 'test_role'}]}
self._test_lookup_members('urn:publicid:IDN+this.sa+project+SLICE', 'SLICE', add_data, 0, 3)
def test_project_membership(self):
"""
Test the 'add', 'change' and 'remove' methods for 'PROJECT' membership
object.
"""
add_data = {'members_to_add' : [{'PROJECT_MEMBER' : 'test_urn', 'PROJECT_ROLE' : 'test_role'}]}
change_data = {'members_to_change' : [{'PROJECT_MEMBER' : 'test_urn', 'PROJECT_ROLE' : 'upgraded_test_role'}]}
remove_data = {'members_to_remove' : [{'PROJECT_MEMBER' : 'test_urn'}]}
self._test_lookup_members('urn:publicid:IDN+this.sa+project+TESTPROJECT', 'PROJECT', add_data, 1, 0)
self._test_lookup_members('urn:publicid:IDN+this.sa+project+TESTPROJECT', 'PROJECT', change_data, 1, 0)
self._test_lookup_members('urn:publicid:IDN+this.sa+project+TESTPROJECT', 'PROJECT', remove_data, 0, 0)
self._test_lookup_for_members('urn:publicid:IDN+this.sa+project+TESTPROJECT', 'test_urn','PROJECT', add_data, 1, 0)
self._test_lookup_for_members('urn:publicid:IDN+this.sa+project+TESTPROJECT', 'test_urn','PROJECT', change_data, 1, 0)
self._test_lookup_for_members('urn:publicid:IDN+this.sa+project+TESTPROJECT', 'test_urn', 'PROJECT', remove_data, 0, 0)
def test_slice_membership(self):
"""
        Test the 'add', 'change' and 'remove' methods for 'SLICE' membership
object.
"""
add_data = {'members_to_add' : [{'SLICE_MEMBER' : 'test_urn', 'SLICE_ROLE' : 'test_role'}]}
change_data = {'members_to_change' : [{'SLICE_MEMBER' : 'test_urn', 'SLICE_ROLE' : 'upgraded_test_role'}]}
remove_data = {'members_to_remove' : [{'SLICE_MEMBER' : 'test_urn'}]}
self._test_lookup_members('urn:publicid:IDN+this.sa+slice+TESTSLICE', 'SLICE', add_data, 1, 0)
self._test_lookup_members('urn:publicid:IDN+this.sa+slice+TESTSLICE', 'SLICE', change_data, 1, 0)
self._test_lookup_members('urn:publicid:IDN+this.sa+slice+TESTSLICE', 'SLICE', remove_data, 0, 0)
self._test_lookup_for_members('urn:publicid:IDN+this.sa+slice+TESTSLICE', 'test_urn','SLICE', add_data, 1, 0)
self._test_lookup_for_members('urn:publicid:IDN+this.sa+slice+TESTSLICE', 'test_urn','SLICE', change_data, 1, 0)
self._test_lookup_for_members('urn:publicid:IDN+this.sa+slice+TESTSLICE', 'test_urn', 'SLICE', remove_data, 0, 0)
def _test_modify_membership(self, urn, object_type, data, expected_code):
"""
Helper method to test object membership modification.
"""
code, value, output = sa_call('modify_membership', [object_type, urn, self._credential_list("admin"), data], user_name="admin")
self.assertEqual(code, expected_code)
return code
def _test_lookup_members(self, urn, object_type, data, expected_length, expected_code):
"""
Helper method to test object membership lookup.
"""
        if self._test_modify_membership(urn, object_type, data, expected_code) == 0:
code, value, output = sa_call('lookup_members', [object_type, urn, self._credential_list("admin"), {}], user_name="admin")
self.assertEqual(code, 0)
self.assertEqual(len(value), expected_length)
def _test_lookup_for_members(self, urn, member_urn, object_type, data, expected_length, expected_code):
"""
Helper method to test object membership lookup for a member.
"""
        if self._test_modify_membership(urn, object_type, data, expected_code) == 0:
code, value, output = sa_call('lookup_for_member', [object_type, member_urn, self._credential_list("admin"), {}], user_name="admin")
self.assertEqual(code, 0)
self.assertEqual(len(value), expected_length)
def _user_credentail_list(self):
"""Returns the _user_ credential for alice."""
return [{"SFA" : get_creds_file_contents('alice-cred.xml')}]
def _bad_user_credentail_list(self):
"""Returns the _user_ credential for malcom."""
return [{"SFA" : get_creds_file_contents('malcom-cred.xml')}]
def _credential_list(self, user_name):
"""Returns the _user_ credential for the given user_name."""
return [{"SFA" : get_creds_file_contents('%s-cred.xml' % (user_name,))}]
if __name__ == '__main__':
if len(sys.argv) == 2:
arg = sys.argv[1]
del sys.argv[1:]
unittest.main(verbosity=0, exit=True)
print_warnings()
|
|
# Test packages (dotted-name import)
import sys
import os
import tempfile
import textwrap
import unittest
from test import test_support
# Helpers to create and destroy hierarchies.
def cleanout(root):
names = os.listdir(root)
for name in names:
fullname = os.path.join(root, name)
if os.path.isdir(fullname) and not os.path.islink(fullname):
cleanout(fullname)
else:
os.remove(fullname)
os.rmdir(root)
def fixdir(lst):
if "__builtins__" in lst:
lst.remove("__builtins__")
return lst
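# Illustrative sketch (not part of the original test): fixdir() only strips
# the __builtins__ entry that exec()-created namespaces pick up.
def _demo_fixdir():
    names = ['__builtins__', '__doc__', '__name__', 'spam']
    return fixdir(names)  # -> ['__doc__', '__name__', 'spam']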
# XXX Things to test
#
# import package without __init__
# import package with __init__
# __init__ importing submodule
# __init__ importing global module
# __init__ defining variables
# submodule importing other submodule
# submodule importing global module
# submodule importing submodule via global name
# from package import submodule
# from package import subpackage
# from package import variable (defined in __init__)
# from package import * (defined in __init__)
class Test(unittest.TestCase):
def setUp(self):
self.root = None
self.pkgname = None
self.syspath = list(sys.path)
def tearDown(self):
sys.path[:] = self.syspath
cleanout(self.root)
        # delete all modules concerning the tested hierarchy
if self.pkgname:
modules = [name for name in sys.modules
if self.pkgname in name.split('.')]
for name in modules:
del sys.modules[name]
def run_code(self, code):
exec(textwrap.dedent(code), globals(), {"self": self})
def mkhier(self, descr):
root = tempfile.mkdtemp()
sys.path.insert(0, root)
if not os.path.isdir(root):
os.mkdir(root)
for name, contents in descr:
comps = name.split()
fullname = root
for c in comps:
fullname = os.path.join(fullname, c)
if contents is None:
os.mkdir(fullname)
else:
f = open(fullname, "w")
f.write(contents)
if contents and contents[-1] != '\n':
f.write('\n')
f.close()
self.root = root
# package name is the name of the first item
self.pkgname = descr[0][0]
def test_1(self):
hier = [("t1", None), ("t1 __init__"+os.extsep+"py", "")]
self.mkhier(hier)
import t1
def test_2(self):
hier = [
("t2", None),
("t2 __init__"+os.extsep+"py", "'doc for t2'"),
("t2 sub", None),
("t2 sub __init__"+os.extsep+"py", ""),
("t2 sub subsub", None),
("t2 sub subsub __init__"+os.extsep+"py", "spam = 1"),
]
self.mkhier(hier)
import t2
self.assertEqual(t2.__doc__, "doc for t2")
import t2.sub
import t2.sub.subsub
self.assertEqual(t2.__name__, "t2")
self.assertEqual(t2.sub.__name__, "t2.sub")
self.assertEqual(t2.sub.subsub.__name__, "t2.sub.subsub")
# This exec crap is needed because Py3k forbids 'import *' outside
# of module-scope and __import__() is insufficient for what we need.
s = """
import t2
from t2 import *
self.assertEqual(dir(), ['self', 'sub', 't2'])
"""
self.run_code(s)
from t2 import sub
from t2.sub import subsub
from t2.sub.subsub import spam
self.assertEqual(sub.__name__, "t2.sub")
self.assertEqual(subsub.__name__, "t2.sub.subsub")
self.assertEqual(sub.subsub.__name__, "t2.sub.subsub")
for name in ['spam', 'sub', 'subsub', 't2']:
self.failUnless(locals()["name"], "Failed to import %s" % name)
import t2.sub
import t2.sub.subsub
self.assertEqual(t2.__name__, "t2")
self.assertEqual(t2.sub.__name__, "t2.sub")
self.assertEqual(t2.sub.subsub.__name__, "t2.sub.subsub")
s = """
from t2 import *
self.failUnless(dir(), ['self', 'sub'])
"""
self.run_code(s)
def test_3(self):
hier = [
("t3", None),
("t3 __init__"+os.extsep+"py", ""),
("t3 sub", None),
("t3 sub __init__"+os.extsep+"py", ""),
("t3 sub subsub", None),
("t3 sub subsub __init__"+os.extsep+"py", "spam = 1"),
]
self.mkhier(hier)
import t3.sub.subsub
self.assertEqual(t3.__name__, "t3")
self.assertEqual(t3.sub.__name__, "t3.sub")
self.assertEqual(t3.sub.subsub.__name__, "t3.sub.subsub")
def test_4(self):
hier = [
("t4.py", "raise RuntimeError('Shouldnt load t4.py')"),
("t4", None),
("t4 __init__"+os.extsep+"py", ""),
("t4 sub.py", "raise RuntimeError('Shouldnt load sub.py')"),
("t4 sub", None),
("t4 sub __init__"+os.extsep+"py", ""),
("t4 sub subsub"+os.extsep+"py",
"raise RuntimeError('Shouldnt load subsub.py')"),
("t4 sub subsub", None),
("t4 sub subsub __init__"+os.extsep+"py", "spam = 1"),
]
self.mkhier(hier)
s = """
from t4.sub.subsub import *
self.assertEqual(spam, 1)
"""
self.run_code(s)
def test_5(self):
hier = [
("t5", None),
("t5 __init__"+os.extsep+"py", "import t5.foo"),
("t5 string"+os.extsep+"py", "spam = 1"),
("t5 foo"+os.extsep+"py",
"from . import string; assert string.spam == 1"),
]
self.mkhier(hier)
import t5
s = """
from t5 import *
self.assertEqual(dir(), ['foo', 'self', 'string', 't5'])
"""
self.run_code(s)
import t5
self.assertEqual(fixdir(dir(t5)),
['__doc__', '__file__', '__name__',
'__package__', '__path__', 'foo', 'string', 't5'])
self.assertEqual(fixdir(dir(t5.foo)),
['__doc__', '__file__', '__name__', '__package__',
'string'])
self.assertEqual(fixdir(dir(t5.string)),
['__doc__', '__file__', '__name__','__package__',
'spam'])
def test_6(self):
hier = [
("t6", None),
("t6 __init__"+os.extsep+"py",
"__all__ = ['spam', 'ham', 'eggs']"),
("t6 spam"+os.extsep+"py", ""),
("t6 ham"+os.extsep+"py", ""),
("t6 eggs"+os.extsep+"py", ""),
]
self.mkhier(hier)
import t6
self.assertEqual(fixdir(dir(t6)),
['__all__', '__doc__', '__file__',
'__name__', '__package__', '__path__'])
s = """
import t6
from t6 import *
self.assertEqual(fixdir(dir(t6)),
['__all__', '__doc__', '__file__',
'__name__', '__package__', '__path__',
'eggs', 'ham', 'spam'])
self.assertEqual(dir(), ['eggs', 'ham', 'self', 'spam', 't6'])
"""
self.run_code(s)
def test_7(self):
hier = [
("t7", None),
("t7"+os.extsep+"py", ""),
("t7 __init__"+os.extsep+"py", ""),
("t7 sub"+os.extsep+"py",
"raise RuntimeError('Shouldnt load sub.py')"),
("t7 sub", None),
("t7 sub __init__"+os.extsep+"py", ""),
("t7 sub "+os.extsep+"py",
"raise RuntimeError('Shouldnt load subsub.py')"),
("t7 sub subsub", None),
("t7 sub subsub __init__"+os.extsep+"py",
"spam = 1"),
]
self.mkhier(hier)
t7, sub, subsub = None, None, None
import t7 as tas
self.assertEqual(fixdir(dir(tas)),
['__doc__', '__file__', '__name__',
'__package__', '__path__'])
self.failIf(t7)
from t7 import sub as subpar
self.assertEqual(fixdir(dir(subpar)),
['__doc__', '__file__', '__name__',
'__package__', '__path__'])
self.failIf(t7)
self.failIf(sub)
from t7.sub import subsub as subsubsub
self.assertEqual(fixdir(dir(subsubsub)),
['__doc__', '__file__', '__name__',
'__package__', '__path__', 'spam'])
self.failIf(t7)
self.failIf(sub)
self.failIf(subsub)
from t7.sub.subsub import spam as ham
self.assertEqual(ham, 1)
self.failIf(t7)
self.failIf(sub)
self.failIf(subsub)
def test_main():
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
|
|
from flask import *
from flask.json import JSONEncoder
from flask.ext.cors import CORS
from flask.ext.login import LoginManager, login_user, logout_user, current_user, login_required
# `Session` below is assumed to come from the Flask-Session extension; without
# this import the `sess = Session()` call would raise a NameError at startup.
from flask.ext.session import Session
from werkzeug.contrib.fixers import ProxyFix
import simplejson as json
import os, sys
import datetime
app = Flask(__name__, static_url_path='/static')
sess = Session()
app.config.from_object('config')
if app.config['SQLALCHEMY_DATABASE_URI'] is None:
print "Need database config"
sys.exit(1)
from models import db, Quote, Vote, User
db.init_app(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
@app.before_request
def before_request():
g.user = current_user
@login_manager.user_loader
def load_user(id):
return User.query.get(int(id))
# registers user
@app.route('/register', methods = ['POST'])
def register():
body = request.get_json()
print app.config
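# Only requests presenting the ADMIN_REGISTRATION_SECRET_KEY may create accounts.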
if 'secret' not in body or body['secret'] != app.config['ADMIN_REGISTRATION_SECRET_KEY']:
return jsonify({"Error": "Secret key is wrong"})
email = body['email']
password = body['password']
user = User(email=email, password=password)
db.session.add(user)
db.session.commit()
return jsonify(user.serialize)
# renders login page
@app.route('/login', methods=['GET'])
def render_login():
return app.send_static_file('login.html')
@app.route('/', methods=['GET'])
def render_index():
return app.send_static_file('index.html')
# user login
@app.route('/login', methods = ['POST'])
def login():
body = request.get_json()
if body:
email = body['email']
password = body['password']
else:
email = request.form.get('email')
password = request.form.get('password')
registered_user = User.query.filter_by(email=email,password=password).first()
if registered_user is None:
return jsonify({"Error": "Email or Password invalid"})
login_user(registered_user)
return redirect("/admin", code=302)
# renders admin page
@app.route('/admin', methods=['GET'])
def render_admin():
if current_user.is_authenticated() is False:
return redirect("/login", code=302)
return app.send_static_file('admin.html')
# user logout
@app.route('/logout', methods = ['GET'])
def logout():
logout_user()
return redirect("/login", code=302)
# renders summary page
@app.route('/summary', methods=['GET'])
def render_summary():
return app.send_static_file('summary.html')
# get all quotes
@app.route("/quote", methods = ['GET'])
def get_quote():
results = {}
if current_user.is_authenticated() and request.args.get('all') == "true":
result = Quote.query.all()
for item in result:
results[item.id] = item.serialize
else:
# if user is not authenticated, return only quotes that are approved
result = Quote.query.filter(Quote.active==True).all()
for item in result:
results[item.id] = item.serialize
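# Sum the vote values per quote (grouped by quote_id) and attach the total to
# each returned quote as its "score".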
scores = db.session.query(Vote.quote_id, db.func.sum(Vote.value).label("score")).group_by(Vote.quote_id).join(Quote).filter(Quote.id.in_(results.keys())).all()
for i in scores:
results[i[0]]["score"] = i[1]
return jsonify(results)
# gets details of single quote
@app.route("/quote/<int:id>", methods = ['GET'])
def get_single_quote(id):
quote = Quote.query.get(id)
quote.view_count += 1
quote_score = db.session.query(db.func.sum(Vote.value)).group_by(Vote.quote_id).filter(Vote.quote_id==id).all()
db.session.commit()
quote = quote.serialize
quote["score"] = quote_score[0][0]
return jsonify(quote)
# submits a new quote
@app.route("/quote", methods = ['POST'])
def post_new_quote():
body = request.get_json()
conditions = {}
if "conditions" in body:
conditions = body['conditions']
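# Prefer the client address from X-Forwarded-For (set when running behind a
# proxy), fall back to the socket address, and keep only the first hop if the
# header lists a chain of proxies.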
ip = request.environ.get('HTTP_X_FORWARDED_FOR', request.remote_addr)
ip = ip.partition(',')[0]
quote = Quote(text = body['text'], conditions = json.dumps(conditions), view_count = 1, ip = ip, active = False)
db.session.add(quote)
db.session.commit()
vote = Vote(ip = ip, value = 1, quote_id = quote.id) #auto upvote every new quote by 1
db.session.add(vote)
db.session.commit()
return jsonify(quote.serialize)
# submits a new vote for a single quote
@app.route("/quote/<int:quote_id>/vote", methods = ['POST'])
def post_new_vote(quote_id):
body = request.get_json()
ip = request.environ.get('HTTP_X_FORWARDED_FOR', request.remote_addr)
ip = ip.partition(',')[0]
vote = Vote(ip = ip, value = body['value'], quote_id = quote_id)
db.session.add(vote)
db.session.commit()
return jsonify(vote.serialize)
# approves/ activates a single quote
@app.route("/quote/<int:id>/approve", methods = ['PUT'])
@login_required
def approve_quote(id):
quote = Quote.query.get(id)
quote.active = True
db.session.commit()
return jsonify(quote.serialize)
# unapproves/ rejects a single quote
@app.route("/quote/<int:id>/reject", methods = ['PUT'])
@login_required
def reject_quote(id):
quote = Quote.query.get(id)
quote.active = False
db.session.commit()
return jsonify(quote.serialize)
# deletes a single quote
@app.route("/quote/<int:id>", methods = ['DELETE'])
@login_required
def delete_quote(id):
vote = Vote.query.filter_by(quote_id = id).all()
quote = Quote.query.filter_by(id = id).all()
if quote == []:
return jsonify({"Error":"Quote does not exist"})
for v in vote:
db.session.delete(v)
db.session.commit()
for q in quote:
db.session.delete(q)
db.session.commit()
return jsonify({"Success":"Quote has been deleted"})
cors = CORS(app)
if __name__ == "__main__":
# app.debug = True #uncomment to run debug mode
app.run()
|
|
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import hashlib
from textwrap import dedent
import warnings
from logbook import Logger
import numpy
import pandas as pd
from pandas import read_csv
import pytz
import requests
from six import StringIO, iteritems, with_metaclass
from catalyst.errors import (
MultipleSymbolsFound,
SymbolNotFound,
ZiplineError
)
from catalyst.protocol import (
DATASOURCE_TYPE,
Event
)
from catalyst.assets import Equity
from catalyst.constants import LOG_LEVEL
logger = Logger('Requests Source Logger', level=LOG_LEVEL)
def roll_dts_to_midnight(dts, trading_day):
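# Shift timestamps back 16 hours in US/Eastern so anything stamped before the
# 4pm close falls on the previous calendar date, then rebuild the index at
# midnight UTC of that date and advance it by one trading day.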
if len(dts) == 0:
return dts
return pd.DatetimeIndex(
(dts.tz_convert('US/Eastern') - pd.Timedelta(hours=16)).date,
tz='UTC',
) + trading_day
class FetcherEvent(Event):
pass
class FetcherCSVRedirectError(ZiplineError):
msg = dedent(
"""\
Attempt to fetch_csv from a redirected url. {url}
must be changed to {new_url}
"""
)
def __init__(self, *args, **kwargs):
self.url = kwargs["url"]
self.new_url = kwargs["new_url"]
self.extra = kwargs["extra"]
super(FetcherCSVRedirectError, self).__init__(*args, **kwargs)
# The following optional arguments are supported for
# requests backed data sources.
# see http://docs.python-requests.org/en/latest/api/#main-interface
# for a full list.
ALLOWED_REQUESTS_KWARGS = {
'params',
'headers',
'auth',
'cert'
}
# The following optional arguments are supported for pandas' read_csv
# function, and may be passed as kwargs to the datasource below.
# see http://pandas.pydata.org/
# pandas-docs/stable/generated/pandas.io.parsers.read_csv.html
ALLOWED_READ_CSV_KWARGS = {
'sep',
'dialect',
'doublequote',
'escapechar',
'quotechar',
'quoting',
'skipinitialspace',
'lineterminator',
'header',
'index_col',
'names',
'prefix',
'skiprows',
'skipfooter',
'skip_footer',
'na_values',
'true_values',
'false_values',
'delimiter',
'converters',
'dtype',
'delim_whitespace',
'as_recarray',
'na_filter',
'compact_ints',
'use_unsigned',
'buffer_lines',
'warn_bad_lines',
'error_bad_lines',
'keep_default_na',
'thousands',
'comment',
'decimal',
'keep_date_col',
'nrows',
'chunksize',
'encoding',
'usecols'
}
SHARED_REQUESTS_KWARGS = {
'stream': True,
'allow_redirects': False,
}
def mask_requests_args(url, validating=False, params_checker=None, **kwargs):
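# Keep only the whitelisted requests.get() keyword arguments and, when a
# params_checker is supplied, merge any extra query params it derives from the
# url into the outgoing request.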
requests_kwargs = {key: val for (key, val) in iteritems(kwargs)
if key in ALLOWED_REQUESTS_KWARGS}
if params_checker is not None:
url, s_params = params_checker(url)
if s_params:
if 'params' in requests_kwargs:
requests_kwargs['params'].update(s_params)
else:
requests_kwargs['params'] = s_params
# Giving the connection 30 seconds. This timeout does not
# apply to the download of the response body.
# (Note that Quandl links can take >10 seconds to return their
# first byte on occasion)
requests_kwargs['timeout'] = 1.0 if validating else 30.0
requests_kwargs.update(SHARED_REQUESTS_KWARGS)
request_pair = namedtuple("RequestPair", ("requests_kwargs", "url"))
return request_pair(requests_kwargs, url)
class PandasCSV(with_metaclass(ABCMeta, object)):
def __init__(self,
pre_func,
post_func,
asset_finder,
trading_day,
start_date,
end_date,
date_column,
date_format,
timezone,
symbol,
mask,
symbol_column,
data_frequency,
**kwargs):
self.start_date = start_date
self.end_date = end_date
self.date_column = date_column
self.date_format = date_format
self.timezone = timezone
self.mask = mask
self.symbol_column = symbol_column or "symbol"
self.data_frequency = data_frequency
invalid_kwargs = set(kwargs) - ALLOWED_READ_CSV_KWARGS
if invalid_kwargs:
raise TypeError(
"Unexpected keyword arguments: %s" % invalid_kwargs,
)
self.pandas_kwargs = self.mask_pandas_args(kwargs)
self.symbol = symbol
self.finder = asset_finder
self.trading_day = trading_day
self.pre_func = pre_func
self.post_func = post_func
@property
def fields(self):
return self.df.columns.tolist()
def get_hash(self):
return self.namestring
@abstractmethod
def fetch_data(self):
return
@staticmethod
def parse_date_str_series(format_str, tz, date_str_series, data_frequency,
trading_day):
"""
Efficient parsing for a 1d Pandas/numpy object containing string
representations of dates.
Note: pd.to_datetime is significantly faster when no format string is
passed, and in pandas 0.12.0 the %p strptime directive is not correctly
handled if a format string is explicitly passed, but AM/PM is handled
properly if format=None.
Moreover, we were previously ignoring this parameter unintentionally
because we were incorrectly passing it as a positional. For all these
reasons, we ignore the format_str parameter when parsing datetimes.
"""
# Explicitly ignoring this parameter. See note above.
if format_str is not None:
logger.warn(
"The 'format_str' parameter to fetch_csv is deprecated. "
"Ignoring and defaulting to pandas default date parsing."
)
format_str = None
tz_str = str(tz)
if tz_str == pytz.utc.zone:
parsed = pd.to_datetime(
date_str_series.values,
format=format_str,
utc=True,
errors='coerce',
)
else:
parsed = pd.to_datetime(
date_str_series.values,
format=format_str,
errors='coerce',
).tz_localize(tz_str).tz_convert('UTC')
if data_frequency == 'daily':
parsed = roll_dts_to_midnight(parsed, trading_day)
return parsed
def mask_pandas_args(self, kwargs):
pandas_kwargs = {key: val for (key, val) in iteritems(kwargs)
if key in ALLOWED_READ_CSV_KWARGS}
if 'usecols' in pandas_kwargs:
usecols = pandas_kwargs['usecols']
if usecols and self.date_column not in usecols:
# make a new list so we don't modify user's,
# and to ensure it is mutable
with_date = list(usecols)
with_date.append(self.date_column)
pandas_kwargs['usecols'] = with_date
# No strings in the 'symbol' column should be interpreted as NaNs
pandas_kwargs.setdefault('keep_default_na', False)
pandas_kwargs.setdefault('na_values', {'symbol': []})
return pandas_kwargs
def _lookup_unconflicted_symbol(self, symbol):
"""
Attempt to find a unique asset whose symbol is the given string.
If multiple assets have held the given symbol, return a 0.
If no asset has held the given symbol, return a NaN.
"""
try:
uppered = symbol.upper()
except AttributeError:
# The mapping fails because symbol was a non-string
return numpy.nan
try:
return self.finder.lookup_symbol(uppered, as_of_date=None)
except MultipleSymbolsFound:
# Fill conflicted entries with zeros to mark that they need to be
# resolved by date.
return 0
except SymbolNotFound:
# Fill not found entries with nans.
return numpy.nan
def load_df(self):
df = self.fetch_data()
if self.pre_func:
df = self.pre_func(df)
# Batch-convert the user-specified date column into timestamps.
df['dt'] = self.parse_date_str_series(
self.date_format,
self.timezone,
df[self.date_column],
self.data_frequency,
self.trading_day,
).values
# ignore rows whose dates we couldn't parse
df = df[df['dt'].notnull()]
if self.symbol is not None:
df['sid'] = self.symbol
elif self.finder:
df.sort_values(by=self.symbol_column, inplace=True)
# Pop the 'sid' column off of the DataFrame, just in case the user
# has assigned it, and throw a warning
try:
df.pop('sid')
warnings.warn(
"Assignment of the 'sid' column of a DataFrame is "
"not supported by Fetcher. The 'sid' column has been "
"overwritten.",
category=UserWarning,
stacklevel=2,
)
except KeyError:
# There was no 'sid' column, so no warning is necessary
pass
# Fill entries for any symbols that don't require a date to
# uniquely identify. Entries for which multiple securities exist
# are replaced with zeroes, while entries for which no asset
# exists are replaced with NaNs.
unique_symbols = df[self.symbol_column].unique()
sid_series = pd.Series(
data=map(self._lookup_unconflicted_symbol, unique_symbols),
index=unique_symbols,
name='sid',
)
df = df.join(sid_series, on=self.symbol_column)
# Fill any zero entries left in our sid column by doing a lookup
# using both symbol and the row date.
conflict_rows = df[df['sid'] == 0]
for row_idx, row in conflict_rows.iterrows():
try:
asset = self.finder.lookup_symbol(
row[self.symbol_column],
# Replacing tzinfo here is necessary because of the
# timezone metadata bug described below.
row['dt'].replace(tzinfo=pytz.utc),
# It's possible that no asset comes back here if our
# lookup date is from before any asset held the
# requested symbol. Mark such cases as NaN so that
# they get dropped in the next step.
) or numpy.nan
except SymbolNotFound:
asset = numpy.nan
# Assign the resolved asset to the cell
df.ix[row_idx, 'sid'] = asset
# Filter out rows containing symbols that we failed to find.
length_before_drop = len(df)
df = df[df['sid'].notnull()]
no_sid_count = length_before_drop - len(df)
if no_sid_count:
logger.warn(
"Dropped {} rows from fetched csv.".format(no_sid_count),
extra={'syslog': True},
)
else:
df['sid'] = df['symbol']
# Dates are localized to UTC when they come out of
# parse_date_str_series, but we need to re-localize them here because
# of a bug that wasn't fixed until
# https://github.com/pydata/pandas/pull/7092.
# We should be able to remove the call to tz_localize once we're on
# pandas 0.14.0
# We don't set 'dt' as the index until here because the Symbol parsing
# operations above depend on having a unique index for the dataframe,
# and the 'dt' column can contain multiple dates for the same entry.
df = df.drop_duplicates(["sid", "dt"])
df.set_index(['dt'], inplace=True)
df = df.tz_localize('UTC')
df.sort_index(inplace=True)
cols_to_drop = [self.date_column]
if self.symbol is None:
cols_to_drop.append(self.symbol_column)
df = df[df.columns.drop(cols_to_drop)]
if self.post_func:
df = self.post_func(df)
return df
def __iter__(self):
asset_cache = {}
for dt, series in self.df.iterrows():
if dt < self.start_date:
continue
if dt > self.end_date:
return
event = FetcherEvent()
# when dt column is converted to be the dataframe's index
# the dt column is dropped. So, we need to manually copy
# dt into the event.
event.dt = dt
for k, v in series.iteritems():
# convert numpy integer types to
# int. This assumes we are on a 64bit
# platform that will not lose information
# by casting.
# TODO: this is only necessary on the
# amazon qexec instances. would be good
# to figure out how to use the numpy dtypes
# without this check and casting.
if isinstance(v, numpy.integer):
v = int(v)
setattr(event, k, v)
# If it has start_date, then it's already an Asset
# object from asset_for_symbol, and we don't have to
# transform it any further. Checking for start_date is
# faster than isinstance.
if event.sid in asset_cache:
event.sid = asset_cache[event.sid]
elif hasattr(event.sid, 'start_date'):
# Clone for user algo code, if we haven't already.
asset_cache[event.sid] = event.sid
elif self.finder and isinstance(event.sid, int):
asset = self.finder.retrieve_asset(event.sid,
default_none=True)
if asset:
# Clone for user algo code.
event.sid = asset_cache[asset] = asset
elif self.mask:
# When masking drop all non-mappable values.
continue
elif self.symbol is None:
# If the event's sid property is an int we coerce
# it into an Equity.
event.sid = asset_cache[event.sid] = Equity(event.sid)
event.type = DATASOURCE_TYPE.CUSTOM
event.source_id = self.namestring
yield event
class PandasRequestsCSV(PandasCSV):
# maximum 100 megs to prevent DDoS
MAX_DOCUMENT_SIZE = (1024 * 1024) * 100
# maximum number of bytes to read in at a time
CONTENT_CHUNK_SIZE = 4096
def __init__(self,
url,
pre_func,
post_func,
asset_finder,
trading_day,
start_date,
end_date,
date_column,
date_format,
timezone,
symbol,
mask,
symbol_column,
data_frequency,
special_params_checker=None,
**kwargs):
# Peel off extra requests kwargs, forwarding the remaining kwargs to
# the superclass.
# Also returns possible https updated url if sent to http quandl ds
# If url hasn't changed, will just return the original.
self._requests_kwargs, self.url =\
mask_requests_args(url,
params_checker=special_params_checker,
**kwargs)
remaining_kwargs = {
k: v for k, v in iteritems(kwargs)
if k not in self.requests_kwargs
}
self.namestring = type(self).__name__
super(PandasRequestsCSV, self).__init__(
pre_func,
post_func,
asset_finder,
trading_day,
start_date,
end_date,
date_column,
date_format,
timezone,
symbol,
mask,
symbol_column,
data_frequency,
**remaining_kwargs
)
self.fetch_size = None
self.fetch_hash = None
self.df = self.load_df()
self.special_params_checker = special_params_checker
@property
def requests_kwargs(self):
return self._requests_kwargs
def fetch_url(self, url):
info = "checking {url} with {params}"
logger.info(info.format(url=url, params=self.requests_kwargs))
# setting decode_unicode=True sometimes results in a
# UnicodeEncodeError exception, so instead we'll use
# pandas logic for decoding content
try:
response = requests.get(url, **self.requests_kwargs)
except requests.exceptions.ConnectionError:
raise Exception('Could not connect to %s' % url)
if not response.ok:
raise Exception('Problem reaching %s' % url)
elif response.is_redirect:
# On the offchance we don't catch a redirect URL
# in validation, this will catch it.
new_url = response.headers['location']
raise FetcherCSVRedirectError(
url=url,
new_url=new_url,
extra={
'old_url': url,
'new_url': new_url
}
)
content_length = 0
logger.info('{} connection established in {:.1f} seconds'.format(
url, response.elapsed.total_seconds()))
# use the decode_unicode flag to ensure that the output of this is
# a string, and not bytes.
for chunk in response.iter_content(self.CONTENT_CHUNK_SIZE,
decode_unicode=True):
if content_length > self.MAX_DOCUMENT_SIZE:
raise Exception('Document size too big.')
if chunk:
content_length += len(chunk)
yield chunk
return
def fetch_data(self):
# create a data frame directly from the full text of
# the response from the returned file-descriptor.
data = self.fetch_url(self.url)
fd = StringIO()
if isinstance(data, str):
fd.write(data)
else:
for chunk in data:
fd.write(chunk)
self.fetch_size = fd.tell()
fd.seek(0)
try:
# see if pandas can parse csv data
frames = read_csv(fd, **self.pandas_kwargs)
frames_hash = hashlib.md5(str(fd.getvalue()).encode('utf-8'))
self.fetch_hash = frames_hash.hexdigest()
except pd.parser.CParserError:
# could not parse the data, raise exception
raise Exception('Error parsing remote CSV data.')
finally:
fd.close()
return frames
|
|
"""Tkinker gui for pylint"""
import os
import sys
import re
import Queue
from threading import Thread
from Tkinter import (Tk, Frame, Listbox, Entry, Label, Button, Scrollbar,
Checkbutton, Radiobutton, IntVar, StringVar)
from Tkinter import (TOP, LEFT, RIGHT, BOTTOM, END, X, Y, BOTH, SUNKEN, W,
HORIZONTAL, DISABLED, NORMAL, E)
from tkFileDialog import askopenfilename, askdirectory
import pylint.lint
from pylint.reporters.guireporter import GUIReporter
HOME = os.path.expanduser('~/')
HISTORY = '.pylint-gui-history'
COLORS = {'(I)':'lightblue',
'(C)':'blue', '(R)':'darkblue',
'(W)':'black', '(E)':'darkred',
'(F)':'red'}
class BasicStream:
'''
stream-like object used by the gui reporter: instead of going to stdout,
pylint output is written to this stream and saved in contents
'''
def __init__(self, gui):
"""init"""
self.curline = ""
self.gui = gui
self.contents = []
self.outdict = {}
self.currout = None
self.nextTitle = None
def write(self, text):
"""write text to the stream"""
if re.match('^--+$', text.strip()) or re.match('^==+$', text.strip()):
if self.currout:
self.outdict[self.currout].remove(self.nextTitle)
self.outdict[self.currout].pop()
self.currout = self.nextTitle
self.outdict[self.currout] = ['']
if text.strip():
self.nextTitle = text.strip()
if text.startswith('\n'):
self.contents.append('')
if self.currout: self.outdict[self.currout].append('')
self.contents[-1] += text.strip('\n')
if self.currout: self.outdict[self.currout][-1] += text.strip('\n')
if text.endswith('\n') and text.strip():
self.contents.append('')
if self.currout: self.outdict[self.currout].append('')
def fix_contents(self):
"""finalize what the contents of the dict should look like before output"""
for item in self.outdict:
numEmpty = self.outdict[item].count('')
for i in range(numEmpty):
self.outdict[item].remove('')
if self.outdict[item]:
self.outdict[item].pop(0)
def output_contents(self):
"""output contents of dict to the gui, and set the rating"""
self.fix_contents()
self.gui.tabs = self.outdict
try:
self.gui.rating.set(self.outdict['Global evaluation'][0])
except (KeyError, IndexError):
self.gui.rating.set('Error')
self.gui.refresh_results_window()
#reset stream variables for next run
self.contents = []
self.outdict = {}
self.currout = None
self.nextTitle = None
class LintGui:
"""Build and control a window to interact with pylint"""
def __init__(self, root=None):
"""init"""
self.root = root or Tk()
self.root.title('Pylint')
#reporter
self.reporter = None
#message queue for output from reporter
self.msg_queue = Queue.Queue()
self.msgs = []
self.filenames = []
self.rating = StringVar()
self.tabs = {}
self.report_stream = BasicStream(self)
#gui objects
self.lbMessages = None
self.showhistory = None
self.results = None
self.btnRun = None
self.information_box = None
self.convention_box = None
self.refactor_box = None
self.warning_box = None
self.error_box = None
self.fatal_box = None
self.txtModule = None
self.status = None
self.msg_type_dict = None
self.init_gui()
def init_gui(self):
"""init helper"""
#setting up frames
top_frame = Frame(self.root)
mid_frame = Frame(self.root)
radio_frame = Frame(self.root)
res_frame = Frame(self.root)
msg_frame = Frame(self.root)
check_frame = Frame(self.root)
history_frame = Frame(self.root)
btn_frame = Frame(self.root)
rating_frame = Frame(self.root)
top_frame.pack(side=TOP, fill=X)
mid_frame.pack(side=TOP, fill=X)
history_frame.pack(side=TOP, fill=BOTH, expand=True)
radio_frame.pack(side=TOP, fill=BOTH, expand=True)
rating_frame.pack(side=TOP, fill=BOTH, expand=True)
res_frame.pack(side=TOP, fill=BOTH, expand=True)
check_frame.pack(side=TOP, fill=BOTH, expand=True)
msg_frame.pack(side=TOP, fill=BOTH, expand=True)
btn_frame.pack(side=TOP, fill=X)
#Message ListBox
rightscrollbar = Scrollbar(msg_frame)
rightscrollbar.pack(side=RIGHT, fill=Y)
bottomscrollbar = Scrollbar(msg_frame, orient=HORIZONTAL)
bottomscrollbar.pack(side=BOTTOM, fill=X)
self.lbMessages = Listbox(msg_frame,
yscrollcommand=rightscrollbar.set,
xscrollcommand=bottomscrollbar.set,
bg="white")
self.lbMessages.pack(expand=True, fill=BOTH)
rightscrollbar.config(command=self.lbMessages.yview)
bottomscrollbar.config(command=self.lbMessages.xview)
#History ListBoxes
rightscrollbar2 = Scrollbar(history_frame)
rightscrollbar2.pack(side=RIGHT, fill=Y)
bottomscrollbar2 = Scrollbar(history_frame, orient=HORIZONTAL)
bottomscrollbar2.pack(side=BOTTOM, fill=X)
self.showhistory = Listbox(history_frame,
yscrollcommand=rightscrollbar2.set,
xscrollcommand=bottomscrollbar2.set,
bg="white")
self.showhistory.pack(expand=True, fill=BOTH)
rightscrollbar2.config(command=self.showhistory.yview)
bottomscrollbar2.config(command=self.showhistory.xview)
self.showhistory.bind('<Double-Button-1>', self.select_recent_file)
self.set_history_window()
#status bar
self.status = Label(self.root, text="", bd=1, relief=SUNKEN, anchor=W)
self.status.pack(side=BOTTOM, fill=X)
#labels
self.lblRatingLabel = Label(rating_frame, text='Rating:')
self.lblRatingLabel.pack(side=LEFT)
self.lblRating = Label(rating_frame, textvariable=self.rating)
self.lblRating.pack(side=LEFT)
Label(mid_frame, text='Recently Used:').pack(side=LEFT)
Label(top_frame, text='Module or package').pack(side=LEFT)
#file textbox
self.txtModule = Entry(top_frame, background='white')
self.txtModule.bind('<Return>', self.run_lint)
self.txtModule.pack(side=LEFT, expand=True, fill=X)
#results box
rightscrollbar = Scrollbar(res_frame)
rightscrollbar.pack(side=RIGHT, fill=Y)
bottomscrollbar = Scrollbar(res_frame, orient=HORIZONTAL)
bottomscrollbar.pack(side=BOTTOM, fill=X)
self.results = Listbox(res_frame,
yscrollcommand=rightscrollbar.set,
xscrollcommand=bottomscrollbar.set,
bg="white", font="Courier")
self.results.pack(expand=True, fill=BOTH, side=BOTTOM)
rightscrollbar.config(command=self.results.yview)
bottomscrollbar.config(command=self.results.xview)
#buttons
Button(top_frame, text='Open', command=self.file_open).pack(side=LEFT)
Button(top_frame, text='Open Package',
command=(lambda : self.file_open(package=True))).pack(side=LEFT)
self.btnRun = Button(top_frame, text='Run', command=self.run_lint)
self.btnRun.pack(side=LEFT)
Button(btn_frame, text='Quit', command=self.quit).pack(side=BOTTOM)
#radio buttons
self.information_box = IntVar()
self.convention_box = IntVar()
self.refactor_box = IntVar()
self.warning_box = IntVar()
self.error_box = IntVar()
self.fatal_box = IntVar()
i = Checkbutton(check_frame, text="Information", fg=COLORS['(I)'],
variable=self.information_box, command=self.refresh_msg_window)
c = Checkbutton(check_frame, text="Convention", fg=COLORS['(C)'],
variable=self.convention_box, command=self.refresh_msg_window)
r = Checkbutton(check_frame, text="Refactor", fg=COLORS['(R)'],
variable=self.refactor_box, command=self.refresh_msg_window)
w = Checkbutton(check_frame, text="Warning", fg=COLORS['(W)'],
variable=self.warning_box, command=self.refresh_msg_window)
e = Checkbutton(check_frame, text="Error", fg=COLORS['(E)'],
variable=self.error_box, command=self.refresh_msg_window)
f = Checkbutton(check_frame, text="Fatal", fg=COLORS['(F)'],
variable=self.fatal_box, command=self.refresh_msg_window)
i.select()
c.select()
r.select()
w.select()
e.select()
f.select()
i.pack(side=LEFT)
c.pack(side=LEFT)
r.pack(side=LEFT)
w.pack(side=LEFT)
e.pack(side=LEFT)
f.pack(side=LEFT)
#check boxes
self.box = StringVar()
# XXX should be generated
report = Radiobutton(radio_frame, text="Report", variable=self.box,
value="Report", command=self.refresh_results_window)
rawMet = Radiobutton(radio_frame, text="Raw metrics", variable=self.box,
value="Raw metrics", command=self.refresh_results_window)
dup = Radiobutton(radio_frame, text="Duplication", variable=self.box,
value="Duplication", command=self.refresh_results_window)
ext = Radiobutton(radio_frame, text="External dependencies",
variable=self.box, value="External dependencies",
command=self.refresh_results_window)
stat = Radiobutton(radio_frame, text="Statistics by type",
variable=self.box, value="Statistics by type",
command=self.refresh_results_window)
msgCat = Radiobutton(radio_frame, text="Messages by category",
variable=self.box, value="Messages by category",
command=self.refresh_results_window)
msg = Radiobutton(radio_frame, text="Messages", variable=self.box,
value="Messages", command=self.refresh_results_window)
report.select()
report.grid(column=0, row=0, sticky=W)
rawMet.grid(column=1, row=0, sticky=W)
dup.grid(column=2, row=0, sticky=W)
msg.grid(column=3, row=0, sticky=E)
stat.grid(column=0, row=1, sticky=W)
msgCat.grid(column=1, row=1, sticky=W)
ext.grid(column=2, row=1, columnspan=2, sticky=W)
#dictionary for check boxes and associated error term
self.msg_type_dict = {
'I' : lambda : self.information_box.get() == 1,
'C' : lambda : self.convention_box.get() == 1,
'R' : lambda : self.refactor_box.get() == 1,
'E' : lambda : self.error_box.get() == 1,
'W' : lambda : self.warning_box.get() == 1,
'F' : lambda : self.fatal_box.get() == 1
}
self.txtModule.focus_set()
def select_recent_file(self, event):
"""adds the selected file in the history listbox to the Module box"""
if not self.showhistory.size():
return
selected = self.showhistory.curselection()
item = self.showhistory.get(selected)
#update module
self.txtModule.delete(0, END)
self.txtModule.insert(0, item)
def refresh_msg_window(self):
"""refresh the message window with current output"""
#clear the window
self.lbMessages.delete(0, END)
for msg in self.msgs:
if (self.msg_type_dict.get(msg[0])()):
msg_str = self.convert_to_string(msg)
self.lbMessages.insert(END, msg_str)
fg_color = COLORS.get(msg_str[:3], 'black')
self.lbMessages.itemconfigure(END, fg=fg_color)
def refresh_results_window(self):
"""refresh the results window with current output"""
#clear the window
self.results.delete(0, END)
try:
for res in self.tabs[self.box.get()]:
self.results.insert(END, res)
except KeyError:
pass
def convert_to_string(self, msg):
"""make a string representation of a message"""
if (msg[2] != ""):
return "(" + msg[0] + ") " + msg[1] + "." + msg[2] + " [" + msg[3] + "]: " + msg[4]
else:
return "(" + msg[0] + ") " + msg[1] + " [" + msg[3] + "]: " + msg[4]
def process_incoming(self):
"""process the incoming messages from running pylint"""
while self.msg_queue.qsize():
try:
msg = self.msg_queue.get(0)
if msg == "DONE":
self.report_stream.output_contents()
return False
#adding message to list of msgs
self.msgs.append(msg)
#displaying msg if message type is selected in check box
if (self.msg_type_dict.get(msg[0])()):
msg_str = self.convert_to_string(msg)
self.lbMessages.insert(END, msg_str)
fg_color = COLORS.get(msg_str[:3], 'black')
self.lbMessages.itemconfigure(END, fg=fg_color)
except Queue.Empty:
pass
return True
def periodic_call(self):
"""determine when to unlock the run button"""
if self.process_incoming():
self.root.after(100, self.periodic_call)
else:
#enabling button so it can be run again
self.btnRun.config(state=NORMAL)
def mainloop(self):
"""launch the mainloop of the application"""
self.root.mainloop()
def quit(self, _=None):
"""quit the application"""
self.root.quit()
def halt(self):
"""program halt placeholder"""
return
def file_open(self, package=False, _=None):
"""launch a file browser"""
if not package:
filename = askopenfilename(parent=self.root, filetypes=[('pythonfiles', '*.py'),
('allfiles', '*')], title='Select Module')
else:
filename = askdirectory(title="Select A Folder", mustexist=1)
if filename == ():
return
self.txtModule.delete(0, END)
self.txtModule.insert(0, filename)
def update_filenames(self):
"""update the list of recent filenames"""
filename = self.txtModule.get()
if not filename:
filename = os.getcwd()
if filename+'\n' in self.filenames:
index = self.filenames.index(filename+'\n')
self.filenames.pop(index)
#ensure only 10 most recent are stored
if len(self.filenames) == 10:
self.filenames.pop()
self.filenames.insert(0, filename+'\n')
def set_history_window(self):
"""update the history window with info from the history file"""
#clear the window
self.showhistory.delete(0, END)
# keep the last 10 most recent files
try:
view_history = open(HOME+HISTORY, 'r')
for hist in view_history.readlines():
if hist not in self.filenames:
self.filenames.append(hist)
self.showhistory.insert(END, hist.split('\n')[0])
view_history.close()
except IOError:
# do nothing since history file will be created later
return
def run_lint(self, _=None):
"""launches pylint"""
self.update_filenames()
self.root.configure(cursor='watch')
self.reporter = GUIReporter(self, output=self.report_stream)
module = self.txtModule.get()
if not module:
module = os.getcwd()
#cleaning up msgs and windows
self.msgs = []
self.lbMessages.delete(0, END)
self.tabs = {}
self.results.delete(0, END)
self.btnRun.config(state=DISABLED)
#setting up a worker thread to run pylint
worker = Thread(target=lint_thread, args=(module, self.reporter, self,))
self.periodic_call()
worker.start()
# Overwrite the .pylint-gui-history file with all the new recently added files
# in order from filenames but only save last 10 files
write_history = open(HOME+HISTORY, 'w')
write_history.writelines(self.filenames)
write_history.close()
self.set_history_window()
self.root.configure(cursor='')
def lint_thread(module, reporter, gui):
"""thread for pylint"""
gui.status.config(text="processing module(s)")
lint_obj = pylint.lint.Run(args=[module], reporter=reporter, exit=False)
gui.msg_queue.put("DONE")
def Run(args):
"""launch pylint gui from args"""
if args:
print 'USAGE: pylint-gui\n launch a simple pylint gui using Tk'
return
gui = LintGui()
gui.mainloop()
if __name__ == '__main__':
Run(sys.argv[1:])
|
|
from __future__ import unicode_literals, division, absolute_import
import logging
import urllib
import os
import posixpath
from datetime import datetime, timedelta
import random
import xml.etree.ElementTree as ElementTree
from sqlalchemy import Column, Integer, Float, String, Unicode, Boolean, DateTime, func
from sqlalchemy.schema import ForeignKey
from sqlalchemy.orm import relation
from requests import RequestException
from flexget import db_schema
from flexget.utils.tools import decode_html
from flexget.utils.requests import Session as ReqSession
from flexget.utils.database import with_session, pipe_list_synonym, text_date_synonym
from flexget.utils.sqlalchemy_utils import table_add_column
from flexget.manager import Session
from flexget.utils.simple_persistence import SimplePersistence
SCHEMA_VER = 3
log = logging.getLogger('api_tvdb')
Base = db_schema.versioned_base('api_tvdb', SCHEMA_VER)
requests = ReqSession(timeout=25)
# This is a FlexGet API key
api_key = '4D297D8CFDE0E105'
language = 'en'
server = 'http://www.thetvdb.com/api/'
_mirrors = {}
persist = SimplePersistence('api_tvdb')
@db_schema.upgrade('api_tvdb')
def upgrade(ver, session):
if ver is None:
if 'last_updated' in persist:
del persist['last_updated']
ver = 0
if ver == 0:
table_add_column('tvdb_episodes', 'gueststars', Unicode, session)
ver = 1
if ver == 1:
table_add_column('tvdb_episodes', 'absolute_number', Integer, session)
ver = 2
if ver == 2:
table_add_column('tvdb_series', 'overview', Unicode, session)
ver = 3
return ver
def get_mirror(type='xml'):
"""Returns a random mirror for a given type 'xml', 'zip', or 'banner'"""
global _mirrors
if not _mirrors.get(type):
# Get the list of mirrors from tvdb
page = None
try:
page = requests.get(server + api_key + '/mirrors.xml').content
except RequestException:
pass
# If there were problems getting the mirror list we'll just fall back to the main site.
if page:
data = ElementTree.fromstring(page)
for mirror in data.findall('Mirror'):
type_mask = int(mirror.find("typemask").text)
mirrorpath = mirror.find("mirrorpath").text
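# typemask is a bit field: 1 = xml mirror, 2 = banner mirror, 4 = zip mirror;
# a single mirror may serve several types at once.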
for t in [(1, 'xml'), (2, 'banner'), (4, 'zip')]:
if type_mask & t[0]:
_mirrors.setdefault(t[1], set()).add(mirrorpath)
else:
log.debug('Unable to get the mirrors list from thetvdb.')
if _mirrors.get(type):
return random.sample(_mirrors[type], 1)[0] + ('/banners/' if type == 'banner' else '/api/')
else:
# If nothing was populated from the server's mirror list, return the main site as fallback
return 'http://thetvdb.com' + ('/banners/' if type == 'banner' else '/api/')
class TVDBContainer(object):
"""Base class for TVDb objects"""
def __init__(self, init_xml=None):
if init_xml is not None:
self.update_from_xml(init_xml)
def update_from_xml(self, update_xml):
"""Populates any simple (string or number) attributes from a dict"""
for node in update_xml:
if not node.text or not node.tag:
continue
# Have to iterate to get around the inability to do a case-insensitive find
for col in self.__table__.columns:
if node.tag.lower() == col.name.lower():
if isinstance(col.type, Integer):
value = int(node.text)
elif isinstance(col.type, Float):
value = float(node.text)
else:
# Make sure we always have unicode strings
value = node.text.decode('utf-8') if isinstance(node.text, str) else node.text
value = decode_html(value)
setattr(self, col.name, value)
self.expired = False
class TVDBSeries(TVDBContainer, Base):
__tablename__ = "tvdb_series"
id = Column(Integer, primary_key=True, autoincrement=False)
lastupdated = Column(Integer)
expired = Column(Boolean)
seriesname = Column(Unicode)
language = Column(Unicode)
rating = Column(Float)
status = Column(Unicode)
runtime = Column(Integer)
airs_time = Column(Unicode)
airs_dayofweek = Column(Unicode)
contentrating = Column(Unicode)
network = Column(Unicode)
overview = Column(Unicode)
imdb_id = Column(String)
zap2it_id = Column(String)
banner = Column(String)
fanart = Column(String)
poster = Column(String)
poster_file = Column(Unicode)
_genre = Column('genre', Unicode)
genre = pipe_list_synonym('_genre')
_firstaired = Column('firstaired', DateTime)
firstaired = text_date_synonym('_firstaired')
episodes = relation('TVDBEpisode', backref='series', cascade='all, delete, delete-orphan')
def update(self):
if not self.id:
raise LookupError('Cannot update a series without a tvdb id.')
url = get_mirror() + api_key + '/series/%s/%s.xml' % (self.id, language)
try:
data = requests.get(url).content
except RequestException as e:
raise LookupError('Request failed %s' % url)
result = ElementTree.fromstring(data).find('Series')
if result is not None:
self.update_from_xml(result)
else:
raise LookupError('Could not retrieve information from thetvdb')
def get_poster(self, only_cached=False):
"""Downloads this poster to a local cache and returns the path"""
from flexget.manager import manager
base_dir = os.path.join(manager.config_base, 'userstatic')
if os.path.isfile(os.path.join(base_dir, self.poster_file or '')):
return self.poster_file
elif only_cached:
return
# If we don't already have a local copy, download one.
url = get_mirror('banner') + self.poster
log.debug('Downloading poster %s' % url)
dirname = os.path.join('tvdb', 'posters')
# Create folders if they don't exist
fullpath = os.path.join(base_dir, dirname)
if not os.path.isdir(fullpath):
os.makedirs(fullpath)
filename = os.path.join(dirname, posixpath.basename(self.poster))
thefile = file(os.path.join(base_dir, filename), 'wb')
thefile.write(requests.get(url).content)
self.poster_file = filename
# If we are detached from a session, update the db
if not Session.object_session(self):
session = Session()
try:
session.query(TVDBSeries).filter(TVDBSeries.id == self.id).update(values={'poster_file': filename})
finally:
session.close()
return filename
def __repr__(self):
return '<TVDBSeries name=%s,tvdb_id=%s>' % (self.seriesname, self.id)
class TVDBEpisode(TVDBContainer, Base):
__tablename__ = 'tvdb_episodes'
id = Column(Integer, primary_key=True, autoincrement=False)
expired = Column(Boolean)
lastupdated = Column(Integer)
seasonnumber = Column(Integer)
episodenumber = Column(Integer)
absolute_number = Column(Integer)
episodename = Column(Unicode)
overview = Column(Unicode)
_director = Column('director', Unicode)
director = pipe_list_synonym('_director')
_writer = Column('writer', Unicode)
writer = pipe_list_synonym('_writer')
_gueststars = Column('gueststars', Unicode)
gueststars = pipe_list_synonym('_gueststars')
rating = Column(Float)
filename = Column(Unicode)
_firstaired = Column('firstaired', DateTime)
firstaired = text_date_synonym('_firstaired')
series_id = Column(Integer, ForeignKey('tvdb_series.id'), nullable=False)
def update(self):
if not self.id:
raise LookupError('Cannot update an episode without an episode id.')
url = get_mirror() + api_key + '/episodes/%s/%s.xml' % (self.id, language)
try:
data = requests.get(url).content
except RequestException as e:
raise LookupError('Request failed %s' % url)
result = ElementTree.fromstring(data).find('Episode')
if result is not None:
self.update_from_xml(result)
else:
raise LookupError('Could not retrieve information from thetvdb')
def __repr__(self):
return '<TVDBEpisode series=%s,season=%s,episode=%s>' %\
(self.series.seriesname, self.seasonnumber, self.episodenumber)
class TVDBSearchResult(Base):
__tablename__ = 'tvdb_search_results'
id = Column(Integer, primary_key=True)
search = Column(Unicode, nullable=False)
series_id = Column(Integer, ForeignKey('tvdb_series.id'), nullable=True)
series = relation(TVDBSeries, backref='search_strings')
def find_series_id(name):
"""Looks up the tvdb id for a series"""
url = server + 'GetSeries.php?seriesname=%s&language=%s' % (urllib.quote(name), language)
try:
page = requests.get(url).content
except RequestException as e:
raise LookupError('Unable to get search results for %s: %s' % (name, e))
try:
xmldata = ElementTree.fromstring(page)
except ElementTree.ParseError as e:
log.error('error parsing tvdb result for %s: %s' % (name, e))
return
if xmldata is None:
log.error("Didn't get a return from tvdb on the series search for %s" % name)
return
# See if there is an exact match
# TODO: Check if there are multiple exact matches
firstmatch = xmldata.find('Series')
if firstmatch is not None and firstmatch.find("SeriesName").text.lower() == name.lower():
return int(firstmatch.find("seriesid").text)
# If there is no exact match, sort by airing date and pick the latest
# TODO: Is there a better way to do this? Maybe weight name similarity and air date
series_list = []
for s in xmldata.findall('Series'):
fa = s.find("FirstAired")
if fa is not None and fa.text:
series_list.append((fa.text, s.find("seriesid").text))
if series_list:
series_list.sort(key=lambda s: s[0], reverse=True)
return int(series_list[0][1])
else:
raise LookupError('No results for `%s`' % name)
@with_session
def lookup_series(name=None, tvdb_id=None, only_cached=False, session=None):
if not name and not tvdb_id:
raise LookupError('No criteria specified for tvdb lookup')
log.debug('Looking up tvdb information for %r' % {'name': name, 'tvdb_id': tvdb_id})
series = None
def id_str():
return '<name=%s,tvdb_id=%s>' % (name, tvdb_id)
if tvdb_id:
series = session.query(TVDBSeries).filter(TVDBSeries.id == tvdb_id).first()
if not series and name:
series = session.query(TVDBSeries).filter(func.lower(TVDBSeries.seriesname) == name.lower()).first()
if not series:
found = session.query(TVDBSearchResult).filter(
func.lower(TVDBSearchResult.search) == name.lower()).first()
if found and found.series:
series = found.series
if series:
# Series found in cache, update if cache has expired.
if not only_cached:
mark_expired(session=session)
if series.expired and not only_cached:
log.verbose('Data for %s has expired, refreshing from tvdb' % series.seriesname)
try:
series.update()
except LookupError as e:
log.warning('Error while updating from tvdb (%s), using cached data.' % e.args[0])
else:
log.debug('Series %s information restored from cache.' % id_str())
else:
if only_cached:
raise LookupError('Series %s not found from cache' % id_str())
# There was no series found in the cache, do a lookup from tvdb
log.debug('Series %s not found in cache, looking up from tvdb.' % id_str())
if tvdb_id:
series = TVDBSeries()
series.id = tvdb_id
series.update()
if series.seriesname:
session.add(series)
elif name:
tvdb_id = find_series_id(name)
if tvdb_id:
series = session.query(TVDBSeries).filter(TVDBSeries.id == tvdb_id).first()
if not series:
series = TVDBSeries()
series.id = tvdb_id
series.update()
session.add(series)
if name.lower() != series.seriesname.lower():
session.add(TVDBSearchResult(search=name, series=series))
if not series:
raise LookupError('No results found from tvdb for %s' % id_str())
if not series.seriesname:
raise LookupError('Tvdb result for series does not have a title.')
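# Touch the episodes relation so it is loaded while the session is still open.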
series.episodes
return series
@with_session
def lookup_episode(name=None, seasonnum=None, episodenum=None, absolutenum=None, airdate=None,
tvdb_id=None, only_cached=False, session=None):
# First make sure we have the series data
series = lookup_series(name=name, tvdb_id=tvdb_id, only_cached=only_cached, session=session)
if not series:
raise LookupError('Could not identify series')
# Set variables depending on what type of identifier we are looking up
if airdate:
airdatestring = airdate.strftime('%Y-%m-%d')
ep_description = '%s.%s' % (series.seriesname, airdatestring)
episode = session.query(TVDBEpisode).filter(TVDBEpisode.series_id == series.id).\
filter(TVDBEpisode.firstaired == airdate).first()
url = get_mirror() + ('GetEpisodeByAirDate.php?apikey=%s&seriesid=%d&airdate=%s&language=%s' %
(api_key, series.id, airdatestring, language))
elif absolutenum:
ep_description = '%s.%d' % (series.seriesname, absolutenum)
episode = session.query(TVDBEpisode).filter(TVDBEpisode.series_id == series.id).\
filter(TVDBEpisode.absolute_number == absolutenum).first()
url = get_mirror() + api_key + '/series/%d/absolute/%s/%s.xml' % (series.id, absolutenum, language)
else:
ep_description = '%s.S%sE%s' % (series.seriesname, seasonnum, episodenum)
# See if we have this episode cached
episode = session.query(TVDBEpisode).filter(TVDBEpisode.series_id == series.id).\
filter(TVDBEpisode.seasonnumber == seasonnum).\
filter(TVDBEpisode.episodenumber == episodenum).first()
url = get_mirror() + api_key + '/series/%d/default/%d/%d/%s.xml' % (series.id, seasonnum, episodenum, language)
if episode:
if episode.expired and not only_cached:
log.info('Data for %r has expired, refreshing from tvdb' % episode)
try:
episode.update()
except LookupError as e:
log.warning('Error while updating from tvdb (%s), using cached data.' % e.args[0])
else:
log.debug('Using episode info for %s from cache.' % ep_description)
else:
if only_cached:
raise LookupError('Episode %s not found from cache' % ep_description)
# There was no episode found in the cache, do a lookup from tvdb
log.debug('Episode %s not found in cache, looking up from tvdb.' % ep_description)
try:
raw_data = requests.get(url).content
data = ElementTree.fromstring(raw_data)
if data is not None:
error = data.find('Error') # TODO: lowercase????
if error is not None:
raise LookupError('Error looking up episode from TVDb (%s)' % error.text)
ep_data = data.find('Episode')
if ep_data is not None:
# Check if this episode id is already in our db
episode = session.query(TVDBEpisode).filter(TVDBEpisode.id == ep_data.find("id").text).first()
if episode is not None:
episode.update_from_xml(ep_data)
else:
episode = TVDBEpisode(ep_data)
series.episodes.append(episode)
session.merge(series)
except RequestException as e:
raise LookupError('Error looking up episode from TVDb (%s)' % e)
if episode:
# Access the series attribute to force it to load before returning
episode.series
return episode
else:
raise LookupError('No results found for %s' % ep_description)
@with_session
def mark_expired(session=None):
"""Marks series and episodes that have expired since we cached them"""
# Only get the expired list every six hours
last_server = persist.get('last_server')
last_local = persist.get('last_local')
if not last_local:
# Never run before? Let's reset ALL series
log.info('Setting all series to expire')
session.query(TVDBSeries).update({'expired': True}, 'fetch')
persist['last_local'] = datetime.now()
return
elif last_local + timedelta(hours=6) > datetime.now():
# It has been less than six hours, don't check again yet
return
if not last_server:
last_server = 0
#Need to figure out what type of update file to use
#Default of day
get_update = 'day'
last_update_days = (datetime.now() - last_local).days
if 1 < last_update_days < 7:
get_update = 'week'
elif last_update_days > 7:
get_update = 'month'
try:
# Get items that have changed since our last update
log.debug("Getting %s worth of updates from thetvdb" % get_update)
content = requests.get(server + api_key + '/updates/updates_%s.xml' % get_update).content
if not isinstance(content, basestring):
raise Exception('expected string, got %s' % type(content))
updates = ElementTree.fromstring(content)
except RequestException as e:
log.error('Could not get update information from tvdb: %s' % e)
return
if updates is not None:
new_server = int(updates.attrib['time'])
if new_server < last_server:
#nothing changed on the server, ignoring
log.debug("Not checking for expired as nothing has changed on server")
return
# Make lists of expired series and episode ids
expired_series = []
expired_episodes = []
for series in updates.findall('Series'):
expired_series.append(int(series.find("id").text))
for episode in updates.findall('Episode'):
expired_episodes.append(int(episode.find("id").text))
def chunked(seq):
"""Helper to divide our expired lists into sizes sqlite can handle in a query. (<1000)"""
for i in xrange(0, len(seq), 900):
yield seq[i:i + 900]
# Update our cache to mark the items that have expired
for chunk in chunked(expired_series):
num = session.query(TVDBSeries).filter(TVDBSeries.id.in_(chunk)).update({'expired': True}, 'fetch')
log.debug('%s series marked as expired' % num)
for chunk in chunked(expired_episodes):
num = session.query(TVDBEpisode).filter(TVDBEpisode.id.in_(chunk)).update({'expired': True}, 'fetch')
log.debug('%s episodes marked as expired' % num)
# Save the time of this update
persist['last_local'] = datetime.now()
persist['last_server'] = new_server
|
|
##
# Copyright (c) 2011-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Interactive shell for terminals.
"""
from __future__ import print_function
__all__ = [
"usage",
"ShellOptions",
"ShellService",
"ShellProtocol",
"main",
]
import string
import os
import sys
import tty
import termios
from shlex import shlex
from twisted.python.failure import Failure
from twisted.python.text import wordWrap
from twisted.python.usage import Options, UsageError
from twisted.internet.defer import Deferred, succeed
from twisted.internet.defer import inlineCallbacks
from twisted.internet.stdio import StandardIO
from twisted.conch.recvline import HistoricRecvLine as ReceiveLineProtocol
from twisted.conch.insults.insults import ServerProtocol
from twext.python.log import Logger
from txdav.common.icommondatastore import NotFoundError
from twistedcaldav.stdconfig import DEFAULT_CONFIG_FILE
from calendarserver.tools.cmdline import utilityMain, WorkerService
from calendarserver.tools.shell.cmd import Commands, UsageError as CommandUsageError
log = Logger()
def usage(e=None):
if e:
print(e)
print("")
try:
ShellOptions().opt_help()
except SystemExit:
pass
if e:
sys.exit(64)
else:
sys.exit(0)
class ShellOptions(Options):
"""
Command line options for "calendarserver_shell".
"""
synopsis = "\n".join(
wordWrap(
"""
Usage: calendarserver_shell [options]\n
""" + __doc__,
int(os.environ.get("COLUMNS", "80"))
)
)
optParameters = [
["config", "f", DEFAULT_CONFIG_FILE, "Specify caldavd.plist configuration path."],
]
def __init__(self):
super(ShellOptions, self).__init__()
class ShellService(WorkerService, object):
"""
A L{ShellService} collects all the information that a shell needs to run;
when run, it invokes the shell on stdin/stdout.
@ivar store: the calendar / addressbook store.
@type store: L{txdav.idav.IDataStore}
@ivar directory: the directory service, to look up principals' names
@type directory: L{twistedcaldav.directory.idirectory.IDirectoryService}
@ivar options: the command-line options used to create this shell service
@type options: L{ShellOptions}
@ivar reactor: the reactor under which this service is running
@type reactor: L{IReactorTCP}, L{IReactorTime}, L{IReactorThreads} etc
@ivar config: the configuration associated with this shell service.
@type config: L{twistedcaldav.config.Config}
"""
def __init__(self, store, options, reactor, config):
super(ShellService, self).__init__(store)
self.directory = store.directoryService()
self.options = options
self.reactor = reactor
self.config = config
self.terminalFD = None
self.protocol = None
def doWork(self):
"""
Service startup.
"""
# Set up the terminal for interactive action
self.terminalFD = sys.__stdin__.fileno()
self._oldTerminalSettings = termios.tcgetattr(self.terminalFD)
tty.setraw(self.terminalFD)
self.protocol = ServerProtocol(lambda: ShellProtocol(self))
StandardIO(self.protocol)
return succeed(None)
def postStartService(self):
"""
Don't quit right away
"""
pass
def stopService(self):
"""
Stop the service.
"""
# Restore terminal settings
termios.tcsetattr(self.terminalFD, termios.TCSANOW, self._oldTerminalSettings)
os.write(self.terminalFD, "\r\x1bc\r")
class ShellProtocol(ReceiveLineProtocol):
"""
Data store shell protocol.
@ivar service: a service representing the running shell
@type service: L{ShellService}
"""
# FIXME:
# * Received lines are being echoed; find out why and stop it.
# * Backspace transposes characters in the terminal.
ps = ("ds% ", "... ")
emulation_modes = ("emacs", "none")
def __init__(self, service, commandsClass=Commands):
ReceiveLineProtocol.__init__(self)
self.service = service
self.inputLines = []
self.commands = commandsClass(self)
self.activeCommand = None
self.emulate = "emacs"
def reloadCommands(self):
# FIXME: doesn't work for alternative Commands classes passed
# to __init__.
self.terminal.write("Reloading commands class...\n")
import calendarserver.tools.shell.cmd
reload(calendarserver.tools.shell.cmd)
self.commands = calendarserver.tools.shell.cmd.Commands(self)
#
# Input handling
#
def connectionMade(self):
ReceiveLineProtocol.connectionMade(self)
self.keyHandlers['\x03'] = self.handle_INT # Control-C
self.keyHandlers['\x04'] = self.handle_EOF # Control-D
self.keyHandlers['\x1c'] = self.handle_QUIT # Control-\
self.keyHandlers['\x0c'] = self.handle_FF # Control-L
# self.keyHandlers['\t' ] = self.handle_TAB # Tab
if self.emulate == "emacs":
# EMACS key bindings
self.keyHandlers['\x10'] = self.handle_UP # Control-P
self.keyHandlers['\x0e'] = self.handle_DOWN # Control-N
self.keyHandlers['\x02'] = self.handle_LEFT # Control-B
self.keyHandlers['\x06'] = self.handle_RIGHT # Control-F
self.keyHandlers['\x01'] = self.handle_HOME # Control-A
self.keyHandlers['\x05'] = self.handle_END # Control-E
def observer(event):
if not event["isError"]:
return
text = log.textFromEventDict(event)
if text is None:
return
self.service.reactor.callFromThread(self.terminal.write, text)
log.startLoggingWithObserver(observer)
def handle_INT(self):
return self.resetInputLine()
def handle_EOF(self):
if self.lineBuffer:
if self.emulate == "emacs":
self.handle_DELETE()
else:
self.terminal.write("\a")
else:
self.handle_QUIT()
def handle_FF(self):
"""
Handle a "form feed" byte - generally used to request a screen
refresh/redraw.
"""
# FIXME: Clear screen != redraw screen.
return self.clearScreen()
def handle_QUIT(self):
return self.exit()
def handle_TAB(self):
return self.completeLine()
#
# Utilities
#
def clearScreen(self):
"""
Clear the display.
"""
self.terminal.eraseDisplay()
self.terminal.cursorHome()
self.drawInputLine()
def resetInputLine(self):
"""
Reset the current input variables to their initial state.
"""
self.pn = 0
self.lineBuffer = []
self.lineBufferIndex = 0
self.terminal.nextLine()
self.drawInputLine()
@inlineCallbacks
def completeLine(self):
"""
Perform auto-completion on the input line.
"""
# Tokenize the text before the cursor
tokens = self.tokenize("".join(self.lineBuffer[:self.lineBufferIndex]))
if tokens:
if len(tokens) == 1 and self.lineBuffer[-1] in string.whitespace:
word = ""
else:
word = tokens[-1]
cmd = tokens.pop(0)
else:
word = cmd = ""
if cmd and (tokens or word == ""):
# Completing arguments
m = getattr(self.commands, "complete_%s" % (cmd,), None)
if not m:
return
try:
completions = tuple((yield m(tokens)))
except Exception, e:
self.handleFailure(Failure(e))
return
log.info("COMPLETIONS: %r" % (completions,))
else:
# Completing command name
completions = tuple(self.commands.complete_commands(cmd))
if len(completions) == 1:
for c in completions.__iter__().next():
self.characterReceived(c, True)
# FIXME: Add a space only if we know we've fully completed the term.
# self.characterReceived(" ", False)
else:
self.terminal.nextLine()
for completion in completions:
# FIXME Emitting these in columns would be swell
self.terminal.write("%s%s\n" % (word, completion))
self.drawInputLine()
def exit(self):
"""
Exit.
"""
self.terminal.loseConnection()
self.service.reactor.stop()
def handleFailure(self, f):
"""
Handle a failure raised in the interpreter by printing a
traceback and resetting the input line.
"""
if self.lineBuffer:
self.terminal.nextLine()
self.terminal.write("Error: %s !!!" % (f.value,))
if not f.check(NotImplementedError, NotFoundError):
log.info(f.getTraceback())
self.resetInputLine()
#
# Command dispatch
#
def lineReceived(self, line):
if self.activeCommand is not None:
self.inputLines.append(line)
return
tokens = self.tokenize(line)
if tokens:
cmd = tokens.pop(0)
# print("Arguments: %r" % (tokens,))
m = getattr(self.commands, "cmd_%s" % (cmd,), None)
if m:
def handleUsageError(f):
f.trap(CommandUsageError)
self.terminal.write("%s\n" % (f.value,))
doc = self.commands.documentationForCommand(cmd)
if doc:
self.terminal.nextLine()
self.terminal.write(doc)
self.terminal.nextLine()
def next(_):
self.activeCommand = None
if self.inputLines:
line = self.inputLines.pop(0)
self.lineReceived(line)
d = self.activeCommand = Deferred()
d.addCallback(lambda _: m(tokens))
if True:
d.callback(None)
else:
# Add time to test callbacks
self.service.reactor.callLater(4, d.callback, None)
d.addErrback(handleUsageError)
d.addCallback(lambda _: self.drawInputLine())
d.addErrback(self.handleFailure)
d.addCallback(next)
else:
self.terminal.write("Unknown command: %s\n" % (cmd,))
self.drawInputLine()
else:
self.drawInputLine()
@staticmethod
def tokenize(line):
"""
Tokenize input line.
@return: an iterable of tokens
"""
lexer = shlex(line)
lexer.whitespace_split = True
tokens = []
while True:
token = lexer.get_token()
if not token:
break
tokens.append(token)
return tokens
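# A minimal sketch of what tokenize() produces, assuming a plain unquoted
# command line (quoting itself is delegated to shlex):
#
#   ShellProtocol.tokenize("ls /calendars/users")  ->  ["ls", "/calendars/users"]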
def main(argv=sys.argv, stderr=sys.stderr, reactor=None):
if reactor is None:
from twisted.internet import reactor
options = ShellOptions()
try:
options.parseOptions(argv[1:])
except UsageError, e:
usage(e)
def makeService(store):
from twistedcaldav.config import config
return ShellService(store, options, reactor, config)
print("Initializing shell...")
utilityMain(options["config"], makeService, reactor)
|
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
compat_urllib_request,
compat_urlparse,
)
from ..utils import (
determine_ext,
extract_attributes,
ExtractorError,
float_or_none,
int_or_none,
sanitized_Request,
unescapeHTML,
urlencode_postdata,
)
class UdemyIE(InfoExtractor):
IE_NAME = 'udemy'
_VALID_URL = r'''(?x)
https?://
www\.udemy\.com/
(?:
[^#]+\#/lecture/|
lecture/view/?\?lectureId=|
[^/]+/learn/v4/t/lecture/
)
(?P<id>\d+)
'''
_LOGIN_URL = 'https://www.udemy.com/join/login-popup/?displayType=ajax&showSkipButton=1'
_ORIGIN_URL = 'https://www.udemy.com'
_NETRC_MACHINE = 'udemy'
_TESTS = [{
'url': 'https://www.udemy.com/java-tutorial/#/lecture/172757',
'md5': '98eda5b657e752cf945d8445e261b5c5',
'info_dict': {
'id': '160614',
'ext': 'mp4',
'title': 'Introduction and Installation',
'description': 'md5:c0d51f6f21ef4ec65f091055a5eef876',
'duration': 579.29,
},
'skip': 'Requires udemy account credentials',
}, {
# new URL schema
'url': 'https://www.udemy.com/electric-bass-right-from-the-start/learn/v4/t/lecture/4580906',
'only_matching': True,
}]
def _extract_course_info(self, webpage, video_id):
course = self._parse_json(
unescapeHTML(self._search_regex(
r'ng-init=["\'].*\bcourse=({.+?});', webpage, 'course', default='{}')),
video_id, fatal=False) or {}
course_id = course.get('id') or self._search_regex(
(r'"id"\s*:\s*(\d+)', r'data-course-id=["\'](\d+)'),
webpage, 'course id')
return course_id, course.get('title')
def _enroll_course(self, base_url, webpage, course_id):
def combine_url(base_url, url):
return compat_urlparse.urljoin(base_url, url) if not url.startswith('http') else url
checkout_url = unescapeHTML(self._search_regex(
r'href=(["\'])(?P<url>(?:https?://(?:www\.)?udemy\.com)?/payment/checkout/.+?)\1',
webpage, 'checkout url', group='url', default=None))
if checkout_url:
raise ExtractorError(
'Course %s is not free. You have to pay for it before you can download. '
'Use this URL to confirm purchase: %s'
% (course_id, combine_url(base_url, checkout_url)),
expected=True)
enroll_url = unescapeHTML(self._search_regex(
r'href=(["\'])(?P<url>(?:https?://(?:www\.)?udemy\.com)?/course/subscribe/.+?)\1',
webpage, 'enroll url', group='url', default=None))
if enroll_url:
webpage = self._download_webpage(
combine_url(base_url, enroll_url),
course_id, 'Enrolling in the course',
headers={'Referer': base_url})
if '>You have enrolled in' in webpage:
self.to_screen('%s: Successfully enrolled in the course' % course_id)
def _download_lecture(self, course_id, lecture_id):
return self._download_json(
'https://www.udemy.com/api-2.0/users/me/subscribed-courses/%s/lectures/%s?'
% (course_id, lecture_id),
lecture_id, 'Downloading lecture JSON', query={
'fields[lecture]': 'title,description,view_html,asset',
'fields[asset]': 'asset_type,stream_url,thumbnail_url,download_urls,data',
})
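    # A hedged sketch of the endpoint _download_lecture hits (ids are
    # illustrative): for course_id=123 and lecture_id=456 it requests
    #   https://www.udemy.com/api-2.0/users/me/subscribed-courses/123/lectures/456?
    # with the fields[lecture] / fields[asset] selections passed via `query`.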
def _handle_error(self, response):
if not isinstance(response, dict):
return
error = response.get('error')
if error:
error_str = 'Udemy returned error #%s: %s' % (error.get('code'), error.get('message'))
error_data = error.get('data')
if error_data:
error_str += ' - %s' % error_data.get('formErrors')
raise ExtractorError(error_str, expected=True)
def _download_json(self, url_or_request, *args, **kwargs):
headers = {
'X-Udemy-Snail-Case': 'true',
'X-Requested-With': 'XMLHttpRequest',
}
for cookie in self._downloader.cookiejar:
if cookie.name == 'client_id':
headers['X-Udemy-Client-Id'] = cookie.value
elif cookie.name == 'access_token':
headers['X-Udemy-Bearer-Token'] = cookie.value
headers['X-Udemy-Authorization'] = 'Bearer %s' % cookie.value
if isinstance(url_or_request, compat_urllib_request.Request):
for header, value in headers.items():
url_or_request.add_header(header, value)
else:
url_or_request = sanitized_Request(url_or_request, headers=headers)
response = super(UdemyIE, self)._download_json(url_or_request, *args, **kwargs)
self._handle_error(response)
return response
def _real_initialize(self):
self._login()
def _login(self):
(username, password) = self._get_login_info()
if username is None:
return
login_popup = self._download_webpage(
self._LOGIN_URL, None, 'Downloading login popup')
def is_logged(webpage):
return any(re.search(p, webpage) for p in (
r'href=["\'](?:https://www\.udemy\.com)?/user/logout/',
r'>Logout<'))
# already logged in
if is_logged(login_popup):
return
login_form = self._form_hidden_inputs('login-form', login_popup)
login_form.update({
'email': username,
'password': password,
})
response = self._download_webpage(
self._LOGIN_URL, None, 'Logging in as %s' % username,
data=urlencode_postdata(login_form),
headers={
'Referer': self._ORIGIN_URL,
'Origin': self._ORIGIN_URL,
})
if not is_logged(response):
error = self._html_search_regex(
r'(?s)<div[^>]+class="form-errors[^"]*">(.+?)</div>',
response, 'error message', default=None)
if error:
raise ExtractorError('Unable to log in: %s' % error, expected=True)
raise ExtractorError('Unable to log in')
def _real_extract(self, url):
lecture_id = self._match_id(url)
webpage = self._download_webpage(url, lecture_id)
course_id, _ = self._extract_course_info(webpage, lecture_id)
try:
lecture = self._download_lecture(course_id, lecture_id)
except ExtractorError as e:
# Error could possibly mean we are not enrolled in the course
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
self._enroll_course(url, webpage, course_id)
lecture = self._download_lecture(course_id, lecture_id)
else:
raise
title = lecture['title']
description = lecture.get('description')
asset = lecture['asset']
asset_type = asset.get('asset_type') or asset.get('assetType')
if asset_type != 'Video':
raise ExtractorError(
'Lecture %s is not a video' % lecture_id, expected=True)
stream_url = asset.get('stream_url') or asset.get('streamUrl')
if stream_url:
youtube_url = self._search_regex(
r'(https?://www\.youtube\.com/watch\?v=.*)', stream_url, 'youtube URL', default=None)
if youtube_url:
return self.url_result(youtube_url, 'Youtube')
video_id = compat_str(asset['id'])
thumbnail = asset.get('thumbnail_url') or asset.get('thumbnailUrl')
duration = float_or_none(asset.get('data', {}).get('duration'))
subtitles = {}
automatic_captions = {}
formats = []
def extract_output_format(src, f_id):
return {
'url': src['url'],
'format_id': '%sp' % (src.get('height') or f_id),
'width': int_or_none(src.get('width')),
'height': int_or_none(src.get('height')),
'vbr': int_or_none(src.get('video_bitrate_in_kbps')),
'vcodec': src.get('video_codec'),
'fps': int_or_none(src.get('frame_rate')),
'abr': int_or_none(src.get('audio_bitrate_in_kbps')),
'acodec': src.get('audio_codec'),
'asr': int_or_none(src.get('audio_sample_rate')),
'tbr': int_or_none(src.get('total_bitrate_in_kbps')),
'filesize': int_or_none(src.get('file_size_in_bytes')),
}
outputs = asset.get('data', {}).get('outputs')
if not isinstance(outputs, dict):
outputs = {}
def add_output_format_meta(f, key):
output = outputs.get(key)
if isinstance(output, dict):
output_format = extract_output_format(output, key)
output_format.update(f)
return output_format
return f
def extract_formats(source_list):
if not isinstance(source_list, list):
return
for source in source_list:
video_url = source.get('file') or source.get('src')
if not video_url or not isinstance(video_url, compat_str):
continue
format_id = source.get('label')
f = {
'url': video_url,
'format_id': '%sp' % format_id,
'height': int_or_none(format_id),
}
if format_id:
# Some videos contain additional metadata (e.g.
# https://www.udemy.com/ios9-swift/learn/#/lecture/3383208)
f = add_output_format_meta(f, format_id)
formats.append(f)
download_urls = asset.get('download_urls')
if isinstance(download_urls, dict):
extract_formats(download_urls.get('Video'))
view_html = lecture.get('view_html')
if view_html:
view_html_urls = set()
for source in re.findall(r'<source[^>]+>', view_html):
attributes = extract_attributes(source)
src = attributes.get('src')
if not src:
continue
res = attributes.get('data-res')
height = int_or_none(res)
if src in view_html_urls:
continue
view_html_urls.add(src)
if attributes.get('type') == 'application/x-mpegURL' or determine_ext(src) == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
src, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False)
for f in m3u8_formats:
m = re.search(r'/hls_(?P<height>\d{3,4})_(?P<tbr>\d{2,})/', f['url'])
if m:
if not f.get('height'):
f['height'] = int(m.group('height'))
if not f.get('tbr'):
f['tbr'] = int(m.group('tbr'))
formats.extend(m3u8_formats)
else:
formats.append(add_output_format_meta({
'url': src,
'format_id': '%dp' % height if height else None,
'height': height,
}, res))
# react rendition since 2017.04.15 (see
# https://github.com/rg3/youtube-dl/issues/12744)
data = self._parse_json(
self._search_regex(
r'videojs-setup-data=(["\'])(?P<data>{.+?})\1', view_html,
'setup data', default='{}', group='data'), video_id,
transform_source=unescapeHTML, fatal=False)
if data and isinstance(data, dict):
extract_formats(data.get('sources'))
if not duration:
duration = int_or_none(data.get('duration'))
tracks = data.get('tracks')
if isinstance(tracks, list):
for track in tracks:
if not isinstance(track, dict):
continue
if track.get('kind') != 'captions':
continue
src = track.get('src')
if not src or not isinstance(src, compat_str):
continue
lang = track.get('language') or track.get(
'srclang') or track.get('label')
sub_dict = automatic_captions if track.get(
'autogenerated') is True else subtitles
sub_dict.setdefault(lang, []).append({
'url': src,
})
self._sort_formats(formats, field_preference=('height', 'width', 'tbr', 'format_id'))
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
'automatic_captions': automatic_captions,
}
class UdemyCourseIE(UdemyIE):
IE_NAME = 'udemy:course'
_VALID_URL = r'https?://(?:www\.)?udemy\.com/(?P<id>[^/?#&]+)'
_TESTS = []
@classmethod
def suitable(cls, url):
return False if UdemyIE.suitable(url) else super(UdemyCourseIE, cls).suitable(url)
def _real_extract(self, url):
course_path = self._match_id(url)
webpage = self._download_webpage(url, course_path)
course_id, title = self._extract_course_info(webpage, course_path)
self._enroll_course(url, webpage, course_id)
response = self._download_json(
'https://www.udemy.com/api-2.0/courses/%s/cached-subscriber-curriculum-items' % course_id,
course_id, 'Downloading course curriculum', query={
'fields[chapter]': 'title,object_index',
'fields[lecture]': 'title,asset',
'page_size': '1000',
})
entries = []
chapter, chapter_number = [None] * 2
for entry in response['results']:
clazz = entry.get('_class')
if clazz == 'lecture':
asset = entry.get('asset')
if isinstance(asset, dict):
asset_type = asset.get('asset_type') or asset.get('assetType')
if asset_type != 'Video':
continue
lecture_id = entry.get('id')
if lecture_id:
entry = {
'_type': 'url_transparent',
'url': 'https://www.udemy.com/%s/learn/v4/t/lecture/%s' % (course_path, entry['id']),
'title': entry.get('title'),
'ie_key': UdemyIE.ie_key(),
}
if chapter_number:
entry['chapter_number'] = chapter_number
if chapter:
entry['chapter'] = chapter
entries.append(entry)
elif clazz == 'chapter':
chapter_number = entry.get('object_index')
chapter = entry.get('title')
return self.playlist_result(entries, course_id, title)
|
|
from __future__ import absolute_import, division, print_function
from operator import attrgetter
import os
import re
import subprocess
from itertools import chain
from collections import Iterator
from datetime import datetime, date
from distutils.spawn import find_executable
import pandas as pd
import sqlalchemy as sa
from sqlalchemy import inspect
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import event
from sqlalchemy.schema import CreateSchema
from multipledispatch import MDNotImplementedError
import datashape
from datashape import DataShape, Record, Option, var, dshape
from datashape.predicates import isdimension, isrecord, isscalar
from datashape import discover
from datashape.dispatch import dispatch
from toolz import (partition_all, keyfilter, memoize, valfilter,
identity, concat, curry, merge)
from toolz.curried import pluck, map
from ..compatibility import unicode
from ..utils import keywords, ignoring, iter_except
from ..convert import convert, ooc_types
from ..append import append
from ..resource import resource
from ..chunks import Chunks
from .csv import CSV
base = (int, float, datetime, date, bool, str)
# http://docs.sqlalchemy.org/en/latest/core/types.html
types = {
'int64': sa.types.BigInteger,
'int32': sa.types.Integer,
'int': sa.types.Integer,
'int16': sa.types.SmallInteger,
'float32': sa.types.Float(precision=24), # sqlalchemy uses mantissa
'float64': sa.types.Float(precision=53), # for precision
'float': sa.types.Float(precision=53),
'real': sa.types.Float(precision=53),
'string': sa.types.Text,
'date': sa.types.Date,
'time': sa.types.Time,
'datetime': sa.types.DateTime,
'bool': sa.types.Boolean,
"timedelta[unit='D']": sa.types.Interval(second_precision=0,
day_precision=9),
"timedelta[unit='h']": sa.types.Interval(second_precision=0,
day_precision=0),
"timedelta[unit='m']": sa.types.Interval(second_precision=0,
day_precision=0),
"timedelta[unit='s']": sa.types.Interval(second_precision=0,
day_precision=0),
"timedelta[unit='ms']": sa.types.Interval(second_precision=3,
day_precision=0),
"timedelta[unit='us']": sa.types.Interval(second_precision=6,
day_precision=0),
"timedelta[unit='ns']": sa.types.Interval(second_precision=9,
day_precision=0),
# ??: sa.types.LargeBinary,
# Decimal: sa.types.Numeric,
# ??: sa.types.PickleType,
# unicode: sa.types.Unicode,
# unicode: sa.types.UnicodeText,
# str: sa.types.Text, # ??
}
revtypes = dict(map(reversed, types.items()))
revtypes.update({
sa.types.DATETIME: 'datetime',
sa.types.TIMESTAMP: 'datetime',
sa.types.FLOAT: 'float64',
sa.types.DATE: 'date',
sa.types.BIGINT: 'int64',
sa.types.INTEGER: 'int',
sa.types.NUMERIC: 'float64', # TODO: extend datashape to decimal
sa.types.NullType: 'string',
sa.types.Float: 'float64',
})
# interval types are special cased in discover_typeengine so remove them from
# revtypes
revtypes = valfilter(lambda x: not isinstance(x, sa.types.Interval), revtypes)
units_of_power = {
0: 's',
3: 'ms',
6: 'us',
9: 'ns'
}
# these aren't loaded by sqlalchemy by default
sa.dialects.registry.load('oracle')
sa.dialects.registry.load('postgresql')
def batch(sel, chunksize=10000):
"""Execute `sel`, streaming row at a time and fetching from the database in
batches of size `chunksize`.
Parameters
----------
sel : sa.sql.Selectable
Selectable to execute
chunksize : int, optional, default 10000
Number of rows to fetch from the database at a time
"""
def rowterator(sel, chunksize=chunksize):
with sel.bind.connect() as conn:
result = conn.execute(sel)
yield result.keys()
for rows in iter_except(curry(result.fetchmany, size=chunksize),
sa.exc.ResourceClosedError):
if rows:
yield rows
else:
return
terator = rowterator(sel)
return next(terator), concat(terator)
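# A minimal usage sketch for batch(), assuming an engine-bound table `t`;
# nothing here runs at import time:
#
#   keys, rows = batch(sa.select([t]), chunksize=500)
#   # `keys` holds the column names from the first fetch; `rows` lazily
#   # yields individual result rows, fetched from the database 500 at a time.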
@discover.register(sa.dialects.postgresql.base.INTERVAL)
def discover_postgresql_interval(t):
return discover(sa.types.Interval(day_precision=0,
second_precision=t.precision))
@discover.register(sa.dialects.oracle.base.INTERVAL)
def discover_oracle_interval(t):
return discover(t.adapt(sa.types.Interval))
@discover.register(sa.sql.type_api.TypeEngine)
def discover_typeengine(typ):
if isinstance(typ, sa.types.Interval):
if typ.second_precision is None and typ.day_precision is None:
return datashape.TimeDelta(unit='us')
elif typ.second_precision == 0 and typ.day_precision == 0:
return datashape.TimeDelta(unit='s')
if typ.second_precision in units_of_power and not typ.day_precision:
units = units_of_power[typ.second_precision]
elif typ.day_precision > 0:
units = 'D'
else:
raise ValueError('Cannot infer INTERVAL type with parameters '
'second_precision=%d, day_precision=%d' %
(typ.second_precision, typ.day_precision))
return datashape.TimeDelta(unit=units)
if typ in revtypes:
return dshape(revtypes[typ])[0]
if type(typ) in revtypes:
return dshape(revtypes[type(typ)])[0]
if isinstance(typ, (sa.String, sa.Unicode)):
return datashape.String(typ.length, typ.collation)
else:
for k, v in revtypes.items():
if isinstance(k, type) and (isinstance(typ, k) or
hasattr(typ, 'impl') and
isinstance(typ.impl, k)):
return v
if k == typ:
return v
raise NotImplementedError("No SQL-datashape match for type %s" % typ)
@discover.register(sa.Column)
def discover_sqlalchemy_column(col):
optionify = Option if col.nullable else identity
return Record([[col.name, optionify(discover(col.type))]])
@discover.register(sa.sql.FromClause)
def discover_sqlalchemy_selectable(t):
records = list(sum([discover(c).parameters[0] for c in t.columns], ()))
return var * Record(records)
@memoize
def metadata_of_engine(engine, schema=None):
return sa.MetaData(engine, schema=schema)
def create_engine(uri, *args, **kwargs):
if ':memory:' in uri:
return sa.create_engine(uri, *args, **kwargs)
else:
return memoized_create_engine(uri, *args, **kwargs)
memoized_create_engine = memoize(sa.create_engine)
@dispatch(sa.engine.base.Engine, str)
def discover(engine, tablename):
metadata = metadata_of_engine(engine)
if tablename not in metadata.tables:
try:
metadata.reflect(engine,
views=metadata.bind.dialect.supports_views)
except NotImplementedError:
metadata.reflect(engine)
table = metadata.tables[tablename]
return discover(table)
@dispatch(sa.engine.base.Engine)
def discover(engine):
metadata = metadata_of_engine(engine)
return discover(metadata)
@dispatch(sa.MetaData)
def discover(metadata):
try:
metadata.reflect(views=metadata.bind.dialect.supports_views)
except NotImplementedError:
metadata.reflect()
pairs = []
for table in sorted(metadata.tables.values(), key=attrgetter('name')):
name = table.name
try:
pairs.append([name, discover(table)])
except sa.exc.CompileError as e:
print("Can not discover type of table %s.\n" % name +
"SQLAlchemy provided this error message:\n\t%s" % e.message +
"\nSkipping.")
except NotImplementedError as e:
print("Blaze does not understand a SQLAlchemy type.\n"
"Blaze provided the following error:\n\t%s" % "\n\t".join(e.args) +
"\nSkipping.")
return DataShape(Record(pairs))
@discover.register(sa.engine.RowProxy)
def discover_row_proxy(rp):
return Record(list(zip(rp.keys(), map(discover, rp.values()))))
def dshape_to_table(name, ds, metadata=None):
"""
Create a SQLAlchemy table from a datashape and a name
>>> dshape_to_table('bank', '{name: string, amount: int}') # doctest: +NORMALIZE_WHITESPACE
Table('bank', MetaData(bind=None),
Column('name', Text(), table=<bank>, nullable=False),
Column('amount', Integer(), table=<bank>, nullable=False),
schema=None)
"""
if isinstance(ds, str):
ds = dshape(ds)
if metadata is None:
metadata = sa.MetaData()
cols = dshape_to_alchemy(ds)
t = sa.Table(name, metadata, *cols, schema=metadata.schema)
return attach_schema(t, t.schema)
@dispatch(object, str)
def create_from_datashape(o, ds, **kwargs):
return create_from_datashape(o, dshape(ds), **kwargs)
@dispatch(sa.engine.base.Engine, DataShape)
def create_from_datashape(engine, ds, schema=None, **kwargs):
assert isrecord(ds), 'datashape must be Record type, got %s' % ds
metadata = metadata_of_engine(engine, schema=schema)
for name, sub_ds in ds[0].dict.items():
t = dshape_to_table(name, sub_ds, metadata=metadata)
t.create()
return engine
def dshape_to_alchemy(dshape):
"""
>>> dshape_to_alchemy('int')
<class 'sqlalchemy.sql.sqltypes.Integer'>
>>> dshape_to_alchemy('string')
<class 'sqlalchemy.sql.sqltypes.Text'>
>>> dshape_to_alchemy('{name: string, amount: int}')
[Column('name', Text(), table=None, nullable=False), Column('amount', Integer(), table=None, nullable=False)]
>>> dshape_to_alchemy('{name: ?string, amount: ?int}')
[Column('name', Text(), table=None), Column('amount', Integer(), table=None)]
"""
if isinstance(dshape, str):
dshape = datashape.dshape(dshape)
if isinstance(dshape, Option):
return dshape_to_alchemy(dshape.ty)
if str(dshape) in types:
return types[str(dshape)]
if isinstance(dshape, datashape.Record):
return [sa.Column(name,
dshape_to_alchemy(typ),
nullable=isinstance(typ[0], Option))
for name, typ in dshape.parameters[0]]
if isinstance(dshape, datashape.DataShape):
if isdimension(dshape[0]):
return dshape_to_alchemy(dshape[1])
else:
return dshape_to_alchemy(dshape[0])
if isinstance(dshape, datashape.String):
fixlen = dshape[0].fixlen
if fixlen is None:
return sa.types.Text
string_types = dict(U=sa.types.Unicode, A=sa.types.String)
assert dshape.encoding is not None
return string_types[dshape.encoding[0]](length=fixlen)
if isinstance(dshape, datashape.DateTime):
if dshape.tz:
return sa.types.DateTime(timezone=True)
else:
return sa.types.DateTime(timezone=False)
raise NotImplementedError("No SQLAlchemy dtype match for datashape: %s"
% dshape)
@convert.register(Iterator, sa.Table, cost=300.0)
def sql_to_iterator(t, **kwargs):
_, rows = batch(sa.select([t]))
return map(tuple, rows)
@convert.register(Iterator, sa.sql.Select, cost=300.0)
def select_to_iterator(sel, dshape=None, **kwargs):
func = pluck(0) if dshape and isscalar(dshape.measure) else map(tuple)
_, rows = batch(sel)
return func(rows)
@convert.register(base, sa.sql.Select, cost=300.0)
def select_to_base(sel, dshape=None, **kwargs):
assert not dshape or isscalar(dshape), \
'dshape should be None or scalar, got %s' % dshape
with sel.bind.connect() as conn:
return conn.execute(sel).scalar()
@append.register(sa.Table, Iterator)
def append_iterator_to_table(t, rows, dshape=None, **kwargs):
assert not isinstance(t, type)
rows = iter(rows)
# We see if the sequence is of tuples or dicts
# If tuples then we coerce them to dicts
try:
row = next(rows)
except StopIteration:
return
rows = chain([row], rows)
if isinstance(row, (tuple, list)):
if dshape and isinstance(dshape.measure, datashape.Record):
names = dshape.measure.names
if set(names) != set(discover(t).measure.names):
raise ValueError("Column names of incoming data don't match "
"column names of existing SQL table\n"
"Names in SQL table: %s\n"
"Names from incoming data: %s\n" %
(discover(t).measure.names, names))
else:
names = discover(t).measure.names
rows = (dict(zip(names, row)) for row in rows)
engine = t.bind
with engine.connect() as conn:
for chunk in partition_all(1000, rows): # TODO: 1000 is hardcoded
conn.execute(t.insert(), chunk)
return t
@append.register(sa.Table, Chunks)
def append_anything_to_sql_Table(t, c, **kwargs):
for item in c:
append(t, item, **kwargs)
return t
@append.register(sa.Table, object)
def append_anything_to_sql_Table(t, o, **kwargs):
return append(t, convert(Iterator, o, **kwargs), **kwargs)
@append.register(sa.Table, sa.Table)
def append_table_to_sql_Table(t, o, **kwargs):
# This condition is an ugly kludge and should be removed once
# https://github.com/dropbox/PyHive/issues/15 is resolved
if t.bind.name == o.bind.name == 'hive':
with t.bind.connect() as conn:
conn.execute('INSERT INTO TABLE %s SELECT * FROM %s' %
(t.name, o.name))
return t
s = sa.select([o])
return append(t, s, **kwargs)
@append.register(sa.Table, sa.sql.Select)
def append_select_statement_to_sql_Table(t, o, **kwargs):
if not o.bind == t.bind:
return append(t, convert(Iterator, o, **kwargs), **kwargs)
assert o.bind.has_table(t.name, t.schema), \
'tables must come from the same database'
query = t.insert().from_select(o.columns.keys(), o)
with o.bind.connect() as conn:
conn.execute(query)
return t
def should_create_schema(ddl, target, bind, tables=None, state=None,
checkfirst=None, **kwargs):
return ddl.element not in inspect(target.bind).get_schema_names()
def attach_schema(obj, schema):
if schema is not None:
ddl = CreateSchema(schema, quote=True)
event.listen(obj,
'before_create',
ddl.execute_if(callable_=should_create_schema,
dialect='postgresql'))
return obj
def fullname(table, compiler):
preparer = compiler.dialect.identifier_preparer
fullname = preparer.quote_identifier(table.name)
schema = table.schema
if schema is not None:
fullname = '%s.%s' % (preparer.quote_schema(schema), fullname)
return fullname
@resource.register(r'(.*sql.*|oracle|redshift)(\+\w+)?://.+')
def resource_sql(uri, *args, **kwargs):
kwargs2 = keyfilter(keywords(sa.create_engine).__contains__, kwargs)
engine = create_engine(uri, **kwargs2)
ds = kwargs.get('dshape')
schema = kwargs.get('schema')
# we were also given a table name
if args and isinstance(args[0], (str, unicode)):
table_name, args = args[0], args[1:]
metadata = metadata_of_engine(engine, schema=schema)
with ignoring(sa.exc.NoSuchTableError):
return attach_schema(sa.Table(table_name, metadata, autoload=True,
autoload_with=engine, schema=schema),
schema)
if ds:
t = dshape_to_table(table_name, ds, metadata=metadata)
t.create()
return t
else:
raise ValueError("Table does not exist and no dshape provided")
# We were not given a table name
if ds:
create_from_datashape(engine, ds, schema=schema)
return engine
@resource.register('impala://.+')
def resource_impala(uri, *args, **kwargs):
try:
import impala.sqlalchemy
except ImportError:
raise ImportError("Please install or update `impyla` library")
return resource_sql(uri, *args, **kwargs)
@resource.register('monetdb://.+')
def resource_monet(uri, *args, **kwargs):
try:
import monetdb
except ImportError:
raise ImportError("Please install the `sqlalchemy_monetdb` library")
return resource_sql(uri, *args, **kwargs)
@resource.register('hive://.+')
def resource_hive(uri, *args, **kwargs):
try:
import pyhive
except ImportError:
raise ImportError("Please install the `PyHive` library.")
pattern = 'hive://((?P<user>[a-zA-Z_]\w*)@)?(?P<host>[\w.]+)(:(?P<port>\d*))?(/(?P<database>\w*))?'
d = re.search(pattern, uri.split('::')[0]).groupdict()
defaults = {'port': '10000',
'user': 'hdfs',
'database': 'default'}
for k, v in d.items():
if not v:
d[k] = defaults[k]
if d['user']:
d['user'] += '@'
uri2 = 'hive://%(user)s%(host)s:%(port)s/%(database)s' % d
if '::' in uri:
uri2 += '::' + uri.split('::')[1]
return resource_sql(uri2, *args, **kwargs)
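# A sketch of the URI normalization performed above (hostnames are
# illustrative):
#
#   'hive://example.com/'      -> 'hive://hdfs@example.com:10000/default'
#   'hive://alice@example.com:9999/warehouse::events'
#                              -> unchanged (all components already present)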
ooc_types.add(sa.Table)
@dispatch(sa.Table)
def drop(table):
table.drop(table.bind, checkfirst=True)
@convert.register(pd.DataFrame, (sa.sql.Select, sa.sql.Selectable), cost=200.0)
def select_or_selectable_to_frame(el, **kwargs):
columns, rows = batch(el)
row = next(rows, None)
if row is None:
return pd.DataFrame(columns=columns)
return pd.DataFrame(list(chain([tuple(row)], map(tuple, rows))),
columns=columns)
class CopyToCSV(sa.sql.expression.Executable, sa.sql.ClauseElement):
def __init__(self, element, path, delimiter=',', quotechar='"',
lineterminator=r'\n', escapechar='\\', header=True,
na_value=''):
self.element = element
self.path = path
self.delimiter = delimiter
self.quotechar = quotechar
self.lineterminator = lineterminator
# MySQL's INTO OUTFILE cannot write a header row
self.header = header and element.bind.dialect.name != 'mysql'
self.escapechar = escapechar
self.na_value = na_value
@compiles(CopyToCSV, 'postgresql')
def compile_copy_to_csv_postgres(element, compiler, **kwargs):
selectable = element.element
istable = isinstance(selectable, sa.Table)
template = """COPY %s TO '{path}'
WITH CSV {header}
DELIMITER '{delimiter}'
QUOTE '{quotechar}'
NULL '{na_value}'
ESCAPE '{escapechar}'
""" % ('{query}' if istable else '({query})')
processed = (fullname(selectable, compiler)
if istable else compiler.process(selectable))
assert processed, ('got empty string from processing element of type %r' %
type(selectable).__name__)
return template.format(query=processed,
path=element.path,
header='HEADER' if element.header else '',
delimiter=element.delimiter,
quotechar=element.quotechar,
na_value=element.na_value,
escapechar=element.escapechar)
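# A hedged sketch of the statement the postgresql compiler above emits for a
# plain table named "accounts" with CopyToCSV defaults (path is illustrative):
#
#   COPY accounts TO '/tmp/accounts.csv'
#       WITH CSV HEADER
#       DELIMITER ','
#       QUOTE '"'
#       NULL ''
#       ESCAPE '\'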
@compiles(CopyToCSV, 'mysql')
def compile_copy_to_csv_mysql(element, compiler, **kwargs):
selectable = element.element
if isinstance(selectable, sa.Table):
processed = 'SELECT * FROM %(table)s' % dict(table=selectable.name)
else:
processed = compiler.process(selectable)
assert processed, ('got empty string from processing element of type %r' %
type(selectable).__name__)
template = """{query} INTO OUTFILE '{path}'
FIELDS TERMINATED BY '{delimiter}'
OPTIONALLY ENCLOSED BY '{quotechar}'
ESCAPED BY '{escapechar}'
LINES TERMINATED BY '{lineterminator}'"""
return template.format(query=processed,
path=element.path,
delimiter=element.delimiter,
lineterminator=element.lineterminator,
escapechar=element.escapechar.encode('unicode-escape').decode(),
quotechar=element.quotechar)
@compiles(CopyToCSV, 'sqlite')
def compile_copy_to_csv_sqlite(element, compiler, **kwargs):
if not find_executable('sqlite3'):
raise MDNotImplementedError("Could not find sqlite executable")
selectable = element.element
sql = (compiler.process(sa.select([selectable])
if isinstance(selectable, sa.Table)
else selectable) + ';')
sql = re.sub(r'\s{2,}', ' ', re.sub(r'\s*\n\s*', ' ', sql)).encode()
cmd = ['sqlite3', '-csv',
'-%sheader' % ('no' if not element.header else ''),
'-separator', element.delimiter,
selectable.bind.url.database]
with open(element.path, mode='at') as f:
subprocess.Popen(cmd, stdout=f, stdin=subprocess.PIPE).communicate(sql)
# This will be a no-op since we're doing the write during the compile
return ''
@append.register(CSV, sa.sql.Selectable)
def append_table_to_csv(csv, selectable, dshape=None, **kwargs):
kwargs = keyfilter(keywords(CopyToCSV).__contains__,
merge(csv.dialect, kwargs))
stmt = CopyToCSV(selectable, os.path.abspath(csv.path), **kwargs)
with selectable.bind.begin() as conn:
conn.execute(stmt)
csv.has_header = stmt.header
return csv
try:
from .hdfs import HDFS
except ImportError:
pass
else:
@append.register(HDFS(CSV), sa.sql.Selectable)
def append_selectable_to_hdfs_csv(*args, **kwargs):
raise MDNotImplementedError()
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import oslo_messaging as messaging
import webob
from webob import exc
from jacket.api.storage import extensions
from jacket.api.storage.openstack import wsgi
from jacket.storage import backup
from jacket.db import storage as db
from jacket.storage import exception
from jacket.storage.i18n import _
from jacket.objects import storage
from jacket import rpc
from jacket.storage import utils
from jacket.storage import volume
LOG = logging.getLogger(__name__)
class AdminController(wsgi.Controller):
"""Abstract base class for AdminControllers."""
collection = None # api collection to extend
# FIXME(clayg): this will be hard to keep up-to-date
# Concrete classes can expand or over-ride
valid_status = set(['creating',
'available',
'deleting',
'error',
'error_deleting', ])
def __init__(self, *args, **kwargs):
super(AdminController, self).__init__(*args, **kwargs)
# singular name of the resource
self.resource_name = self.collection.rstrip('s')
self.volume_api = volume.API()
self.backup_api = backup.API()
def _update(self, *args, **kwargs):
raise NotImplementedError()
def _get(self, *args, **kwargs):
raise NotImplementedError()
def _delete(self, *args, **kwargs):
raise NotImplementedError()
def validate_update(self, body):
update = {}
try:
update['status'] = body['status'].lower()
except (TypeError, KeyError):
raise exc.HTTPBadRequest(explanation=_("Must specify 'status'"))
if update['status'] not in self.valid_status:
raise exc.HTTPBadRequest(
explanation=_("Must specify a valid status"))
return update
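    # A minimal sketch of how validate_update behaves (values illustrative):
    #
    #   validate_update({'status': 'Available'})  ->  {'status': 'available'}
    #   validate_update({})                       ->  raises HTTPBadRequest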
def authorize(self, context, action_name):
# e.g. "snapshot_admin_actions:reset_status"
action = '%s_admin_actions:%s' % (self.resource_name, action_name)
extensions.extension_authorizer('volume', action)(context)
@wsgi.action('os-reset_status')
def _reset_status(self, req, id, body):
"""Reset status on the resource."""
def _clean_volume_attachment(context, id):
attachments = (
db.volume_attachment_get_used_by_volume_id(context, id))
for attachment in attachments:
db.volume_detached(context, id, attachment.id)
db.volume_admin_metadata_delete(context, id,
'attached_mode')
context = req.environ['storage.context']
self.authorize(context, 'reset_status')
update = self.validate_update(body['os-reset_status'])
msg = "Updating %(resource)s '%(id)s' with '%(update)r'"
LOG.debug(msg, {'resource': self.resource_name, 'id': id,
'update': update})
notifier_info = dict(id=id, update=update)
notifier = rpc.get_notifier('volumeStatusUpdate')
notifier.info(context, self.collection + '.reset_status.start',
notifier_info)
try:
self._update(context, id, update)
if update.get('attach_status') == 'detached':
_clean_volume_attachment(context, id)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.msg)
notifier.info(context, self.collection + '.reset_status.end',
notifier_info)
return webob.Response(status_int=202)
@wsgi.action('os-force_delete')
def _force_delete(self, req, id, body):
"""Delete a resource, bypassing the check that it must be available."""
context = req.environ['storage.context']
self.authorize(context, 'force_delete')
try:
resource = self._get(context, id)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.msg)
self._delete(context, resource, force=True)
return webob.Response(status_int=202)
class VolumeAdminController(AdminController):
"""AdminController for Volumes."""
collection = 'volumes'
# FIXME(jdg): We're appending additional valid status
# entries to the set we declare in the parent class
# this doesn't make a ton of sense, we should probably
# look at the structure of this whole process again
# Perhaps we don't even want any definitions in the abstract
# parent class?
valid_status = AdminController.valid_status.union(
('attaching', 'in-use', 'detaching', 'maintenance'))
valid_attach_status = ('detached', 'attached',)
valid_migration_status = ('migrating', 'error',
'success', 'completing',
'none', 'starting',)
def _update(self, *args, **kwargs):
db.volume_update(*args, **kwargs)
def _get(self, *args, **kwargs):
return self.volume_api.get(*args, **kwargs)
def _delete(self, *args, **kwargs):
return self.volume_api.delete(*args, **kwargs)
def validate_update(self, body):
update = {}
status = body.get('status', None)
attach_status = body.get('attach_status', None)
migration_status = body.get('migration_status', None)
valid = False
if status:
valid = True
update = super(VolumeAdminController, self).validate_update(body)
if attach_status:
valid = True
update['attach_status'] = attach_status.lower()
if update['attach_status'] not in self.valid_attach_status:
raise exc.HTTPBadRequest(
explanation=_("Must specify a valid attach status"))
if migration_status:
valid = True
update['migration_status'] = migration_status.lower()
if update['migration_status'] not in self.valid_migration_status:
raise exc.HTTPBadRequest(
explanation=_("Must specify a valid migration status"))
if update['migration_status'] == 'none':
update['migration_status'] = None
if not valid:
raise exc.HTTPBadRequest(
explanation=_("Must specify 'status', 'attach_status' "
"or 'migration_status' for update."))
return update
@wsgi.action('os-force_detach')
def _force_detach(self, req, id, body):
"""Roll back a bad detach after the volume been disconnected."""
context = req.environ['storage.context']
self.authorize(context, 'force_detach')
try:
volume = self._get(context, id)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.msg)
try:
connector = body['os-force_detach'].get('connector', None)
except KeyError:
raise webob.exc.HTTPBadRequest(
explanation=_("Must specify 'connector'."))
try:
self.volume_api.terminate_connection(context, volume, connector)
except exception.VolumeBackendAPIException as error:
msg = _("Unable to terminate volume connection from backend.")
raise webob.exc.HTTPInternalServerError(explanation=msg)
attachment_id = body['os-force_detach'].get('attachment_id', None)
try:
self.volume_api.detach(context, volume, attachment_id)
except messaging.RemoteError as error:
if error.exc_type in ['VolumeAttachmentNotFound',
'InvalidVolume']:
msg = "Error force detaching volume - %(err_type)s: " \
"%(err_msg)s" % {'err_type': error.exc_type,
'err_msg': error.value}
raise webob.exc.HTTPBadRequest(explanation=msg)
else:
# There are also a few cases where the force-detach call could fail
# due to storage or volume driver errors. These errors shouldn't
# be exposed to the user, and in such cases the call should raise a
# 500 error.
raise
return webob.Response(status_int=202)
@wsgi.action('os-migrate_volume')
def _migrate_volume(self, req, id, body):
"""Migrate a volume to the specified host."""
context = req.environ['storage.context']
self.authorize(context, 'migrate_volume')
try:
volume = self._get(context, id)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.msg)
params = body['os-migrate_volume']
try:
host = params['host']
except KeyError:
raise exc.HTTPBadRequest(explanation=_("Must specify 'host'."))
force_host_copy = utils.get_bool_param('force_host_copy', params)
lock_volume = utils.get_bool_param('lock_volume', params)
self.volume_api.migrate_volume(context, volume, host, force_host_copy,
lock_volume)
return webob.Response(status_int=202)
@wsgi.action('os-migrate_volume_completion')
def _migrate_volume_completion(self, req, id, body):
"""Complete an in-progress migration."""
context = req.environ['storage.context']
self.authorize(context, 'migrate_volume_completion')
try:
volume = self._get(context, id)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.msg)
params = body['os-migrate_volume_completion']
try:
new_volume_id = params['new_volume']
except KeyError:
raise exc.HTTPBadRequest(
explanation=_("Must specify 'new_volume'"))
try:
new_volume = self._get(context, new_volume_id)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.msg)
error = params.get('error', False)
ret = self.volume_api.migrate_volume_completion(context, volume,
new_volume, error)
return {'save_volume_id': ret}
class SnapshotAdminController(AdminController):
"""AdminController for Snapshots."""
collection = 'snapshots'
def _update(self, *args, **kwargs):
context = args[0]
snapshot_id = args[1]
fields = args[2]
snapshot = storage.Snapshot.get_by_id(context, snapshot_id)
snapshot.update(fields)
snapshot.save()
def _get(self, *args, **kwargs):
return self.volume_api.get_snapshot(*args, **kwargs)
def _delete(self, *args, **kwargs):
return self.volume_api.delete_snapshot(*args, **kwargs)
class BackupAdminController(AdminController):
"""AdminController for Backups."""
collection = 'backups'
valid_status = set(['available',
'error'
])
def _get(self, *args, **kwargs):
return self.backup_api.get(*args, **kwargs)
def _delete(self, *args, **kwargs):
return self.backup_api.delete(*args, **kwargs)
@wsgi.action('os-reset_status')
def _reset_status(self, req, id, body):
"""Reset status on the resource."""
context = req.environ['storage.context']
self.authorize(context, 'reset_status')
update = self.validate_update(body['os-reset_status'])
msg = "Updating %(resource)s '%(id)s' with '%(update)r'"
LOG.debug(msg, {'resource': self.resource_name, 'id': id,
'update': update})
notifier_info = {'id': id, 'update': update}
notifier = rpc.get_notifier('backupStatusUpdate')
notifier.info(context, self.collection + '.reset_status.start',
notifier_info)
try:
self.backup_api.reset_status(context=context, backup_id=id,
status=update['status'])
except exception.BackupNotFound as e:
raise exc.HTTPNotFound(explanation=e.msg)
return webob.Response(status_int=202)
class Admin_actions(extensions.ExtensionDescriptor):
"""Enable admin actions."""
name = "AdminActions"
alias = "os-admin-actions"
namespace = "http://docs.openstack.org/volume/ext/admin-actions/api/v1.1"
updated = "2012-08-25T00:00:00+00:00"
def get_controller_extensions(self):
exts = []
for class_ in (VolumeAdminController, SnapshotAdminController,
BackupAdminController):
controller = class_()
extension = extensions.ControllerExtension(
self, class_.collection, controller)
exts.append(extension)
return exts
|
|
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# ChemPy - A chemistry toolkit for Python
#
# Copyright (c) 2012 by Joshua W. Allen (jwallen@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains functionality for reading from and writing to the
adjacency list format used by Reaction Mechanism Generator (RMG).
"""
import logging
import re
from .molecule import Atom, Bond, getAtomType
from .group import GroupAtom, GroupBond
from .element import getElement, PeriodicSystem
from rmgpy.exceptions import InvalidAdjacencyListError
class Saturator(object):
@staticmethod
def saturate(atoms):
'''
Extends the given list of atoms in place (also updating bond attributes)
by saturating the valency of the non-hydrogen atoms with an appropriate
number of hydrogen atoms.
The required number of hydrogen atoms per heavy atom is determined as follows:
H's = max number of valence electrons - atom.radicalElectrons
- 2* atom.lonePairs - order - atom.charge
'''
newAtoms = []
for atom in atoms:
try:
max_number_of_valence_electrons = PeriodicSystem.valence_electrons[atom.symbol]
except KeyError:
raise InvalidAdjacencyListError('Cannot add hydrogens to adjacency list: Unknown orbital for atom "{0}".'.format(atom.symbol))
order = atom.getBondOrdersForAtom()
number_of_H_to_be_added = max_number_of_valence_electrons - atom.radicalElectrons - 2* atom.lonePairs - int(order) - atom.charge
if number_of_H_to_be_added < 0:
raise InvalidAdjacencyListError('Incorrect electron configuration on atom.')
for _ in range(number_of_H_to_be_added):
a = Atom(element='H', radicalElectrons=0, charge=0, label='', lonePairs=0)
b = Bond(atom, a, 'S')
newAtoms.append(a)
atom.bonds[a] = b
a.bonds[atom] = b
atoms.extend(newAtoms)
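# A worked example of the hydrogen count computed above (derived from the
# formula, not from a test case): a neutral carbon atom with one radical
# electron, no lone pairs and no existing bonds gets
#   4 - 1 - 2*0 - 0 - 0 = 3
# hydrogens added, i.e. it is saturated to a methyl radical.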
class ConsistencyChecker(object):
@staticmethod
def check_partial_charge(atom):
'''
Checks whether the partial charge attribute of the atom agrees with
the theoretically expected value.
'''
valence = PeriodicSystem.valence_electrons[atom.symbol]
order = atom.getBondOrdersForAtom()
theoretical = valence - order - atom.radicalElectrons - 2*atom.lonePairs
if atom.charge != theoretical:
raise InvalidAdjacencyListError(
('Invalid valency for atom {symbol} ({type}) with {radicals} unpaired electrons, '
'{lonePairs} pairs of electrons, {charge} charge, and bonds [{bonds}].'
).format(symbol=atom.symbol,
type=getAtomType(atom, atom.edges).label,
radicals=atom.radicalElectrons,
lonePairs=atom.lonePairs,
charge=atom.charge,
bonds=','.join([str(bond.order) for bond in atom.bonds.values()])
))
@staticmethod
def check_multiplicity(nRad, multiplicity):
'''
Check that the multiplicity complies with the formula: m = 2s + 1,
where s is the sum of the spin [+/- (1/2) ] of the unpaired electrons
For a simple radical (nRad = 1):
s = +1/2 , m = 2 (doublet)
For a biradical, s can be either 0 [+0.5 + (-0.5) ] or 1 [+0.5 + (+0.5) ]
and m = 1 (singlet) or m = 3 (triplet).
'''
if nRad in [0,1]:
if multiplicity != (nRad + 1):
raise InvalidAdjacencyListError('Multiplicity {0} not in agreement with total number of radicals {1}.'.format(multiplicity, nRad))
elif nRad == 2:
if not int(multiplicity) in [1,3]: raise InvalidAdjacencyListError('Multiplicity {0} not in agreement with total number of radicals {1}.'.format(multiplicity, nRad))
elif nRad == 3:
if not int(multiplicity) in [4,2]: raise InvalidAdjacencyListError('Multiplicity {0} not in agreement with total number of radicals {1}.'.format(multiplicity, nRad))
elif nRad == 4:
if not int(multiplicity) in [5,3,1]: raise InvalidAdjacencyListError('Multiplicity {0} not in agreement with total number of radicals {1}.'.format(multiplicity, nRad))
else: logging.warning("Consistency checking of multiplicity of molecules with more than 4 unpaired electrons is not implemented yet!")
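# Worked examples of the m = 2s + 1 rule as encoded above:
#   nRad = 0 -> multiplicity must be 1 (singlet)
#   nRad = 1 -> multiplicity must be 2 (doublet)
#   nRad = 2 -> multiplicity must be 1 or 3
#   nRad = 3 -> multiplicity must be 2 or 4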
@staticmethod
def check_hund_rule(atom, multiplicity):
'''
Checks that an atom carrying 2 unpaired electrons results in a
multiplicity of 3, and not 1.
Unpaired electrons in 2 different orbitals belonging to the same atom
should have the same spin, and hence, should result in a multiplicity of 3.
'''
if atom.radicalElectrons == 2 and multiplicity == 1:
raise InvalidAdjacencyListError("Violation of hund's rule. Invalid multiplicity of {0} because there is an atom with {1} unpaired electrons"
.format(multiplicity, atom.radicalElectrons))
################################################################################
def fromOldAdjacencyList(adjlist, group=False, saturateH=False):
"""
Convert a pre-June-2014 string adjacency list `adjlist` into a set of :class:`Atom` and
:class:`Bond` objects.
It can read both the "old style" that existed for years, and the "intermediate style" that
existed for a few months in 2014, with the extra column of integers for lone pairs.
"""
atoms = []
atomdict = {}
bonds = {}
try:
adjlist = adjlist.strip()
lines = adjlist.splitlines()
if adjlist == '' or len(lines) == 0:
raise InvalidAdjacencyListError('Empty adjacency list.')
# Skip the first line if it contains a label
if len(lines[0].split()) == 1:
label = lines.pop(0)
if len(lines) == 0:
raise InvalidAdjacencyListError("""Error in adjacency list\n{0}\nNo atoms specified.""".format(adjlist))
mistake1 = re.compile('\{[^}]*\s+[^}]*\}')
atomicMultiplicities = {} # these are no longer stored on atoms, so we make a separate dictionary
# Iterate over the remaining lines, generating Atom or GroupAtom objects
for line in lines:
# Sometimes people put spaces after commas, which messes up the
# parse-by-whitespace. Examples include '{Cd, Ct}'.
if mistake1.search(line):
raise InvalidAdjacencyListError(
"Error in adjacency list: \n{1}\nspecies shouldn't have spaces inside braces: {0}".format(mistake1.search(line).group(), adjlist)
)
# Sometimes commas are used to delimit bonds in the bond list,
# so replace them just in case
line = line.replace('},{', '} {')
data = line.split()
# Skip if blank line
if len(data) == 0: continue
# First item is index for atom
# Sometimes these have a trailing period (as if in a numbered list),
# so remove it just in case
aid = int(data[0].strip('.'))
# If second item starts with '*', then atom is labeled
label = ''; index = 1
if data[1][0] == '*':
label = data[1]
index += 1
# Next is the element or functional group element
# A list can be specified with the {,} syntax
atomType = data[index]
if atomType[0] == '{':
atomType = atomType[1:-1].split(',')
else:
atomType = [atomType]
index += 1
# Next is the electron state
radicalElectrons = [];
additionalLonePairs = []
elecState = data[index].upper()
if elecState[0] == '{':
elecState = elecState[1:-1].split(',')
else:
elecState = [elecState]
if len(elecState) == 0:
raise InvalidAdjacencyListError("Error in adjacency list:\n{0}\nThere must be some electronic state defined for an old adjlist".format(adjlist))
for e in elecState:
if e == '0':
radicalElectrons.append(0); additionalLonePairs.append(0)
elif e == '1':
radicalElectrons.append(1); additionalLonePairs.append(0)
elif e == '2':
if not group:
raise InvalidAdjacencyListError("Error in adjacency list:\n{0}\nNumber of radical electrons = 2 is not specific enough. Please use 2S or 2T.".format(adjlist))
# includes 2S and 2T
radicalElectrons.append(0); additionalLonePairs.append(1)
radicalElectrons.append(2); additionalLonePairs.append(0)
elif e == '2S':
radicalElectrons.append(0); additionalLonePairs.append(1)
elif e == '2T':
radicalElectrons.append(2); additionalLonePairs.append(0)
elif e == '3':
if not group:
raise InvalidAdjacencyListError("Error in adjacency list:\n{0}\nNumber of radical electrons = 3 is not specific enough. Please use 3D or 3Q.".format(adjlist))
# includes 3D and 3Q
radicalElectrons.append(1); additionalLonePairs.append(1)
radicalElectrons.append(3); additionalLonePairs.append(0)
elif e == '3D':
radicalElectrons.append(1); additionalLonePairs.append(1)
elif e == '3Q':
radicalElectrons.append(3); additionalLonePairs.append(0)
elif e == '4':
if not group:
raise InvalidAdjacencyListError("Error in adjacency list:\n{0}\nNumber of radical electrons = 4 is not specific enough. Please use 4S, 4T, or 4V.".format(adjlist))
# includes 4S, 4T, and 4V
radicalElectrons.append(0); additionalLonePairs.append(2)
radicalElectrons.append(2); additionalLonePairs.append(1)
radicalElectrons.append(4); additionalLonePairs.append(0)
elif e == '4S':
radicalElectrons.append(0); additionalLonePairs.append(2)
elif e == '4T':
radicalElectrons.append(2); additionalLonePairs.append(1)
elif e == '4V':
radicalElectrons.append(4); additionalLonePairs.append(0)
elif e == 'X':
if not group:
raise InvalidAdjacencyListError("Error in adjacency list:\n{0}\nNumber of radical electrons = X is not specific enough. Wildcards should only be used for groups.".format(adjlist))
radicalElectrons = []
index += 1
# Next number defines the number of lone electron pairs (if provided)
lonePairsOfElectrons = None
if len(data) > index:
lpState = data[index]
if lpState[0] == '{':
# this is the start of the chemical bonds - no lone pair info was provided
lonePairsOfElectrons = None
else:
if lpState == '0':
lonePairsOfElectrons = 0
if lpState == '1':
lonePairsOfElectrons = 1
if lpState == '2':
lonePairsOfElectrons = 2
if lpState == '3':
lonePairsOfElectrons = 3
if lpState == '4':
lonePairsOfElectrons = 4
index += 1
else: # no bonds or lone pair info provided.
lonePairsOfElectrons = None
# Create a new atom based on the above information
if group:
if lonePairsOfElectrons is not None:
lonePairsOfElectrons = [additional + lonePairsOfElectrons for additional in additionalLonePairs]
atom = GroupAtom(atomType=atomType,
radicalElectrons=sorted(set(radicalElectrons)),
charge=None,
label=label,
lonePairs=lonePairsOfElectrons, # Assign lonePairsOfElectrons as None if it is not explicitly provided
)
else:
if lonePairsOfElectrons is not None:
# Intermediate adjlist representation
lonePairsOfElectrons = lonePairsOfElectrons + additionalLonePairs[0]
else:
# Add the standard number of lone pairs with the additional lone pairs
lonePairsOfElectrons = PeriodicSystem.lone_pairs[atomType[0]] + additionalLonePairs[0]
atom = Atom(element=atomType[0],
radicalElectrons=radicalElectrons[0],
charge=0,
label=label,
lonePairs=lonePairsOfElectrons,
)
# Add the atom to the list
atoms.append(atom)
atomdict[aid] = atom
# Process list of bonds
bonds[aid] = {}
for datum in data[index:]:
# Sometimes commas are used to delimit bonds in the bond list,
# so strip them just in case
datum = datum.strip(',')
aid2, comma, order = datum[1:-1].partition(',')
aid2 = int(aid2)
if aid == aid2:
raise InvalidAdjacencyListError('Error in adjacency list:\n{1}\nAttempted to create a bond between atom {0:d} and itself.'.format(aid,adjlist))
if order[0] == '{':
order = order[1:-1].split(',')
else:
order = [order]
bonds[aid][aid2] = order
# Check consistency using bonddict
for atom1 in bonds:
for atom2 in bonds[atom1]:
if atom2 not in bonds:
raise InvalidAdjacencyListError('Error in adjacency list:\n{1}\nAtom {0:d} not in bond dictionary.'.format(atom2,adjlist))
elif atom1 not in bonds[atom2]:
raise InvalidAdjacencyListError('Error in adjacency list:\n{2}\nFound bond between {0:d} and {1:d}, but not the reverse'.format(atom1, atom2, adjlist))
elif bonds[atom1][atom2] != bonds[atom2][atom1]:
raise InvalidAdjacencyListError('Error in adjacency list: \n{4}\nFound bonds between {0:d} and {1:d}, but of different orders "{2}" and "{3}".'.format(atom1, atom2, bonds[atom1][atom2], bonds[atom2][atom1], adjlist))
# Convert bonddict to use Atom[group] and Bond[group] objects
        atomkeys = sorted(atomdict.keys())
        for aid1 in atomkeys:
            atomkeys2 = sorted(bonds[aid1].keys())
for aid2 in atomkeys2:
if aid1 < aid2:
atom1 = atomdict[aid1]
atom2 = atomdict[aid2]
order = bonds[aid1][aid2]
if group:
bond = GroupBond(atom1, atom2, order)
elif len(order) == 1:
bond = Bond(atom1, atom2, order[0])
else:
raise InvalidAdjacencyListError('Error in adjacency list:\n{0}\nMultiple bond orders specified for an atom.'.format(adjlist))
atom1.edges[atom2] = bond
atom2.edges[atom1] = bond
if not group:
if saturateH:
# Add explicit hydrogen atoms to complete structure if desired
newAtoms = []
for atom in atoms:
try:
valence = PeriodicSystem.valences[atom.symbol]
except KeyError:
raise InvalidAdjacencyListError('Error in adjacency list:\n{1}\nCannot add hydrogens: Unknown valence for atom "{0}".'.format(atom.symbol, adjlist))
radical = atom.radicalElectrons
order = atom.getBondOrdersForAtom()
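                    # Number of hydrogens to add = standard valence - radical electrons
                    # - total existing bond order - 2 electrons for each lone pair beyond the element's default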
count = valence - radical - int(order) - 2*(atom.lonePairs-PeriodicSystem.lone_pairs[atom.symbol])
for i in range(count):
a = Atom(element='H', radicalElectrons=0, charge=0, label='', lonePairs=0)
b = Bond(atom, a, 'S')
newAtoms.append(a)
atom.bonds[a] = b
a.bonds[atom] = b
atoms.extend(newAtoms)
# Calculate the multiplicity for the molecule and update the charges on each atom
nRad = 0 # total number of radical electrons
for atom in atoms:
atom.updateCharge()
nRad += atom.radicalElectrons
multiplicity = nRad + 1 # 2 s + 1, where s is the combined spin of unpaired electrons (s = 1/2 per unpaired electron)
else:
# Don't set a multiplicity for groups when converting from an old adjlist
multiplicity = None
except InvalidAdjacencyListError:
logging.error("Troublesome adjacency list:\n" + adjlist)
raise
return atoms, multiplicity
###############################
re_IntermediateAdjList = re.compile(r'^\s*(\d*)\s+' +  # atom number digit
                                    r'(?P<label>\*\d*\s+)?' +  # optional label eg * or *2
                                    r'(?P<atomtype>\{?[A-Z]\S*)\s+' +  # atomtype eg R!H or {Cb,Cd}
                                    r'(?P<radicals>X|\d[STDQV]?|\{?\d[^}]*\})\s+' +  # radicals eg. X or 2T or {1,2,2T}
                                    r'(?P<lonepairs>\d)' +  # lone pairs eg. 0
                                    r'(?P<bonds>(\s+\{\d+\,(?:[SDTB]|\{.+?\})\},?)*)' +  # bonds, eg {2,S} {4,{S,D}}
                                    r'\s*$')  # the end!
re_OldAdjList = re.compile(r'^\s*(\d*)\s+' +  # atom number digit
                           r'(?P<label>\*\d*\s+)?' +  # optional label eg * or *2
                           r'(?P<atomtype>\{?[A-Z]\S*)\s+' +  # atomtype eg R!H or {Cb,Cd}
                           r'(?P<radicals>X|\d[STDQV]?|\{?\d[^}]*\})' +  # radicals eg. X or 2T or {1,2,2T}
                           r'(?P<bonds>(\s+\{\d+\,(?:[SDTB]|\{.+?\})\},?)*)' +  # bonds, eg {2,S} {4,{S,D}}
                           r'\s*$')  # the end!
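# Illustrative (assumed) examples of lines these patterns are intended to match:
#   intermediate style: "1 *1 C 1 0 {2,S}"   (radical count followed by a lone-pair column)
#   old style:          "1 *1 C 1 {2,S}"     (no lone-pair column)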
def fromAdjacencyList(adjlist, group=False, saturateH=False):
"""
Convert a string adjacency list `adjlist` into a set of :class:`Atom` and
:class:`Bond` objects.
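
    For example, a methyl radical could be written as (an illustrative input,
    consistent with the parsing below)::

        multiplicity 2
        1 C u1 p0 c0 {2,S} {3,S} {4,S}
        2 H u0 p0 c0 {1,S}
        3 H u0 p0 c0 {1,S}
        4 H u0 p0 c0 {1,S}

    Returns a tuple ``(atoms, multiplicity)``.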
"""
atoms = []
atomdict = {}
bonds = {}
multiplicity = None
adjlist = adjlist.strip()
lines = adjlist.splitlines()
if adjlist == '' or len(lines) == 0:
raise InvalidAdjacencyListError('Empty adjacency list.')
# Detect old-style adjacency lists by looking at the last line's syntax
lastLine = lines[-1].strip()
while not lastLine: # Remove any empty lines from the end
lines.pop()
lastLine = lines[-1].strip()
if re_IntermediateAdjList.match(lastLine):
logging.debug("adjacency list:\n{1}\nline '{0}' looks like an intermediate style adjacency list".format(lastLine, adjlist))
return fromOldAdjacencyList(adjlist, group=group, saturateH=saturateH)
if re_OldAdjList.match(lastLine):
logging.debug("Adjacency list:\n{1}\nline '{0}' looks like an old style adjacency list".format(lastLine, adjlist))
if not group:
logging.debug("Will assume implicit H atoms")
return fromOldAdjacencyList(adjlist, group=group, saturateH=(not group))
# Interpret the first line if it contains a label
if len(lines[0].split()) == 1:
label = lines.pop(0)
if len(lines) == 0:
raise InvalidAdjacencyListError('No atoms specified in adjacency list.')
# Interpret the second line if it contains a multiplicity
if lines[0].split()[0] == 'multiplicity':
line = lines.pop(0)
if group:
            match = re.match(r'\s*multiplicity\s+\[\s*(\d(?:,\s*\d)*)\s*\]\s*$', line)
if not match:
                rematch = re.match(r'\s*multiplicity\s+x\s*$', line)
assert rematch, "Invalid multiplicity line '{0}'. Should be a list like 'multiplicity [1,2,3]' or a wildcard 'multiplicity x'".format(line)
else:
# should match "multiplicity [1]" or " multiplicity [ 1, 2, 3 ]" or " multiplicity [1,2,3]"
# and whatever's inside the [] (excluding leading and trailing spaces) should be captured as group 1.
# If a wildcard is desired, this line can be omitted or replaced with 'multiplicity x'
# Multiplicities must be only one digit (i.e. less than 10)
                # The (?:,\s*\d)* matches patterns like ", 2" 0 or more times, but doesn't capture them (because of the leading ?:)
multiplicities = match.group(1).split(',')
multiplicity = [int(i) for i in multiplicities]
else:
            match = re.match(r'\s*multiplicity\s+\d+\s*$', line)
assert match, "Invalid multiplicity line '{0}'. Should be an integer like 'multiplicity 2'".format(line)
multiplicity = int(line.split()[1])
if len(lines) == 0:
raise InvalidAdjacencyListError('No atoms specified in adjacency list: \n{0}'.format(adjlist))
    mistake1 = re.compile(r'\{[^}]*\s+[^}]*\}')
# Iterate over the remaining lines, generating Atom or GroupAtom objects
for line in lines:
# Sometimes people put spaces after commas, which messes up the
# parse-by-whitespace. Examples include '[Cd, Ct]'.
if mistake1.search(line):
raise InvalidAdjacencyListError(
"{1} Shouldn't have spaces inside braces:\n{0}".format(mistake1.search(line).group(), adjlist)
)
# Sometimes commas are used to delimit bonds in the bond list,
# so replace them just in case
line = line.replace('},{', '} {')
data = line.split()
# Skip if blank line
if len(data) == 0: continue
# First item is index for atom
# Sometimes these have a trailing period (as if in a numbered list),
# so remove it just in case
aid = int(data[0].strip('.'))
# If second item starts with '*', then atom is labeled
label = ''; index = 1
if data[1][0] == '*':
label = data[1]
index += 1
# Next is the element or functional group element
# A list can be specified with the {,} syntax
atomType = data[index]
if atomType[0] == '[':
if not group:
raise InvalidAdjacencyListError("Error on:\n{0}\nA molecule should not assign more than one atomtype per atom.".format(adjlist))
atomType = atomType[1:-1].split(',')
else:
atomType = [atomType]
index += 1
# Next the number of unpaired electrons
unpairedElectrons = []
uState = data[index]
if uState[0] == 'u':
if uState[1] == '[':
uState = uState[2:-1].split(',')
else:
uState = [uState[1]]
for u in uState:
if u == '0':
unpairedElectrons.append(0)
elif u == '1':
unpairedElectrons.append(1)
elif u == '2':
unpairedElectrons.append(2)
elif u == '3':
unpairedElectrons.append(3)
elif u == '4':
unpairedElectrons.append(4)
elif u == 'x':
if not group:
raise InvalidAdjacencyListError("Error on:\n{0}\nA molecule should not assign a wildcard to number of unpaired electrons.".format(adjlist))
else:
raise InvalidAdjacencyListError('Number of unpaired electrons not recognized on\n{0}.'.format(adjlist))
index += 1
else:
raise InvalidAdjacencyListError('Number of unpaired electrons not defined on\n{0}.'.format(adjlist))
# Next the number of lone electron pairs (if provided)
lonePairs = []
if len(data) > index:
lpState = data[index]
if lpState[0] == 'p':
if lpState[1] == '[':
lpState = lpState[2:-1].split(',')
else:
lpState = [lpState[1]]
for l in lpState:
if l == '0':
lonePairs.append(0)
elif l == '1':
lonePairs.append(1)
elif l == '2':
lonePairs.append(2)
elif l == '3':
lonePairs.append(3)
elif l == '4':
lonePairs.append(4)
elif l == 'x':
if not group:
raise InvalidAdjacencyListError("Error in adjacency list:\n{0}\nA molecule should not have a wildcard assigned to number of lone pairs.".format(adjlist))
else:
raise InvalidAdjacencyListError('Error in adjacency list:\n{0}\nNumber of lone electron pairs not recognized.'.format(adjlist))
index += 1
else:
if not group:
lonePairs.append(0)
else:
if not group:
lonePairs.append(0)
# Next the number of partial charges (if provided)
partialCharges = []
if len(data) > index:
eState = data[index]
if eState[0] == 'c':
if eState[1] == '[':
eState = eState[2:-1].split(',')
else:
eState = [eState[1:]]
for e in eState:
if e == '0':
partialCharges.append(0)
elif e == '+1':
partialCharges.append(1)
elif e == '+2':
partialCharges.append(2)
elif e == '+3':
partialCharges.append(3)
elif e == '+4':
partialCharges.append(4)
elif e == '-1':
partialCharges.append(-1)
elif e == '-2':
partialCharges.append(-2)
elif e == '-3':
partialCharges.append(-3)
elif e == '-4':
partialCharges.append(-4)
elif e == 'x':
if not group:
raise InvalidAdjacencyListError("Error on adjacency list:\n{0}\nA molecule should not have a wildcard assigned to number of charges.".format(adjlist))
else:
raise InvalidAdjacencyListError('Error on adjacency list:\n{0}\nNumber of partial charges not recognized.'.format(adjlist))
index += 1
else:
if not group:
partialCharges.append(0)
else:
if not group:
partialCharges.append(0)
# Next the isotope (if provided)
isotope = -1
if len(data) > index:
iState = data[index]
if iState[0] == 'i':
isotope = int(iState[1:])
index += 1
# Create a new atom based on the above information
if group:
atom = GroupAtom(atomType, unpairedElectrons, partialCharges, label, lonePairs)
else:
atom = Atom(atomType[0], unpairedElectrons[0], partialCharges[0], label, lonePairs[0])
if isotope != -1:
atom.element = getElement(atom.number, isotope)
# Add the atom to the list
atoms.append(atom)
atomdict[aid] = atom
# Process list of bonds
bonds[aid] = {}
for datum in data[index:]:
# Sometimes commas are used to delimit bonds in the bond list,
# so strip them just in case
datum = datum.strip(',')
aid2, comma, order = datum[1:-1].partition(',')
aid2 = int(aid2)
if aid == aid2:
raise InvalidAdjacencyListError('Error in adjacency list:\n{1}\nAttempted to create a bond between atom {0:d} and itself.'.format(aid, adjlist))
if order[0] == '[':
order = order[1:-1].split(',')
else:
order = [order]
bonds[aid][aid2] = order
# Check consistency using bonddict
for atom1 in bonds:
for atom2 in bonds[atom1]:
if atom2 not in bonds:
raise InvalidAdjacencyListError('Error in adjacency list:\n{1}\nAtom {0:d} not in bond dictionary.'.format(atom2, adjlist))
elif atom1 not in bonds[atom2]:
raise InvalidAdjacencyListError('Error in adjacency list:\n{2}\nFound bond between {0:d} and {1:d}, but not the reverse.'.format(atom1, atom2, adjlist))
elif bonds[atom1][atom2] != bonds[atom2][atom1]:
raise InvalidAdjacencyListError('Error in adjacency list:\n{4}\nFound bonds between {0:d} and {1:d}, but of different orders "{2}" and "{3}".'.format(atom1, atom2, bonds[atom1][atom2], bonds[atom2][atom1], adjlist))
# Convert bonddict to use Atom[group] and Bond[group] objects
    atomkeys = sorted(atomdict.keys())
    for aid1 in atomkeys:
        atomkeys2 = sorted(bonds[aid1].keys())
for aid2 in atomkeys2:
if aid1 < aid2:
atom1 = atomdict[aid1]
atom2 = atomdict[aid2]
order = bonds[aid1][aid2]
if group:
bond = GroupBond(atom1, atom2, order)
elif len(order) == 1:
bond = Bond(atom1, atom2, order[0])
else:
raise InvalidAdjacencyListError('Error in adjacency list:\n{0}\nMultiple bond orders specified for an atom in a Molecule.'.format(adjlist))
atom1.edges[atom2] = bond
atom2.edges[atom1] = bond
if saturateH:
# Add explicit hydrogen atoms to complete structure if desired
if not group:
Saturator.saturate(atoms)
# Consistency checks
if not group:
# Molecule consistency check
# Electron and valency consistency check for each atom
for atom in atoms: ConsistencyChecker.check_partial_charge(atom)
nRad = sum([atom.radicalElectrons for atom in atoms])
absolute_spin_per_electron = 1/2.
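        # Default multiplicity = 2*S + 1, where the total spin S = nRad * 1/2 (one unpaired electron contributes spin 1/2)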
        if multiplicity is None: multiplicity = 2 * (nRad * absolute_spin_per_electron) + 1
ConsistencyChecker.check_multiplicity(nRad, multiplicity)
for atom in atoms: ConsistencyChecker.check_hund_rule(atom, multiplicity)
return atoms, multiplicity
else:
# Currently no group consistency check
return atoms, multiplicity
def toAdjacencyList(atoms, multiplicity, label=None, group=False, removeH=False, removeLonePairs=False, oldStyle=False):
"""
Convert a chemical graph defined by a list of `atoms` into a string
adjacency list.
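
    If `oldStyle` is ``True``, the work is delegated to :func:`toOldAdjacencyList`.
    A minimal usage sketch (assuming a `molecule` object that exposes `atoms` and
    `multiplicity` attributes)::

        adjlist = toAdjacencyList(molecule.atoms, molecule.multiplicity, label='ethane')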
"""
if oldStyle:
return toOldAdjacencyList(atoms, multiplicity, label, group, removeH)
adjlist = ''
# Don't remove hydrogen atoms if the molecule consists only of hydrogen atoms
try:
if removeH and all([atom.element.symbol == 'H' for atom in atoms]): removeH = False
except AttributeError:
pass
if label: adjlist += label + '\n'
if group:
if multiplicity:
# Functional group should have a list of possible multiplicities.
# If the list is empty, then it does not need to be written
adjlist += 'multiplicity [{0!s}]\n'.format(','.join(str(i) for i in multiplicity))
else:
assert isinstance(multiplicity, int), "Molecule should have an integer multiplicity"
if multiplicity != 1 or any( atom.radicalElectrons for atom in atoms ):
adjlist += 'multiplicity {0!r}\n'.format(multiplicity)
# Determine the numbers to use for each atom
atomNumbers = {}
index = 0
for atom in atoms:
if removeH and atom.element.symbol == 'H' and atom.label == '': continue
atomNumbers[atom] = '{0:d}'.format(index + 1)
index += 1
atomLabels = dict([(atom, '{0}'.format(atom.label)) for atom in atomNumbers])
atomTypes = {}
atomUnpairedElectrons = {}
atomLonePairs = {}
atomCharge = {}
atomIsotope = {}
if group:
for atom in atomNumbers:
# Atom type(s)
if len(atom.atomType) == 1:
atomTypes[atom] = atom.atomType[0].label
else:
atomTypes[atom] = '[{0}]'.format(','.join([a.label for a in atom.atomType]))
# Unpaired Electron(s)
if len(atom.radicalElectrons) == 1:
atomUnpairedElectrons[atom] = str(atom.radicalElectrons[0])
elif len(atom.radicalElectrons) == 0:
atomUnpairedElectrons[atom] = 'x' # Empty list indicates wildcard
else:
atomUnpairedElectrons[atom] = '[{0}]'.format(','.join([str(radical) for radical in atom.radicalElectrons]))
# Lone Electron Pair(s)
if len(atom.lonePairs) == 1:
atomLonePairs[atom] = str(atom.lonePairs[0])
elif len(atom.lonePairs) == 0:
atomLonePairs[atom] = None # Empty list indicates wildcard
else:
atomLonePairs[atom] = '[{0}]'.format(','.join([str(pair) for pair in atom.lonePairs]))
# Charges
if len(atom.charge) == 1:
atomCharge[atom] = '+' + str(atom.charge[0]) if atom.charge[0] > 0 else str(atom.charge[0])
elif len(atom.charge) == 0:
atomCharge[atom] = None # Empty list indicates wildcard
else:
atomCharge[atom] = '[{0}]'.format(','.join(['+'+str(charge) if charge > 0 else ''+str(charge) for charge in atom.charge]))
# Isotopes
atomIsotope[atom] = -1
else:
for atom in atomNumbers:
# Atom type
atomTypes[atom] = '{0}'.format(atom.element.symbol)
# Unpaired Electron(s)
atomUnpairedElectrons[atom] = '{0}'.format(atom.radicalElectrons)
# Lone Electron Pair(s)
atomLonePairs[atom] = str(atom.lonePairs)
# Partial Charge(s)
atomCharge[atom] = '+'+str(atom.charge) if atom.charge > 0 else '' + str(atom.charge)
# Isotopes
atomIsotope[atom] = atom.element.isotope
# Determine field widths
atomNumberWidth = max([len(s) for s in atomNumbers.values()]) + 1
atomLabelWidth = max([len(s) for s in atomLabels.values()])
if atomLabelWidth > 0: atomLabelWidth += 1
atomTypeWidth = max([len(s) for s in atomTypes.values()]) + 1
atomUnpairedElectronsWidth = max([len(s) for s in atomUnpairedElectrons.values()])
#atomLonePairWidth = max([len(s) for s in atomLonePairs.values()])
#atomChargeWidth = max([len(s) for s in atomCharge.values()])
# Assemble the adjacency list
for atom in atoms:
if atom not in atomNumbers: continue
# Atom number
adjlist += '{0:<{1:d}}'.format(atomNumbers[atom], atomNumberWidth)
# Atom label
adjlist += '{0:<{1:d}}'.format(atomLabels[atom], atomLabelWidth)
# Atom type(s)
adjlist += '{0:<{1:d}}'.format(atomTypes[atom], atomTypeWidth)
# Unpaired Electron(s)
adjlist += 'u{0:<{1:d}}'.format(atomUnpairedElectrons[atom], atomUnpairedElectronsWidth)
# Lone Electron Pair(s)
        if atomLonePairs[atom] is not None:
            adjlist += ' p{0}'.format(atomLonePairs[atom])
        # Partial charges
        if atomCharge[atom] is not None:
adjlist += ' c{0}'.format(atomCharge[atom])
# Isotopes
if atomIsotope[atom] != -1:
adjlist += ' i{0}'.format(atomIsotope[atom])
# Bonds list
        # sort them the same way as the atoms
        atoms2 = sorted(atom.bonds.keys(), key=atoms.index)
for atom2 in atoms2:
if atom2 not in atomNumbers: continue
bond = atom.bonds[atom2]
adjlist += ' {{{0},'.format(atomNumbers[atom2])
# Bond type(s)
if group:
code = '[{0}]'
if len(bond.order) == 1:
code = '{0}'
                # prefer the string representation of the bond order(s);
                # fall back to the numeric form if that fails
                try:
                    adjlist += code.format(','.join(bond.getOrderStr()))
                except ValueError:
                    adjlist += code.format(','.join(str(num) for num in bond.getOrderNum()))
else:
                # prefer the string representation of the bond order;
                # fall back to the numeric form if that fails
try:
adjlist += bond.getOrderStr()
except ValueError:
adjlist += str(bond.getOrderNum())
adjlist += '}'
# Each atom begins on a new line
adjlist += '\n'
return adjlist
def getOldElectronState(atom):
"""
Get the old adjacency list format electronic state
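
    The state combines the radical electron count with any lone pairs beyond the
    element's default, giving one of '0', '1', '2S', '2T', '3D', '3Q', '4S', '4T'
    or '4V'.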
"""
additionalLonePairs = atom.lonePairs - PeriodicSystem.lone_pairs[atom.element.symbol]
electrons = atom.radicalElectrons + additionalLonePairs * 2
if electrons == 0:
electronState = '0'
elif electrons == 1:
electronState = '1'
elif electrons == 2:
if additionalLonePairs == 0:
electronState = '2T'
elif additionalLonePairs == 1:
electronState = '2S'
else:
raise InvalidAdjacencyListError("Cannot find electron state of atom {0}".format(atom))
elif electrons == 3:
if additionalLonePairs == 0:
electronState = '3Q'
elif additionalLonePairs == 1:
electronState = '3D'
else:
raise InvalidAdjacencyListError("Cannot find electron state of atom {0}".format(atom))
elif electrons == 4:
if additionalLonePairs == 0:
electronState = '4V'
elif additionalLonePairs == 1:
electronState = '4T'
elif additionalLonePairs == 2:
electronState = '4S'
else:
raise InvalidAdjacencyListError("Cannot find electron state of atom {0}".format(atom))
else:
raise InvalidAdjacencyListError("Cannot find electron state of atom {0}".format(atom))
return electronState
def toOldAdjacencyList(atoms, multiplicity=None, label=None, group=False, removeH=False):
"""
Convert a chemical graph defined by a list of `atoms` into a string old-style
adjacency list that can be used in RMG-Java. Currently not working for groups.
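
    Each output line lists the atom number, an optional label, the element symbol,
    the electron state code (see :func:`getOldElectronState`) and the bonds, e.g.
    (illustrative) ``1 C 1 {2,S}``.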
"""
adjlist = ''
if group:
raise InvalidAdjacencyListError("Not yet implemented.")
# Filter out all non-valid atoms
if not group:
for atom in atoms:
if atom.element.symbol in ['He','Ne','Ar','N']:
raise InvalidAdjacencyListError("Old-style adjacency list does not accept He, Ne, Ar, N elements.")
# Don't remove hydrogen atoms if the molecule consists only of hydrogen atoms
try:
if removeH and all([atom.element.symbol == 'H' for atom in atoms]): removeH = False
except AttributeError:
pass
if label: adjlist += label + '\n'
# Determine the numbers to use for each atom
atomNumbers = {}
index = 0
for atom in atoms:
if removeH and atom.element.symbol == 'H' and atom.label == '': continue
atomNumbers[atom] = '{0:d}'.format(index + 1)
index += 1
atomLabels = dict([(atom, '{0}'.format(atom.label)) for atom in atomNumbers])
atomTypes = {}
atomElectronStates = {}
if group:
raise InvalidAdjacencyListError("Not yet implemented.")
else:
for atom in atomNumbers:
# Atom type
atomTypes[atom] = '{0}'.format(atom.element.symbol)
# Electron state(s)
atomElectronStates[atom] = '{0}'.format(getOldElectronState(atom))
# Determine field widths
atomNumberWidth = max([len(s) for s in atomNumbers.values()]) + 1
atomLabelWidth = max([len(s) for s in atomLabels.values()])
if atomLabelWidth > 0: atomLabelWidth += 1
atomTypeWidth = max([len(s) for s in atomTypes.values()]) + 1
atomElectronStateWidth = max([len(s) for s in atomElectronStates.values()])
# Assemble the adjacency list
for atom in atoms:
if atom not in atomNumbers: continue
# Atom number
adjlist += '{0:<{1:d}}'.format(atomNumbers[atom], atomNumberWidth)
# Atom label
adjlist += '{0:<{1:d}}'.format(atomLabels[atom], atomLabelWidth)
# Atom type(s)
adjlist += '{0:<{1:d}}'.format(atomTypes[atom], atomTypeWidth)
# Electron state(s)
adjlist += '{0:<{1:d}}'.format(atomElectronStates[atom], atomElectronStateWidth)
# Bonds list
        # sort them the same way as the atoms
        atoms2 = sorted(atom.bonds.keys(), key=atoms.index)
for atom2 in atoms2:
if atom2 not in atomNumbers: continue
bond = atom.bonds[atom2]
adjlist += ' {{{0},'.format(atomNumbers[atom2])
# Bond type(s)
if group:
if len(bond.order) == 1:
adjlist += bond.getOrderStr()[0]
else:
adjlist += '{{{0}}}'.format(','.join(bond.getOrderStr()))
else:
adjlist += bond.getOrderStr()
adjlist += '}'
# Each atom begins on a new line
adjlist += '\n'
return adjlist