import setuptools
def readme():
with open("README.rst") as f:
return f.read()
def version():
with open("VERSION") as f:
return f.read().strip()
commands = ["logic/subuser","logic/execute-json-from-fifo"]
setuptools.setup(
name="subuser",
version=version(),
description="subuser - a program which helps you run other programs in containers, securely and portably.",
long_description=readme(),
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)",
"Programming Language :: Python :: 3.0",
],
keywords="subuser Docker containers security portability",
url="http://subuser.org",
author="Timothy Hobbs",
author_email="timothy@hobbs.cz",
license="LGPL-3.0",
packages=setuptools.find_packages(),
package_data={'subuserlib': ['data/*']},
scripts=commands,
setup_requires=['wheel'],
include_package_data=True,
zip_safe=False)
|
"""
The I{2nd generation} service proxy provides access to web services.
See I{README.txt}
"""
import suds
import suds.metrics as metrics
from http.cookiejar import CookieJar
from suds import *
from suds.reader import DefinitionsReader
from suds.transport import TransportError, Request
from suds.transport.https import HttpAuthenticated
from suds.servicedefinition import ServiceDefinition
from suds import sudsobject
from suds.sudsobject import Factory as InstFactory
from suds.sudsobject import Object
from suds.resolver import PathResolver
from suds.builder import Builder
from suds.wsdl import Definitions
from suds.cache import ObjectCache
from suds.sax.document import Document
from suds.sax.parser import Parser
from suds.options import Options
from suds.properties import Unskin
from urllib.parse import urlparse
from copy import deepcopy
from suds.plugin import PluginContainer
from logging import getLogger
log = getLogger(__name__)
class Client(object):
"""
A lightweight web services client.
I{(2nd generation)} API.
@ivar wsdl: The WSDL object.
@type wsdl:L{Definitions}
@ivar service: The service proxy used to invoke operations.
@type service: L{Service}
@ivar factory: The factory used to create objects.
@type factory: L{Factory}
@ivar sd: The service definition
@type sd: L{ServiceDefinition}
@ivar messages: The last sent/received messages.
@type messages: str[2]
"""
@classmethod
def items(cls, sobject):
"""
Extract the I{items} from a suds object much like the
items() method works on I{dict}.
@param sobject: A suds object
@type sobject: L{Object}
@return: A list of items contained in I{sobject}.
@rtype: [(key, value),...]
"""
return sudsobject.items(sobject)
@classmethod
def dict(cls, sobject):
"""
Convert a sudsobject into a dictionary.
@param sobject: A suds object
@type sobject: L{Object}
@return: A python dictionary containing the
items contained in I{sobject}.
@rtype: dict
"""
return sudsobject.asdict(sobject)
@classmethod
def metadata(cls, sobject):
"""
Extract the metadata from a suds object.
@param sobject: A suds object
@type sobject: L{Object}
@return: The object's metadata
@rtype: L{sudsobject.Metadata}
"""
return sobject.__metadata__
def __init__(self, url, **kwargs):
"""
@param url: The URL for the WSDL.
@type url: str
@param kwargs: keyword arguments.
@see: L{Options}
"""
options = Options()
options.transport = HttpAuthenticated()
self.options = options
options.cache = ObjectCache(days=1)
self.set_options(**kwargs)
reader = DefinitionsReader(options, Definitions)
self.wsdl = reader.open(url)
plugins = PluginContainer(options.plugins)
plugins.init.initialized(wsdl=self.wsdl)
self.factory = Factory(self.wsdl)
self.service = ServiceSelector(self, self.wsdl.services)
self.sd = []
for s in self.wsdl.services:
sd = ServiceDefinition(self.wsdl, s)
self.sd.append(sd)
self.messages = dict(tx=None, rx=None)
def set_options(self, **kwargs):
"""
Set options.
@param kwargs: keyword arguments.
@see: L{Options}
"""
p = Unskin(self.options)
p.update(kwargs)
def add_prefix(self, prefix, uri):
"""
Add I{static} mapping of an XML namespace prefix to a namespace.
        This is useful for cases when a wsdl and referenced schemas make heavy
        use of namespaces and those namespaces are subject to change.
@param prefix: An XML namespace prefix.
@type prefix: str
@param uri: An XML namespace URI.
@type uri: str
@raise Exception: when prefix is already mapped.
"""
root = self.wsdl.root
mapped = root.resolvePrefix(prefix, None)
if mapped is None:
root.addPrefix(prefix, uri)
return
if mapped[1] != uri:
raise Exception('"%s" already mapped as "%s"' % (prefix, mapped))
def last_sent(self):
"""
Get last sent I{soap} message.
@return: The last sent I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('tx')
def last_received(self):
"""
Get last received I{soap} message.
@return: The last received I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('rx')
def clone(self):
"""
Get a shallow clone of this object.
The clone only shares the WSDL. All other attributes are
unique to the cloned object including options.
@return: A shallow clone.
@rtype: L{Client}
"""
class Uninitialized(Client):
def __init__(self):
pass
clone = Uninitialized()
clone.options = Options()
cp = Unskin(clone.options)
mp = Unskin(self.options)
cp.update(deepcopy(mp))
clone.wsdl = self.wsdl
clone.factory = self.factory
clone.service = ServiceSelector(clone, self.wsdl.services)
clone.sd = self.sd
clone.messages = dict(tx=None, rx=None)
return clone
def __str__(self):
return self.__unicode__()
def __unicode__(self):
s = ['\n']
build = suds.__build__.split()
s.append('Suds ( https://fedorahosted.org/suds/ )')
s.append(' version: %s' % suds.__version__)
s.append(' %s build: %s' % (build[0], build[1]))
for sd in self.sd:
s.append('\n\n%s' % str(sd))
return ''.join(s)
class Factory:
"""
A factory for instantiating types defined in the wsdl
@ivar resolver: A schema type resolver.
@type resolver: L{PathResolver}
@ivar builder: A schema object builder.
@type builder: L{Builder}
"""
def __init__(self, wsdl):
"""
@param wsdl: A schema object.
@type wsdl: L{wsdl.Definitions}
"""
self.wsdl = wsdl
self.resolver = PathResolver(wsdl)
self.builder = Builder(self.resolver)
def create(self, name):
"""
create a WSDL type by name
@param name: The name of a type defined in the WSDL.
@type name: str
@return: The requested object.
@rtype: L{Object}
"""
timer = metrics.Timer()
timer.start()
type = self.resolver.find(name)
if type is None:
raise TypeNotFound(name)
if type.enum():
result = InstFactory.object(name)
for e, a in type.children():
setattr(result, e.name, e.name)
else:
try:
result = self.builder.build(type)
except Exception as e:
log.error("create '%s' failed", name, exc_info=True)
raise BuildError(name, e)
timer.stop()
metrics.log.debug('%s created: %s', name, timer)
return result
def separator(self, ps):
"""
Set the path separator.
@param ps: The new path separator.
@type ps: char
"""
self.resolver = PathResolver(self.wsdl, ps)
class ServiceSelector:
"""
The B{service} selector is used to select a web service.
In most cases, the wsdl only defines (1) service in which access
by subscript is passed through to a L{PortSelector}. This is also the
behavior when a I{default} service has been specified. In cases
where multiple services have been defined and no default has been
specified, the service is found by name (or index) and a L{PortSelector}
for the service is returned. In all cases, attribute access is
forwarded to the L{PortSelector} for either the I{first} service or the
I{default} service (when specified).
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __services: A list of I{wsdl} services.
@type __services: list
"""
def __init__(self, client, services):
"""
@param client: A suds client.
@type client: L{Client}
@param services: A list of I{wsdl} services.
@type services: list
"""
self.__client = client
self.__services = services
def __getattr__(self, name):
"""
Request to access an attribute is forwarded to the
L{PortSelector} for either the I{first} service or the
I{default} service (when specified).
@param name: The name of a method.
@type name: str
@return: A L{PortSelector}.
@rtype: L{PortSelector}.
"""
default = self.__ds()
if default is None:
port = self.__find(0)
else:
port = default
return getattr(port, name)
def __getitem__(self, name):
"""
Provides selection of the I{service} by name (string) or
index (integer). In cases where only (1) service is defined
or a I{default} has been specified, the request is forwarded
to the L{PortSelector}.
@param name: The name (or index) of a service.
@type name: (int|str)
@return: A L{PortSelector} for the specified service.
@rtype: L{PortSelector}.
"""
if len(self.__services) == 1:
port = self.__find(0)
return port[name]
default = self.__ds()
if default is not None:
port = default
return port[name]
return self.__find(name)
def __find(self, name):
"""
Find a I{service} by name (string) or index (integer).
@param name: The name (or index) of a service.
@type name: (int|str)
@return: A L{PortSelector} for the found service.
@rtype: L{PortSelector}.
"""
service = None
if not len(self.__services):
raise Exception('No services defined')
if isinstance(name, int):
try:
service = self.__services[name]
name = service.name
except IndexError:
raise ServiceNotFound('at [%d]' % name)
else:
for s in self.__services:
if name == s.name:
service = s
break
if service is None:
raise ServiceNotFound(name)
return PortSelector(self.__client, service.ports, name)
def __ds(self):
"""
Get the I{default} service if defined in the I{options}.
@return: A L{PortSelector} for the I{default} service.
@rtype: L{PortSelector}.
"""
ds = self.__client.options.service
if ds is None:
return None
else:
return self.__find(ds)
class PortSelector:
"""
The B{port} selector is used to select a I{web service} B{port}.
In cases where multiple ports have been defined and no default has been
specified, the port is found by name (or index) and a L{MethodSelector}
for the port is returned. In all cases, attribute access is
forwarded to the L{MethodSelector} for either the I{first} port or the
I{default} port (when specified).
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __ports: A list of I{service} ports.
@type __ports: list
@ivar __qn: The I{qualified} name of the port (used for logging).
@type __qn: str
"""
def __init__(self, client, ports, qn):
"""
@param client: A suds client.
@type client: L{Client}
@param ports: A list of I{service} ports.
@type ports: list
@param qn: The name of the service.
@type qn: str
"""
self.__client = client
self.__ports = ports
self.__qn = qn
def __getattr__(self, name):
"""
Request to access an attribute is forwarded to the
L{MethodSelector} for either the I{first} port or the
I{default} port (when specified).
@param name: The name of a method.
@type name: str
@return: A L{MethodSelector}.
@rtype: L{MethodSelector}.
"""
default = self.__dp()
if default is None:
m = self.__find(0)
else:
m = default
return getattr(m, name)
def __getitem__(self, name):
"""
Provides selection of the I{port} by name (string) or
index (integer). In cases where only (1) port is defined
or a I{default} has been specified, the request is forwarded
to the L{MethodSelector}.
@param name: The name (or index) of a port.
@type name: (int|str)
@return: A L{MethodSelector} for the specified port.
@rtype: L{MethodSelector}.
"""
default = self.__dp()
if default is None:
return self.__find(name)
else:
return default
def __find(self, name):
"""
Find a I{port} by name (string) or index (integer).
@param name: The name (or index) of a port.
@type name: (int|str)
@return: A L{MethodSelector} for the found port.
@rtype: L{MethodSelector}.
"""
port = None
if not len(self.__ports):
raise Exception('No ports defined: %s' % self.__qn)
if isinstance(name, int):
qn = '%s[%d]' % (self.__qn, name)
try:
port = self.__ports[name]
except IndexError:
raise PortNotFound(qn)
else:
qn = '.'.join((self.__qn, name))
for p in self.__ports:
if name == p.name:
port = p
break
if port is None:
raise PortNotFound(qn)
qn = '.'.join((self.__qn, port.name))
return MethodSelector(self.__client, port.methods, qn)
def __dp(self):
"""
Get the I{default} port if defined in the I{options}.
@return: A L{MethodSelector} for the I{default} port.
@rtype: L{MethodSelector}.
"""
dp = self.__client.options.port
if dp is None:
return None
else:
return self.__find(dp)
class MethodSelector:
"""
The B{method} selector is used to select a B{method} by name.
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __methods: A dictionary of methods.
@type __methods: dict
@ivar __qn: The I{qualified} name of the method (used for logging).
@type __qn: str
"""
def __init__(self, client, methods, qn):
"""
@param client: A suds client.
@type client: L{Client}
@param methods: A dictionary of methods.
@type methods: dict
@param qn: The I{qualified} name of the port.
@type qn: str
"""
self.__client = client
self.__methods = methods
self.__qn = qn
def __getattr__(self, name):
"""
Get a method by name and return it in an I{execution wrapper}.
@param name: The name of a method.
@type name: str
@return: An I{execution wrapper} for the specified method name.
@rtype: L{Method}
"""
return self[name]
def __getitem__(self, name):
"""
Get a method by name and return it in an I{execution wrapper}.
@param name: The name of a method.
@type name: str
@return: An I{execution wrapper} for the specified method name.
@rtype: L{Method}
"""
m = self.__methods.get(name)
if m is None:
qn = '.'.join((self.__qn, name))
raise MethodNotFound(qn)
return Method(self.__client, m)
class Method:
"""
The I{method} (namespace) object.
@ivar client: A client object.
@type client: L{Client}
@ivar method: A I{wsdl} method.
    @type method: I{wsdl} Method.
"""
def __init__(self, client, method):
"""
@param client: A client object.
@type client: L{Client}
@param method: A I{raw} method.
        @type method: I{raw} Method.
"""
self.client = client
self.method = method
def __call__(self, *args, **kwargs):
"""
Invoke the method.
"""
clientclass = self.clientclass(kwargs)
client = clientclass(self.client, self.method)
if not self.faults():
try:
return client.invoke(args, kwargs)
except WebFault as e:
return (500, e)
else:
return client.invoke(args, kwargs)
def faults(self):
""" get faults option """
return self.client.options.faults
def clientclass(self, kwargs):
""" get soap client class """
if SimClient.simulation(kwargs):
return SimClient
else:
return SoapClient
class SoapClient:
"""
A lightweight soap based web client B{**not intended for external use}
@ivar service: The target method.
@type service: L{Service}
@ivar method: A target method.
@type method: L{Method}
    @ivar options: A dictionary of options.
    @type options: dict
    @ivar cookiejar: A cookie jar.
    @type cookiejar: http.cookiejar.CookieJar
"""
def __init__(self, client, method):
"""
@param client: A suds client.
@type client: L{Client}
@param method: A target method.
@type method: L{Method}
"""
self.client = client
self.method = method
self.options = client.options
self.cookiejar = CookieJar()
def invoke(self, args, kwargs):
"""
Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin}|I{subclass of} L{Object}
"""
timer = metrics.Timer()
timer.start()
result = None
binding = self.method.binding.input
soapenv = binding.get_message(self.method, args, kwargs)
timer.stop()
metrics.log.debug(
"message for '%s' created: %s",
self.method.name,
timer)
timer.start()
result = self.send(soapenv)
timer.stop()
metrics.log.debug(
"method '%s' invoked: %s",
self.method.name,
timer)
return result
def send(self, soapenv):
"""
Send soap message.
@param soapenv: A soap envelope to send.
@type soapenv: L{Document}
@return: The reply to the sent message.
@rtype: I{builtin} or I{subclass of} L{Object}
"""
result = None
location = self.location()
binding = self.method.binding.input
transport = self.options.transport
retxml = self.options.retxml
prettyxml = self.options.prettyxml
log.debug('sending to (%s)\nmessage:\n%s', location, soapenv)
try:
self.last_sent(soapenv)
plugins = PluginContainer(self.options.plugins)
plugins.message.marshalled(envelope=soapenv.root())
if prettyxml:
soapenv = soapenv.str()
else:
soapenv = soapenv.plain()
soapenv = soapenv.encode('utf-8')
plugins.message.sending(envelope=soapenv)
request = Request(location, soapenv)
request.headers = self.headers()
reply = transport.send(request)
ctx = plugins.message.received(reply=reply.message)
reply.message = ctx.reply
if retxml:
result = reply.message
else:
result = self.succeeded(binding, reply.message)
except TransportError as e:
if e.httpcode in (202,204):
result = None
else:
log.error(self.last_sent())
result = self.failed(binding, e)
return result
def headers(self):
"""
        Get http headers for the http/https request.
@return: A dictionary of header/values.
@rtype: dict
"""
action = self.method.soap.action
stock = { 'Content-Type' : 'text/xml; charset=utf-8', 'SOAPAction': action }
result = dict(stock, **self.options.headers)
log.debug('headers = %s', result)
return result
def succeeded(self, binding, reply):
"""
Request succeeded, process the reply
@param binding: The binding to be used to process the reply.
@type binding: L{bindings.binding.Binding}
@param reply: The raw reply text.
@type reply: str
@return: The method result.
@rtype: I{builtin}, L{Object}
@raise WebFault: On server.
"""
log.debug('http succeeded:\n%s', reply)
plugins = PluginContainer(self.options.plugins)
if len(reply) > 0:
reply, result = binding.get_reply(self.method, reply)
self.last_received(reply)
else:
result = None
ctx = plugins.message.unmarshalled(reply=result)
result = ctx.reply
if self.options.faults:
return result
else:
return (200, result)
def failed(self, binding, error):
"""
Request failed, process reply based on reason
@param binding: The binding to be used to process the reply.
@type binding: L{suds.bindings.binding.Binding}
@param error: The http error message
@type error: L{transport.TransportError}
"""
status, reason = (error.httpcode, tostr(error))
reply = error.fp.read()
log.debug('http failed:\n%s', reply)
if status == 500:
if len(reply) > 0:
r, p = binding.get_fault(reply)
self.last_received(r)
return (status, p)
else:
return (status, None)
if self.options.faults:
raise Exception((status, reason))
else:
return (status, None)
def location(self):
p = Unskin(self.options)
loc = p.get('location', self.method.location)
        if isinstance(loc, bytes):
return loc.decode()
return loc
def last_sent(self, d=None):
key = 'tx'
messages = self.client.messages
if d is None:
return messages.get(key)
else:
messages[key] = d
def last_received(self, d=None):
key = 'rx'
messages = self.client.messages
if d is None:
return messages.get(key)
else:
messages[key] = d
class SimClient(SoapClient):
"""
Loopback client used for message/reply simulation.
"""
injkey = '__inject'
@classmethod
def simulation(cls, kwargs):
""" get whether loopback has been specified in the I{kwargs}. """
return SimClient.injkey in kwargs
def invoke(self, args, kwargs):
"""
Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin} or I{subclass of} L{Object}
"""
simulation = kwargs[self.injkey]
msg = simulation.get('msg')
reply = simulation.get('reply')
fault = simulation.get('fault')
if msg is None:
if reply is not None:
return self.__reply(reply, args, kwargs)
if fault is not None:
return self.__fault(fault)
raise Exception('(reply|fault) expected when msg=None')
sax = Parser()
msg = sax.parse(string=msg)
return self.send(msg)
def __reply(self, reply, args, kwargs):
""" simulate the reply """
binding = self.method.binding.input
msg = binding.get_message(self.method, args, kwargs)
log.debug('inject (simulated) send message:\n%s', msg)
binding = self.method.binding.output
return self.succeeded(binding, reply)
def __fault(self, reply):
""" simulate the (fault) reply """
binding = self.method.binding.output
if self.options.faults:
r, p = binding.get_fault(reply)
self.last_received(r)
return (500, p)
else:
return (500, None)
|
import ifcopenshell
class Usecase:
def __init__(self, file, **settings):
self.file = file
self.settings = {"material_list": None, "material": None}
for key, value in settings.items():
self.settings[key] = value
def execute(self):
materials = list(self.settings["material_list"].Materials or [])
materials.append(self.settings["material"])
self.settings["material_list"].Materials = materials
|
import theano
from theano import tensor as T
import numpy as np
x = T.fmatrix('x')
v = T.fmatrix('v')
y = T.transpose(x) + v
update_weight_theano = theano.function(inputs=[x, v],
outputs=[y])
def update_weight_np(x, v):
# return np.sum(x, axis=0) + v
# return np.dot(x, v)
return np.transpose(x) + v
def linear_combine():
x = T.fmatrix('x')
w = T.vector('w', 'float32')
# z = T.dot(w, x)
z = x * w
f = theano.function(inputs=[x, w], outputs=z)
    print(f(np.arange(4).reshape([2, 2]).astype('float32'), np.array([0.1, 0.2], dtype='float32')))
def linear_combine_shared():
x = T.fmatrix('x')
value = np.asarray(np.array([0.1, 0.2], dtype='float32'))
w = theano.shared(value=value, strict=False)
# w = T.vector('w', 'float32')
z = T.dot(w, x)
# z = x * w
f = theano.function(inputs=[x], outputs=z)
    print(f(np.arange(4).reshape([2, 2]).astype('float32')))
def weighting():
from theano.tensor import TensorType
x = T.ftensor3()
# w = TensorType('float32', (False, False, True))()
w = T.ftensor3()
# z = T.dot(w, x)
# y = T.addbroadcast(w, 2)
# y = w.reshape([w.shape[0], w.shape[1]])
y = T.flatten(w, 2)
z = x * y
f = theano.function(inputs=[x, w], outputs=z)
input1 = np.arange(8).reshape([2, 2, 2]).astype('float32')
input2 = np.array(
[
[
[0.1], [0.2]
],
[
[0.2], [0.4]
]
]
).astype('float32')
    print(input1, input1.shape)
    print()
    print(input2, input2.shape)
    print()
    print(f(input1, input2))
# print input1 * input2
def weighting1():
from theano.tensor import TensorType
input1 = np.arange(12).reshape([3, 2, 2]).astype('float32')
input2 = np.array(
[1., 2., 3.]
).astype('float32')
x = T.ftensor3()
# w = TensorType('float32', (False, False, True))()
w = theano.shared(value=input2, name='w', strict=False)
y = (w * x.T).T
f = theano.function(inputs=[x], outputs=y)
    print(input1, input1.shape)
    print()
    print(input2, input2.shape)
    print()
    print(f(input1))
# print input1 * input2
def concatenate():
input = np.arange(12).reshape([2, 3, 2]).astype('float32')
    print(input)
    print()
pad_len = 1
Y = T.tensor3()
z = T.concatenate([Y[:, :pad_len, :], Y, Y[:, Y.shape[1] - pad_len:, :]], axis=1)
f = theano.function(inputs=[Y], outputs=z)
    print(f(input))
def arg_sort():
a, b, c = 2, 4, 4
input = np.arange(a*b*c).reshape([a, b, c]).astype('float32')
# print input
    print()
x = T.tensor3()
z = T.argsort(x, axis=2)[:, :, :2].astype('int64')
z = theano.printing.Print("z")(z)
z = x[z[0].flatten()]
# z = x[T.arange(x.shape[0], dtype='int32'), T.arange(x.shape[1], dtype='int32'), z]
f = theano.function(inputs=[x], outputs=z)
r = f(input)
def t_grad():
x = T.matrix()
y = T.mean(T.sum(x, axis=1))
z = T.grad(y, x)
    print(type(y))
def test_cache():
from keras.layers.core import Dense
from theano import pp, function
from theano import config
    import pickle as pkl  # cPickle was folded into pickle on Python 3
# Theano configuration
config.optimizer = 'fast_run'
X = T.matrix()
d = Dense(200, input_dim=1000)
# d1 = Dense(200, input_dim=1000)
d.build()
Y = d(X) + d(X)
z = d(X)
Y1 = z + z
f = function([X], Y)
f1 = function([X], Y1)
# print pp(Y)
# print pp(f.maker.fgraph.outputs[0])
    print(theano.printing.debugprint(f))
    print()
    print(theano.printing.debugprint(f1))
    print()
    print(theano.printing.debugprint(z))
pkl.dump(f, open('test.pkl', 'wb'))
pkl.dump(f1, open('test1.pkl', 'wb'))
if __name__ == '__main__':
# linear_combine()
# linear_combine_shared()
# weighting1()
# concatenate()
arg_sort()
# t_grad()
# test_cache()
|
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.views.generic import TemplateView
from farxiv.forms import *
import json
@login_required
def farticle_builder_view(request):
if request.method == 'POST':
title = request.POST['title']
authors = request.POST['authors']
summary = request.POST['summary']
        data = json.loads(request.POST['data'])
        steps = data['steps']
        problems = data['problems']
        suggestions = data['suggestions']
        images = data['images']
        links = data['links']
for link in links:
# Link the lists and images in the DB
pass
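        # NOTE: nothing above assigns farticle_id; the built farticle needs to
        # be persisted and its id bound here before the render below can work.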
return render(request, 'view.html', {'farticle': farticle_id})
else:
return render(request, 'builder.html')
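# A sketch of the JSON payload the builder page is assumed to POST in its
# 'data' field (key names taken from the view above):
#
#   {"steps": [...], "problems": [...], "suggestions": [...],
#    "images": [...], "links": [...]}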
|
import enum
class H264SubMe(enum.Enum):
FULLPEL = 'FULLPEL'
SAD = 'SAD'
SATD = 'SATD'
QPEL3 = 'QPEL3'
QPEL4 = 'QPEL4'
QPEL5 = 'QPEL5'
RD_IP = 'RD_IP'
RD_ALL = 'RD_ALL'
RD_REF_IP = 'RD_REF_IP'
RD_REF_ALL = 'RD_REF_ALL'
|
import sys
def caesar(code, shift):
mapping = {
n: start + (n - start + shift) % 26
for start, n in zip(
[65] * 26 + [97] * 26, list(range(65, 91)) + list(range(97, 123))
)
}
return code.translate(mapping)
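# A quick sanity check of the mapping above (the shift wraps within each case):
#   caesar("Hello", 1) -> "Ifmmp"; non-letter characters pass through unchanged.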
if __name__ == "__main__":
with open("/usr/share/dict/words") as f:
word_list = set(x.strip() for x in f.readlines())
code = sys.stdin.read()
counts = []
for shift in range(26):
x = sum(word in word_list for word in caesar(code, shift).split())
counts.append(x)
guess, _ = max(enumerate(counts), key=lambda t: t[1])
print(f"{guess}: {caesar(code, guess)}")
|
print("Hello World")
if 1 == 1:
    print(" Hi Sandy")
|
class Game():
def roll(self, pins):
pass
def score(self):
return -1
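# A stub for the bowling-game kata; tests are expected to drive it, e.g.
# (hypothetically) twenty rolls of 0 should eventually make score() return 0.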
|
import json
import random
import time
import os
from core import pinBot
bot = pinBot()
bot_dir = os.path.dirname(__file__)
with open(os.path.join(bot_dir,'pinterestBot.json')) as json_data:
bot_config = json.load(json_data)
with open(os.path.join(bot_dir,'done_pins.json')) as json_data:
done_pins = json.load(json_data)
pin_upload_count = bot_config['periods']['post_to_post_per_day']
available_posts = list(filter(lambda x: x['imageUrl'] not in done_pins, bot_config['posts']))
if len(available_posts) <= pin_upload_count:
pin_upload_count = len(available_posts)
if len(available_posts) > 0:
    for post in random.sample(available_posts, pin_upload_count):
        bot.createPost(post['imageUrl'], post['note'])
time.sleep(1)
done_pins.append(post['imageUrl'])
with open(os.path.join(bot_dir,'done_pins.json'), 'w') as outfile:
json.dump(done_pins, outfile)
|
__author__ = 'christianbuia'
import random
from Crypto.Cipher import AES
import base64
import sys
def pkcs7_padding(message_bytes, block_size):
pad_length = block_size - (len(message_bytes) % block_size)
if pad_length != block_size:
for i in range(0, pad_length):
message_bytes += bytes([pad_length])
return message_bytes
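# A quick check of the padding above (note: unlike strict PKCS#7, no extra
# padding block is added when the message is already block-aligned):
#   pkcs7_padding(b"YELLOW SUBMARINE", 20) -> b'YELLOW SUBMARINE\x04\x04\x04\x04'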
def generateRandom16bytes():
ints = []
for i in range(16):
ints.append(random.randint(0,255))
return bytes(ints)
def encrypt_aes128(message, key):
decobj = AES.new(key, AES.MODE_ECB)
return decobj.encrypt(pkcs7_padding(message, 16))
def detectECB(cipher, block_size):
blocks = []
for i in range(int(len(cipher)/block_size)):
blocks.append(cipher[i*block_size:i*block_size+block_size])
#detecting if dups exist: http://stackoverflow.com/questions/9835762/find-and-list-duplicates-in-python-list
if (len(set([x for x in blocks if blocks.count(x) > 1]))) > 0:
return True
else:
return False
def ecb_oracle(mytext, plaintext):
#using the same prefix scheme as used in challenge 11 since the spec is pretty broad.
plaintext_prefix = bytes([random.randint(0, 255) for i in range(random.randint(5, 10))])
cipher = encrypt_aes128(plaintext_prefix + mytext + plaintext, global_key)
return cipher
def detect_plaintext_padding_size(oracle_func, plaintext, block_size):
count = 0
mytext = b""
observed_blocks = None
while True:
cipher = oracle_func(mytext, plaintext)
next_observed_blocks = len(cipher) / block_size
if observed_blocks != None and observed_blocks < next_observed_blocks:
break
observed_blocks = next_observed_blocks
mytext += bytes("A", "ascii")
count += 1
return (count - 1)
def return_sorted_counts_of_lengths(oracle_func, attack_array, plaintext, num_runs=200):
lengths = []
for i in range(num_runs):
l = len(oracle_func(attack_array, plaintext))
if l not in lengths:
lengths.append(l)
return sorted(lengths)
def find_prefix_delta(oracle_func, plaintext, block_size):
#we want to find an attack array that results in variable lengths of the cipher text (state 1)
#we can use that attack array by incrementing a byte at a time til we find an attack array of one len (state 2)
#we then increment the attack array.
#when we find one of multiple len, the delta between state 2 and now gives the delta of min and max.
#this is state 3.
bounds_count = 0
bounds_state = 0
state_2_len = None
min_max_delta = None
while True:
bounds_count += 1
#first we will find an attack array that yields variably sized cipher texts
ints = [ord("A") for i in range(bounds_count)]
bounds_attack_array = bytes(ints)
#undetermined
if bounds_state == 0:
if len(return_sorted_counts_of_lengths(oracle_func, bounds_attack_array, plaintext)) == 1:
pass
else:
bounds_state = 1
continue
#variable-length ciphers - looking for the first mono-length
if bounds_state == 1:
if len(return_sorted_counts_of_lengths(oracle_func, bounds_attack_array, plaintext)) == 1:
bounds_state = 2
state_2_len = len(bounds_attack_array)
else:
pass
continue
#mono-length ciphers - looking for the first variable length to show us what we subtract from the blocksize
        #to arrive at the delta (delta = blocksize - (length - state 2 length))
if bounds_state == 2:
if len(return_sorted_counts_of_lengths(oracle_func, bounds_attack_array, plaintext)) == 1:
pass
else:
bounds_state = 3
#this number will give me the delta between min and max
min_max_delta = block_size - (len(bounds_attack_array) - state_2_len)
break
continue
return min_max_delta
def crack_ecb(oracle_func, plaintext):
#detect block size by determining the delta of the first jump in cipher size as the plaintext size increases
block_size = None
cipher_size = len(oracle_func(b"A", plaintext))
size_count = 1
while True:
ints = [ord("A") for i in range(size_count)]
size_attack_array = bytes(ints)
next_cipher_size = len(oracle_func(size_attack_array, plaintext))
if next_cipher_size > cipher_size:
block_size = next_cipher_size - cipher_size
break
size_count += 1
#not sure i need this
prefix_delta = find_prefix_delta(oracle_func, plaintext, block_size)
sizes_of_base_plaintext = return_sorted_counts_of_lengths(oracle_func, b"", plaintext)
top_size_of_base_plaintext = sizes_of_base_plaintext[-1]
number_of_blocks_to_decode = int(top_size_of_base_plaintext / block_size)
analysis_block = number_of_blocks_to_decode + 1
print("size of base plaintext " + str(sizes_of_base_plaintext))
print("number of blocks to decode " + str(number_of_blocks_to_decode))
print("analysis block " + str(analysis_block))
#figure out the base attack array to populate the analysis block
#--------------------------------------------------------------------------------------------
base_attack_array_size = 1
base_attack_array = b""
while True:
ints = [ord("A") for i in range(base_attack_array_size)]
base_attack_array = bytes(ints)
plaintext_sizes = return_sorted_counts_of_lengths(oracle_func, base_attack_array, plaintext)
if plaintext_sizes[-1] > top_size_of_base_plaintext:
break
base_attack_array_size += 1
#print("base attack array is " + str(base_attack_array))
#print("size of base attack array is " + str(base_attack_array_size))
#--------------------------------------------------------------------------------------------
#the solved plain text we accumulate and return
solved_plain_text = b""
for block_number in range(number_of_blocks_to_decode):
sys.stdout.write("decrypting...")
sys.stdout.flush()
for byte_number in range(block_size):
sys.stdout.write(".")
sys.stdout.flush()
if solved_plain_text[0:5] == b"AAAAA":
break
#generate the next attack array
ints = [ord("A") for i in range(base_attack_array_size + (block_number*block_size) + byte_number)]
attack_array = bytes(ints)
#calculate a list that has all potential plaintexts
# the format of each element in this array is:
# [byte_iterator | blocksize worth of most recent bz-1 solved_plain_text | padding if necessary]
#build the just short array
jsa_solved_plain_text = b""
jsa_padding = b""
if (len(solved_plain_text)) >= block_size:
jsa_solved_plain_text = solved_plain_text[:(block_size-1)]
else:
jsa_solved_plain_text = solved_plain_text
                padding_length = block_size - len(solved_plain_text) - 1
                for i in range(padding_length):
                    jsa_padding += bytes([padding_length])
just_short_array = jsa_solved_plain_text + jsa_padding
just_short_array_bytes_dict = {}
for i in range(0, 127+1):
just_short_array_bytes_dict[i] = bytes([i]) + just_short_array
#now generate the cryptotexts we want to match
crypto_text_candidates = []
for i in range(50):
#if the byte is in the dict, create an entry in the dict of a one-element list
candidate_crypt = oracle_func(
attack_array, plaintext)
if len(candidate_crypt) >= analysis_block * block_size:
#only extract the analysis block from the candidate
entire_candidate_crypt = candidate_crypt
candidate_crypt = candidate_crypt[(analysis_block - 1)*block_size:analysis_block*block_size]
if candidate_crypt not in crypto_text_candidates:
crypto_text_candidates.append(candidate_crypt)
#print(just_short_array_bytes_dict)
#print(crypto_text_candidates)
#now gen a bunch of ciphertexts, looking at the second block and comparing it to our crypto_text_candidates
attack_count = 1
solved_byte = None
while True:
if attack_count > block_size*3:
print("error, force breaking out of byte decryption attack loop, and exiting")
exit(1)
break
elif solved_byte is not None:
break
for element in just_short_array_bytes_dict:
if solved_byte is not None:
break
test_case = just_short_array_bytes_dict[element]
#gen a bunch of ciphers...
ciphers = []
for c in range(50):
intz = \
[ord("A") for lol in range(attack_count)]
ciph = oracle_func(bytes(intz) + test_case, plaintext)
if ciph not in ciphers:
ciphers.append(ciph)
#compare generated ciphers with the crypto candidates. The intersection will reveal the next byte.
for c in ciphers:
if c[block_size:block_size*2] in crypto_text_candidates:
solved_byte = test_case[0]
break
attack_count += 1
solved_plain_text = bytes([solved_byte]) + solved_plain_text
print("\nsolved plaintext so far: " + str(solved_plain_text))
return solved_plain_text.decode("ascii").lstrip("A")
if __name__ == '__main__':
global global_key
global_key = generateRandom16bytes()
b64_unknown_string = """Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkg
aGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBq
dXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUg
YnkK""".replace("\n", "")
#prep the plaintext, though we don't want to know what it is yet
#(we are going to use the oracle to crack encrypted versions of the plaintext)
unknown_string = base64.b64decode(b64_unknown_string)
challenge_plaintext = bytes(unknown_string)
solved = crack_ecb(ecb_oracle, challenge_plaintext)
print("----------------------")
print(solved)
|
from gi.repository import Gtk
import time
class TreeviewColumn(object):
def __init__(self, column_name, ordernum, hidden=True, fixed_size=False):
self.column_name=column_name
self.ordernum=ordernum
self.hidden=hidden
self.fixed_size=fixed_size
def add_column_to_treeview(columnname,counter,hidden,fixed_size=False):
column=Gtk.TreeViewColumn(columnname)
    if hidden:
column.set_visible(False)
renderer=Gtk.CellRendererText()
column.pack_start(renderer,True)
column.add_attribute(renderer, "text", counter)
    if fixed_size:
column.set_fixed_width(50)
return column
def show_info_dialog(message):
info_dialog = Gtk.MessageDialog(None, 0, Gtk.MessageType.INFO, Gtk.ButtonsType.OK, message)
info_dialog.run()
info_dialog.destroy()
class DateWidget(Gtk.Grid):
def __init__(self, day_text_entry, month_text_entry, year_text_entry):
Gtk.Grid.__init__(self)
self.day_text_entry=day_text_entry
self.month_text_entry=month_text_entry
self.year_text_entry=year_text_entry
self.create_date_grid(True)
def create_date_grid(self, show_calendar=False):
self.set_column_spacing(5)
self.day_text_entry.set_max_length(2)
self.day_text_entry.set_width_chars(2)
self.attach(self.day_text_entry,0,0,1,1)
self.month_text_entry.set_max_length(2)
self.month_text_entry.set_width_chars(2)
self.attach_next_to(self.month_text_entry, self.day_text_entry, Gtk.PositionType.RIGHT, 1, 1)
self.year_text_entry.set_max_length(4)
self.year_text_entry.set_width_chars(4)
self.attach_next_to(self.year_text_entry, self.month_text_entry, Gtk.PositionType.RIGHT, 1, 1)
self.attach(Gtk.Label("DD"),0,1,1,1)
self.attach(Gtk.Label("MM"),1,1,1,1)
self.attach(Gtk.Label("YYYY"),2,1,1,1)
self.set_hexpand(False)
        if show_calendar:
pick_date_button=Gtk.Button("Pick date")
self.attach(pick_date_button,3,0,1,1)
pick_date_button.connect("clicked", self.show_calendar)
def show_calendar(self, widget ):
self.calendar_window=Gtk.Dialog()
self.calendar_window.action_area.hide()
self.calendar_window.set_decorated(False)
self.calendar_window.set_property('skip-taskbar-hint', True)
self.calendar_window.set_size_request(200,200)
calendar=Gtk.Calendar()
calendar.connect('day-selected-double-click', self.day_selected, None)
self.calendar_window.vbox.pack_start(calendar, True, True, 0)
calendar.show()
self.calendar_window.run()
def day_selected(self, calendar, event):
(year,month,day)=calendar.get_date()
month=month+1
self.day_text_entry.set_text("%s" % day)
self.month_text_entry.set_text("%s" % month)
self.year_text_entry.set_text("%s" % year)
self.calendar_window.destroy()
def set_date_from_string(self, date_as_string):
tm=time.strptime(date_as_string, "%Y-%m-%d")
self.day_text_entry.set_text("%s" % tm.tm_mday)
self.month_text_entry.set_text("%s" % tm.tm_mon)
self.year_text_entry.set_text("%s" % tm.tm_year)
class TextViewWidget(Gtk.Grid):
def __init__(self, textview, model_text=None):
Gtk.Grid.__init__(self)
self.textview=textview
self.textview.set_wrap_mode(Gtk.WrapMode.WORD)
self.model_text=model_text
self.create_textview_widget()
def create_textview_widget(self):
scrolledwindow= Gtk.ScrolledWindow()
scrolledwindow.set_hexpand(True)
scrolledwindow.set_vexpand(True)
        if self.model_text is not None:
self.textview.get_buffer().set_text(self.model_text)
scrolledwindow.add(self.textview)
self.attach(scrolledwindow,0,1,1,1)
def get_textview_text(self):
textbuffer=self.textview.get_buffer()
return textbuffer.get_text(textbuffer.get_start_iter(),textbuffer.get_end_iter(),True)
def set_text(self, text):
        if text is not None:
textbuffer=self.textview.get_buffer()
textbuffer.set_text("%s" % text)
|
import logging
import os
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG)
sh.setFormatter(logging.Formatter("%(levelname)s\t%(filename)s\t%(message)s"))
logger = logging.getLogger()
logger.addHandler(sh)
logger.setLevel(logging.DEBUG)
_FILE_NAME = "windninja.log"
PRETTY_PRINT_JOB = False
def enable_file(folder, level):
fh = logging.FileHandler(os.path.join(folder, _FILE_NAME))
fh.setLevel(level)
fh.setFormatter(
logging.Formatter("%(asctime)s\t%(levelname)s\t%(filename)s\t%(message)s")
)
logger.addHandler(fh)
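# A minimal usage sketch (the folder path and level are hypothetical):
#
#   enable_file("/var/log/windninja", logging.INFO)
#   logger.info("file logging enabled")  # now also written to windninja.log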
|
def correctScholarships(bestStudents, scholarships, allStudents):
return set(bestStudents) <= set(scholarships) < set(allStudents)
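# The check reads: every best student received a scholarship, and scholarships
# went only to known students, with at least one student left out (proper
# subset). A hypothetical example:
#   correctScholarships(["ann"], ["ann", "bob"], ["ann", "bob", "eve"])  # True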
|
from setuptools import setup, find_packages
# pip's internal parse_requirements API was removed in pip 10; reading the
# requirements file directly is the portable alternative.
with open('requirements.txt') as f:
    requirements = [line.strip() for line in f if line.strip() and not line.startswith('#')]
setup(
name='project_generator',
version='0.6.0-alpha',
description='Project generators for various embedded tools (IDE). IAR, uVision, Makefile and many more in the roadmap!',
author='Martin Kojtal, Matthew Else',
author_email='c0170@rocketmail.com, matthewelse1997@gmail.com',
keywords="c cpp project generator embedded",
url="https://github.com/0xc0170/project_generator",
classifiers= [
"Development Status :: 3 - Alpha",
"Environment :: Console",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Software Development"
],
packages=find_packages(),
entry_points={
'console_scripts': [
"project_generator=project_generator.main:main",
"pgen=project_generator.main:main",
]
},
install_requires = requirements,
include_package_data = True,
)
|
import datetime
import subprocess
import mock
import unittest
import rev2
from api.models import HouseCode
class Base(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
class BackgroundPollerTest(Base):
# @mock.patch('subprocess.Popen')
# def test_start_is_successful(self, mock_subprocess_popen):
# mock_rev2_interface = mock.Mock(POLLING_FREQUENCY=mock.Mock())
# mock_house_codes = house_codes=[mock.Mock()]
# background_poller = rev2.BackgroundPoller(mock_house_codes)
# background_poller.start()
# mock_subprocess_popen.assert_called_once_with(['python', 'manage.py', 'start_polling'])
def test_debug(self):
mock_house_code = mock.Mock(code='FA-32')
bg_poller = rev2.BackgroundPoller(house_codes=[mock_house_code])
# removes house code from existing polling patterns
mock_remove_house_code = mock.Mock()
bg_poller.remove_house_code = mock_remove_house_code
# creates a new debug polling pattern
mock_create_debug_polling_pattern = mock.Mock()
mock_polling_pattern = mock.Mock()
mock_create_debug_polling_pattern.return_value = mock_polling_pattern
bg_poller.create_debug_polling_pattern = mock_create_debug_polling_pattern
# adds the polling pattern to the bg poller
mock_add_polling_pattern = mock.Mock()
bg_poller.add_polling_pattern = mock_add_polling_pattern
# restarts the bg poller
mock_stop_bg_poller = mock.Mock()
mock_start_bg_poller = mock.Mock()
bg_poller.stop = mock_stop_bg_poller
bg_poller.start = mock_start_bg_poller
bg_poller.debug(mock_house_code)
mock_remove_house_code.assert_called_once_with(mock_house_code)
mock_create_debug_polling_pattern.assert_called_once_with(mock_house_code)
mock_add_polling_pattern.assert_called_once_with(mock_polling_pattern)
mock_stop_bg_poller.assert_called_once_with()
mock_start_bg_poller.assert_called_once_with()
def test_remove_house_code(self):
mock_house_code = mock.Mock(code='FA-32')
bg_poller = rev2.BackgroundPoller(house_codes=[mock_house_code])
bg_poller.remove_house_code(mock_house_code)
self.assertNotIn(mock_house_code, bg_poller.polling_patterns[0].house_codes)
@mock.patch('rev2.DebugPollingPattern')
def test_create_debug_polling_pattern(self, mock_DebugPollingPattern):
mock_house_code = mock.Mock(code='FA-32')
bg_poller = rev2.BackgroundPoller(house_codes=[mock_house_code])
mock_debug_polling_pattern = mock.Mock()
mock_DebugPollingPattern.return_value = mock_debug_polling_pattern
polling_pattern = bg_poller.create_debug_polling_pattern(mock_house_code)
self.assertEqual(mock_debug_polling_pattern, polling_pattern)
def test_add_polling_pattern(self):
mock_polling_pattern = mock.Mock()
bg_poller = rev2.BackgroundPoller(house_codes=[])
bg_poller.add_polling_pattern(mock_polling_pattern)
self.assertEqual(bg_poller.polling_patterns[1], mock_polling_pattern)
class PollResponseTest(Base):
@mock.patch('api.models.HouseCode')
def test_main(self, mock_HouseCode):
mock_house_code = mock.Mock(code='FA-32')
mock_HouseCode.return_value = mock_house_code
poll_response = rev2.PollResponse("'*' FA-32 FA-32 true|true|1+15 1+100 1+100 true|10|0 nzcrc")
self.assertEqual(poll_response.house_code, mock_house_code)
self.assertEqual(poll_response.window, 'open')
self.assertEqual(poll_response.switch, 'on')
self.assertEqual(poll_response.relative_humidity, 30)
self.assertEqual(poll_response.temperature_ds18b20, 50)
self.assertEqual(poll_response.temperature_opentrv, 25)
self.assertEqual(poll_response.synchronising, 'on')
self.assertEqual(poll_response.ambient_light, 38)
class RandomPollResponseGeneratorTest(Base):
@mock.patch('api.models.HouseCode.generate_random_house_code')
@mock.patch('rev2.PollResponse')
def test_generates_a_random_house_code_if_not_given_one(self, mock_poll_response, mock_house_code_generator):
mock_house_code_generator.return_value = 'FA-32'
poll_response = mock.Mock()
mock_poll_response.return_value = poll_response
output = rev2.Rev2EmulatorInterface().generate_random_poll_response(house_code=None)
self.assertEqual(output, poll_response)
mock_house_code_generator.assert_called_once_with()
class PollAndCommandTest(Base):
def test_poll_and_command_initialises_correctly(self):
mock_house_code = mock.Mock()
mock_house_code.code = 'FA-32'
mock_house_code.rad_open_percent = 50
mock_house_code.light_colour = 2
mock_house_code.light_on_time = 30
mock_house_code.light_flash = 1
mock_house_code.last_updated_all = datetime.datetime.now()
poll_and_command = rev2.PollAndCommand(house_code=mock_house_code, set_led_settings=True)
self.assertEqual(poll_and_command.__str__(), "'?' FA-32 FA-32 1+50 1|1|2 1 1 nzcrc")
|
import logging
from flask import jsonify, request, redirect, send_file
import flask_login
from mediacloud.error import MCException
import tempfile
import json
import os
import csv
import io
import zipfile
from server import app, auth, mc, user_db
from server.auth import user_mediacloud_client, user_name, user_is_admin
from server.util.request import api_error_handler, form_fields_required, arguments_required, json_error_response
from server.views.topics.topiclist import topics_user_can_access
logger = logging.getLogger(__name__)
AUTH_MANAGEMENT_DOMAIN = 'https://tools.mediacloud.org' # because it is too hard to tell which site you are on
ACTIVATION_URL = AUTH_MANAGEMENT_DOMAIN + "/api/user/activate/confirm"
PASSWORD_RESET_URL = AUTH_MANAGEMENT_DOMAIN + "/api/user/reset-password-request-receive"
def _create_user_session(user_results):
    if not isinstance(user_results, dict):
        user_results = user_results.get_properties()
    if 'error' in user_results:
        return json_error_response(user_results['error'], 401)
    # HACK: the API used to return this as true/false, but now returns it as 1 or 0, so we coerce it to
    # a boolean here so we don't have to change front-end JS logic
    user_results['profile']['has_consented'] = (user_results['profile']['has_consented'] == 1) or \
                                               (user_results['profile']['has_consented'] is True)
    merged_user_info = user_results['profile'].copy()  # start with the profile's keys and values
    user = auth.create_user(merged_user_info)
    return user
@app.route('/api/login', methods=['POST'])
@form_fields_required('email', 'password')
@api_error_handler
def login_with_password():
username = request.form["email"]
logger.debug("login request from %s", username)
password = request.form["password"]
# try to log them in
results = mc.authLogin(username, password)
user = _create_user_session(results)
logger.debug(" succeeded - got a key (user.is_anonymous=%s)", user.is_anonymous)
auth.login_user(user)
return jsonify(user.get_properties())
@app.route('/api/login-with-cookie')
@api_error_handler
def login_with_cookie():
cached_user = flask_login.current_user
if cached_user.is_anonymous: # no user session
logger.debug(" login failed (%s)", cached_user.is_anonymous)
return json_error_response("Login failed", 401)
user = _create_user_session(cached_user)
return jsonify(user.get_properties())
@app.route('/api/user/signup', methods=['POST'])
@form_fields_required('email', 'password', 'fullName', 'notes', 'has_consented')
@api_error_handler
def signup():
logger.debug("reg request from %s", request.form['email'])
results = mc.authRegister(request.form['email'],
request.form['password'],
request.form['fullName'],
request.form['notes'],
ACTIVATION_URL,
bool(request.form['has_consented'] == 'true') if 'has_consented' in request.form else False,
)
return jsonify(results)
@app.route('/api/user/activate/confirm', methods=['GET'])
@arguments_required('email', 'activation_token')
def activation_confirm():
logger.debug("activation request from %s", request.args['email'])
try:
results = mc.authActivate(request.args['email'], request.args['activation_token'])
if results['success'] == 1:
redirect_to_return = redirect(AUTH_MANAGEMENT_DOMAIN + '/#/user/activated?success=1')
else:
redirect_to_return = redirect(AUTH_MANAGEMENT_DOMAIN + '/#/user/activated?success=0&msg=' +
results['error'])
except MCException as mce:
# this is long stack trace so we have to trim it for url length support
redirect_to_return = redirect(AUTH_MANAGEMENT_DOMAIN + '/#/user/activated?success=0&msg=' + str(mce)[:300])
return redirect_to_return
@app.route('/api/user/activation/resend', methods=['POST'])
@form_fields_required('email')
@api_error_handler
def activation_resend():
email = request.form['email']
logger.debug("activation request from %s", email)
results = mc.authResendActivationLink(email, ACTIVATION_URL)
return jsonify(results)
@app.route('/api/user/reset-password-request', methods=['POST'])
@form_fields_required('email')
@api_error_handler
def request_password_reset():
logger.debug("request password reset from %s", request.form['email'])
results = mc.authSendPasswordResetLink(request.form["email"], PASSWORD_RESET_URL)
return jsonify(results)
@app.route('/api/user/reset-password-request-receive', methods=['GET'])
@arguments_required('email', 'password_reset_token')
@api_error_handler
def request_password_reset_receive():
redirect_to_return = redirect(AUTH_MANAGEMENT_DOMAIN +
'/#/user/reset-password?email={}&password_reset_token={}'.format(
request.args['email'], request.args['password_reset_token']))
return redirect_to_return
@app.route('/api/user/reset-password', methods=['POST'])
@form_fields_required('email', 'password_reset_token', 'new_password')
@api_error_handler
def reset_password():
logger.debug("reset password for %s", request.form['email'])
results = mc.authResetPassword(request.form["email"], request.form['password_reset_token'],
request.form['new_password'])
return jsonify(results)
@app.route('/api/user/change-password', methods=['POST'])
@form_fields_required('old_password', 'new_password')
@flask_login.login_required
@api_error_handler
def change_password():
user_mc = user_mediacloud_client()
results = {}
try:
results = user_mc.authChangePassword(request.form['old_password'], request.form['new_password'])
except MCException as e:
logger.exception(e)
if 'Unable to change password' in e.message:
if 'Old password is incorrect' in e.message or 'Unable to log in with old password' in e.message:
return json_error_response('Unable to change password - old password is incorrect')
if 'not found or password is incorrect' in e.message:
return json_error_response('Unable to change password - user not found or password is incorrect')
else:
return json_error_response('Unable to change password - see log for more details')
return jsonify(results)
@app.route('/api/user/reset-api-key', methods=['POST'])
@flask_login.login_required
@api_error_handler
def reset_api_key():
user_mc = user_mediacloud_client()
results = user_mc.authResetApiKey()
flask_login.current_user.profile = results['profile'] # update server api key too
return jsonify(results)
@app.route('/api/user/logout')
def logout():
flask_login.logout_user()
return redirect("/")
@app.route('/api/user/delete', methods=['POST'])
@form_fields_required('email')
@api_error_handler
@flask_login.login_required
def api_user_delete():
email = request.form['email']
user = flask_login.current_user
if email == user.name: # double-check confirmation they typed in
# delete them from the front-end system database
user_db.delete_user(user.name)
# delete them from the back-end system
results = mc.userDelete(user.profile['auth_users_id']) # need to do this with the tool's admin account
try:
if ('success' in results) and (results['success'] == 1):
return logout()
else:
return json_error_response("We failed to delete your account, sorry!", 400)
except MCException as mce:
logger.exception(mce)
return json_error_response("We failed to delete your account, sorry!", 400)
else:
return json_error_response("Your email confirmation didn't match.", 400)
@app.route('/api/user/update', methods=['POST'])
@form_fields_required('full_name', 'notes', 'has_consented')
@api_error_handler
@flask_login.login_required
def api_user_update():
has_consented = request.form['has_consented'] if 'has_consented' in request.form else False
if has_consented == 'null':
has_consented = False
valid_params = {
'full_name': request.form['full_name'],
'notes': request.form['notes'],
'has_consented': has_consented
}
cached_user = flask_login.current_user
# need to update user with the tool admin client, because user doesn't have permission to do this themselves
mc.userUpdate(cached_user.profile['auth_users_id'], **valid_params)
user_mc = user_mediacloud_client()
updated_user = user_mc.userProfile()
cached_user.profile = updated_user
user = _create_user_session(cached_user)
return jsonify(user.get_properties())
@app.route('/api/user/download-data')
@api_error_handler
@flask_login.login_required
def api_user_data_download():
user_mc = user_mediacloud_client()
temp_user_data_dir = _save_user_data_dir(flask_login.current_user, user_mc)
data = _zip_in_memory(temp_user_data_dir) # do this in memory to be extra safe on security
return send_file(data, mimetype='application/zip', as_attachment=True, attachment_filename='mediacloud-data.zip')
def _zip_in_memory(dir_to_zip):
# remember our home dir
old_path = os.getcwd()
os.chdir(dir_to_zip)
# send
data = io.BytesIO()
with zipfile.ZipFile(data, mode='w') as z:
for f_name in os.listdir("."): # doing the whole path switch to make sure the zip folder structure is right
z.write(f_name)
os.unlink(f_name)
    data.seek(0)  # to make sure the file starts at the beginning again, *not* where the zip commands left it
# put us back in the home dir
os.chdir(old_path)
os.rmdir(dir_to_zip)
return data
def _save_user_data_dir(u, user_mc):
# make a dir first (prefix with user_id for extra security)
temp_dir = tempfile.mkdtemp(prefix='user{}'.format(u.profile['auth_users_id']))
# user profile
with open(os.path.join(temp_dir, 'profile.json'), 'w') as outfile:
profile = u.profile
json.dump(profile, outfile)
# topic-level permissions
with open(os.path.join(temp_dir, 'topic-permissions.csv'), 'w') as outfile:
topics = user_mc.topicList(limit=1000)['topics']
user_owned_topics = topics_user_can_access(topics, u.profile['email'], user_is_admin())
topic_permission_list = [{
'topics_id': t['topics_id'],
'topic_name': t['name'],
'permission': t['user_permission'],
} for t in user_owned_topics]
writer = csv.DictWriter(outfile, ['topics_id', 'topic_name', 'permission'])
writer.writeheader()
writer.writerows(topic_permission_list)
# saved searches
with open(os.path.join(temp_dir, 'saved-searches.json'), 'w') as outfile:
search_list = user_db.get_users_lists(u.name, 'searches')
json.dump(search_list, outfile)
# starred sources
with open(os.path.join(temp_dir, 'starred-sources.csv'), 'w') as outfile:
user_favorited = user_db.get_users_lists(user_name(), 'favoriteSources')
media_sources = [user_mc.media(media_id) for media_id in user_favorited]
media_sources = [{
'media_id': m['media_id'],
'name': m['name'],
'url': m['url']
} for m in media_sources]
writer = csv.DictWriter(outfile, ['media_id', 'name', 'url'])
writer.writeheader()
writer.writerows(media_sources)
# starred collections
with open(os.path.join(temp_dir, 'starred-collections.csv'), 'w') as outfile:
user_favorited = user_db.get_users_lists(user_name(), 'favoriteCollections')
collections = [user_mc.tag(tags_id) for tags_id in user_favorited]
collections = [{
'tags_id': c['tags_id'],
'label': c['label'],
'description': c['description']
} for c in collections]
writer = csv.DictWriter(outfile, ['tags_id', 'label', 'description'])
writer.writeheader()
writer.writerows(collections)
# starred topics
with open(os.path.join(temp_dir, 'starred-topics.csv'), 'w') as outfile:
user_favorited = user_db.get_users_lists(user_name(), 'favoriteTopics')
topics = [user_mc.topic(topics_id) for topics_id in user_favorited]
topics = [{
'topics_id': t['topics_id'],
'name': t['name'],
'description': t['description']
} for t in topics]
writer = csv.DictWriter(outfile, ['topics_id', 'name', 'description'])
writer.writeheader()
writer.writerows(topics)
return temp_dir
|
import datetime
import json
import pkg_resources as pkg
import sys
import time
import mock
from oslo_config import cfg
from oslo_log import log as logging
from oslotest import base
import testtools.matchers as ttm
from mistral import context as auth_context
from mistral.db.sqlalchemy import base as db_sa_base
from mistral.db.sqlalchemy import sqlite_lock
from mistral.db.v2 import api as db_api_v2
from mistral.services import action_manager
from mistral.services import security
from mistral.tests import config as test_config
from mistral.utils import inspect_utils as i_utils
from mistral import version
RESOURCES_PATH = 'tests/resources/'
LOG = logging.getLogger(__name__)
test_config.parse_args()
def get_resource(resource_name):
return open(pkg.resource_filename(
version.version_info.package,
RESOURCES_PATH + resource_name)).read()
def get_context(default=True, admin=False):
if default:
return auth_context.MistralContext(
user_id='1-2-3-4',
project_id=security.DEFAULT_PROJECT_ID,
user_name='test-user',
project_name='test-project',
is_admin=admin
)
else:
return auth_context.MistralContext(
user_id='9-0-44-5',
project_id='99-88-33',
user_name='test-user',
project_name='test-another',
is_admin=admin
)
def register_action_class(name, cls, attributes=None, desc=None):
action_manager.register_action_class(
name,
'%s.%s' % (cls.__module__, cls.__name__),
attributes or {},
input_str=i_utils.get_arg_list_as_str(cls.__init__)
)
class FakeHTTPResponse(object):
def __init__(self, text, status_code, reason=None, headers=None,
history=None, encoding='utf8', url='', cookies=None,
elapsed=None):
self.text = text
self.content = text
self.status_code = status_code
self.reason = reason
self.headers = headers or {}
self.history = history
self.encoding = encoding
self.url = url
self.cookies = cookies or {}
self.elapsed = elapsed or datetime.timedelta(milliseconds=123)
def json(self):
return json.loads(self.text)
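# Illustrative use (hypothetical test body): FakeHTTPResponse stands in for a
# requests.Response when mocking HTTP calls, e.g.:
#   resp = FakeHTTPResponse('{"ok": true}', 200)
#   assert resp.status_code == 200 and resp.json() == {'ok': True}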
class BaseTest(base.BaseTestCase):
def assertListEqual(self, l1, l2):
if tuple(sys.version_info)[0:2] < (2, 7):
# for python 2.6 compatibility
self.assertEqual(l1, l2)
else:
super(BaseTest, self).assertListEqual(l1, l2)
def assertDictEqual(self, cmp1, cmp2):
if tuple(sys.version_info)[0:2] < (2, 7):
# for python 2.6 compatibility
self.assertThat(cmp1, ttm.Equals(cmp2))
else:
super(BaseTest, self).assertDictEqual(cmp1, cmp2)
def _assert_single_item(self, items, **props):
return self._assert_multiple_items(items, 1, **props)[0]
def _assert_multiple_items(self, items, count, **props):
def _matches(item, **props):
            for prop_name, prop_val in props.items():
v = item[prop_name] if isinstance(
item, dict) else getattr(item, prop_name)
if v != prop_val:
return False
return True
        # Materialize the matches so the count and the returned value stay
        # consistent (filter() is lazy on Python 3 and would be exhausted by
        # the len() call below).
        filtered_items = [item for item in items if _matches(item, **props)]
        found = len(filtered_items)
if found != count:
LOG.info("[failed test ctx] items=%s, expected_props=%s" % (str(
items), props))
self.fail("Wrong number of items found [props=%s, "
"expected=%s, found=%s]" % (props, count, found))
return filtered_items
def _assert_dict_contains_subset(self, expected, actual, msg=None):
"""Checks whether actual is a superset of expected.
Note: This is almost the exact copy of the standard method
assertDictContainsSubset() that appeared in Python 2.7, it was
added to use it with Python 2.6.
"""
missing = []
mismatched = []
        for key, value in expected.items():
if key not in actual:
missing.append(key)
elif value != actual[key]:
mismatched.append('%s, expected: %s, actual: %s' %
(key, value,
actual[key]))
if not (missing or mismatched):
return
standardMsg = ''
if missing:
standardMsg = 'Missing: %s' % ','.join(m for m in missing)
if mismatched:
if standardMsg:
standardMsg += '; '
standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
self.fail(self._formatMessage(msg, standardMsg))
def _await(self, predicate, delay=1, timeout=60):
"""Awaits for predicate function to evaluate to True.
If within a configured timeout predicate function hasn't evaluated
to True then an exception is raised.
:param predicate: Predication function.
:param delay: Delay in seconds between predicate function calls.
:param timeout: Maximum amount of time to wait for predication
function to evaluate to True.
:return:
"""
end_time = time.time() + timeout
while True:
if predicate():
break
if time.time() + delay > end_time:
raise AssertionError("Failed to wait for expected result.")
time.sleep(delay)
def _sleep(self, seconds):
time.sleep(seconds)
class DbTestCase(BaseTest):
is_heavy_init_called = False
@classmethod
def __heavy_init(cls):
"""Make this method private to prevent extending this one.
It runs heavy_init() only once.
Note: setUpClass() can be used, but it magically is not invoked
from child class in another module.
"""
if not cls.is_heavy_init_called:
cls.heavy_init()
cls.is_heavy_init_called = True
@classmethod
def heavy_init(cls):
"""Runs a long initialization (runs once by class)
and can be extended by child classes.
"""
# If using sqlite, change to memory. The default is file based.
if cfg.CONF.database.connection.startswith('sqlite'):
cfg.CONF.set_default('connection', 'sqlite://', group='database')
cfg.CONF.set_default('max_overflow', -1, group='database')
cfg.CONF.set_default('max_pool_size', 1000, group='database')
db_api_v2.setup_db()
action_manager.sync_db()
def _clean_db(self):
contexts = [
get_context(default=False),
get_context(default=True)
]
for ctx in contexts:
auth_context.set_ctx(ctx)
with mock.patch('mistral.services.security.get_project_id',
new=mock.MagicMock(return_value=ctx.project_id)):
with db_api_v2.transaction():
db_api_v2.delete_executions()
db_api_v2.delete_workbooks()
db_api_v2.delete_cron_triggers()
db_api_v2.delete_workflow_definitions()
db_api_v2.delete_environments()
sqlite_lock.cleanup()
if not cfg.CONF.database.connection.startswith('sqlite'):
db_sa_base.get_engine().dispose()
def setUp(self):
super(DbTestCase, self).setUp()
self.__heavy_init()
self.ctx = get_context()
auth_context.set_ctx(self.ctx)
self.addCleanup(auth_context.set_ctx, None)
self.addCleanup(self._clean_db)
def is_db_session_open(self):
return db_sa_base._get_thread_local_session() is not None
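# Illustrative use of BaseTest._await inside a test (the predicate shown is
# hypothetical): poll every half second until the condition holds, failing
# after ten seconds.
#   self._await(lambda: state.get('done', False), delay=0.5, timeout=10)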
|
from neutron_lib.utils import runtime
from oslo_config import cfg
from oslo_log import log as logging
from oslo_upgradecheck import upgradecheck
CHECKS_ENTRYPOINTS = 'neutron.status.upgrade.checks'
LOG = logging.getLogger(__name__)
def load_checks():
checks = []
ns_plugin = runtime.NamespacedPlugins(CHECKS_ENTRYPOINTS)
    # TODO(slaweq): stop using the private attribute of the
    # runtime.NamespacedPlugins class once it provides a better way to
    # access extensions
for module_name, module in ns_plugin._extensions.items():
try:
project_checks_class = module.entry_point.load()
project_checks = project_checks_class().get_checks()
if project_checks:
checks.append(project_checks)
except Exception as e:
LOG.exception("Checks class %(entrypoint)s failed to load. "
"Error: %(err)s",
{'entrypoint': module_name, 'err': e})
continue
return tuple(checks)
class Checker(upgradecheck.UpgradeCommands):
"""Various upgrade checks should be added as separate methods in this class
and added to _upgrade_checks tuple.
Check methods here must not rely on the neutron object model since they
should be able to run against both N and N-1 releases. Any queries to
the database should be done through the sqlalchemy query language directly
like the database schema migrations.
"""
# The format of the check functions is to return an
# oslo_upgradecheck.upgradecheck.Result
# object with the appropriate
# oslo_upgradecheck.upgradecheck.Code and details set.
# If the check hits warnings or failures then those should be stored
# in the returned Result's "details" attribute. The
# summary will be rolled up at the end of the check() method.
_upgrade_checks = load_checks()
def main():
return upgradecheck.main(
cfg.CONF, project='neutron', upgrade_command=Checker())
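# Illustrative sketch only: ExampleChecks is hypothetical, not shipped with
# neutron, and simply shows the shape load_checks() expects from classes
# registered under the CHECKS_ENTRYPOINTS namespace, following the format
# described in Checker above.
class ExampleChecks(object):
    def _noop_check(self, checker):
        # A check receives the Checker instance and returns a Result.
        return upgradecheck.Result(
            upgradecheck.Code.SUCCESS, 'nothing to check')
    def get_checks(self):
        return [('Noop check', self._noop_check)]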
|
import logging
import concurrent.futures
import time
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import Select, Paragraph
import modules.air
import modules.temperature
import modules.population
import modules.precipitation
from states import NAMES
logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR)
modules = [modules.air.Module(), modules.temperature.Module(), modules.population.Module(), modules.precipitation.Module()]
def fetch_data(state):
"""
Fetch data from BigQuery for the given US state by running
the queries for all dashboard modules in parallel.
"""
t0 = time.time()
# Collect fetch methods for all dashboard modules
fetch_methods = {module.id: getattr(module, 'fetch_data') for module in modules}
# Create a thread pool: one separate thread for each dashboard module
with concurrent.futures.ThreadPoolExecutor(max_workers=len(fetch_methods)) as executor:
# Prepare the thread tasks
tasks = {}
for key, fetch_method in fetch_methods.items():
task = executor.submit(fetch_method, state)
tasks[task] = key
# Run the tasks and collect results as they arrive
results = {}
for task in concurrent.futures.as_completed(tasks):
key = tasks[task]
results[key] = task.result()
# Return results once all tasks have been completed
t1 = time.time()
timer.text = '(Execution time: %s seconds)' % round(t1 - t0, 4)
return results
def update(attrname, old, new):
timer.text = '(Executing %s queries...)' % len(modules)
for module in modules:
getattr(module, 'busy')()
results = fetch_data(new)
for module in modules:
getattr(module, 'update_plot')(results[module.id])
for module in modules:
getattr(module, 'unbusy')()
state = 'California'
state_select = Select(title='Select a state:', value=state, options=NAMES)
state_select.on_change('value', update)
timer = Paragraph()
results = fetch_data(state)
blocks = {}
for module in modules:
block = getattr(module, 'make_plot')(results[module.id])
blocks[module.id] = block
curdoc().add_root(
column(
row(state_select, timer),
row(
column(blocks['modules.air'], blocks['modules.temperature']),
column(blocks['modules.precipitation'], blocks['modules.population']),
)
)
)
curdoc().title = "Dashboard Demo"
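# Illustrative sketch (hypothetical, not one of the imported modules): the
# interface each dashboard module above is assumed to expose, since every
# call goes through getattr on these names.
# class Module(object):
#     id = 'modules.example'        # key used for results and layout blocks
#     def fetch_data(self, state): ...   # run the BigQuery query for a state
#     def make_plot(self, data): ...     # build the initial bokeh block
#     def update_plot(self, data): ...   # refresh the plot with new data
#     def busy(self): ...                # show a loading indicator
#     def unbusy(self): ...              # hide it again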
|
import reversion
from django.contrib.admin import ModelAdmin, register
from django.db import models
from django_handleref.admin import VersionAdmin
from tests.models import HandleRefModel, Org
@reversion.register
class VersionedOrg(HandleRefModel):
name = models.CharField(max_length=255, unique=True)
website = models.URLField(blank=True)
notes = models.TextField(blank=True)
class HandleRef:
tag = "org"
delete_cascade = ["sub_entities"]
def __unicode__(self):
return self.name
@register(VersionedOrg)
class OrgAdmin(VersionAdmin, ModelAdmin):
pass
|
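# Minimal singly-linked list node assumed by the solution below; LeetCode
# predefines ListNode, so this stand-in only makes the snippet self-contained.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next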
class Solution:
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
carry = 0
resultHead = c = ListNode(0)
while l1 or l2 or carry > 0:
num1 = l1.val if l1 else 0
num2 = l2.val if l2 else 0
carry = (num1 + num2 + carry)
c.next = ListNode(carry % 10)
carry = carry // 10
c = c.next
l1 = l1.next if l1 else None
l2 = l2.next if l2 else None
return resultHead.next
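# Quick sanity check (inputs are illustrative): 2->4->3 plus 5->6->4 encodes
# 342 + 465 in reversed digit order and should come back as 7->0->8, i.e. 807.
if __name__ == '__main__':
    def build(digits):
        head = cur = ListNode(digits[0])
        for d in digits[1:]:
            cur.next = ListNode(d)
            cur = cur.next
        return head
    node = Solution().addTwoNumbers(build([2, 4, 3]), build([5, 6, 4]))
    out = []
    while node:
        out.append(node.val)
        node = node.next
    print(out)  # expected: [7, 0, 8]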
|
import people.people_class as p
from inventory.inventory_list import inventory_list
from weapons.weapon_list import weapon_list
person = p.people()
person.name = 'skellington_1'
person.health = 2
person.descript = 'This is a spooky scary skellington'
person.weapon = weapon_list['sword']
person.armor = 0
person.hostile = 1
skellington_1 = person  # change this to be the person that you want
|
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=8, path_list=[
[TestAction.create_vm, 'vm1', ],
[TestAction.create_volume, 'volume1', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume, 'volume2', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume2'],
[TestAction.create_volume, 'volume3', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume3'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot5'],
[TestAction.create_volume_snapshot, 'vm1-root', 'vm1-root-snapshot9'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot10'],
[TestAction.create_volume_snapshot, 'volume2', 'volume2-snapshot14'],
[TestAction.create_volume_snapshot, 'volume2', 'volume2-snapshot15'],
[TestAction.create_volume_snapshot, 'volume1', 'volume1-snapshot16'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot17'],
[TestAction.stop_vm, 'vm1'],
[TestAction.use_volume_snapshot, 'volume2-snapshot1'],
[TestAction.start_vm, 'vm1'],
[TestAction.stop_vm, 'vm1'],
[TestAction.use_vm_snapshot, 'vm1-snapshot1'],
[TestAction.start_vm, 'vm1'],
[TestAction.stop_vm, 'vm1'],
[TestAction.reinit_vm, 'vm1'],
[TestAction.delete_vm_snapshot, 'vm1-snapshot1'],
[TestAction.delete_volume_snapshot, 'vm1-root-snapshot9'],
[TestAction.batch_delete_snapshots, ['volume2-snapshot10','volume3-snapshot10',]],
])
'''
The final status:
Running:[]
Stopped:['vm1']
Enabled:['vm1-snapshot5', 'volume1-snapshot5', 'volume2-snapshot5', 'volume3-snapshot5', 'vm1-snapshot10', 'volume1-snapshot10', 'volume2-snapshot14', 'volume2-snapshot15', 'volume1-snapshot16', 'vm1-snapshot17', 'volume1-snapshot17', 'volume2-snapshot17', 'volume3-snapshot17']
attached:['volume1', 'volume2', 'volume3']
Detached:[]
Deleted:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1', 'vm1-root-snapshot9', 'volume2-snapshot10', 'volume3-snapshot10']
Expunged:[]
Ha:[]
Group:
vm_snap2:['vm1-snapshot5', 'volume1-snapshot5', 'volume2-snapshot5', 'volume3-snapshot5']---vm1volume1_volume2_volume3
vm_snap4:['vm1-snapshot17', 'volume1-snapshot17', 'volume2-snapshot17', 'volume3-snapshot17']---vm1volume1_volume2_volume3
'''
|
from dev_tools import incremental_coverage
def test_determine_ignored_lines():
f = incremental_coverage.determine_ignored_lines
assert f("a = 0 # coverage: ignore") == {1}
assert (
f(
"""
a = 0 # coverage: ignore
b = 0
"""
)
== {2}
)
assert (
f(
"""
a = 0
b = 0 # coverage: ignore
"""
)
== {3}
)
assert (
f(
"""
a = 0 # coverage: ignore
b = 0 # coverage: ignore
"""
)
== {2, 3}
)
assert (
f(
"""
if True:
a = 0 # coverage: ignore
b = 0
"""
)
== {3}
)
assert (
f(
"""
if True:
# coverage: ignore
a = 0
b = 0
"""
)
== {3, 4, 5, 6, 7}
)
assert (
f(
"""
if True:
# coverage: ignore
a = 0
b = 0
stop = 1
"""
)
== {3, 4, 5, 6}
)
assert (
f(
"""
if True:
# coverage: ignore
a = 0
b = 0
else:
c = 0
"""
)
== {3, 4, 5, 6}
)
assert (
f(
"""
if True:
while False:
# coverage: ignore
a = 0
b = 0
else:
c = 0 # coverage: ignore
"""
)
== {4, 5, 6, 9}
)
assert (
f(
"""
a = 2#coverage:ignore
a = 3 #coverage:ignore
a = 4# coverage:ignore
a = 5#coverage :ignore
a = 6#coverage: ignore
a = 7#coverage: ignore\t
a = 8#coverage:\tignore\t
b = 1 # no cover
b = 2 # coverage: definitely
b = 3 # lint: ignore
"""
)
== {2, 3, 4, 5, 6, 7, 8}
)
assert (
f(
"""
if TYPE_CHECKING:
import cirq
import foo
def bar(a: 'cirq.Circuit'):
pass
"""
)
== {2, 3, 4}
)
|
from cdn.storage import base
class ServicesController(base.ServicesBase):
def list(self):
services = {
"links": [
{
"rel": "next",
"href": "/v1.0/services?marker=www.myothersite.com&limit=20"
}
],
"services" : [
{
"domains": [
{
"domain": "www.mywebsite.com"
}
],
"origins": [
{
"origin": "mywebsite.com",
"port": 80,
"ssl": False
}
],
"caching": [
{ "name" : "default", "ttl" : 3600 },
{
"name" : "home",
"ttl" : 17200,
"rules" : [
{ "name" : "index", "request_url" : "/index.htm" }
]
},
{
"name" : "images",
"ttl" : 12800,
"rules" : [
{ "name" : "images", "request_url" : "*.png" }
]
}
],
"restrictions" : [
{
"name" : "website only",
"rules" : [ { "name" : "mywebsite.com", "http_host" : "www.mywebsite.com" } ]
}
],
"links" : [
{
"href": "/v1.0/services/mywebsite",
"rel" : "self"
}
]
}
]
}
return services
def get(self):
# get the requested service from storage
print "get service"
def create(self, service_name, service_json):
# create the service in storage
service = service_json
# create at providers
return super(ServicesController, self).create(service_name, service)
def update(self, service_name, service_json):
# update configuration in storage
# update at providers
return super(ServicesController, self).update(service_name, service_json)
def delete(self, service_name):
# delete local configuration from storage
# delete from providers
return super(ServicesController, self).delete(service_name)
|
"""Random agent for running against DM Lab2D environments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import numpy as np
import pygame
import dmlab2d
from dmlab2d import runfiles_helper
def _make_int32_distribution(random, minimum, maximum):
def function():
return random.randint(minimum, maximum + 1)
return function
def _make_float64_distribution(random, minimum, maximum):
def function():
return random.uniform(minimum, maximum)
return function
class PyGameRandomAgent(object):
"""Random agent works with int32 or float64 bounded actions."""
def __init__(self, action_spec, observation_name, observation_spec, seed,
scale):
"""Create a PyGame agent.
Args:
action_spec: Environment action spec used to generate random actions.
observation_name: Name of observation to render each frame.
observation_spec: Environment observation spec for creating PyGame window.
seed: Agent seed used for generating random actions.
scale: Scales screen.
"""
self._observation_name = observation_name
random = np.random.RandomState(seed)
self._actions = []
self._scores = []
self._scale = scale
for name, spec in action_spec.items():
if spec.dtype == np.dtype('int32'):
self._actions.append(
(name, _make_int32_distribution(random, spec.minimum,
spec.maximum)))
elif spec.dtype == np.dtype('float64'):
self._actions.append(
(name, _make_float64_distribution(random, spec.minimum,
spec.maximum)))
else:
print("Warning '{}' is not supported".format(spec))
obs_spec = observation_spec[observation_name]
self._setup_py_game(obs_spec.shape)
def _setup_py_game(self, shape):
pygame.init()
pygame.display.set_caption('DM Lab2d')
self._game_display = pygame.display.set_mode(
(int(shape[1] * self._scale), int(shape[0] * self._scale)))
def _render_observation(self, observation):
obs = np.transpose(observation, (1, 0, 2))
surface = pygame.surfarray.make_surface(obs)
rect = surface.get_rect()
surf = pygame.transform.scale(
surface, (int(rect[2] * self._scale), int(rect[3] * self._scale)))
self._game_display.blit(surf, dest=(0, 0))
pygame.display.update()
def step(self, timestep):
"""Renders timestep and returns random actions according to spec."""
self._render_observation(timestep.observation[self._observation_name])
display_score_dirty = False
if timestep.reward is not None:
if timestep.reward != 0:
self._scores[-1] += timestep.reward
display_score_dirty = True
else:
self._scores.append(0)
display_score_dirty = True
if display_score_dirty:
pygame.display.set_caption('%d score' % self._scores[-1])
return {name: gen() for name, gen in self._actions}
def print_stats(self):
print('Scores: ' + ', '.join(str(score) for score in self._scores))
def _create_environment(args):
"""Creates an environment.
Args:
args: See `main()` for description of args.
Returns:
dmlab2d.Environment with one observation.
"""
args.settings['levelName'] = args.level_name
lab2d = dmlab2d.Lab2d(runfiles_helper.find(), args.settings)
return dmlab2d.Environment(lab2d, [args.observation], args.env_seed)
def _run(args):
"""Runs a random agent against an environment rendering the results.
Args:
args: See `main()` for description of args.
"""
env = _create_environment(args)
agent = PyGameRandomAgent(env.action_spec(), args.observation,
env.observation_spec(), args.agent_seed, args.scale)
for _ in range(args.num_episodes):
timestep = env.reset()
# Run single episode.
while True:
# Query PyGame for early termination.
if any(event.type == pygame.QUIT for event in pygame.event.get()):
        print('Exiting early; the last score may be truncated:')
agent.print_stats()
return
action = agent.step(timestep)
timestep = env.step(action)
if timestep.last():
# Observe last frame of episode.
agent.step(timestep)
break
# All episodes completed, report per episode.
agent.print_stats()
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--level_name', type=str, default='clean_up', help='Level name to load')
parser.add_argument(
'--observation',
type=str,
default='WORLD.RGB',
help='Observation to render')
parser.add_argument(
'--settings', type=json.loads, default={}, help='Settings as JSON string')
parser.add_argument(
'--env_seed', type=int, default=0, help='Environment seed')
parser.add_argument('--agent_seed', type=int, default=0, help='Agent seed')
parser.add_argument(
'--num_episodes', type=int, default=1, help='Number of episodes')
parser.add_argument(
'--scale', type=float, default=1, help='Scale to render screen')
args = parser.parse_args()
_run(args)
if __name__ == '__main__':
main()
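# Typical invocation (the script name is illustrative; the flags match the
# argparse definitions above):
#   python random_agent.py --level_name=clean_up --observation=WORLD.RGB \
#       --num_episodes=2 --scale=2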
|
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.resources.types import domain_category
from google.ads.googleads.v8.services.types import domain_category_service
from .transports.base import DomainCategoryServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import DomainCategoryServiceGrpcTransport
class DomainCategoryServiceClientMeta(type):
"""Metaclass for the DomainCategoryService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[DomainCategoryServiceTransport]]
_transport_registry["grpc"] = DomainCategoryServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[DomainCategoryServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class DomainCategoryServiceClient(metaclass=DomainCategoryServiceClientMeta):
"""Service to fetch domain categories."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DomainCategoryServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DomainCategoryServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> DomainCategoryServiceTransport:
"""Return the transport used by the client instance.
Returns:
DomainCategoryServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def campaign_path(customer_id: str, campaign_id: str,) -> str:
"""Return a fully-qualified campaign string."""
return "customers/{customer_id}/campaigns/{campaign_id}".format(
customer_id=customer_id, campaign_id=campaign_id,
)
@staticmethod
def parse_campaign_path(path: str) -> Dict[str, str]:
"""Parse a campaign path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/campaigns/(?P<campaign_id>.+?)$",
path,
)
return m.groupdict() if m else {}
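    # Illustrative round trip (the IDs are made up):
    #   campaign_path('123', '456') -> 'customers/123/campaigns/456'
    #   parse_campaign_path('customers/123/campaigns/456')
    #     -> {'customer_id': '123', 'campaign_id': '456'}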
@staticmethod
def domain_category_path(
customer_id: str,
campaign_id: str,
base64_category: str,
language_code: str,
) -> str:
"""Return a fully-qualified domain_category string."""
return "customers/{customer_id}/domainCategories/{campaign_id}~{base64_category}~{language_code}".format(
customer_id=customer_id,
campaign_id=campaign_id,
base64_category=base64_category,
language_code=language_code,
)
@staticmethod
def parse_domain_category_path(path: str) -> Dict[str, str]:
"""Parse a domain_category path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/domainCategories/(?P<campaign_id>.+?)~(?P<base64_category>.+?)~(?P<language_code>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, DomainCategoryServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the domain category service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.DomainCategoryServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, DomainCategoryServiceTransport):
# transport is a DomainCategoryServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = DomainCategoryServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_domain_category(
self,
request: domain_category_service.GetDomainCategoryRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> domain_category.DomainCategory:
r"""Returns the requested domain category.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.GetDomainCategoryRequest`):
The request object. Request message for
[DomainCategoryService.GetDomainCategory][google.ads.googleads.v8.services.DomainCategoryService.GetDomainCategory].
resource_name (:class:`str`):
Required. Resource name of the domain
category to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.resources.types.DomainCategory:
A category generated automatically by
crawling a domain. If a campaign uses
the DynamicSearchAdsSetting, then domain
categories will be generated for the
domain. The categories can be targeted
using WebpageConditionInfo. See:
https://support.google.com/google-
ads/answer/2471185
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a domain_category_service.GetDomainCategoryRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, domain_category_service.GetDomainCategoryRequest
):
request = domain_category_service.GetDomainCategoryRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_domain_category
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("DomainCategoryServiceClient",)
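# Hedged usage sketch (credentials handling elided; the resource name is a
# made-up example of the customers/{cid}/domainCategories/{...} format above):
#   client = DomainCategoryServiceClient(credentials=creds)
#   category = client.get_domain_category(
#       resource_name='customers/123/domainCategories/456~YWJj~en')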
|
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.vision_v1p3beta1.types import product_search_service
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-vision",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class ProductSearchTransport(abc.ABC):
"""Abstract transport class for ProductSearch."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision",
)
DEFAULT_HOST: str = "vision.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_product_set: gapic_v1.method.wrap_method(
self.create_product_set,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.list_product_sets: gapic_v1.method.wrap_method(
self.list_product_sets,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.get_product_set: gapic_v1.method.wrap_method(
self.get_product_set,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.update_product_set: gapic_v1.method.wrap_method(
self.update_product_set,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.delete_product_set: gapic_v1.method.wrap_method(
self.delete_product_set,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.create_product: gapic_v1.method.wrap_method(
self.create_product,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.list_products: gapic_v1.method.wrap_method(
self.list_products,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.get_product: gapic_v1.method.wrap_method(
self.get_product,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.update_product: gapic_v1.method.wrap_method(
self.update_product,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.delete_product: gapic_v1.method.wrap_method(
self.delete_product,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.create_reference_image: gapic_v1.method.wrap_method(
self.create_reference_image,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.delete_reference_image: gapic_v1.method.wrap_method(
self.delete_reference_image,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.list_reference_images: gapic_v1.method.wrap_method(
self.list_reference_images,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.get_reference_image: gapic_v1.method.wrap_method(
self.get_reference_image,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.add_product_to_product_set: gapic_v1.method.wrap_method(
self.add_product_to_product_set,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.remove_product_from_product_set: gapic_v1.method.wrap_method(
self.remove_product_from_product_set,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.list_products_in_product_set: gapic_v1.method.wrap_method(
self.list_products_in_product_set,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.import_product_sets: gapic_v1.method.wrap_method(
self.import_product_sets,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def operations_client(self):
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def create_product_set(
self,
) -> Callable[
[product_search_service.CreateProductSetRequest],
Union[
product_search_service.ProductSet,
Awaitable[product_search_service.ProductSet],
],
]:
raise NotImplementedError()
@property
def list_product_sets(
self,
) -> Callable[
[product_search_service.ListProductSetsRequest],
Union[
product_search_service.ListProductSetsResponse,
Awaitable[product_search_service.ListProductSetsResponse],
],
]:
raise NotImplementedError()
@property
def get_product_set(
self,
) -> Callable[
[product_search_service.GetProductSetRequest],
Union[
product_search_service.ProductSet,
Awaitable[product_search_service.ProductSet],
],
]:
raise NotImplementedError()
@property
def update_product_set(
self,
) -> Callable[
[product_search_service.UpdateProductSetRequest],
Union[
product_search_service.ProductSet,
Awaitable[product_search_service.ProductSet],
],
]:
raise NotImplementedError()
@property
def delete_product_set(
self,
) -> Callable[
[product_search_service.DeleteProductSetRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def create_product(
self,
) -> Callable[
[product_search_service.CreateProductRequest],
Union[
product_search_service.Product, Awaitable[product_search_service.Product]
],
]:
raise NotImplementedError()
@property
def list_products(
self,
) -> Callable[
[product_search_service.ListProductsRequest],
Union[
product_search_service.ListProductsResponse,
Awaitable[product_search_service.ListProductsResponse],
],
]:
raise NotImplementedError()
@property
def get_product(
self,
) -> Callable[
[product_search_service.GetProductRequest],
Union[
product_search_service.Product, Awaitable[product_search_service.Product]
],
]:
raise NotImplementedError()
@property
def update_product(
self,
) -> Callable[
[product_search_service.UpdateProductRequest],
Union[
product_search_service.Product, Awaitable[product_search_service.Product]
],
]:
raise NotImplementedError()
@property
def delete_product(
self,
) -> Callable[
[product_search_service.DeleteProductRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def create_reference_image(
self,
) -> Callable[
[product_search_service.CreateReferenceImageRequest],
Union[
product_search_service.ReferenceImage,
Awaitable[product_search_service.ReferenceImage],
],
]:
raise NotImplementedError()
@property
def delete_reference_image(
self,
) -> Callable[
[product_search_service.DeleteReferenceImageRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def list_reference_images(
self,
) -> Callable[
[product_search_service.ListReferenceImagesRequest],
Union[
product_search_service.ListReferenceImagesResponse,
Awaitable[product_search_service.ListReferenceImagesResponse],
],
]:
raise NotImplementedError()
@property
def get_reference_image(
self,
) -> Callable[
[product_search_service.GetReferenceImageRequest],
Union[
product_search_service.ReferenceImage,
Awaitable[product_search_service.ReferenceImage],
],
]:
raise NotImplementedError()
@property
def add_product_to_product_set(
self,
) -> Callable[
[product_search_service.AddProductToProductSetRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def remove_product_from_product_set(
self,
) -> Callable[
[product_search_service.RemoveProductFromProductSetRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def list_products_in_product_set(
self,
) -> Callable[
[product_search_service.ListProductsInProductSetRequest],
Union[
product_search_service.ListProductsInProductSetResponse,
Awaitable[product_search_service.ListProductsInProductSetResponse],
],
]:
raise NotImplementedError()
@property
def import_product_sets(
self,
) -> Callable[
[product_search_service.ImportProductSetsRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
__all__ = ("ProductSearchTransport",)
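# Note on the retry policy above (standard google.api_core.retry.Retry
# behavior): calls are retried with exponential backoff starting at 0.1s,
# multiplied by 1.3 per attempt and capped at 60s, until the 600s deadline;
# only exception types named in the predicate trigger a retry, so methods
# configured with an empty if_exception_type() never retry.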
|
"""
Downloads and extracts an archive based on a provided manifest.
"""
from __future__ import print_function
import locale
import os
import signal
import sys
import threading
import time
import json
import tempfile
import tarfile
import hashlib
from etaprogress.progress import ProgressBarWget
import requests
def error(message, code=1):
"""Prints an error message to stderr and exits with a status of 1 by default."""
if message:
print('ERROR: {0}'.format(message), file=sys.stderr)
else:
print(file=sys.stderr)
sys.exit(code)
class DownloadThread(threading.Thread):
"""Downloads the file, but doesn't save it (just the file size)."""
def __init__(self, response, checksum):
super(DownloadThread, self).__init__()
self.response = response
self.checksum = checksum
self._bytes_downloaded = 0
self._failed = False
self.daemon = True
def run(self):
with tempfile.TemporaryFile() as archive:
hashgen = hashlib.new(self.checksum[0])
for chunk in self.response.iter_content(1024):
self._bytes_downloaded += len(chunk)
archive.write(chunk)
hashgen.update(chunk)
if hashgen.hexdigest() != self.checksum[1]:
self._failed = True
return
archive.seek(0)
tar = tarfile.open(mode='r:gz', fileobj=archive)
tar.extractall()
@property
def bytes_downloaded(self):
"""Read-only interface to _bytes_downloaded."""
return self._bytes_downloaded
@property
def failed(self):
return self._failed
def main():
"""From: http://stackoverflow.com/questions/20801034/how-to-measure-download-speed-and-progress-using-requests"""
# Prepare.
if os.name == 'nt':
locale.setlocale(locale.LC_ALL, 'english-us')
else:
locale.resetlocale()
if len(sys.argv) < 2:
error('Path to manifest is missing!')
manifest = sys.argv[1]
os.chdir(os.path.dirname(manifest))
with open(os.path.basename(manifest), 'r') as hdl:
meta = json.load(hdl)
chk_file = os.path.basename(manifest) + '.chk'
last_chk = None
if os.path.isfile(chk_file):
with open(chk_file, 'r') as stream:
last_chk = stream.read().strip()
if last_chk == '#'.join(meta['checksum']):
return
print('%s has changed or has not been downloaded, yet. Downloading...' % manifest)
response = requests.get(meta['url'], stream=True)
content_length = None if meta.get('ignore_length', False) else int(response.headers.get('Content-Length', 0))
progress_bar = ProgressBarWget(content_length, eta_every=4)
thread = DownloadThread(response, meta['checksum'])
print_every_seconds = 0.25
# Download.
thread.start()
while True:
progress_bar.numerator = thread.bytes_downloaded
print(progress_bar, end='\r')
sys.stdout.flush()
        # For downloads of unknown size (no content-length), stop once the
        # thread has finished; the progress_bar.done check below only covers
        # downloads of known size.
        if not thread.is_alive():
progress_bar.force_done = True
break
if progress_bar.done:
break
time.sleep(print_every_seconds)
print(progress_bar) # Always print one last time.
if thread.failed:
error('The download failed because the download was incomplete or corrupted!')
thread.join()
with open(chk_file, 'w') as stream:
stream.write('#'.join(meta['checksum']))
if __name__ == '__main__':
signal.signal(signal.SIGINT, lambda *_: error('', 0)) # Properly handle Control+C
main()
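# Example manifest this script consumes (field names taken from the code
# above; the URL and digest values are placeholders):
#   {
#       "url": "https://example.com/archive.tar.gz",
#       "checksum": ["sha256", "<hex digest of the .tar.gz>"],
#       "ignore_length": false
#   }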
|
from eight_mile.calibration.plot.confidence_histogram import confidence_histogram
from eight_mile.calibration.plot.reliability_diagram import reliability_diagram, reliability_curve
|
"""The artifacts filter file CLI arguments helper."""
from __future__ import unicode_literals
import os
from plaso.cli import tools
from plaso.cli.helpers import interface
from plaso.cli.helpers import manager
from plaso.lib import errors
class ArtifactFiltersArgumentsHelper(interface.ArgumentsHelper):
"""Artifacts filter file CLI arguments helper."""
NAME = 'artifact_filters'
DESCRIPTION = 'Artifact filters command line arguments.'
@classmethod
def AddArguments(cls, argument_group):
"""Adds command line arguments to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group.
"""
argument_group.add_argument(
'--artifact_filters', '--artifact-filters',
dest='artifact_filter_string', type=str, default=None,
metavar='ARTIFACT_FILTERS', action='store', help=(
            'Names of forensic artifact definitions, provided on the '
            'command line (comma separated). Forensic artifacts are stored '
'in .yaml files that are directly pulled from the artifact '
'definitions project. You can also specify a custom '
'artifacts yaml file (see --custom_artifact_definitions). Artifact '
'definitions can be used to describe and quickly collect data of '
'interest, such as specific files or Windows Registry keys.'))
argument_group.add_argument(
'--artifact_filters_file', '--artifact-filters_file',
dest='artifact_filters_file', type=str, default=None,
metavar='PATH', action='store', help=(
'Names of forensic artifact definitions, provided in a file with '
'one artifact name per line. Forensic artifacts are stored in '
'.yaml files that are directly pulled from the artifact '
'definitions project. You can also specify a custom artifacts '
'yaml file (see --custom_artifact_definitions). Artifact '
'definitions can be used to describe and quickly collect data of '
'interest, such as specific files or Windows Registry keys.'))
@classmethod
def ParseOptions(cls, options, configuration_object):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
BadConfigOption: if the required artifact definitions are not defined.
"""
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
artifact_filters = cls._ParseStringOption(options, 'artifact_filter_string')
artifact_filters_file = cls._ParseStringOption(
options, 'artifact_filters_file')
filter_file = cls._ParseStringOption(options, 'file_filter')
if artifact_filters and artifact_filters_file:
raise errors.BadConfigOption(
'Please only specify artifact definition names in a file '
'or on the command line.')
if (artifact_filters_file or artifact_filters) and filter_file:
raise errors.BadConfigOption(
'Please do not specify both artifact definitions and legacy filters.')
if artifact_filters_file and os.path.isfile(artifact_filters_file):
with open(artifact_filters_file) as file_object:
file_content = file_object.read()
artifact_filters = file_content.splitlines()
elif artifact_filters:
artifact_filters = [name.strip() for name in artifact_filters.split(',')]
setattr(configuration_object, '_artifact_filters', artifact_filters)
manager.ArgumentHelperManager.RegisterHelper(ArtifactFiltersArgumentsHelper)
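# Illustrative invocations of the flags defined above (the artifact names are
# placeholders):
#   --artifact_filters=ExampleArtifact1,ExampleArtifact2
#   --artifact_filters_file=/path/to/artifact_names.txt  (one name per line)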
|
from dulwich.object_store import (
MemoryObjectStore,
)
from dulwich.objects import (
Blob,
)
from dulwich.tests import TestCase
from dulwich.tests.utils import (
make_object,
make_tag,
build_commit_graph,
)
class MissingObjectFinderTest(TestCase):
def setUp(self):
super(MissingObjectFinderTest, self).setUp()
self.store = MemoryObjectStore()
self.commits = []
def cmt(self, n):
return self.commits[n - 1]
def assertMissingMatch(self, haves, wants, expected):
for sha, path in self.store.find_missing_objects(haves, wants, set()):
self.assertTrue(
sha in expected,
"(%s,%s) erroneously reported as missing" % (sha, path),
)
expected.remove(sha)
self.assertEqual(
len(expected),
0,
"some objects are not reported as missing: %s" % (expected,),
)
class MOFLinearRepoTest(MissingObjectFinderTest):
def setUp(self):
super(MOFLinearRepoTest, self).setUp()
# present in 1, removed in 3
f1_1 = make_object(Blob, data=b"f1")
# present in all revisions, changed in 2 and 3
f2_1 = make_object(Blob, data=b"f2")
f2_2 = make_object(Blob, data=b"f2-changed")
f2_3 = make_object(Blob, data=b"f2-changed-again")
# added in 2, left unmodified in 3
f3_2 = make_object(Blob, data=b"f3")
commit_spec = [[1], [2, 1], [3, 2]]
trees = {
1: [(b"f1", f1_1), (b"f2", f2_1)],
2: [(b"f1", f1_1), (b"f2", f2_2), (b"f3", f3_2)],
3: [(b"f2", f2_3), (b"f3", f3_2)],
}
# commit 1: f1 and f2
# commit 2: f3 added, f2 changed. Missing shall report commit id and a
# tree referenced by commit
# commit 3: f1 removed, f2 changed. Commit sha and root tree sha shall
# be reported as modified
self.commits = build_commit_graph(self.store, commit_spec, trees)
self.missing_1_2 = [self.cmt(2).id, self.cmt(2).tree, f2_2.id, f3_2.id]
self.missing_2_3 = [self.cmt(3).id, self.cmt(3).tree, f2_3.id]
self.missing_1_3 = [
self.cmt(2).id,
self.cmt(3).id,
self.cmt(2).tree,
self.cmt(3).tree,
f2_2.id,
f3_2.id,
f2_3.id,
]
def test_1_to_2(self):
self.assertMissingMatch([self.cmt(1).id], [self.cmt(2).id], self.missing_1_2)
def test_2_to_3(self):
self.assertMissingMatch([self.cmt(2).id], [self.cmt(3).id], self.missing_2_3)
def test_1_to_3(self):
self.assertMissingMatch([self.cmt(1).id], [self.cmt(3).id], self.missing_1_3)
def test_bogus_haves(self):
"""Ensure non-existent SHA in haves are tolerated"""
bogus_sha = self.cmt(2).id[::-1]
haves = [self.cmt(1).id, bogus_sha]
wants = [self.cmt(3).id]
self.assertMissingMatch(haves, wants, self.missing_1_3)
def test_bogus_wants_failure(self):
"""Ensure non-existent SHA in wants are not tolerated"""
bogus_sha = self.cmt(2).id[::-1]
haves = [self.cmt(1).id]
wants = [self.cmt(3).id, bogus_sha]
self.assertRaises(
KeyError, self.store.find_missing_objects, haves, wants, set()
)
def test_no_changes(self):
self.assertMissingMatch([self.cmt(3).id], [self.cmt(3).id], [])
class MOFMergeForkRepoTest(MissingObjectFinderTest):
# 1 --- 2 --- 4 --- 6 --- 7
# \ /
# 3 ---
# \
# 5
def setUp(self):
super(MOFMergeForkRepoTest, self).setUp()
f1_1 = make_object(Blob, data=b"f1")
f1_2 = make_object(Blob, data=b"f1-2")
f1_4 = make_object(Blob, data=b"f1-4")
f1_7 = make_object(Blob, data=b"f1-2") # same data as in rev 2
f2_1 = make_object(Blob, data=b"f2")
f2_3 = make_object(Blob, data=b"f2-3")
f3_3 = make_object(Blob, data=b"f3")
f3_5 = make_object(Blob, data=b"f3-5")
commit_spec = [[1], [2, 1], [3, 2], [4, 2], [5, 3], [6, 3, 4], [7, 6]]
trees = {
1: [(b"f1", f1_1), (b"f2", f2_1)],
2: [(b"f1", f1_2), (b"f2", f2_1)], # f1 changed
# f3 added, f2 changed
3: [(b"f1", f1_2), (b"f2", f2_3), (b"f3", f3_3)],
4: [(b"f1", f1_4), (b"f2", f2_1)], # f1 changed
5: [(b"f1", f1_2), (b"f3", f3_5)], # f2 removed, f3 changed
# merged 3 and 4
6: [(b"f1", f1_4), (b"f2", f2_3), (b"f3", f3_3)],
# f1 changed to match rev2. f3 removed
7: [(b"f1", f1_7), (b"f2", f2_3)],
}
self.commits = build_commit_graph(self.store, commit_spec, trees)
self.f1_2_id = f1_2.id
self.f1_4_id = f1_4.id
self.f1_7_id = f1_7.id
self.f2_3_id = f2_3.id
self.f3_3_id = f3_3.id
self.assertEqual(f1_2.id, f1_7.id, "[sanity]")
def test_have6_want7(self):
        # have 6, want 7. Ideally this should not report f1_7, which is
        # identical to f1_2. To do so, however, MissingObjectFinder would have
        # to record not only the trees of common commits but all parent trees
        # and tree items, which is overkill (in sha_done it records f1_4 as
        # known without recording that f1_2 was known before it, so it cannot
        # detect that f1_7 is in fact f1_2 and should not be reported).
self.assertMissingMatch(
[self.cmt(6).id],
[self.cmt(7).id],
[self.cmt(7).id, self.cmt(7).tree, self.f1_7_id],
)
def test_have4_want7(self):
# have 4, want 7. Shall not include rev5 as it is not in the tree
        # between 4 and 7 (well, it is, but its SHAs are irrelevant to the
        # 4..7 commit hierarchy)
self.assertMissingMatch(
[self.cmt(4).id],
[self.cmt(7).id],
[
self.cmt(7).id,
self.cmt(6).id,
self.cmt(3).id,
self.cmt(7).tree,
self.cmt(6).tree,
self.cmt(3).tree,
self.f2_3_id,
self.f3_3_id,
],
)
def test_have1_want6(self):
# have 1, want 6. Shall not include rev5
self.assertMissingMatch(
[self.cmt(1).id],
[self.cmt(6).id],
[
self.cmt(6).id,
self.cmt(4).id,
self.cmt(3).id,
self.cmt(2).id,
self.cmt(6).tree,
self.cmt(4).tree,
self.cmt(3).tree,
self.cmt(2).tree,
self.f1_2_id,
self.f1_4_id,
self.f2_3_id,
self.f3_3_id,
],
)
    def test_have3_want7(self):
        # have 3, want 7. Shall not report rev2 and its tree, because
        # haves(3) implies its parents, i.e. rev2, are present too.
        # BUT shall report any changes descending from rev2 (excluding rev3).
        # Shall NOT report f1_7 as it is technically == f1_2.
self.assertMissingMatch(
[self.cmt(3).id],
[self.cmt(7).id],
[
self.cmt(7).id,
self.cmt(6).id,
self.cmt(4).id,
self.cmt(7).tree,
self.cmt(6).tree,
self.cmt(4).tree,
self.f1_4_id,
],
)
def test_have5_want7(self):
# have 5, want 7. Common parent is rev2, hence children of rev2 from
# a descent line other than rev5 shall be reported
# expects f1_4 from rev6. f3_5 is known in rev5;
# f1_7 shall be the same as f1_2 (known, too)
self.assertMissingMatch(
[self.cmt(5).id],
[self.cmt(7).id],
[
self.cmt(7).id,
self.cmt(6).id,
self.cmt(4).id,
self.cmt(7).tree,
self.cmt(6).tree,
self.cmt(4).tree,
self.f1_4_id,
],
)
class MOFTagsTest(MissingObjectFinderTest):
def setUp(self):
super(MOFTagsTest, self).setUp()
f1_1 = make_object(Blob, data=b"f1")
commit_spec = [[1]]
trees = {1: [(b"f1", f1_1)]}
self.commits = build_commit_graph(self.store, commit_spec, trees)
self._normal_tag = make_tag(self.cmt(1))
self.store.add_object(self._normal_tag)
self._tag_of_tag = make_tag(self._normal_tag)
self.store.add_object(self._tag_of_tag)
self._tag_of_tree = make_tag(self.store[self.cmt(1).tree])
self.store.add_object(self._tag_of_tree)
self._tag_of_blob = make_tag(f1_1)
self.store.add_object(self._tag_of_blob)
self._tag_of_tag_of_blob = make_tag(self._tag_of_blob)
self.store.add_object(self._tag_of_tag_of_blob)
self.f1_1_id = f1_1.id
def test_tagged_commit(self):
# The user already has the tagged commit, all they want is the tag,
# so send them only the tag object.
self.assertMissingMatch(
[self.cmt(1).id], [self._normal_tag.id], [self._normal_tag.id]
)
# The remaining cases are unusual, but do happen in the wild.
def test_tagged_tag(self):
# User already has tagged tag, send only tag of tag
self.assertMissingMatch(
[self._normal_tag.id], [self._tag_of_tag.id], [self._tag_of_tag.id]
)
# User needs both tags, but already has commit
self.assertMissingMatch(
[self.cmt(1).id],
[self._tag_of_tag.id],
[self._normal_tag.id, self._tag_of_tag.id],
)
def test_tagged_tree(self):
self.assertMissingMatch(
[],
[self._tag_of_tree.id],
[self._tag_of_tree.id, self.cmt(1).tree, self.f1_1_id],
)
def test_tagged_blob(self):
self.assertMissingMatch(
[], [self._tag_of_blob.id], [self._tag_of_blob.id, self.f1_1_id]
)
def test_tagged_tagged_blob(self):
self.assertMissingMatch(
[],
[self._tag_of_tag_of_blob.id],
[self._tag_of_tag_of_blob.id, self._tag_of_blob.id, self.f1_1_id],
)
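# A sketch (not one of the original tests) of driving find_missing_objects
# directly with the helpers imported above. Commit 2 reuses commit 1's tree,
# so with commit 1 as a "have" only the commit object itself is missing:
#
#   store = MemoryObjectStore()
#   blob = make_object(Blob, data=b"example")
#   c1, c2 = build_commit_graph(store, [[1], [2, 1]],
#                               {1: [(b"f", blob)], 2: [(b"f", blob)]})
#   missing = {sha for sha, _ in store.find_missing_objects(
#       [c1.id], [c2.id], set())}
#   assert missing == {c2.id}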
|
import os
import sys
from setuptools import setup, find_packages
sys.path.insert(0, 'src')
import blockdiag
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Topic :: Software Development",
"Topic :: Software Development :: Documentation",
"Topic :: Text Processing :: Markup",
]
requires = ['setuptools',
'funcparserlib',
'webcolors',
'Pillow']
test_requires = ['nose',
'pep8>=1.3',
'reportlab',
'docutils']
if (2, 6) <= sys.version_info < (2, 7):
requires.append('OrderedDict')
test_requires.append('unittest2')
setup(
name='blockdiag',
version=blockdiag.__version__,
    description='blockdiag generates block-diagram images from text',
long_description=open("README.rst").read(),
classifiers=classifiers,
keywords=['diagram', 'generator'],
author='Takeshi Komiya',
author_email='i.tkomiya at gmail.com',
url='http://blockdiag.com/',
download_url='http://pypi.python.org/pypi/blockdiag',
license='Apache License 2.0',
py_modules=['blockdiag_sphinxhelper'],
packages=find_packages('src'),
package_dir={'': 'src'},
package_data={'': ['buildout.cfg']},
include_package_data=True,
install_requires=requires,
extras_require=dict(
testing=test_requires,
pdf=[
'reportlab',
],
rst=[
'docutils',
],
),
test_suite='nose.collector',
tests_require=test_requires,
entry_points="""
[console_scripts]
blockdiag = blockdiag.command:main
[blockdiag_noderenderer]
box = blockdiag.noderenderer.box
square = blockdiag.noderenderer.square
roundedbox = blockdiag.noderenderer.roundedbox
diamond = blockdiag.noderenderer.diamond
minidiamond = blockdiag.noderenderer.minidiamond
mail = blockdiag.noderenderer.mail
note = blockdiag.noderenderer.note
cloud = blockdiag.noderenderer.cloud
circle = blockdiag.noderenderer.circle
ellipse = blockdiag.noderenderer.ellipse
beginpoint = blockdiag.noderenderer.beginpoint
endpoint = blockdiag.noderenderer.endpoint
actor = blockdiag.noderenderer.actor
flowchart.database = blockdiag.noderenderer.flowchart.database
flowchart.input = blockdiag.noderenderer.flowchart.input
flowchart.loopin = blockdiag.noderenderer.flowchart.loopin
flowchart.loopout = blockdiag.noderenderer.flowchart.loopout
flowchart.terminator = blockdiag.noderenderer.flowchart.terminator
textbox = blockdiag.noderenderer.textbox
dots = blockdiag.noderenderer.dots
none = blockdiag.noderenderer.none
[blockdiag_plugins]
attributes = blockdiag.plugins.attributes
autoclass = blockdiag.plugins.autoclass
[blockdiag_imagedrawers]
imagedraw_png = blockdiag.imagedraw.png
imagedraw_svg = blockdiag.imagedraw.svg
imagedraw_pdf = blockdiag.imagedraw.pdf
""",
)
|
from __future__ import absolute_import
from google.cloud.aiplatform.utils.enhanced_library import value_converter
from proto.marshal import Marshal
from proto.marshal.rules.struct import ValueRule
from google.protobuf.struct_pb2 import Value
class ConversionValueRule(ValueRule):
def to_python(self, value, *, absent: bool = None):
return super().to_python(value, absent=absent)
def to_proto(self, value):
# Need to check whether value is an instance
# of an enhanced type
if callable(getattr(value, "to_value", None)):
return value.to_value()
else:
return super().to_proto(value)
def _add_methods_to_classes_in_package(pkg):
classes = dict(
[(name, cls) for name, cls in pkg.__dict__.items() if isinstance(cls, type)]
)
for class_name, cls in classes.items():
# Add to_value() method to class with docstring
setattr(cls, "to_value", value_converter.to_value)
cls.to_value.__doc__ = value_converter.to_value.__doc__
# Add from_value() method to class with docstring
setattr(cls, "from_value", _add_from_value_to_class(cls))
cls.from_value.__doc__ = value_converter.from_value.__doc__
# Add from_map() method to class with docstring
setattr(cls, "from_map", _add_from_map_to_class(cls))
cls.from_map.__doc__ = value_converter.from_map.__doc__
def _add_from_value_to_class(cls):
def _from_value(value):
return value_converter.from_value(cls, value)
return _from_value
def _add_from_map_to_class(cls):
def _from_map(map_):
return value_converter.from_map(cls, map_)
return _from_map
marshal = Marshal(name="google.cloud.aiplatform.v1beta1")
marshal.register(Value, ConversionValueRule(marshal=marshal))
marshal = Marshal(name="google.cloud.aiplatform.v1")
marshal.register(Value, ConversionValueRule(marshal=marshal))
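# A self-contained illustration (hypothetical _Pet class, not part of the
# library) of why _add_from_value_to_class and _add_from_map_to_class return
# closures: each generated function captures its own `cls`, so every class in
# the package gets a correctly bound from_value()/from_map():
#
#   class _Pet:
#       pass
#   def _bind_class_name(cls):
#       def _class_name():
#           return cls.__name__
#       return _class_name
#   _Pet.class_name = staticmethod(_bind_class_name(_Pet))
#   assert _Pet.class_name() == "_Pet"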
|
"""VTA specific buildin for runtime."""
from __future__ import absolute_import as _abs
import tvm
from . import ir_pass
from .environment import get_env
def lift_coproc_scope(x):
"""Lift coprocessings cope to the """
x = ir_pass.lift_alloc_to_scope_begin(x)
x = tvm.ir_pass.LiftAttrScope(x, "coproc_scope", False)
return x
def early_rewrite(stmt):
"""Try to do storage rewrite in early pass."""
try:
return tvm.ir_pass.StorageRewrite(stmt)
except tvm.TVMError:
return stmt
def build_config(debug_flag=0, **kwargs):
"""Build a build config for VTA.
Parameters
----------
debug_flag : int
        The debug flag to be passed.
kwargs : dict
Additional configurations.
Returns
-------
build_config: BuildConfig
The build config that can be used in TVM.
Example
--------
.. code-block:: python
# build a vta module.
with vta.build_config():
vta_module = tvm.build(s, ...)
"""
env = get_env()
def add_debug(stmt):
debug = tvm.call_extern(
"int32", "VTASetDebugMode",
env.dev.command_handle,
debug_flag)
return tvm.make.stmt_seq(debug, stmt)
pass_list = [(0, ir_pass.inject_conv2d_transpose_skip),
(1, ir_pass.inject_dma_intrin),
(1, ir_pass.inject_skip_copy),
(1, ir_pass.annotate_alu_coproc_scope),
(1, lambda x: tvm.ir_pass.LiftAttrScope(x, "coproc_uop_scope", True)),
(1, lift_coproc_scope),
(1, ir_pass.inject_coproc_sync),
(1, early_rewrite)]
if debug_flag:
pass_list.append((1, add_debug))
pass_list.append((2, ir_pass.inject_alu_intrin))
pass_list.append((3, ir_pass.fold_uop_loop))
pass_list.append((3, ir_pass.cpu_access_rewrite))
return tvm.build_config(add_lower_pass=pass_list, **kwargs)
def lower(*args, **kwargs):
"""Thin wrapper of tvm.lower
This wrapper automatically applies VTA's build_config
if there is no user specified build_config in context.
See Also
--------
tvm.lower : The original TVM's lower function
"""
cfg = tvm.build_module.current_build_config()
if not cfg.add_lower_pass:
with build_config():
return tvm.lower(*args, **kwargs)
return tvm.lower(*args, **kwargs)
def build(*args, **kwargs):
"""Thin wrapper of tvm.build
This wrapper automatically applies VTA's build_config
if there is no user specified build_config in context.
See Also
--------
tvm.build : The original TVM's build function
"""
cfg = tvm.build_module.current_build_config()
if not cfg.add_lower_pass:
with build_config():
return tvm.build(*args, **kwargs)
return tvm.build(*args, **kwargs)
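# Sketch of the two equivalent ways to apply VTA's lowering passes
# (s, args and tgt are placeholders for a schedule, its tensors and a target):
#
#   vta.build(s, args, tgt)          # the wrapper injects vta.build_config()
#
#   with vta.build_config():         # or make the config context explicit
#       tvm.build(s, args, tgt)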
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.solar_collectors import SolarCollectorPerformanceFlatPlate
log = logging.getLogger(__name__)
class TestSolarCollectorPerformanceFlatPlate(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_solarcollectorperformanceflatplate(self):
pyidf.validation_level = ValidationLevel.error
obj = SolarCollectorPerformanceFlatPlate()
# alpha
var_name = "Name"
obj.name = var_name
# real
var_gross_area = 0.0001
obj.gross_area = var_gross_area
# alpha
var_test_fluid = "Water"
obj.test_fluid = var_test_fluid
# real
var_test_flow_rate = 0.0001
obj.test_flow_rate = var_test_flow_rate
# alpha
var_test_correlation_type = "Inlet"
obj.test_correlation_type = var_test_correlation_type
# real
var_coefficient_1_of_efficiency_equation = 6.6
obj.coefficient_1_of_efficiency_equation = var_coefficient_1_of_efficiency_equation
# real
var_coefficient_2_of_efficiency_equation = 7.7
obj.coefficient_2_of_efficiency_equation = var_coefficient_2_of_efficiency_equation
# real
var_coefficient_3_of_efficiency_equation = 8.8
obj.coefficient_3_of_efficiency_equation = var_coefficient_3_of_efficiency_equation
# real
var_coefficient_2_of_incident_angle_modifier = 9.9
obj.coefficient_2_of_incident_angle_modifier = var_coefficient_2_of_incident_angle_modifier
# real
var_coefficient_3_of_incident_angle_modifier = 10.1
obj.coefficient_3_of_incident_angle_modifier = var_coefficient_3_of_incident_angle_modifier
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.solarcollectorperformanceflatplates[0].name, var_name)
self.assertAlmostEqual(idf2.solarcollectorperformanceflatplates[0].gross_area, var_gross_area)
self.assertEqual(idf2.solarcollectorperformanceflatplates[0].test_fluid, var_test_fluid)
self.assertAlmostEqual(idf2.solarcollectorperformanceflatplates[0].test_flow_rate, var_test_flow_rate)
self.assertEqual(idf2.solarcollectorperformanceflatplates[0].test_correlation_type, var_test_correlation_type)
self.assertAlmostEqual(idf2.solarcollectorperformanceflatplates[0].coefficient_1_of_efficiency_equation, var_coefficient_1_of_efficiency_equation)
self.assertAlmostEqual(idf2.solarcollectorperformanceflatplates[0].coefficient_2_of_efficiency_equation, var_coefficient_2_of_efficiency_equation)
self.assertAlmostEqual(idf2.solarcollectorperformanceflatplates[0].coefficient_3_of_efficiency_equation, var_coefficient_3_of_efficiency_equation)
self.assertAlmostEqual(idf2.solarcollectorperformanceflatplates[0].coefficient_2_of_incident_angle_modifier, var_coefficient_2_of_incident_angle_modifier)
self.assertAlmostEqual(idf2.solarcollectorperformanceflatplates[0].coefficient_3_of_incident_angle_modifier, var_coefficient_3_of_incident_angle_modifier)
|
from solution import Solution
input = 1534236469
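# 1534236469 reversed is 9646324351, which overflows a signed 32-bit integer;
# a LeetCode-style Solution.reverse (the class itself is not shown here) is
# expected to return 0 for this input.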
sol = Solution()
result = sol.reverse(input)
print(result)
|
import logging
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.db import models
from django.db.models import Case, Q, Sum, Value, When
from django.urls import reverse_lazy
from django.views.generic import CreateView, DeleteView, DetailView, ListView, UpdateView
from zentral.contrib.osquery.forms import QueryForm, QuerySearchForm
from zentral.contrib.osquery.models import PackQuery, Query
logger = logging.getLogger('zentral.contrib.osquery.views.queries')
class QueryListView(PermissionRequiredMixin, ListView):
permission_required = "osquery.view_query"
paginate_by = 50
model = Query
def get(self, request, *args, **kwargs):
self.form = QuerySearchForm(request.GET)
self.form.is_valid()
return super().get(request, *args, **kwargs)
def get_queryset(self):
return self.form.get_queryset()
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["form"] = self.form
page = ctx["page_obj"]
if page.has_next():
qd = self.request.GET.copy()
qd['page'] = page.next_page_number()
ctx['next_url'] = "?{}".format(qd.urlencode())
if page.has_previous():
qd = self.request.GET.copy()
qd['page'] = page.previous_page_number()
ctx['previous_url'] = "?{}".format(qd.urlencode())
if page.number > 1:
qd = self.request.GET.copy()
qd.pop('page', None)
ctx['reset_link'] = "?{}".format(qd.urlencode())
return ctx
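# Illustration (made-up query string) of the pagination context built above:
# for a middle page such as ?q=foo&page=2 the template receives
# next_url="?q=foo&page=3", previous_url="?q=foo&page=1" and
# reset_link="?q=foo".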
class CreateQueryView(PermissionRequiredMixin, CreateView):
permission_required = "osquery.add_query"
model = Query
form_class = QueryForm
class QueryView(PermissionRequiredMixin, DetailView):
permission_required = "osquery.view_query"
def get_queryset(self):
return Query.objects.select_related("compliance_check").prefetch_related("packquery__pack")
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
try:
ctx["pack_query"] = self.object.packquery
except PackQuery.DoesNotExist:
ctx["pack_query"] = None
match_value = Value(1, output_field=models.IntegerField())
miss_value = Value(0, output_field=models.IntegerField())
ctx["distributed_queries"] = (
self.object.distributedquery_set
.annotate(in_flight_count=Sum(
Case(When(Q(distributedquerymachine__serial_number__isnull=False) &
Q(distributedquerymachine__status__isnull=True),
then=match_value), default=miss_value)
))
.annotate(ok_count=Sum(
Case(When(distributedquerymachine__status=0, then=match_value), default=miss_value)
))
.annotate(error_count=Sum(
Case(When(distributedquerymachine__status__gte=1, then=match_value), default=miss_value)
))
.order_by("-pk")
)
ctx["distributed_query_count"] = ctx["distributed_queries"].count()
return ctx
class UpdateQueryView(PermissionRequiredMixin, UpdateView):
permission_required = "osquery.change_query"
model = Query
form_class = QueryForm
class DeleteQueryView(PermissionRequiredMixin, DeleteView):
permission_required = "osquery.delete_query"
model = Query
success_url = reverse_lazy("osquery:queries")
|
import unittest
import hoverpy.tests
import doctest
def runTests():
"Run all of the tests when run as a module with -m."
suite = hoverpy.tests.get_suite()
runner = unittest.TextTestRunner()
runner.run(suite)
def runDocTests():
    "Run the doctests discovered in this module."
    finder = doctest.DocTestFinder(exclude_empty=False)
    suite = doctest.DocTestSuite(test_finder=finder)
    runner = unittest.TextTestRunner()
    runner.run(suite)
def main():
runTests()
if __name__ == '__main__':
main()
|
"""
Setup script for pywbemtools project.
"""
import os
import io
import re
import setuptools
def get_version(version_file):
"""
Execute the specified version file and return the value of the __version__
global variable that is set in the version file.
Note: Make sure the version file does not depend on any packages in the
requirements list of this package (otherwise it cannot be executed in
a fresh Python environment).
"""
with io.open(version_file, 'r', encoding='utf-8') as fp:
version_source = fp.read()
_globals = {}
exec(version_source, _globals) # pylint: disable=exec-used
return _globals['__version__']
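# Illustrative: if pywbemtools/_version.py contains the single line
# __version__ = '1.0.0', the get_version() call further below returns '1.0.0'
# without importing the pywbemtools package.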
def get_requirements(requirements_file):
"""
Parse the specified requirements file and return a list of its non-empty,
non-comment lines. The returned lines are without any trailing newline
characters.
"""
with io.open(requirements_file, 'r', encoding='utf-8') as fp:
lines = fp.readlines()
reqs = []
for line in lines:
line = line.strip('\n')
if not line.startswith('#') and line != '':
reqs.append(line)
return reqs
def read_file(a_file):
"""
Read the specified file and return its content as one string.
"""
with io.open(a_file, 'r', encoding='utf-8') as fp:
content = fp.read()
return content
requirements = get_requirements('requirements.txt')
install_requires = [req for req in requirements
if req and not re.match(r'[^:]+://', req)]
dependency_links = [req for req in requirements
if req and re.match(r'[^:]+://', req)]
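# Example of the split above (illustrative requirement lines, not from the
# real requirements.txt):
#   'pywbem>=0.17.0'                   -> install_requires
#   'git+https://github.com/x/y#egg=y' -> dependency_links ('[^:]+://' match)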
package_version = get_version(os.path.join('pywbemtools', '_version.py'))
setuptools.setup(
name='pywbemtools',
version=package_version,
packages=[
'pywbemtools',
],
entry_points={
'console_scripts': [
'pywbemcli = pywbemtools.pywbemcli.pywbemcli:cli',
'pywbemlistener = pywbemtools.pywbemlistener.pywbemlistener:cli',
],
},
include_package_data=True, # as specified in MANIFEST.in
install_requires=install_requires,
dependency_links=dependency_links,
description='Python client tools to work with WBEM Servers using the '
'PyWBEM API.',
long_description=read_file('README_PYPI.rst'),
long_description_content_type='text/x-rst',
license='Apache License, Version 2.0',
author='Karl Schopmeyer, Andreas Maier',
author_email='k.schopmeyer@swbell.net, maiera@de.ibm.com',
maintainer='Karl Schopmeyer, Andreas Maier',
maintainer_email='k.schopmeyer@swbell.net, maiera@de.ibm.com',
url='https://github.com/pywbem/pywbemtools',
project_urls={
'Bug Tracker': 'https://github.com/pywbem/pywbemtools/issues',
'Documentation': 'https://pywbemtools.readthedocs.io/en/latest/',
'Source Code': 'https://github.com/pywbem/pywbemtools',
},
options={'bdist_wheel': {'universal': True}},
zip_safe=True, # This package can safely be installed from a zip file
platforms='any',
# Keep these Python versions in sync with pywbemtools/__init__.py
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
]
)
|
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from cinder import exception
from cinder.i18n import _
from cinder import utils as cinder_utils
from cinder.volume.drivers.dell_emc.vnx import common
from cinder.volume.drivers.dell_emc.vnx import const
from cinder.volume.drivers.dell_emc.vnx import utils
storops = importutils.try_import('storops')
if storops:
from storops import exception as storops_ex
from storops.lib import tasks as storops_tasks
LOG = logging.getLogger(__name__)
class Condition(object):
"""Defines some condition checker which are used in wait_until, .etc."""
@staticmethod
def is_lun_io_ready(lun):
utils.update_res_without_poll(lun)
if not lun.existed:
return False
lun_state = lun.state
if lun_state == common.LUNState.INITIALIZING:
return False
elif lun_state in [common.LUNState.READY,
common.LUNState.FAULTED]:
return lun.operation == 'None'
else:
            # Exit wait_until quickly when the lun is in any other state,
            # to avoid a long timeout.
msg = (_('Volume %(name)s was created in VNX, '
'but in %(state)s state.') % {
'name': lun.name, 'state': lun_state})
raise exception.VolumeBackendAPIException(data=msg)
@staticmethod
def is_object_existed(vnx_obj):
utils.update_res_without_poll(vnx_obj)
return vnx_obj.existed
@staticmethod
def is_lun_ops_ready(lun):
utils.update_res_without_poll(lun)
return 'None' == lun.operation
@staticmethod
def is_lun_expanded(lun, new_size):
utils.update_res_without_poll(lun)
return new_size == lun.total_capacity_gb
@staticmethod
def is_mirror_synced(mirror):
utils.update_res_without_poll(mirror)
return (
mirror.secondary_image.state ==
storops.VNXMirrorImageState.SYNCHRONIZED)
class Client(object):
def __init__(self, ip, username, password, scope,
naviseccli, sec_file, queue_path=None):
self.naviseccli = naviseccli
if not storops:
msg = _('storops Python library is not installed.')
raise exception.VolumeBackendAPIException(message=msg)
self.vnx = storops.VNXSystem(ip=ip,
username=username,
password=password,
scope=scope,
naviseccli=naviseccli,
sec_file=sec_file)
self.sg_cache = {}
if queue_path:
self.queue = storops_tasks.PQueue(path=queue_path)
self.queue.start()
LOG.info('PQueue[%s] starts now.', queue_path)
def create_lun(self, pool, name, size, provision,
tier, cg_id=None, ignore_thresholds=False,
qos_specs=None):
pool = self.vnx.get_pool(name=pool)
try:
lun = pool.create_lun(lun_name=name,
size_gb=size,
provision=provision,
tier=tier,
ignore_thresholds=ignore_thresholds)
except storops_ex.VNXLunNameInUseError:
lun = self.vnx.get_lun(name=name)
utils.wait_until(condition=Condition.is_lun_io_ready, lun=lun)
if cg_id:
cg = self.vnx.get_cg(name=cg_id)
cg.add_member(lun)
ioclasses = self.get_ioclass(qos_specs)
if ioclasses:
policy, is_new = self.get_running_policy()
for one in ioclasses:
one.add_lun(lun)
policy.add_class(one)
if is_new:
policy.run_policy()
return lun
def get_lun(self, name=None, lun_id=None):
return self.vnx.get_lun(name=name, lun_id=lun_id)
def get_lun_id(self, volume):
"""Retrieves the LUN ID of volume."""
if volume.provider_location:
return int(utils.extract_provider_location(
volume.provider_location, 'id'))
else:
# In some cases, cinder will not update volume info in DB with
# provider_location returned by us. We need to retrieve the id
            # from the array. For example, cinder backup-create doesn't use the
# provider_location returned from create_cloned_volume.
lun = self.get_lun(name=volume.name)
return lun.lun_id
def delete_lun(self, name, force=False):
"""Deletes a LUN or mount point."""
lun = self.get_lun(name=name)
smp_attached_snap = (lun.attached_snapshot if lun.is_snap_mount_point
else None)
try:
# Do not delete the snapshots of the lun.
lun.delete(force_detach=True, detach_from_sg=force)
if smp_attached_snap:
smp_attached_snap.delete()
except storops_ex.VNXLunNotFoundError as ex:
LOG.info("LUN %(name)s is already deleted. This message can "
"be safely ignored. Message: %(msg)s",
{'name': name, 'msg': ex.message})
def cleanup_async_lun(self, name, force=False):
"""Helper method to cleanup stuff for async migration.
.. note::
Only call it when VNXLunUsedByFeatureError occurs
"""
lun = self.get_lun(name=name)
self.cleanup_migration(src_id=lun.lun_id)
lun.delete(force_detach=True, detach_from_sg=force)
def delay_delete_lun(self, name):
"""Delay the deletion by putting it in a storops queue."""
self.queue.put(self.vnx.delete_lun, name=name)
LOG.info("VNX object has been added to queue for later"
" deletion: %s", name)
@cinder_utils.retry(const.VNXLunPreparingError, retries=1,
backoff_rate=1)
def expand_lun(self, name, new_size, poll=True):
lun = self.get_lun(name=name)
try:
lun.poll = poll
lun.expand(new_size, ignore_thresholds=True)
except storops_ex.VNXLunExpandSizeError as ex:
LOG.warning("LUN %(name)s is already expanded. "
"Message: %(msg)s.",
{'name': name, 'msg': ex.message})
except storops_ex.VNXLunPreparingError as ex:
# The error means the operation cannot be performed because the LUN
# is 'Preparing'. Wait for a while so that the LUN may get out of
# the transitioning state.
with excutils.save_and_reraise_exception():
LOG.warning("LUN %(name)s is not ready for extension: %(msg)s",
{'name': name, 'msg': ex.message})
utils.wait_until(Condition.is_lun_ops_ready, lun=lun)
utils.wait_until(Condition.is_lun_expanded, lun=lun, new_size=new_size)
def modify_lun(self):
pass
@cinder_utils.retry(exceptions=const.VNXTargetNotReadyError,
interval=15,
retries=5, backoff_rate=1)
def migrate_lun(self, src_id, dst_id,
rate=const.MIGRATION_RATE_HIGH):
src = self.vnx.get_lun(lun_id=src_id)
src.migrate(dst_id, rate)
def session_finished(self, src_lun):
session = self.vnx.get_migration_session(src_lun)
if not session.existed:
return True
elif session.current_state in ('FAULTED', 'STOPPED'):
            LOG.warning('Session is %s and needs to be handled.',
session.current_state)
return True
else:
return False
def verify_migration(self, src_id, dst_id, dst_wwn):
"""Verify whether migration session finished successfully.
:param src_id: source LUN id
:param dst_id: destination LUN id
:param dst_wwn: destination LUN WWN
:returns Boolean: True or False
"""
src_lun = self.vnx.get_lun(lun_id=src_id)
utils.wait_until(condition=self.session_finished,
interval=common.INTERVAL_30_SEC,
src_lun=src_lun)
new_lun = self.vnx.get_lun(lun_id=dst_id)
new_wwn = new_lun.wwn
if not new_wwn or new_wwn != dst_wwn:
return True
else:
return False
def cleanup_migration(self, src_id, dst_id=None):
"""Invoke when migration meets error.
:param src_id: source LUN id
:param dst_id: destination LUN id
"""
# if migration session is still there
# we need to cancel the session
session = self.vnx.get_migration_session(src_id)
src_lun = self.vnx.get_lun(lun_id=src_id)
if session.existed:
LOG.warning('Cancelling migration session: '
'%(src_id)s -> %(dst_id)s.',
{'src_id': src_id,
'dst_id': dst_id})
try:
src_lun.cancel_migrate()
except storops_ex.VNXLunNotMigratingError:
LOG.info('The LUN is not migrating or completed, '
'this message can be safely ignored')
except (storops_ex.VNXLunSyncCompletedError,
storops_ex.VNXMigrationError):
# Wait until session finishes
self.verify_migration(src_id, session.dest_lu_id, None)
def create_snapshot(self, lun_id, snap_name, keep_for=None):
"""Creates a snapshot."""
lun = self.get_lun(lun_id=lun_id)
try:
lun.create_snap(
snap_name, allow_rw=True, auto_delete=False,
keep_for=keep_for)
except storops_ex.VNXSnapNameInUseError as ex:
LOG.warning('Snapshot %(name)s already exists. '
'Message: %(msg)s',
{'name': snap_name, 'msg': ex.message})
def delete_snapshot(self, snapshot_name):
"""Deletes a snapshot."""
snap = self.vnx.get_snap(name=snapshot_name)
try:
snap.delete()
except storops_ex.VNXSnapNotExistsError as ex:
LOG.warning("Snapshot %(name)s may be deleted already. "
"Message: %(msg)s",
{'name': snapshot_name, 'msg': ex.message})
except storops_ex.VNXDeleteAttachedSnapError as ex:
with excutils.save_and_reraise_exception():
LOG.warning("Failed to delete snapshot %(name)s "
"which is in use. Message: %(msg)s",
{'name': snapshot_name, 'msg': ex.message})
def copy_snapshot(self, snap_name, new_snap_name):
snap = self.vnx.get_snap(name=snap_name)
snap.copy(new_name=new_snap_name)
def create_mount_point(self, lun_name, smp_name):
lun = self.vnx.get_lun(name=lun_name)
try:
return lun.create_mount_point(name=smp_name)
except storops_ex.VNXLunNameInUseError as ex:
LOG.warning('Mount point %(name)s already exists. '
'Message: %(msg)s',
{'name': smp_name, 'msg': ex.message})
            # Ignore the failure due to retry.
return self.vnx.get_lun(name=smp_name)
def attach_snapshot(self, smp_name, snap_name):
lun = self.vnx.get_lun(name=smp_name)
try:
lun.attach_snap(snap=snap_name)
except storops_ex.VNXSnapAlreadyMountedError as ex:
LOG.warning("Snapshot %(snap_name)s is attached to "
"snapshot mount point %(smp_name)s already. "
"Message: %(msg)s",
{'snap_name': snap_name,
'smp_name': smp_name,
'msg': ex.message})
def detach_snapshot(self, smp_name):
lun = self.vnx.get_lun(name=smp_name)
try:
lun.detach_snap()
except storops_ex.VNXSnapNotAttachedError as ex:
LOG.warning("Snapshot mount point %(smp_name)s is not "
"currently attached. Message: %(msg)s",
{'smp_name': smp_name, 'msg': ex.message})
def modify_snapshot(self, snap_name, allow_rw=None,
auto_delete=None, keep_for=None):
snap = self.vnx.get_snap(name=snap_name)
        snap.modify(allow_rw=allow_rw, auto_delete=auto_delete,
                    keep_for=keep_for)
def create_consistency_group(self, cg_name, lun_id_list=None):
try:
cg = self.vnx.create_cg(name=cg_name, members=lun_id_list)
except storops_ex.VNXConsistencyGroupNameInUseError:
cg = self.vnx.get_cg(name=cg_name)
# Wait until cg is found on VNX, or deletion will fail afterwards
utils.wait_until(Condition.is_object_existed, vnx_obj=cg)
return cg
def delete_consistency_group(self, cg_name):
cg = self.vnx.get_cg(cg_name)
try:
cg.delete()
except storops_ex.VNXConsistencyGroupNotFoundError:
pass
def create_cg_snapshot(self, cg_snap_name, cg_name):
cg = self.vnx.get_cg(cg_name)
try:
snap = cg.create_snap(cg_snap_name, allow_rw=True)
except storops_ex.VNXSnapNameInUseError:
snap = self.vnx.get_snap(cg_snap_name)
utils.wait_until(Condition.is_object_existed,
vnx_obj=snap)
return snap
def delete_cg_snapshot(self, cg_snap_name):
self.delete_snapshot(cg_snap_name)
def get_serial(self):
return self.vnx.serial
def get_pools(self):
return self.vnx.get_pool()
def get_pool(self, name):
return self.vnx.get_pool(name=name)
def get_iscsi_targets(self, sp=None, port_id=None, vport_id=None):
return self.vnx.get_iscsi_port(sp=sp, port_id=port_id,
vport_id=vport_id,
has_ip=True)
def get_fc_targets(self, sp=None, port_id=None):
return self.vnx.get_fc_port(sp=sp, port_id=port_id)
def get_enablers(self):
return self.vnx.get_ndu()
def is_fast_enabled(self):
return self.vnx.is_auto_tiering_enabled()
def is_compression_enabled(self):
return self.vnx.is_compression_enabled()
def is_dedup_enabled(self):
return self.vnx.is_dedup_enabled()
def is_fast_cache_enabled(self):
return self.vnx.is_fast_cache_enabled()
def is_thin_enabled(self):
return self.vnx.is_thin_enabled()
def is_snap_enabled(self):
return self.vnx.is_snap_enabled()
def is_mirror_view_enabled(self):
return self.vnx.is_mirror_view_sync_enabled()
def get_pool_feature(self):
return self.vnx.get_pool_feature()
def lun_has_snapshot(self, lun):
"""Checks lun has snapshot.
:param lun: instance of VNXLun
"""
snaps = lun.get_snap()
return len(snaps) != 0
def enable_compression(self, lun):
"""Enables compression on lun.
:param lun: instance of VNXLun
"""
try:
lun.enable_compression(ignore_thresholds=True)
except storops_ex.VNXCompressionAlreadyEnabledError:
LOG.warning("Compression has already been enabled on %s.",
lun.name)
def get_vnx_enabler_status(self):
return common.VNXEnablerStatus(
dedup=self.is_dedup_enabled(),
compression=self.is_compression_enabled(),
thin=self.is_thin_enabled(),
fast=self.is_fast_enabled(),
snap=self.is_snap_enabled())
def create_storage_group(self, name):
try:
self.sg_cache[name] = self.vnx.create_sg(name)
except storops_ex.VNXStorageGroupNameInUseError as ex:
# Ignore the failure due to retry
LOG.warning('Storage group %(name)s already exists. '
'Message: %(msg)s',
{'name': name, 'msg': ex.message})
self.sg_cache[name] = self.vnx.get_sg(name=name)
return self.sg_cache[name]
def get_storage_group(self, name):
"""Retrieve the storage group by name.
Check the storage group instance cache first to save
        a CLI call.
If the specified storage group doesn't exist in the cache,
try to grab it from CLI.
:param name: name of the storage group
:return: storage group instance
"""
if name not in self.sg_cache:
self.sg_cache[name] = self.vnx.get_sg(name)
return self.sg_cache[name]
def register_initiator(self, storage_group, host, initiator_port_map):
"""Registers the initiators of `host` to the `storage_group`.
:param storage_group: the storage group object.
:param host: the ip and name information of the initiator.
:param initiator_port_map: the dict specifying which initiators are
bound to which ports.
"""
for (initiator_id, ports_to_bind) in initiator_port_map.items():
for port in ports_to_bind:
try:
storage_group.connect_hba(port, initiator_id, host.name,
host_ip=host.ip)
except storops_ex.VNXStorageGroupError as ex:
LOG.warning('Failed to set path to port %(port)s for '
'initiator %(hba_id)s. Message: %(msg)s',
{'port': port, 'hba_id': initiator_id,
'msg': ex.message})
        if any(initiator_port_map.values()):
            LOG.debug('New paths set for initiators %(hba_ids)s, so update '
                      'storage group with poll.',
                      {'hba_ids': list(initiator_port_map)})
            utils.update_res_with_poll(storage_group)
def ping_node(self, port, ip_address):
iscsi_port = self.get_iscsi_targets(sp=port.sp,
port_id=port.port_id,
vport_id=port.vport_id)
try:
iscsi_port.ping_node(ip_address, count=1)
return True
except storops_ex.VNXPingNodeError:
return False
def add_lun_to_sg(self, storage_group, lun, max_retries):
"""Adds the `lun` to `storage_group`."""
try:
return storage_group.attach_alu(lun, max_retries)
except storops_ex.VNXAluAlreadyAttachedError as ex:
# Ignore the failure due to retry.
return storage_group.get_hlu(lun)
except storops_ex.VNXNoHluAvailableError as ex:
with excutils.save_and_reraise_exception():
                # Reached the max number of retries; fail the attach action.
LOG.error('Failed to add %(lun)s into %(sg)s after '
                          '%(tried)s tries. Reached the max retry count. '
'Message: %(msg)s',
{'lun': lun.lun_id, 'sg': storage_group.name,
'tried': max_retries, 'msg': ex.message})
def get_wwn_of_online_fc_ports(self, ports):
"""Returns wwns of online fc ports.
        The WWN of a port is not included in the returned list when the
        port is absent or down.
"""
wwns = set()
ports_with_all_info = self.vnx.get_fc_port()
for po in ports:
online_list = [p for p in ports_with_all_info if p == po and
p.link_status == 'Up' and p.port_status == 'Online']
wwns.update([p.wwn for p in online_list])
return list(wwns)
def sg_has_lun_attached(self, sg):
return bool(sg.get_alu_hlu_map())
def deregister_initiators(self, initiators):
if not isinstance(initiators, list):
initiators = [initiators]
for initiator_uid in initiators:
self.vnx.remove_hba(initiator_uid)
def update_consistencygroup(self, cg, lun_ids_to_add, lun_ids_to_remove):
lun_ids_in_cg = (set([l.lun_id for l in cg.lun_list]) if cg.lun_list
else set())
# lun_ids_to_add and lun_ids_to_remove never overlap.
lun_ids_updated = ((lun_ids_in_cg | set(lun_ids_to_add)) -
set(lun_ids_to_remove))
if lun_ids_updated:
cg.replace_member(*[self.get_lun(lun_id=lun_id)
for lun_id in lun_ids_updated])
else:
            # Need to remove all LUNs from the cg. However, replace_member
            # cannot handle an empty list, so use delete_member.
cg.delete_member(*[self.get_lun(lun_id=lun_id)
for lun_id in lun_ids_in_cg])
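    # Sketch of the set arithmetic above with illustrative LUN ids:
    #   in_cg = {1, 2}, to_add = [3], to_remove = [2]
    #   updated = ({1, 2} | {3}) - {2} = {1, 3} -> replace_member(lun1, lun3)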
def get_cg(self, name):
return self.vnx.get_cg(name=name)
def get_available_ip(self):
return self.vnx.alive_sp_ip
def get_mirror(self, mirror_name):
return self.vnx.get_mirror_view(mirror_name)
def create_mirror(self, mirror_name, primary_lun_id):
src_lun = self.vnx.get_lun(lun_id=primary_lun_id)
try:
mv = self.vnx.create_mirror_view(mirror_name, src_lun)
except storops_ex.VNXMirrorNameInUseError:
mv = self.vnx.get_mirror_view(mirror_name)
return mv
def delete_mirror(self, mirror_name):
mv = self.vnx.get_mirror_view(mirror_name)
try:
mv.delete()
except storops_ex.VNXMirrorNotFoundError:
pass
def add_image(self, mirror_name, sp_ip, secondary_lun_id):
mv = self.vnx.get_mirror_view(mirror_name)
mv.add_image(sp_ip, secondary_lun_id)
        # Secondary image info usually does not appear immediately,
        # so add a poll here to refresh it.
utils.update_res_with_poll(mv)
utils.wait_until(Condition.is_mirror_synced, mirror=mv)
def remove_image(self, mirror_name):
mv = self.vnx.get_mirror_view(mirror_name)
mv.remove_image()
def fracture_image(self, mirror_name):
mv = self.vnx.get_mirror_view(mirror_name)
mv.fracture_image()
def sync_image(self, mirror_name):
mv = self.vnx.get_mirror_view(mirror_name)
mv.sync_image()
utils.wait_until(Condition.is_mirror_synced, mirror=mv)
def promote_image(self, mirror_name):
mv = self.vnx.get_mirror_view(mirror_name)
mv.promote_image()
def create_mirror_group(self, group_name):
try:
mg = self.vnx.create_mirror_group(group_name)
except storops_ex.VNXMirrorGroupNameInUseError:
mg = self.vnx.get_mirror_group(group_name)
return mg
def delete_mirror_group(self, group_name):
mg = self.vnx.get_mirror_group(group_name)
try:
mg.delete()
except storops_ex.VNXMirrorGroupNotFoundError:
LOG.info('Mirror group %s was already deleted.', group_name)
def add_mirror(self, group_name, mirror_name):
mg = self.vnx.get_mirror_group(group_name)
mv = self.vnx.get_mirror_view(mirror_name)
try:
mg.add_mirror(mv)
except storops_ex.VNXMirrorGroupAlreadyMemberError:
LOG.info('Mirror %(mirror)s is already a member of %(group)s',
{'mirror': mirror_name, 'group': group_name})
return mg
def remove_mirror(self, group_name, mirror_name):
mg = self.vnx.get_mirror_group(group_name)
mv = self.vnx.get_mirror_view(mirror_name)
try:
mg.remove_mirror(mv)
except storops_ex.VNXMirrorGroupMirrorNotMemberError:
LOG.info('Mirror %(mirror)s is not a member of %(group)s',
{'mirror': mirror_name, 'group': group_name})
def promote_mirror_group(self, group_name):
mg = self.vnx.get_mirror_group(group_name)
try:
mg.promote_group()
except storops_ex.VNXMirrorGroupAlreadyPromotedError:
LOG.info('Mirror group %s was already promoted.', group_name)
return mg
def sync_mirror_group(self, group_name):
mg = self.vnx.get_mirror_group(group_name)
mg.sync_group()
def fracture_mirror_group(self, group_name):
mg = self.vnx.get_mirror_group(group_name)
mg.fracture_group()
def get_pool_name(self, lun_name):
lun = self.get_lun(name=lun_name)
utils.update_res_without_poll(lun)
return lun.pool_name
def get_ioclass(self, qos_specs):
ioclasses = []
if qos_specs is not None:
prefix = qos_specs['id']
max_bws = qos_specs[common.QOS_MAX_BWS]
max_iops = qos_specs[common.QOS_MAX_IOPS]
if max_bws:
name = '%(prefix)s-bws-%(max)s' % {
'prefix': prefix, 'max': max_bws}
class_bws = self.vnx.get_ioclass(name=name)
if not class_bws.existed:
class_bws = self.create_ioclass_bws(name,
max_bws)
ioclasses.append(class_bws)
if max_iops:
name = '%(prefix)s-iops-%(max)s' % {
'prefix': prefix, 'max': max_iops}
class_iops = self.vnx.get_ioclass(name=name)
if not class_iops.existed:
class_iops = self.create_ioclass_iops(name,
max_iops)
ioclasses.append(class_iops)
return ioclasses
def create_ioclass_iops(self, name, max_iops):
"""Creates a ioclass by IOPS."""
max_iops = int(max_iops)
ctrl_method = storops.VNXCtrlMethod(
method=storops.VNXCtrlMethod.LIMIT_CTRL,
metric='tt', value=max_iops)
ioclass = self.vnx.create_ioclass(name=name, iotype='rw',
ctrlmethod=ctrl_method)
return ioclass
def create_ioclass_bws(self, name, max_bws):
"""Creates a ioclass by bandwidth in MiB."""
max_bws = int(max_bws)
ctrl_method = storops.VNXCtrlMethod(
method=storops.VNXCtrlMethod.LIMIT_CTRL,
metric='bw', value=max_bws)
ioclass = self.vnx.create_ioclass(name=name, iotype='rw',
ctrlmethod=ctrl_method)
return ioclass
def create_policy(self, policy_name):
"""Creates the policy and starts it."""
policy = self.vnx.get_policy(name=policy_name)
if not policy.existed:
LOG.info('Creating the policy: %s', policy_name)
policy = self.vnx.create_policy(name=policy_name)
return policy
def get_running_policy(self):
"""Returns the only running/measuring policy on VNX.
        .. note:: VNX only allows one running policy.
"""
policies = self.vnx.get_policy()
policies = list(filter(lambda p: p.state == "Running" or p.state ==
"Measuring", policies))
if len(policies) >= 1:
return policies[0], False
else:
return self.create_policy("vnx_policy"), True
def add_lun_to_ioclass(self, ioclass_name, lun_id):
ioclass = self.vnx.get_ioclass(name=ioclass_name)
ioclass.add_lun(lun_id)
|
"""empty message
Revision ID: b347b202819b
Revises: ('33d996bcc382', '65903709c321')
Create Date: 2016-09-19 17:22:40.138601
"""
revision = 'b347b202819b'
down_revision = ('33d996bcc382', '65903709c321')
def upgrade():
pass
def downgrade():
pass
|
import datetime as date
from com.hhj.pystock.snakecoin.block import Block
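# A sketch of what the imported Block class is assumed to provide (this file
# follows the snakecoin tutorial; the real implementation lives in
# com.hhj.pystock.snakecoin.block and may differ):
#
#   import hashlib
#   class Block(object):
#       def __init__(self, index, timestamp, data, previous_hash):
#           self.index = index
#           self.timestamp = timestamp
#           self.data = data
#           self.previous_hash = previous_hash
#           self.hash = self.hash_block()
#       def hash_block(self):
#           sha = hashlib.sha256()
#           sha.update((str(self.index) + str(self.timestamp) +
#                       str(self.data) + str(self.previous_hash)).encode())
#           return sha.hexdigest()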
def create_genesis_block():
# Manually construct a block with
# index zero and arbitrary previous hash
return Block(0, date.datetime.now(), "Genesis Block", "0")
def next_block(last_block):
this_index = last_block.index + 1
this_timestamp = date.datetime.now()
this_data = "Hey! I'm block " + str(this_index)
this_hash = last_block.hash
return Block(this_index, this_timestamp, this_data, this_hash)
blockchain = [create_genesis_block()]
previous_block = blockchain[0]
num_of_blocks_to_add = 20
for i in range(0, num_of_blocks_to_add):
block_to_add = next_block(previous_block)
blockchain.append(block_to_add)
previous_block = block_to_add
# Tell everyone about it!
print("Block #{} has been added to the blockchain!".format(block_to_add.index))
print("Block data :{} !".format(block_to_add.data))
print("Hash: {}\n".format(block_to_add.hash))
|
""" Data objects in group "Humidifiers and Dehumidifiers"
"""
from collections import OrderedDict
import logging
from pyidf.helper import DataObject
logger = logging.getLogger("pyidf")
logger.addHandler(logging.NullHandler())
class HumidifierSteamElectric(DataObject):
""" Corresponds to IDD object `Humidifier:Steam:Electric`
Electrically heated steam humidifier with fan.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'alpha'}),
(u'availability schedule name',
{'name': u'Availability Schedule Name',
'pyname': u'availability_schedule_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'rated capacity',
{'name': u'Rated Capacity',
'pyname': u'rated_capacity',
'required-field': False,
'autosizable': True,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real',
'unit': u'm3/s'}),
(u'rated power',
{'name': u'Rated Power',
'pyname': u'rated_power',
'required-field': False,
'autosizable': True,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real',
'unit': u'W'}),
(u'rated fan power',
{'name': u'Rated Fan Power',
'pyname': u'rated_fan_power',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real',
'unit': u'W'}),
(u'standby power',
{'name': u'Standby Power',
'pyname': u'standby_power',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real',
'unit': u'W'}),
(u'air inlet node name',
{'name': u'Air Inlet Node Name',
'pyname': u'air_inlet_node_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'air outlet node name',
{'name': u'Air Outlet Node Name',
'pyname': u'air_outlet_node_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'water storage tank name',
{'name': u'Water Storage Tank Name',
'pyname': u'water_storage_tank_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'})]),
'format': None,
'group': u'Humidifiers and Dehumidifiers',
'min-fields': 0,
'name': u'Humidifier:Steam:Electric',
'pyname': u'HumidifierSteamElectric',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def availability_schedule_name(self):
"""field `Availability Schedule Name`
| Availability schedule name for this system. Schedule value > 0 means the system is available.
| If this field is blank, the system is always available.
Args:
value (str): value for IDD Field `Availability Schedule Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `availability_schedule_name` or None if not set
"""
return self["Availability Schedule Name"]
@availability_schedule_name.setter
def availability_schedule_name(self, value=None):
"""Corresponds to IDD field `Availability Schedule Name`"""
self["Availability Schedule Name"] = value
@property
def rated_capacity(self):
"""field `Rated Capacity`
| Capacity is m3/s of water at 5.05 C
| Units: m3/s
| IP-Units: gal/min
Args:
value (float or "Autosize"): value for IDD Field `Rated Capacity`
Raises:
ValueError: if `value` is not a valid value
Returns:
float or "Autosize": the value of `rated_capacity` or None if not set
"""
return self["Rated Capacity"]
@rated_capacity.setter
def rated_capacity(self, value=None):
"""Corresponds to IDD field `Rated Capacity`"""
self["Rated Capacity"] = value
@property
def rated_power(self):
"""field `Rated Power`
| if autosized the rated power is calculated from the rated capacity
| and enthalpy rise of water from 20.0C to 100.0C steam and assumes
| electric to thermal energy conversion efficiency of 100.0%
| Units: W
| IP-Units: W
Args:
value (float or "Autosize"): value for IDD Field `Rated Power`
Raises:
ValueError: if `value` is not a valid value
Returns:
float or "Autosize": the value of `rated_power` or None if not set
"""
return self["Rated Power"]
@rated_power.setter
def rated_power(self, value=None):
"""Corresponds to IDD field `Rated Power`"""
self["Rated Power"] = value
@property
def rated_fan_power(self):
"""field `Rated Fan Power`
| Units: W
| IP-Units: W
Args:
value (float): value for IDD Field `Rated Fan Power`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `rated_fan_power` or None if not set
"""
return self["Rated Fan Power"]
@rated_fan_power.setter
def rated_fan_power(self, value=None):
"""Corresponds to IDD field `Rated Fan Power`"""
self["Rated Fan Power"] = value
@property
def standby_power(self):
"""field `Standby Power`
| Units: W
| IP-Units: W
Args:
value (float): value for IDD Field `Standby Power`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `standby_power` or None if not set
"""
return self["Standby Power"]
@standby_power.setter
def standby_power(self, value=None):
"""Corresponds to IDD field `Standby Power`"""
self["Standby Power"] = value
@property
def air_inlet_node_name(self):
"""field `Air Inlet Node Name`
Args:
value (str): value for IDD Field `Air Inlet Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `air_inlet_node_name` or None if not set
"""
return self["Air Inlet Node Name"]
@air_inlet_node_name.setter
def air_inlet_node_name(self, value=None):
"""Corresponds to IDD field `Air Inlet Node Name`"""
self["Air Inlet Node Name"] = value
@property
def air_outlet_node_name(self):
"""field `Air Outlet Node Name`
Args:
value (str): value for IDD Field `Air Outlet Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `air_outlet_node_name` or None if not set
"""
return self["Air Outlet Node Name"]
@air_outlet_node_name.setter
def air_outlet_node_name(self, value=None):
"""Corresponds to IDD field `Air Outlet Node Name`"""
self["Air Outlet Node Name"] = value
@property
def water_storage_tank_name(self):
"""field `Water Storage Tank Name`
Args:
value (str): value for IDD Field `Water Storage Tank Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `water_storage_tank_name` or None if not set
"""
return self["Water Storage Tank Name"]
@water_storage_tank_name.setter
def water_storage_tank_name(self, value=None):
"""Corresponds to IDD field `Water Storage Tank Name`"""
self["Water Storage Tank Name"] = value
class HumidifierSteamGas(DataObject):
""" Corresponds to IDD object `Humidifier:Steam:Gas`
Natural gas fired steam humidifier with optional blower fan.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'alpha'}),
(u'availability schedule name',
{'name': u'Availability Schedule Name',
'pyname': u'availability_schedule_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'rated capacity',
{'name': u'Rated Capacity',
'pyname': u'rated_capacity',
'required-field': False,
'autosizable': True,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real',
'unit': u'm3/s'}),
(u'rated gas use rate',
{'name': u'Rated Gas Use Rate',
'pyname': u'rated_gas_use_rate',
'required-field': False,
'autosizable': True,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real',
'unit': u'W'}),
(u'thermal efficiency',
{'name': u'Thermal Efficiency',
'pyname': u'thermal_efficiency',
'default': 0.8,
'minimum>': 0.0,
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'dimensionless'}),
(u'thermal efficiency modifier curve name',
{'name': u'Thermal Efficiency Modifier Curve Name',
'pyname': u'thermal_efficiency_modifier_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'rated fan power',
{'name': u'Rated Fan Power',
'pyname': u'rated_fan_power',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real',
'unit': u'W'}),
(u'auxiliary electric power',
{'name': u'Auxiliary Electric Power',
'pyname': u'auxiliary_electric_power',
'default': 0.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real',
'unit': u'W'}),
(u'air inlet node name',
{'name': u'Air Inlet Node Name',
'pyname': u'air_inlet_node_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'air outlet node name',
{'name': u'Air Outlet Node Name',
'pyname': u'air_outlet_node_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'water storage tank name',
{'name': u'Water Storage Tank Name',
'pyname': u'water_storage_tank_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'inlet water temperature option',
{'name': u'Inlet Water Temperature Option',
'pyname': u'inlet_water_temperature_option',
'default': u'FixedInletWaterTemperature',
'required-field': False,
'autosizable': False,
'accepted-values': [u'FixedInletWaterTemperature',
u'VariableInletWaterTemperature'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Humidifiers and Dehumidifiers',
'min-fields': 0,
'name': u'Humidifier:Steam:Gas',
'pyname': u'HumidifierSteamGas',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def availability_schedule_name(self):
"""field `Availability Schedule Name`
| Availability schedule name for this system. Schedule value > 0 means the system is available.
| If this field is blank, the system is always available.
Args:
value (str): value for IDD Field `Availability Schedule Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `availability_schedule_name` or None if not set
"""
return self["Availability Schedule Name"]
@availability_schedule_name.setter
def availability_schedule_name(self, value=None):
"""Corresponds to IDD field `Availability Schedule Name`"""
self["Availability Schedule Name"] = value
@property
def rated_capacity(self):
"""field `Rated Capacity`
| Capacity is m3/s of water at 5.05 C
| The nominal full capacity water addition rate in m3/s of water at 5.05 C
| Units: m3/s
| IP-Units: gal/min
Args:
value (float or "Autosize"): value for IDD Field `Rated Capacity`
Raises:
ValueError: if `value` is not a valid value
Returns:
float or "Autosize": the value of `rated_capacity` or None if not set
"""
return self["Rated Capacity"]
@rated_capacity.setter
def rated_capacity(self, value=None):
"""Corresponds to IDD field `Rated Capacity`"""
self["Rated Capacity"] = value
@property
def rated_gas_use_rate(self):
"""field `Rated Gas Use Rate`
        | If autosized, the rated gas use rate is calculated from the rated
        | capacity, the enthalpy rise of water from 20.0C to 100.0C steam, and the
        | user-input thermal efficiency value specified in the next input field. If this
        | input field is hard-sized and the Inlet Water Temperature Option input field is
        | set to FixedInletWaterTemperature, then the thermal efficiency input
        | field will not be used; if the Inlet Water Temperature Option input
        | field is set to VariableInletWaterTemperature, then the thermal efficiency
        | input value is overridden by a value calculated from the capacity, rated gas use
        | rate, and design conditions.
| Units: W
| IP-Units: W
Args:
value (float or "Autosize"): value for IDD Field `Rated Gas Use Rate`
Raises:
ValueError: if `value` is not a valid value
Returns:
float or "Autosize": the value of `rated_gas_use_rate` or None if not set
"""
return self["Rated Gas Use Rate"]
@rated_gas_use_rate.setter
def rated_gas_use_rate(self, value=None):
"""Corresponds to IDD field `Rated Gas Use Rate`"""
self["Rated Gas Use Rate"] = value
@property
def thermal_efficiency(self):
"""field `Thermal Efficiency`
| Based on the higher heating value of fuel.
| If "Rated Gas Use Rate" in the field above is not auto-sized and the Inlet Water
| Temperature Option input field is specified as FixedInletWaterTemperature, then the
| thermal efficiency specified will not be used in the calculation, or else if the
| Inlet Water Temperature Option input field is selected as VariableInletWaterTemperature,
| then the thermal efficiency value is overridden by a value calculated from the capacity,
| rated gas use rate and design condition.
| Units: dimensionless
| Default value: 0.8
| value <= 1.0
Args:
value (float): value for IDD Field `Thermal Efficiency`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `thermal_efficiency` or None if not set
"""
return self["Thermal Efficiency"]
@thermal_efficiency.setter
def thermal_efficiency(self, value=0.8):
"""Corresponds to IDD field `Thermal Efficiency`"""
self["Thermal Efficiency"] = value
@property
def thermal_efficiency_modifier_curve_name(self):
"""field `Thermal Efficiency Modifier Curve Name`
| Linear, Quadratic and Cubic efficiency curves are solely a function of PLR.
| Linear = C1 + C2*PLR
| Quadratic = C1 + C2*PLR + C3*PLR^2
| Cubic = C1 + C2*PLR + C3*PLR^2 + C4*PLR^3
        | This is the thermal efficiency modifier curve name of the gas-fired steam humidifier.
        | This curve is normalized, i.e., the curve output value at the rated condition is 1.0.
Args:
value (str): value for IDD Field `Thermal Efficiency Modifier Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `thermal_efficiency_modifier_curve_name` or None if not set
"""
return self["Thermal Efficiency Modifier Curve Name"]
@thermal_efficiency_modifier_curve_name.setter
def thermal_efficiency_modifier_curve_name(self, value=None):
"""Corresponds to IDD field `Thermal Efficiency Modifier Curve Name`"""
self["Thermal Efficiency Modifier Curve Name"] = value
@property
def rated_fan_power(self):
"""field `Rated Fan Power`
| The nominal full capacity electric power input to the blower fan in Watts. If no
        | blower fan is required to inject the dry steam into the supply air stream, then
| this input field is set to zero.
| Units: W
| IP-Units: W
Args:
value (float): value for IDD Field `Rated Fan Power`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `rated_fan_power` or None if not set
"""
return self["Rated Fan Power"]
@rated_fan_power.setter
def rated_fan_power(self, value=None):
"""Corresponds to IDD field `Rated Fan Power`"""
self["Rated Fan Power"] = value
@property
def auxiliary_electric_power(self):
"""field `Auxiliary Electric Power`
| The auxiliary electric power input in watts. This amount of power will be consumed
| whenever the unit is available (as defined by the availability schedule). This
        | electric power is used for control purposes only.
| Units: W
| IP-Units: W
Args:
value (float): value for IDD Field `Auxiliary Electric Power`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `auxiliary_electric_power` or None if not set
"""
return self["Auxiliary Electric Power"]
@auxiliary_electric_power.setter
def auxiliary_electric_power(self, value=None):
"""Corresponds to IDD field `Auxiliary Electric Power`"""
self["Auxiliary Electric Power"] = value
@property
def air_inlet_node_name(self):
"""field `Air Inlet Node Name`
Args:
value (str): value for IDD Field `Air Inlet Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `air_inlet_node_name` or None if not set
"""
return self["Air Inlet Node Name"]
@air_inlet_node_name.setter
def air_inlet_node_name(self, value=None):
"""Corresponds to IDD field `Air Inlet Node Name`"""
self["Air Inlet Node Name"] = value
@property
def air_outlet_node_name(self):
"""field `Air Outlet Node Name`
Args:
value (str): value for IDD Field `Air Outlet Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `air_outlet_node_name` or None if not set
"""
return self["Air Outlet Node Name"]
@air_outlet_node_name.setter
def air_outlet_node_name(self, value=None):
"""Corresponds to IDD field `Air Outlet Node Name`"""
self["Air Outlet Node Name"] = value
@property
def water_storage_tank_name(self):
"""field `Water Storage Tank Name`
Args:
value (str): value for IDD Field `Water Storage Tank Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `water_storage_tank_name` or None if not set
"""
return self["Water Storage Tank Name"]
@water_storage_tank_name.setter
def water_storage_tank_name(self, value=None):
"""Corresponds to IDD field `Water Storage Tank Name`"""
self["Water Storage Tank Name"] = value
@property
def inlet_water_temperature_option(self):
"""field `Inlet Water Temperature Option`
        | The inlet water temperature can be fixed at 20C, as is done for the electric steam
        | humidifier, or it can be allowed to vary with the temperature of the water source.
        | Currently allowed water sources are mains water or a water storage tank in water use objects.
        | If FixedInletWaterTemperature is specified, then a fixed 20C water temperature will be
        | used; if VariableInletWaterTemperature is specified, then the inlet water temperature
        | will vary depending on the source water temperature. If this input field is left blank,
        | then a fixed inlet water temperature of 20C will be assumed.
| Default value: FixedInletWaterTemperature
Args:
value (str): value for IDD Field `Inlet Water Temperature Option`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `inlet_water_temperature_option` or None if not set
"""
return self["Inlet Water Temperature Option"]
@inlet_water_temperature_option.setter
def inlet_water_temperature_option(
self,
value="FixedInletWaterTemperature"):
"""Corresponds to IDD field `Inlet Water Temperature Option`"""
self["Inlet Water Temperature Option"] = value
class DehumidifierDesiccantNoFans(DataObject):
""" Corresponds to IDD object `Dehumidifier:Desiccant:NoFans`
This object models a solid desiccant dehumidifier. The process
air stream is the air which is dehumidified. The regeneration air
stream is the air which is heated to regenerate the desiccant.
This object determines the process air outlet conditions, the
load on the regeneration heating coil, the electric power consumption
for the wheel rotor motor, and the regeneration air fan mass flow rate.
All other heat exchangers are modeled as separate objects connected
to the inlet and outlet nodes of the dehumidifier. The solid
desiccant dehumidifier is typically used in an AirLoopHVAC:OutdoorAirSystem,
but can also be specified in any AirLoopHVAC.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': 'alpha'}),
(u'availability schedule name',
{'name': u'Availability Schedule Name',
'pyname': u'availability_schedule_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'process air inlet node name',
{'name': u'Process Air Inlet Node Name',
'pyname': u'process_air_inlet_node_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'process air outlet node name',
{'name': u'Process Air Outlet Node Name',
'pyname': u'process_air_outlet_node_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'regeneration air inlet node name',
{'name': u'Regeneration Air Inlet Node Name',
'pyname': u'regeneration_air_inlet_node_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'regeneration fan inlet node name',
{'name': u'Regeneration Fan Inlet Node Name',
'pyname': u'regeneration_fan_inlet_node_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'control type',
{'name': u'Control Type',
'pyname': u'control_type',
'required-field': False,
'autosizable': False,
'accepted-values': [u'LeavingMaximumHumidityRatioSetpoint',
u'SystemNodeMaximumHumidityRatioSetpoint'],
'autocalculatable': False,
'type': 'alpha'}),
(u'leaving maximum humidity ratio setpoint',
{'name': u'Leaving Maximum Humidity Ratio Setpoint',
'pyname': u'leaving_maximum_humidity_ratio_setpoint',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'kgWater/kgDryAir'}),
(u'nominal process air flow rate',
{'name': u'Nominal Process Air Flow Rate',
'pyname': u'nominal_process_air_flow_rate',
'minimum>': 0.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'm3/s'}),
(u'nominal process air velocity',
{'name': u'Nominal Process Air Velocity',
'pyname': u'nominal_process_air_velocity',
'minimum>': 0.0,
'maximum': 6.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'm/s'}),
(u'rotor power',
{'name': u'Rotor Power',
'pyname': u'rotor_power',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real',
'unit': u'W'}),
(u'regeneration coil object type',
{'name': u'Regeneration Coil Object Type',
'pyname': u'regeneration_coil_object_type',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Coil:Heating:Electric',
u'Coil:Heating:Gas',
u'Coil:Heating:Water',
u'Coil:Heating:Steam'],
'autocalculatable': False,
'type': 'alpha'}),
(u'regeneration coil name',
{'name': u'Regeneration Coil Name',
'pyname': u'regeneration_coil_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'regeneration fan object type',
{'name': u'Regeneration Fan Object Type',
'pyname': u'regeneration_fan_object_type',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Fan:VariableVolume',
u'Fan:ConstantVolume'],
'autocalculatable': False,
'type': 'alpha'}),
(u'regeneration fan name',
{'name': u'Regeneration Fan Name',
'pyname': u'regeneration_fan_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'performance model type',
{'name': u'Performance Model Type',
'pyname': u'performance_model_type',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Default',
u'UserCurves'],
'autocalculatable': False,
'type': 'alpha'}),
(u'leaving dry-bulb function of entering dry-bulb and humidity ratio curve name',
{'name': u'Leaving Dry-Bulb Function of Entering Dry-Bulb and Humidity Ratio Curve Name',
'pyname': u'leaving_drybulb_function_of_entering_drybulb_and_humidity_ratio_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'leaving dry-bulb function of air velocity curve name',
{'name': u'Leaving Dry-Bulb Function of Air Velocity Curve Name',
'pyname': u'leaving_drybulb_function_of_air_velocity_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'leaving humidity ratio function of entering dry-bulb and humidity ratio curve name',
{'name': u'Leaving Humidity Ratio Function of Entering Dry-Bulb and Humidity Ratio Curve Name',
'pyname': u'leaving_humidity_ratio_function_of_entering_drybulb_and_humidity_ratio_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'leaving humidity ratio function of air velocity curve name',
{'name': u'Leaving Humidity Ratio Function of Air Velocity Curve Name',
'pyname': u'leaving_humidity_ratio_function_of_air_velocity_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'regeneration energy function of entering dry-bulb and humidity ratio curve name',
{'name': u'Regeneration Energy Function of Entering Dry-Bulb and Humidity Ratio Curve Name',
'pyname': u'regeneration_energy_function_of_entering_drybulb_and_humidity_ratio_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'regeneration energy function of air velocity curve name',
{'name': u'Regeneration Energy Function of Air Velocity Curve Name',
'pyname': u'regeneration_energy_function_of_air_velocity_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'regeneration velocity function of entering dry-bulb and humidity ratio curve name',
{'name': u'Regeneration Velocity Function of Entering Dry-Bulb and Humidity Ratio Curve Name',
'pyname': u'regeneration_velocity_function_of_entering_drybulb_and_humidity_ratio_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'regeneration velocity function of air velocity curve name',
{'name': u'Regeneration Velocity Function of Air Velocity Curve Name',
'pyname': u'regeneration_velocity_function_of_air_velocity_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'nominal regeneration temperature',
{'name': u'Nominal Regeneration Temperature',
'pyname': u'nominal_regeneration_temperature',
'maximum': 250.0,
'required-field': False,
'autosizable': False,
'minimum': 40.0,
'autocalculatable': False,
'type': u'real',
'unit': u'C'})]),
'format': None,
'group': u'Humidifiers and Dehumidifiers',
'min-fields': 0,
'name': u'Dehumidifier:Desiccant:NoFans',
'pyname': u'DehumidifierDesiccantNoFans',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def availability_schedule_name(self):
"""field `Availability Schedule Name`
| Availability schedule name for this system. Schedule value > 0 means the system is available.
| If this field is blank, the system is always available.
Args:
value (str): value for IDD Field `Availability Schedule Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `availability_schedule_name` or None if not set
"""
return self["Availability Schedule Name"]
@availability_schedule_name.setter
def availability_schedule_name(self, value=None):
"""Corresponds to IDD field `Availability Schedule Name`"""
self["Availability Schedule Name"] = value
@property
def process_air_inlet_node_name(self):
"""field `Process Air Inlet Node Name`
| This is the node entering the process side of the desiccant wheel.
Args:
value (str): value for IDD Field `Process Air Inlet Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `process_air_inlet_node_name` or None if not set
"""
return self["Process Air Inlet Node Name"]
@process_air_inlet_node_name.setter
def process_air_inlet_node_name(self, value=None):
"""Corresponds to IDD field `Process Air Inlet Node Name`"""
self["Process Air Inlet Node Name"] = value
@property
def process_air_outlet_node_name(self):
"""field `Process Air Outlet Node Name`
| This is the node leaving the process side of the desiccant wheel.
Args:
value (str): value for IDD Field `Process Air Outlet Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `process_air_outlet_node_name` or None if not set
"""
return self["Process Air Outlet Node Name"]
@process_air_outlet_node_name.setter
def process_air_outlet_node_name(self, value=None):
"""Corresponds to IDD field `Process Air Outlet Node Name`"""
self["Process Air Outlet Node Name"] = value
@property
def regeneration_air_inlet_node_name(self):
"""field `Regeneration Air Inlet Node Name`
| This is the node entering the regeneration side of the desiccant wheel
| after the regeneration coil.
Args:
value (str): value for IDD Field `Regeneration Air Inlet Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_air_inlet_node_name` or None if not set
"""
return self["Regeneration Air Inlet Node Name"]
@regeneration_air_inlet_node_name.setter
def regeneration_air_inlet_node_name(self, value=None):
"""Corresponds to IDD field `Regeneration Air Inlet Node Name`"""
self["Regeneration Air Inlet Node Name"] = value
@property
def regeneration_fan_inlet_node_name(self):
"""field `Regeneration Fan Inlet Node Name`
| Node for air entering the regeneration fan, mass flow is set
| by the desiccant dehumidifier module.
Args:
value (str): value for IDD Field `Regeneration Fan Inlet Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_fan_inlet_node_name` or None if not set
"""
return self["Regeneration Fan Inlet Node Name"]
@regeneration_fan_inlet_node_name.setter
def regeneration_fan_inlet_node_name(self, value=None):
"""Corresponds to IDD field `Regeneration Fan Inlet Node Name`"""
self["Regeneration Fan Inlet Node Name"] = value
@property
def control_type(self):
"""field `Control Type`
| Type of setpoint control:
| LeavingMaximumHumidityRatioSetpoint means that the unit is controlled
| to deliver air at the Leaving Max Humidity Ratio Setpoint (see below),
| SystemNodeMaximumHumidityRatioSetpoint means that the leaving humidity
| ratio setpoint is the System Node Humidity Ratio Max property
        | of the Process Air Outlet Node. A SetpointManager
        | object must be used to control this setpoint.
        | Both control types use bypass dampers to prevent overdrying.
Args:
value (str): value for IDD Field `Control Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `control_type` or None if not set
"""
return self["Control Type"]
@control_type.setter
def control_type(self, value=None):
"""Corresponds to IDD field `Control Type`"""
self["Control Type"] = value
@property
def leaving_maximum_humidity_ratio_setpoint(self):
"""field `Leaving Maximum Humidity Ratio Setpoint`
| Fixed setpoint for maximum process air leaving humidity ratio
| Applicable only when Control Type = LeavingMaximumHumidityRatioSetpoint.
| Units: kgWater/kgDryAir
Args:
value (float): value for IDD Field `Leaving Maximum Humidity Ratio Setpoint`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `leaving_maximum_humidity_ratio_setpoint` or None if not set
"""
return self["Leaving Maximum Humidity Ratio Setpoint"]
@leaving_maximum_humidity_ratio_setpoint.setter
def leaving_maximum_humidity_ratio_setpoint(self, value=None):
"""Corresponds to IDD field `Leaving Maximum Humidity Ratio
Setpoint`"""
self["Leaving Maximum Humidity Ratio Setpoint"] = value
@property
def nominal_process_air_flow_rate(self):
"""field `Nominal Process Air Flow Rate`
| Process air flow rate at nominal conditions
| Units: m3/s
Args:
value (float): value for IDD Field `Nominal Process Air Flow Rate`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `nominal_process_air_flow_rate` or None if not set
"""
return self["Nominal Process Air Flow Rate"]
@nominal_process_air_flow_rate.setter
def nominal_process_air_flow_rate(self, value=None):
"""Corresponds to IDD field `Nominal Process Air Flow Rate`"""
self["Nominal Process Air Flow Rate"] = value
@property
def nominal_process_air_velocity(self):
"""field `Nominal Process Air Velocity`
| Process air velocity at nominal flow
| When using Performance Model Type of Default, must be 2.032 to 4.064 m/s (400 to 800 fpm)
| Units: m/s
| value <= 6.0
Args:
value (float): value for IDD Field `Nominal Process Air Velocity`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `nominal_process_air_velocity` or None if not set
"""
return self["Nominal Process Air Velocity"]
@nominal_process_air_velocity.setter
def nominal_process_air_velocity(self, value=None):
"""Corresponds to IDD field `Nominal Process Air Velocity`"""
self["Nominal Process Air Velocity"] = value
@property
def rotor_power(self):
"""field `Rotor Power`
| Power input to wheel rotor motor
| Units: W
| IP-Units: W
Args:
value (float): value for IDD Field `Rotor Power`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `rotor_power` or None if not set
"""
return self["Rotor Power"]
@rotor_power.setter
def rotor_power(self, value=None):
"""Corresponds to IDD field `Rotor Power`"""
self["Rotor Power"] = value
@property
def regeneration_coil_object_type(self):
"""field `Regeneration Coil Object Type`
| heating coil type
| works with gas, electric, hot water and steam heating coils
Args:
value (str): value for IDD Field `Regeneration Coil Object Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_coil_object_type` or None if not set
"""
return self["Regeneration Coil Object Type"]
@regeneration_coil_object_type.setter
def regeneration_coil_object_type(self, value=None):
"""Corresponds to IDD field `Regeneration Coil Object Type`"""
self["Regeneration Coil Object Type"] = value
@property
def regeneration_coil_name(self):
"""field `Regeneration Coil Name`
| Name of heating coil object for regeneration air
Args:
value (str): value for IDD Field `Regeneration Coil Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_coil_name` or None if not set
"""
return self["Regeneration Coil Name"]
@regeneration_coil_name.setter
def regeneration_coil_name(self, value=None):
"""Corresponds to IDD field `Regeneration Coil Name`"""
self["Regeneration Coil Name"] = value
@property
def regeneration_fan_object_type(self):
"""field `Regeneration Fan Object Type`
| Type of fan object for regeneration air. When using the Default
| Performance Model Type (see below), only Fan:VariableVolume is valid.
Args:
value (str): value for IDD Field `Regeneration Fan Object Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_fan_object_type` or None if not set
"""
return self["Regeneration Fan Object Type"]
@regeneration_fan_object_type.setter
def regeneration_fan_object_type(self, value=None):
"""Corresponds to IDD field `Regeneration Fan Object Type`"""
self["Regeneration Fan Object Type"] = value
@property
def regeneration_fan_name(self):
"""field `Regeneration Fan Name`
| Name of fan object for regeneration air
Args:
value (str): value for IDD Field `Regeneration Fan Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_fan_name` or None if not set
"""
return self["Regeneration Fan Name"]
@regeneration_fan_name.setter
def regeneration_fan_name(self, value=None):
"""Corresponds to IDD field `Regeneration Fan Name`"""
self["Regeneration Fan Name"] = value
@property
def performance_model_type(self):
"""field `Performance Model Type`
| Specifies whether the default performance model or user-specified
| curves should be used to model the performance. The default model
| is a generic solid desiccant wheel using performance curves of the form:
| curve = C1 + C2*edb + C3*edb**2 + C4*ew + C5*ew**2 + C6*vel + C7*vel**2
| + C8*edb*ew + C9*edb**2*ew**2 + C10*edb*vel + C11*edb**2*vel**2
| + C12*ew*vel + C13*ew**2*vel**2 + C14*ALOG(edb) + C15*ALOG(ew) + C16*ALOG(vel)
| edb = process entering dry-bulb temperature [C]
| ew = process entering humidity ratio [kgWater/kgDryAir]
| vel = process air velocity [m/s]
| If UserCurves are specified, then performance is calculated as follows:
| Leaving Dry-Bulb = (Leaving Dry-Bulb fTW Curve) * (Leaving Dry-Bulb fV Curve)
| Leaving Humidity Ratio = (Leaving Humidity Ratio fTW Curve) * (Leaving Humidity Ratio fV Curve)
| Regen Energy = (Regen Energy fTW Curve) * (Regen Energy fV Curve)
| Regen Velocity = (Regen Velocity fTW Curve) * (Regen Velocity fV Curve)
Args:
value (str): value for IDD Field `Performance Model Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `performance_model_type` or None if not set
"""
return self["Performance Model Type"]
@performance_model_type.setter
def performance_model_type(self, value=None):
"""Corresponds to IDD field `Performance Model Type`"""
self["Performance Model Type"] = value
@property
def leaving_drybulb_function_of_entering_drybulb_and_humidity_ratio_curve_name(
self):
"""field `Leaving Dry-Bulb Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
| Leaving dry-bulb of process air as a function of entering dry-bulb
| and entering humidity ratio, biquadratic curve
| curve = C1 + C2*edb + C3*edb**2 + C4*ew + C5*ew**2 + C6*edb*ew
| edb = process entering dry-bulb temperature [C]
| ew = process entering humidity ratio [kgWater/kgDryAir]
Args:
value (str): value for IDD Field `Leaving Dry-Bulb Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `leaving_drybulb_function_of_entering_drybulb_and_humidity_ratio_curve_name` or None if not set
"""
return self[
"Leaving Dry-Bulb Function of Entering Dry-Bulb and Humidity Ratio Curve Name"]
@leaving_drybulb_function_of_entering_drybulb_and_humidity_ratio_curve_name.setter
def leaving_drybulb_function_of_entering_drybulb_and_humidity_ratio_curve_name(
self,
value=None):
""" Corresponds to IDD field `Leaving Dry-Bulb Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
"""
self[
"Leaving Dry-Bulb Function of Entering Dry-Bulb and Humidity Ratio Curve Name"] = value
@property
def leaving_drybulb_function_of_air_velocity_curve_name(self):
"""field `Leaving Dry-Bulb Function of Air Velocity Curve Name`
| Leaving dry-bulb of process air as a function of air velocity,
| quadratic curve.
| curve = C1 + C2*v + C3*v**2
| v = process air velocity [m/s]
Args:
value (str): value for IDD Field `Leaving Dry-Bulb Function of Air Velocity Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `leaving_drybulb_function_of_air_velocity_curve_name` or None if not set
"""
return self["Leaving Dry-Bulb Function of Air Velocity Curve Name"]
@leaving_drybulb_function_of_air_velocity_curve_name.setter
def leaving_drybulb_function_of_air_velocity_curve_name(self, value=None):
""" Corresponds to IDD field `Leaving Dry-Bulb Function of Air Velocity Curve Name`
"""
self["Leaving Dry-Bulb Function of Air Velocity Curve Name"] = value
@property
def leaving_humidity_ratio_function_of_entering_drybulb_and_humidity_ratio_curve_name(
self):
"""field `Leaving Humidity Ratio Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
| Leaving humidity ratio of process air as a function of entering dry-bulb
| and entering humidity ratio, biquadratic curve
| curve = C1 + C2*edb + C3*edb**2 + C4*ew + C5*ew**2 + C6*edb*ew
| edb = process entering dry-bulb temperature [C]
| ew = process entering humidity ratio [kgWater/kgDryAir]
Args:
value (str): value for IDD Field `Leaving Humidity Ratio Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `leaving_humidity_ratio_function_of_entering_drybulb_and_humidity_ratio_curve_name` or None if not set
"""
return self[
"Leaving Humidity Ratio Function of Entering Dry-Bulb and Humidity Ratio Curve Name"]
@leaving_humidity_ratio_function_of_entering_drybulb_and_humidity_ratio_curve_name.setter
def leaving_humidity_ratio_function_of_entering_drybulb_and_humidity_ratio_curve_name(
self,
value=None):
""" Corresponds to IDD field `Leaving Humidity Ratio Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
"""
self[
"Leaving Humidity Ratio Function of Entering Dry-Bulb and Humidity Ratio Curve Name"] = value
@property
def leaving_humidity_ratio_function_of_air_velocity_curve_name(self):
"""field `Leaving Humidity Ratio Function of Air Velocity Curve Name`
| Leaving humidity ratio of process air as a function of
| process air velocity, quadratic curve.
| curve = C1 + C2*v + C3*v**2
| v = process air velocity [m/s]
Args:
value (str): value for IDD Field `Leaving Humidity Ratio Function of Air Velocity Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `leaving_humidity_ratio_function_of_air_velocity_curve_name` or None if not set
"""
return self[
"Leaving Humidity Ratio Function of Air Velocity Curve Name"]
@leaving_humidity_ratio_function_of_air_velocity_curve_name.setter
def leaving_humidity_ratio_function_of_air_velocity_curve_name(
self,
value=None):
"""Corresponds to IDD field `Leaving Humidity Ratio Function of Air
Velocity Curve Name`"""
self[
"Leaving Humidity Ratio Function of Air Velocity Curve Name"] = value
@property
def regeneration_energy_function_of_entering_drybulb_and_humidity_ratio_curve_name(
self):
"""field `Regeneration Energy Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
| Regeneration energy [J/kg of water removed] as a function of
| entering dry-bulb and entering humidity ratio, biquadratic curve
| curve = C1 + C2*edb + C3*edb**2 + C4*ew + C5*ew**2 + C6*edb*ew
| edb = process entering dry-bulb temperature [C]
| ew = process entering humidity ratio [kgWater/kgDryAir]
Args:
value (str): value for IDD Field `Regeneration Energy Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_energy_function_of_entering_drybulb_and_humidity_ratio_curve_name` or None if not set
"""
return self[
"Regeneration Energy Function of Entering Dry-Bulb and Humidity Ratio Curve Name"]
@regeneration_energy_function_of_entering_drybulb_and_humidity_ratio_curve_name.setter
def regeneration_energy_function_of_entering_drybulb_and_humidity_ratio_curve_name(
self,
value=None):
""" Corresponds to IDD field `Regeneration Energy Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
"""
self[
"Regeneration Energy Function of Entering Dry-Bulb and Humidity Ratio Curve Name"] = value
@property
def regeneration_energy_function_of_air_velocity_curve_name(self):
"""field `Regeneration Energy Function of Air Velocity Curve Name`
| Regeneration energy [J/kg of water removed] as a function of
| process air velocity, quadratic curve.
| curve = C1 + C2*v + C3*v**2
| v = process air velocity [m/s]
Args:
value (str): value for IDD Field `Regeneration Energy Function of Air Velocity Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_energy_function_of_air_velocity_curve_name` or None if not set
"""
return self["Regeneration Energy Function of Air Velocity Curve Name"]
@regeneration_energy_function_of_air_velocity_curve_name.setter
def regeneration_energy_function_of_air_velocity_curve_name(
self,
value=None):
"""Corresponds to IDD field `Regeneration Energy Function of Air
Velocity Curve Name`"""
self["Regeneration Energy Function of Air Velocity Curve Name"] = value
@property
def regeneration_velocity_function_of_entering_drybulb_and_humidity_ratio_curve_name(
self):
"""field `Regeneration Velocity Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
| Regeneration velocity [m/s] as a function of
| entering dry-bulb and entering humidity ratio, biquadratic curve
| curve = C1 + C2*edb + C3*edb**2 + C4*ew + C5*ew**2 + C6*edb*ew
| edb = process entering dry-bulb temperature [C]
| ew = process entering humidity ratio [kgWater/kgDryAir]
Args:
value (str): value for IDD Field `Regeneration Velocity Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_velocity_function_of_entering_drybulb_and_humidity_ratio_curve_name` or None if not set
"""
return self[
"Regeneration Velocity Function of Entering Dry-Bulb and Humidity Ratio Curve Name"]
@regeneration_velocity_function_of_entering_drybulb_and_humidity_ratio_curve_name.setter
def regeneration_velocity_function_of_entering_drybulb_and_humidity_ratio_curve_name(
self,
value=None):
""" Corresponds to IDD field `Regeneration Velocity Function of Entering Dry-Bulb and Humidity Ratio Curve Name`
"""
self[
"Regeneration Velocity Function of Entering Dry-Bulb and Humidity Ratio Curve Name"] = value
@property
def regeneration_velocity_function_of_air_velocity_curve_name(self):
"""field `Regeneration Velocity Function of Air Velocity Curve Name`
| Regeneration velocity [m/s] as a function of
| process air velocity, quadratic curve.
| curve = C1 + C2*v + C3*v**2
| v = process air velocity [m/s]
Args:
value (str): value for IDD Field `Regeneration Velocity Function of Air Velocity Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_velocity_function_of_air_velocity_curve_name` or None if not set
"""
return self[
"Regeneration Velocity Function of Air Velocity Curve Name"]
@regeneration_velocity_function_of_air_velocity_curve_name.setter
def regeneration_velocity_function_of_air_velocity_curve_name(
self,
value=None):
"""Corresponds to IDD field `Regeneration Velocity Function of Air
Velocity Curve Name`"""
self[
"Regeneration Velocity Function of Air Velocity Curve Name"] = value
@property
def nominal_regeneration_temperature(self):
"""field `Nominal Regeneration Temperature`
| Nominal regen temperature upon which the regen energy modifier
        | curve is based. Not used if Default is chosen for the field Performance Model Type.
| 121 C is a commonly used value.
| Units: C
| value >= 40.0
| value <= 250.0
Args:
value (float): value for IDD Field `Nominal Regeneration Temperature`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `nominal_regeneration_temperature` or None if not set
"""
return self["Nominal Regeneration Temperature"]
@nominal_regeneration_temperature.setter
def nominal_regeneration_temperature(self, value=None):
"""Corresponds to IDD field `Nominal Regeneration Temperature`"""
self["Nominal Regeneration Temperature"] = value
class DehumidifierDesiccantSystem(DataObject):
""" Corresponds to IDD object `Dehumidifier:Desiccant:System`
This compound object models a desiccant heat exchanger, an optional
heater, and associated fans. The process air stream is the air which
is dehumidified. The regeneration air stream is the air which is
heated to regenerate the desiccant. The desiccant heat exchanger
transfers both sensible and latent energy between the process and
regeneration air streams. The desiccant dehumidifier is typically used
in an AirLoopHVAC:OutdoorAirSystem, but can also be specified in any AirLoopHVAC.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'availability schedule name',
{'name': u'Availability Schedule Name',
'pyname': u'availability_schedule_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'desiccant heat exchanger object type',
{'name': u'Desiccant Heat Exchanger Object Type',
'pyname': u'desiccant_heat_exchanger_object_type',
'required-field': True,
'autosizable': False,
'accepted-values': [u'HeatExchanger:Desiccant:BalancedFlow'],
'autocalculatable': False,
'type': 'alpha'}),
(u'desiccant heat exchanger name',
{'name': u'Desiccant Heat Exchanger Name',
'pyname': u'desiccant_heat_exchanger_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'sensor node name',
{'name': u'Sensor Node Name',
'pyname': u'sensor_node_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'regeneration air fan object type',
{'name': u'Regeneration Air Fan Object Type',
'pyname': u'regeneration_air_fan_object_type',
'required-field': True,
'autosizable': False,
'accepted-values': [u'Fan:OnOff',
u'Fan:ConstantVolume'],
'autocalculatable': False,
'type': 'alpha'}),
(u'regeneration air fan name',
{'name': u'Regeneration Air Fan Name',
'pyname': u'regeneration_air_fan_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'regeneration air fan placement',
{'name': u'Regeneration Air Fan Placement',
'pyname': u'regeneration_air_fan_placement',
'default': u'DrawThrough',
'required-field': False,
'autosizable': False,
'accepted-values': [u'BlowThrough',
u'DrawThrough'],
'autocalculatable': False,
'type': 'alpha'}),
(u'regeneration air heater object type',
{'name': u'Regeneration Air Heater Object Type',
'pyname': u'regeneration_air_heater_object_type',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Coil:Heating:Electric',
u'Coil:Heating:Gas',
u'Coil:Heating:Water',
u'Coil:Heating:Steam'],
'autocalculatable': False,
'type': 'alpha'}),
(u'regeneration air heater name',
{'name': u'Regeneration Air Heater Name',
'pyname': u'regeneration_air_heater_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'regeneration inlet air setpoint temperature',
{'name': u'Regeneration Inlet Air Setpoint Temperature',
'pyname': u'regeneration_inlet_air_setpoint_temperature',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'C'}),
(u'companion cooling coil object type',
{'name': u'Companion Cooling Coil Object Type',
'pyname': u'companion_cooling_coil_object_type',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Coil:Cooling:DX:SingleSpeed',
u'Coil:Cooling:DX:TwoStageWithHumidityControlMode'],
'autocalculatable': False,
'type': 'alpha'}),
(u'companion cooling coil name',
{'name': u'Companion Cooling Coil Name',
'pyname': u'companion_cooling_coil_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'companion cooling coil upstream of dehumidifier process inlet',
{'name': u'Companion Cooling Coil Upstream of Dehumidifier Process Inlet',
'pyname': u'companion_cooling_coil_upstream_of_dehumidifier_process_inlet',
'default': u'No',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Yes',
u'No'],
'autocalculatable': False,
'type': 'alpha'}),
(u'companion coil regeneration air heating',
{'name': u'Companion Coil Regeneration Air Heating',
'pyname': u'companion_coil_regeneration_air_heating',
'default': u'No',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Yes',
u'No'],
'autocalculatable': False,
'type': 'alpha'}),
(u'exhaust fan maximum flow rate',
{'name': u'Exhaust Fan Maximum Flow Rate',
'pyname': u'exhaust_fan_maximum_flow_rate',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'm3/s'}),
(u'exhaust fan maximum power',
{'name': u'Exhaust Fan Maximum Power',
'pyname': u'exhaust_fan_maximum_power',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'W'}),
(u'exhaust fan power curve name',
{'name': u'Exhaust Fan Power Curve Name',
'pyname': u'exhaust_fan_power_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'})]),
'format': None,
'group': u'Humidifiers and Dehumidifiers',
'min-fields': 8,
'name': u'Dehumidifier:Desiccant:System',
'pyname': u'DehumidifierDesiccantSystem',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def availability_schedule_name(self):
"""field `Availability Schedule Name`
| Availability schedule name for this system. Schedule value > 0 means the system is available.
| If this field is blank, the system is always available.
Args:
value (str): value for IDD Field `Availability Schedule Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `availability_schedule_name` or None if not set
"""
return self["Availability Schedule Name"]
@availability_schedule_name.setter
def availability_schedule_name(self, value=None):
"""Corresponds to IDD field `Availability Schedule Name`"""
self["Availability Schedule Name"] = value
@property
def desiccant_heat_exchanger_object_type(self):
"""field `Desiccant Heat Exchanger Object Type`
Args:
value (str): value for IDD Field `Desiccant Heat Exchanger Object Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `desiccant_heat_exchanger_object_type` or None if not set
"""
return self["Desiccant Heat Exchanger Object Type"]
@desiccant_heat_exchanger_object_type.setter
def desiccant_heat_exchanger_object_type(self, value=None):
"""Corresponds to IDD field `Desiccant Heat Exchanger Object Type`"""
self["Desiccant Heat Exchanger Object Type"] = value
@property
def desiccant_heat_exchanger_name(self):
"""field `Desiccant Heat Exchanger Name`
Args:
value (str): value for IDD Field `Desiccant Heat Exchanger Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `desiccant_heat_exchanger_name` or None if not set
"""
return self["Desiccant Heat Exchanger Name"]
@desiccant_heat_exchanger_name.setter
def desiccant_heat_exchanger_name(self, value=None):
"""Corresponds to IDD field `Desiccant Heat Exchanger Name`"""
self["Desiccant Heat Exchanger Name"] = value
@property
def sensor_node_name(self):
"""field `Sensor Node Name`
Args:
value (str): value for IDD Field `Sensor Node Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `sensor_node_name` or None if not set
"""
return self["Sensor Node Name"]
@sensor_node_name.setter
def sensor_node_name(self, value=None):
"""Corresponds to IDD field `Sensor Node Name`"""
self["Sensor Node Name"] = value
@property
def regeneration_air_fan_object_type(self):
"""field `Regeneration Air Fan Object Type`
Args:
value (str): value for IDD Field `Regeneration Air Fan Object Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_air_fan_object_type` or None if not set
"""
return self["Regeneration Air Fan Object Type"]
@regeneration_air_fan_object_type.setter
def regeneration_air_fan_object_type(self, value=None):
"""Corresponds to IDD field `Regeneration Air Fan Object Type`"""
self["Regeneration Air Fan Object Type"] = value
@property
def regeneration_air_fan_name(self):
"""field `Regeneration Air Fan Name`
Args:
value (str): value for IDD Field `Regeneration Air Fan Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_air_fan_name` or None if not set
"""
return self["Regeneration Air Fan Name"]
@regeneration_air_fan_name.setter
def regeneration_air_fan_name(self, value=None):
"""Corresponds to IDD field `Regeneration Air Fan Name`"""
self["Regeneration Air Fan Name"] = value
@property
def regeneration_air_fan_placement(self):
"""field `Regeneration Air Fan Placement`
| Default value: DrawThrough
Args:
value (str): value for IDD Field `Regeneration Air Fan Placement`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_air_fan_placement` or None if not set
"""
return self["Regeneration Air Fan Placement"]
@regeneration_air_fan_placement.setter
def regeneration_air_fan_placement(self, value="DrawThrough"):
"""Corresponds to IDD field `Regeneration Air Fan Placement`"""
self["Regeneration Air Fan Placement"] = value
@property
def regeneration_air_heater_object_type(self):
"""field `Regeneration Air Heater Object Type`
| works with gas, electric, hot water and steam heating coils
Args:
value (str): value for IDD Field `Regeneration Air Heater Object Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_air_heater_object_type` or None if not set
"""
return self["Regeneration Air Heater Object Type"]
@regeneration_air_heater_object_type.setter
def regeneration_air_heater_object_type(self, value=None):
"""Corresponds to IDD field `Regeneration Air Heater Object Type`"""
self["Regeneration Air Heater Object Type"] = value
@property
def regeneration_air_heater_name(self):
"""field `Regeneration Air Heater Name`
Args:
value (str): value for IDD Field `Regeneration Air Heater Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `regeneration_air_heater_name` or None if not set
"""
return self["Regeneration Air Heater Name"]
@regeneration_air_heater_name.setter
def regeneration_air_heater_name(self, value=None):
"""Corresponds to IDD field `Regeneration Air Heater Name`"""
self["Regeneration Air Heater Name"] = value
@property
def regeneration_inlet_air_setpoint_temperature(self):
"""field `Regeneration Inlet Air Setpoint Temperature`
| Units: C
Args:
value (float): value for IDD Field `Regeneration Inlet Air Setpoint Temperature`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `regeneration_inlet_air_setpoint_temperature` or None if not set
"""
return self["Regeneration Inlet Air Setpoint Temperature"]
@regeneration_inlet_air_setpoint_temperature.setter
def regeneration_inlet_air_setpoint_temperature(self, value=None):
"""Corresponds to IDD field `Regeneration Inlet Air Setpoint
Temperature`"""
self["Regeneration Inlet Air Setpoint Temperature"] = value
@property
def companion_cooling_coil_object_type(self):
"""field `Companion Cooling Coil Object Type`
Args:
value (str): value for IDD Field `Companion Cooling Coil Object Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `companion_cooling_coil_object_type` or None if not set
"""
return self["Companion Cooling Coil Object Type"]
@companion_cooling_coil_object_type.setter
def companion_cooling_coil_object_type(self, value=None):
"""Corresponds to IDD field `Companion Cooling Coil Object Type`"""
self["Companion Cooling Coil Object Type"] = value
@property
def companion_cooling_coil_name(self):
"""field `Companion Cooling Coil Name`
Args:
value (str): value for IDD Field `Companion Cooling Coil Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `companion_cooling_coil_name` or None if not set
"""
return self["Companion Cooling Coil Name"]
@companion_cooling_coil_name.setter
def companion_cooling_coil_name(self, value=None):
"""Corresponds to IDD field `Companion Cooling Coil Name`"""
self["Companion Cooling Coil Name"] = value
@property
def companion_cooling_coil_upstream_of_dehumidifier_process_inlet(self):
"""field `Companion Cooling Coil Upstream of Dehumidifier Process
Inlet`
| Select Yes if the companion cooling coil is located directly upstream
| of the desiccant heat exchanger's process air inlet node.
| Default value: No
Args:
value (str): value for IDD Field `Companion Cooling Coil Upstream of Dehumidifier Process Inlet`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `companion_cooling_coil_upstream_of_dehumidifier_process_inlet` or None if not set
"""
return self[
"Companion Cooling Coil Upstream of Dehumidifier Process Inlet"]
@companion_cooling_coil_upstream_of_dehumidifier_process_inlet.setter
def companion_cooling_coil_upstream_of_dehumidifier_process_inlet(
self,
value="No"):
"""Corresponds to IDD field `Companion Cooling Coil Upstream of
Dehumidifier Process Inlet`"""
self[
"Companion Cooling Coil Upstream of Dehumidifier Process Inlet"] = value
@property
def companion_coil_regeneration_air_heating(self):
"""field `Companion Coil Regeneration Air Heating`
| Default value: No
Args:
value (str): value for IDD Field `Companion Coil Regeneration Air Heating`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `companion_coil_regeneration_air_heating` or None if not set
"""
return self["Companion Coil Regeneration Air Heating"]
@companion_coil_regeneration_air_heating.setter
def companion_coil_regeneration_air_heating(self, value="No"):
"""Corresponds to IDD field `Companion Coil Regeneration Air
Heating`"""
self["Companion Coil Regeneration Air Heating"] = value
@property
def exhaust_fan_maximum_flow_rate(self):
"""field `Exhaust Fan Maximum Flow Rate`
| Units: m3/s
Args:
value (float): value for IDD Field `Exhaust Fan Maximum Flow Rate`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `exhaust_fan_maximum_flow_rate` or None if not set
"""
return self["Exhaust Fan Maximum Flow Rate"]
@exhaust_fan_maximum_flow_rate.setter
def exhaust_fan_maximum_flow_rate(self, value=None):
"""Corresponds to IDD field `Exhaust Fan Maximum Flow Rate`"""
self["Exhaust Fan Maximum Flow Rate"] = value
@property
def exhaust_fan_maximum_power(self):
"""field `Exhaust Fan Maximum Power`
| Units: W
Args:
value (float): value for IDD Field `Exhaust Fan Maximum Power`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `exhaust_fan_maximum_power` or None if not set
"""
return self["Exhaust Fan Maximum Power"]
@exhaust_fan_maximum_power.setter
def exhaust_fan_maximum_power(self, value=None):
"""Corresponds to IDD field `Exhaust Fan Maximum Power`"""
self["Exhaust Fan Maximum Power"] = value
@property
def exhaust_fan_power_curve_name(self):
"""field `Exhaust Fan Power Curve Name`
| Curve object type must be Curve:Quadratic or Curve:Cubic
Args:
value (str): value for IDD Field `Exhaust Fan Power Curve Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `exhaust_fan_power_curve_name` or None if not set
"""
return self["Exhaust Fan Power Curve Name"]
@exhaust_fan_power_curve_name.setter
def exhaust_fan_power_curve_name(self, value=None):
"""Corresponds to IDD field `Exhaust Fan Power Curve Name`"""
self["Exhaust Fan Power Curve Name"] = value
|
from babel.dates import format_date, format_datetime, format_time, format_interval, LC_TIME
class Time(object):
FULL = 'full'
LONG = 'long'
MEDIUM = 'medium'
SHORT = 'short'
def __init__(self, locale=None, time_zone=None):
"""
Constructor
:param locale: The locale to use
:type locale: babel.core.Locale
:param time_zone: The time zone
:type time_zone: pytz.tzinfo.DstTzInfo
"""
if locale is not None:
self._locale = locale
else:
self._locale = LC_TIME
self.time_zone = time_zone
def date(self, date, format=MEDIUM):
"""
Format date
:param date: The date
:param format: The format
:return: Formatted date
"""
return format_date(date, format=format, locale=self._locale)
def datetime(self, datetime, format=MEDIUM):
"""
Format datetime
:param datetime: The datetime
:param format: The format
:return: Formatted datetime
"""
return format_datetime(datetime, format=format, locale=self._locale, tzinfo=self.time_zone)
def time(self, time, format=MEDIUM):
"""
Format time
:param time: The time
:param format: The format
:return: Formatted time
"""
return format_time(time, format=format, locale=self._locale, tzinfo=self.time_zone)
def interval(self, start_datetime, end_datetime, skeleton=None):
"""
Format interval
:param start_datetime: Start time/date/datetime
:param end_datetime: End time/date/datetime
:param skeleton: The "skeleton format" to use for formatting
:return: The formatted interval
"""
return format_interval(start_datetime, end_datetime, skeleton=skeleton, locale=self._locale, tzinfo=self.time_zone)
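# Example usage sketch for the wrapper above. Assumes babel is installed;
# the locale and example datetimes are illustrative only.
if __name__ == '__main__':
    from datetime import datetime
    from babel import Locale

    formatter = Time(locale=Locale('en', 'US'))
    moment = datetime(2020, 1, 15, 9, 30)
    print(formatter.date(moment))                     # e.g. 'Jan 15, 2020'
    print(formatter.time(moment, format=Time.SHORT))  # e.g. '9:30 AM'
    print(formatter.interval(moment, datetime(2020, 1, 15, 11, 0)))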
|
"""Tests for projectq.meta._dirtyqubit.py"""
from projectq.meta import ComputeTag, _dirtyqubit
def test_dirty_qubit_tag():
tag0 = _dirtyqubit.DirtyQubitTag()
tag1 = _dirtyqubit.DirtyQubitTag()
tag2 = ComputeTag()
    assert tag0 == tag1  # same tag type compares equal
    assert not tag0 != tag1  # __ne__ must agree with __eq__
    assert not tag0 == tag2  # different tag types never compare equal
|
from mlflow.tracking.request_header.abstract_request_header_provider import RequestHeaderProvider
from mlflow.utils import databricks_utils
class DatabricksRequestHeaderProvider(RequestHeaderProvider):
"""
Provides request headers indicating the type of Databricks environment from which a request
was made.
"""
def in_context(self):
return (
databricks_utils.is_in_cluster()
or databricks_utils.is_in_databricks_notebook()
or databricks_utils.is_in_databricks_job()
)
def request_headers(self):
request_headers = {}
if databricks_utils.is_in_databricks_notebook():
request_headers["notebook_id"] = databricks_utils.get_notebook_id()
if databricks_utils.is_in_databricks_job():
request_headers["job_id"] = databricks_utils.get_job_id()
request_headers["job_run_id"] = databricks_utils.get_job_run_id()
request_headers["job_type"] = databricks_utils.get_job_type()
if databricks_utils.is_in_cluster():
request_headers["cluster_id"] = databricks_utils.get_cluster_id()
command_run_id = databricks_utils.get_command_run_id()
if command_run_id is not None:
request_headers["command_run_id"] = command_run_id
return request_headers
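# Usage sketch: providers like this are normally picked up by MLflow's
# request-header provider registry, but the class can be exercised directly.
# The output depends entirely on the Databricks runtime context.
if __name__ == "__main__":
    provider = DatabricksRequestHeaderProvider()
    if provider.in_context():
        print(provider.request_headers())
    else:
        print("Not running inside a Databricks environment.")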
|
"""Calling Functions
Python has a lot of stuff built in that you can
just use. Much of it is exposed through **functions**.
You have already seen a common one: |print|.
A function is _called_ by placing |()| after its
name. If it accepts **arguments**, then they go
inside of the |()|. The |len| function
demonstrated here accepts a single argument: the
thing you want to know the length of.
x = len("hello") # x gets the value 5
All Python functions **return a value**. In the
case of |len|, this means that calling it produces
a new value as above. You can assign that value to
a variable, or print it, or pass it into another
function. Or, you can ignore it and it will go
away.
To understand how function calls work, it helps
to think of calling a function as *replacing it
with the return value*. In the example above, that
means that the entire call, from the name |len| to
the closing paren, is replaced with the length of
"hello", which is 5.
When you see a function call anywhere and want to
understand what it means, you can imagine working
from the inside out, left to right, replacing
calls with the values they return.
x = len([1, 2, len('hi')])
# innermost is len('hi') - replace it:
x = len([1, 2, 2])
# next is len([1, 2, 2]) - replace it:
x = 3
# No more calls - we're done.
If you ever see a statement or expression that has
function calls in it, you can understand what is
going on by following the above procedure in your
mind: replace the innermost calls with values
(they can be pretend values - we're imagining for
the sake of understanding here). Then work to the
right, then work outward and do it again until
there are no calls left.
Functions are very important in all of computer
science, so taking the time to understand what is
happening right now is very useful for what's
coming up.
Exercises
- One important function in Python is |repr|,
which prints a "representation" of an object.
Try printing |repr("10")|. See how it differs
from |repr(10)|.
- Convert the string |"2000"| into an integer
by calling |int|.
"""
__doc__ = """Calling Functions
Note: If you don't use a return value,
it gets lost.
"""
len("hi")
length = len("how long is this anyway?")
print("length =", length)
print("The length is", len("hi there"))
print("Just print:", "Hi there")
print("repr print:", repr("Hi there"))
|
"""
Copyright 2012 Pontiflex, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ca.models import Base, DBSession
__predicates__ = set()
def predicate(fun):
    # Register the plain function itself; bound methods expose it later
    # via their __func__ attribute, which get_predicate() checks against.
    __predicates__.add(fun)
return fun
def get_predicate(obj, name):
pred = getattr(obj, name, None)
if pred is None or pred.__func__ not in __predicates__:
return _Throw()
return pred
class _Throw:
def __call__(self, *args, **kwargs):
raise PredicateError()
class PredicateError(AttributeError): pass
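# Illustrative sketch (hypothetical model class) of how the registry above is
# used: only methods decorated with @predicate can be reached through
# get_predicate; anything else falls through to _Throw and raises
# PredicateError.
#
#     class Document(Base):
#         @predicate
#         def is_published(self):
#             return self.published_at is not None
#
#     get_predicate(doc, 'is_published')()  # invokes the method
#     get_predicate(doc, 'delete')()        # raises PredicateError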
|
from survey.objects import *
import json
"""
This module provides an example of how to construct a questionnaire in Python.
Questionnaires can be saved by calling jsonize and dumping their contents.
Jsonized surveys can be reused, manipulated, and sent via RPC to another service.
"""
q1 = Question("What is your age?"
, ["< 18", "18-34", "35-64", "> 65"]
, qtypes["radio"])
q2 = Question("What is your political affiliation?"
, ["Democrat", "Republican", "Indepedent"]
, qtypes["radio"]
, shuffle=True)
q3 = Question("Which issues do you care about the most?"
, ["Gun control", "Reproductive Rights", "The Economy", "Foreign Relations"]
, qtypes["check"]
,shuffle=True)
q4 = Question("What is your year of birth?"
, [x+1910 for x in range(90)]
, qtypes["dropdown"])
survey = Survey([q1, q2, q3, q4])
filename = 'jsonized_survey.txt'
if __name__ == '__main__':
    import sys
    # Allow the output file name to be overridden before writing.
    if len(sys.argv) > 1:
        filename = sys.argv[1]
    with open(filename, 'w') as f:
        f.write(json.dumps(survey.jsonize(), sort_keys=True, indent=4))
    print("See " + filename + " for a jsonized survey.")
|
"""Connections to gcloud datastore API servers."""
import os
from gcloud import connection
from gcloud.exceptions import make_exception
from gcloud.datastore import _datastore_v1_pb2 as datastore_pb
SCOPE = ('https://www.googleapis.com/auth/datastore',
'https://www.googleapis.com/auth/userinfo.email')
"""The scopes required for authenticating as a Cloud Datastore consumer."""
_GCD_HOST_ENV_VAR_NAME = 'DATASTORE_HOST'
class Connection(connection.Connection):
"""A connection to the Google Cloud Datastore via the Protobuf API.
This class should understand only the basic types (and protobufs)
in method arguments, however should be capable of returning advanced types.
:type credentials: :class:`oauth2client.client.OAuth2Credentials`
:param credentials: The OAuth2 Credentials to use for this connection.
:type api_base_url: string
:param api_base_url: The base of the API call URL. Defaults to the value
from :mod:`gcloud.connection`.
"""
API_VERSION = 'v1beta2'
"""The version of the API, used in building the API call's URL."""
API_URL_TEMPLATE = ('{api_base}/datastore/{api_version}'
'/datasets/{dataset_id}/{method}')
"""A template for the URL of a particular API call."""
def __init__(self, credentials=None, http=None, api_base_url=None):
credentials = self._create_scoped_credentials(credentials, SCOPE)
super(Connection, self).__init__(credentials=credentials, http=http)
if api_base_url is None:
api_base_url = os.getenv(_GCD_HOST_ENV_VAR_NAME,
connection.API_BASE_URL)
self.api_base_url = api_base_url
def _request(self, dataset_id, method, data):
"""Make a request over the Http transport to the Cloud Datastore API.
:type dataset_id: string
:param dataset_id: The ID of the dataset of which to make the request.
:type method: string
        :param method: The API call method name (i.e., ``runQuery``,
``lookup``, etc)
:type data: string
:param data: The data to send with the API call.
Typically this is a serialized Protobuf string.
:rtype: string
:returns: The string response content from the API call.
:raises: :class:`gcloud.exceptions.GCloudError` if the response
code is not 200 OK.
"""
headers = {
'Content-Type': 'application/x-protobuf',
'Content-Length': str(len(data)),
'User-Agent': self.USER_AGENT,
}
headers, content = self.http.request(
uri=self.build_api_url(dataset_id=dataset_id, method=method),
method='POST', headers=headers, body=data)
status = headers['status']
if status != '200':
raise make_exception(headers, content, use_json=False)
return content
def _rpc(self, dataset_id, method, request_pb, response_pb_cls):
"""Make a protobuf RPC request.
:type dataset_id: string
:param dataset_id: The ID of the dataset to connect to. This is
usually your project name in the cloud console.
:type method: string
:param method: The name of the method to invoke.
:type request_pb: :class:`google.protobuf.message.Message` instance
:param request_pb: the protobuf instance representing the request.
:type response_pb_cls: A :class:`google.protobuf.message.Message'
subclass.
:param response_pb_cls: The class used to unmarshall the response
protobuf.
"""
response = self._request(dataset_id=dataset_id, method=method,
data=request_pb.SerializeToString())
return response_pb_cls.FromString(response)
def build_api_url(self, dataset_id, method, base_url=None,
api_version=None):
"""Construct the URL for a particular API call.
This method is used internally to come up with the URL to use when
making RPCs to the Cloud Datastore API.
:type dataset_id: string
:param dataset_id: The ID of the dataset to connect to. This is
usually your project name in the cloud console.
:type method: string
        :param method: The API method to call (i.e., runQuery, lookup, ...).
:type base_url: string
:param base_url: The base URL where the API lives.
You shouldn't have to provide this.
:type api_version: string
:param api_version: The version of the API to connect to.
You shouldn't have to provide this.
"""
return self.API_URL_TEMPLATE.format(
api_base=(base_url or self.api_base_url),
api_version=(api_version or self.API_VERSION),
dataset_id=dataset_id, method=method)
def lookup(self, dataset_id, key_pbs,
eventual=False, transaction_id=None):
"""Lookup keys from a dataset in the Cloud Datastore.
Maps the ``DatastoreService.Lookup`` protobuf RPC.
This method deals only with protobufs
(:class:`gcloud.datastore._datastore_v1_pb2.Key` and
:class:`gcloud.datastore._datastore_v1_pb2.Entity`) and is used
under the hood in :func:`gcloud.datastore.get`:
>>> from gcloud import datastore
>>> key = datastore.Key('MyKind', 1234, dataset_id='dataset-id')
>>> datastore.get(key)
[<Entity object>]
Using the ``connection`` class directly:
>>> connection.lookup('dataset-id', [key.to_protobuf()])
[<Entity protobuf>]
:type dataset_id: string
:param dataset_id: The ID of the dataset to look up the keys.
:type key_pbs: list of :class:`gcloud.datastore._datastore_v1_pb2.Key`
:param key_pbs: The keys to retrieve from the datastore.
:type eventual: boolean
:param eventual: If False (the default), request ``STRONG`` read
consistency. If True, request ``EVENTUAL`` read
consistency.
:type transaction_id: string
:param transaction_id: If passed, make the request in the scope of
the given transaction. Incompatible with
``eventual==True``.
:rtype: tuple
:returns: A triple of (``results``, ``missing``, ``deferred``) where
both ``results`` and ``missing`` are lists of
:class:`gcloud.datastore._datastore_v1_pb2.Entity` and
``deferred`` is a list of
:class:`gcloud.datastore._datastore_v1_pb2.Key`.
"""
lookup_request = datastore_pb.LookupRequest()
_set_read_options(lookup_request, eventual, transaction_id)
_add_keys_to_request(lookup_request.key, key_pbs)
lookup_response = self._rpc(dataset_id, 'lookup', lookup_request,
datastore_pb.LookupResponse)
results = [result.entity for result in lookup_response.found]
missing = [result.entity for result in lookup_response.missing]
return results, missing, list(lookup_response.deferred)
def run_query(self, dataset_id, query_pb, namespace=None,
eventual=False, transaction_id=None):
"""Run a query on the Cloud Datastore.
Maps the ``DatastoreService.RunQuery`` protobuf RPC.
Given a Query protobuf, sends a ``runQuery`` request to the
Cloud Datastore API and returns a list of entity protobufs
matching the query.
        You typically wouldn't use this method directly; instead, use the
:meth:`gcloud.datastore.query.Query.fetch` method.
Under the hood, the :class:`gcloud.datastore.query.Query` class
uses this method to fetch data:
>>> from gcloud import datastore
>>> query = datastore.Query(kind='MyKind')
>>> query.add_filter('property', '=', 'val')
Using the query's ``fetch_page`` method...
>>> entities, cursor, more_results = query.fetch_page()
>>> entities
[<list of Entity unmarshalled from protobuf>]
>>> cursor
<string containing cursor where fetch stopped>
>>> more_results
<boolean of more results>
Under the hood this is doing...
>>> connection.run_query('dataset-id', query.to_protobuf())
[<list of Entity Protobufs>], cursor, more_results, skipped_results
:type dataset_id: string
:param dataset_id: The ID of the dataset over which to run the query.
:type query_pb: :class:`gcloud.datastore._datastore_v1_pb2.Query`
:param query_pb: The Protobuf representing the query to run.
:type namespace: string
:param namespace: The namespace over which to run the query.
:type eventual: boolean
:param eventual: If False (the default), request ``STRONG`` read
consistency. If True, request ``EVENTUAL`` read
consistency.
:type transaction_id: string
:param transaction_id: If passed, make the request in the scope of
the given transaction. Incompatible with
``eventual==True``.
"""
request = datastore_pb.RunQueryRequest()
_set_read_options(request, eventual, transaction_id)
if namespace:
request.partition_id.namespace = namespace
request.query.CopyFrom(query_pb)
response = self._rpc(dataset_id, 'runQuery', request,
datastore_pb.RunQueryResponse)
return (
[e.entity for e in response.batch.entity_result],
response.batch.end_cursor, # Assume response always has cursor.
response.batch.more_results,
response.batch.skipped_results,
)
def begin_transaction(self, dataset_id, serializable=False):
"""Begin a transaction.
Maps the ``DatastoreService.BeginTransaction`` protobuf RPC.
:type dataset_id: string
:param dataset_id: The ID dataset to which the transaction applies.
:type serializable: boolean
:param serializable: Boolean indicating if the isolation level of the
transaction should be SERIALIZABLE (True) or
SNAPSHOT (False).
:rtype: :class:`._datastore_v1_pb2.BeginTransactionResponse`
        :returns: The result protobuf for the begin transaction request.
"""
request = datastore_pb.BeginTransactionRequest()
if serializable:
request.isolation_level = (
datastore_pb.BeginTransactionRequest.SERIALIZABLE)
else:
request.isolation_level = (
datastore_pb.BeginTransactionRequest.SNAPSHOT)
response = self._rpc(dataset_id, 'beginTransaction', request,
datastore_pb.BeginTransactionResponse)
return response.transaction
def commit(self, dataset_id, mutation_pb, transaction_id):
"""Commit dataset mutations in context of current transation (if any).
Maps the ``DatastoreService.Commit`` protobuf RPC.
:type dataset_id: string
:param dataset_id: The ID dataset to which the transaction applies.
:type mutation_pb: :class:`datastore_pb.Mutation`.
:param mutation_pb: The protobuf for the mutations being saved.
:type transaction_id: string or None
:param transaction_id: The transaction ID returned from
:meth:`begin_transaction`. Non-transactional
batches must pass ``None``.
:rtype: :class:`gcloud.datastore._datastore_v1_pb2.MutationResult`.
        :returns: The result protobuf for the mutation.
"""
request = datastore_pb.CommitRequest()
if transaction_id:
request.mode = datastore_pb.CommitRequest.TRANSACTIONAL
request.transaction = transaction_id
else:
request.mode = datastore_pb.CommitRequest.NON_TRANSACTIONAL
request.mutation.CopyFrom(mutation_pb)
response = self._rpc(dataset_id, 'commit', request,
datastore_pb.CommitResponse)
return response.mutation_result
def rollback(self, dataset_id, transaction_id):
"""Rollback the connection's existing transaction.
Maps the ``DatastoreService.Rollback`` protobuf RPC.
:type dataset_id: string
:param dataset_id: The ID of the dataset to which the transaction
belongs.
:type transaction_id: string
:param transaction_id: The transaction ID returned from
:meth:`begin_transaction`.
"""
request = datastore_pb.RollbackRequest()
request.transaction = transaction_id
# Nothing to do with this response, so just execute the method.
self._rpc(dataset_id, 'rollback', request,
datastore_pb.RollbackResponse)
def allocate_ids(self, dataset_id, key_pbs):
"""Obtain backend-generated IDs for a set of keys.
Maps the ``DatastoreService.AllocateIds`` protobuf RPC.
:type dataset_id: string
:param dataset_id: The ID of the dataset to which the transaction
belongs.
:type key_pbs: list of :class:`gcloud.datastore._datastore_v1_pb2.Key`
:param key_pbs: The keys for which the backend should allocate IDs.
:rtype: list of :class:`gcloud.datastore._datastore_v1_pb2.Key`
:returns: An equal number of keys, with IDs filled in by the backend.
"""
request = datastore_pb.AllocateIdsRequest()
_add_keys_to_request(request.key, key_pbs)
# Nothing to do with this response, so just execute the method.
response = self._rpc(dataset_id, 'allocateIds', request,
datastore_pb.AllocateIdsResponse)
return list(response.key)
def _set_read_options(request, eventual, transaction_id):
"""Validate rules for read options, and assign to the request.
Helper method for ``lookup()`` and ``run_query``.
:raises: :class:`ValueError` if ``eventual`` is ``True`` and the
``transaction_id`` is not ``None``.
"""
if eventual and (transaction_id is not None):
raise ValueError('eventual must be False when in a transaction')
opts = request.read_options
if eventual:
opts.read_consistency = datastore_pb.ReadOptions.EVENTUAL
elif transaction_id:
opts.transaction = transaction_id
def _prepare_key_for_request(key_pb): # pragma: NO COVER copied from helpers
"""Add protobuf keys to a request object.
.. note::
This is copied from `helpers` to avoid a cycle:
_implicit_environ -> connection -> helpers -> key -> _implicit_environ
:type key_pb: :class:`gcloud.datastore._datastore_v1_pb2.Key`
:param key_pb: A key to be added to a request.
:rtype: :class:`gcloud.datastore._datastore_v1_pb2.Key`
:returns: A key which will be added to a request. It will be the
original if nothing needs to be changed.
"""
if key_pb.partition_id.HasField('dataset_id'):
new_key_pb = datastore_pb.Key()
new_key_pb.CopyFrom(key_pb)
new_key_pb.partition_id.ClearField('dataset_id')
key_pb = new_key_pb
return key_pb
def _add_keys_to_request(request_field_pb, key_pbs):
"""Add protobuf keys to a request object.
:type request_field_pb: `RepeatedCompositeFieldContainer`
:param request_field_pb: A repeated proto field that contains keys.
:type key_pbs: list of :class:`gcloud.datastore._datastore_v1_pb2.Key`
:param key_pbs: The keys to add to a request.
"""
for key_pb in key_pbs:
key_pb = _prepare_key_for_request(key_pb)
request_field_pb.add().CopyFrom(key_pb)
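# Usage sketch (hypothetical dataset ID; credentials are resolved by
# oauth2client in real use):
#
#     conn = Connection()
#     txn_id = conn.begin_transaction('my-dataset-id')
#     results, missing, deferred = conn.lookup(
#         'my-dataset-id', [key.to_protobuf()], transaction_id=txn_id)
#     conn.rollback('my-dataset-id', txn_id)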
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .constants import MSG_NOSUB
from .nosub_message import NosubMessage
from .server_message_parser import ServerMessageParser
__all__ = ['NosubMessageParser']
class NosubMessageParser(ServerMessageParser):
MESSAGE_TYPE = MSG_NOSUB
def parse(self, pod):
return NosubMessage(pod['id'], pod.get('error'))
|
"""CIFAR-10 data pipeline with preprocessing.
The data is generated via generate_cifar10_tfrecords.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
WIDTH = 32
HEIGHT = 32
RGB_MEAN = [125.31, 122.95, 113.87]
RGB_STD = [62.99, 62.09, 66.70]
class CIFARInput(object):
"""Wrapper class for input_fn passed to TPUEstimator."""
def __init__(self, mode, config):
"""Initializes a CIFARInput object.
Args:
mode: one of [train, valid, test, augment, sample]
config: config dict built from config.py
Raises:
ValueError: invalid mode or data files
"""
self.mode = mode
self.config = config
if mode == 'train': # Training set (no validation & test)
self.data_files = config['train_data_files']
elif mode == 'train_eval': # For computing train error
self.data_files = [config['train_data_files'][0]]
elif mode == 'valid': # For computing validation error
self.data_files = [config['valid_data_file']]
elif mode == 'test': # For computing the test error
self.data_files = [config['test_data_file']]
elif mode == 'augment': # Training set (includes validation, no test)
self.data_files = (config['train_data_files'] +
[config['valid_data_file']])
elif mode == 'sample': # Fixed batch of 100 samples from validation
self.data_files = [config['sample_data_file']]
else:
raise ValueError('invalid mode')
if not self.data_files:
raise ValueError('no data files provided')
@property
def num_images(self):
"""Number of images in the dataset (depends on the mode)."""
if self.mode == 'train':
return 40000
elif self.mode == 'train_eval':
return 10000
elif self.mode == 'valid':
return 10000
elif self.mode == 'test':
return 10000
elif self.mode == 'augment':
return 50000
elif self.mode == 'sample':
return 100
def input_fn(self, params):
"""Returns a CIFAR tf.data.Dataset object.
Args:
      params: parameter dict passed by the Estimator.
Returns:
tf.data.Dataset object
"""
batch_size = params['batch_size']
is_training = (self.mode == 'train' or self.mode == 'augment')
dataset = tf.data.TFRecordDataset(self.data_files)
dataset = dataset.prefetch(buffer_size=batch_size)
# Repeat dataset for training modes
if is_training:
# Shuffle buffer with whole dataset to ensure full randomness per epoch
dataset = dataset.cache().apply(
tf.contrib.data.shuffle_and_repeat(
buffer_size=self.num_images))
# This is a hack to allow computing metrics on a fixed batch on TPU. Because
    # TPU shards the batch across cores, we replicate the fixed batch so that
# each core contains the whole batch.
if self.mode == 'sample':
dataset = dataset.repeat()
# Parse, preprocess, and batch images
parser_fn = functools.partial(_parser, is_training)
dataset = dataset.apply(
tf.contrib.data.map_and_batch(
parser_fn,
batch_size=batch_size,
num_parallel_batches=self.config['tpu_num_shards'],
drop_remainder=True))
# Assign static batch size dimension
dataset = dataset.map(functools.partial(_set_batch_dimension, batch_size))
# Prefetch to overlap in-feed with training
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
return dataset
def _preprocess(image):
"""Perform standard CIFAR preprocessing.
Pads the image then performs a random crop.
  Then the image is randomly flipped horizontally.
Args:
image: image Tensor with shape [height, width, 3]
Returns:
preprocessed image with the same dimensions.
"""
# Pad 4 pixels on all sides with 0
image = tf.image.resize_image_with_crop_or_pad(
image, HEIGHT + 8, WIDTH + 8)
# Random crop
image = tf.random_crop(image, [HEIGHT, WIDTH, 3], seed=0)
# Random flip
image = tf.image.random_flip_left_right(image, seed=0)
return image
def _parser(use_preprocessing, serialized_example):
"""Parses a single tf.Example into image and label tensors."""
features = tf.parse_single_example(
serialized_example,
features={
'image': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64),
})
image = tf.decode_raw(features['image'], tf.uint8)
image.set_shape([3 * HEIGHT * WIDTH])
image = tf.reshape(image, [3, HEIGHT, WIDTH])
# TODO(chrisying): handle NCHW format
image = tf.transpose(image, [1, 2, 0])
image = tf.cast(image, tf.float32)
if use_preprocessing:
image = _preprocess(image)
image -= tf.constant(RGB_MEAN, shape=[1, 1, 3])
image /= tf.constant(RGB_STD, shape=[1, 1, 3])
label = tf.cast(features['label'], tf.int32)
return image, label
def _set_batch_dimension(batch_size, images, labels):
images.set_shape(images.get_shape().merge_with(
tf.TensorShape([batch_size, None, None, None])))
labels.set_shape(labels.get_shape().merge_with(
tf.TensorShape([batch_size])))
return images, labels
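# Usage sketch (assumed config keys match those read in __init__ above):
#
#     config = {'train_data_files': ['train-0.tfrecord'],
#               'valid_data_file': 'valid.tfrecord',
#               'test_data_file': 'test.tfrecord',
#               'sample_data_file': 'sample.tfrecord',
#               'tpu_num_shards': 8}
#     cifar = CIFARInput('train', config)
#     dataset = cifar.input_fn({'batch_size': 256})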
|
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.enums",
marshal="google.ads.googleads.v8",
manifest={"AppPlaceholderFieldEnum",},
)
class AppPlaceholderFieldEnum(proto.Message):
r"""Values for App placeholder fields. """
class AppPlaceholderField(proto.Enum):
r"""Possible values for App placeholder fields."""
UNSPECIFIED = 0
UNKNOWN = 1
STORE = 2
ID = 3
LINK_TEXT = 4
URL = 5
FINAL_URLS = 6
FINAL_MOBILE_URLS = 7
TRACKING_URL = 8
FINAL_URL_SUFFIX = 9
__all__ = tuple(sorted(__protobuf__.manifest))
|
for i in range(2, 6):
    for j in range(1, i):
        print(j, end=' ')
    print()
|
"""
===============================================================================
Original code copyright (C) 2009-2021 Rudolf Cardinal (rudolf@pobox.com).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
**Network support functions.**
NOTES:
- ``ping`` requires root authority to create ICMP sockets in Linux
- the ``/bin/ping`` command doesn't need prior root authority (because it has
the setuid bit set)
- For Linux, it's therefore best to use the system ``ping``.
https://stackoverflow.com/questions/2953462/pinging-servers-in-python
https://stackoverflow.com/questions/316866/ping-a-site-in-python
- Note that if you want a sub-second timeout, things get trickier.
One option is ``fping``.
"""
import os
import ssl
import subprocess
import sys
import tempfile
from typing import BinaryIO, Dict, Generator, Iterable
import urllib.request
from cardinal_pythonlib.logs import get_brace_style_log_with_null_handler
log = get_brace_style_log_with_null_handler(__name__)
def ping(hostname: str, timeout_s: int = 5) -> bool:
"""
Pings a host, using OS tools.
Args:
hostname: host name or IP address
timeout_s: timeout in seconds
Returns:
was the ping successful?
"""
if sys.platform == "win32":
timeout_ms = timeout_s * 1000
args = [
"ping",
hostname,
"-n", "1", # ping count
"-w", str(timeout_ms), # timeout
]
elif sys.platform.startswith('linux'):
args = [
"ping",
hostname,
"-c", "1", # ping count
"-w", str(timeout_s), # timeout
]
else:
raise AssertionError("Don't know how to ping on this operating system")
proc = subprocess.Popen(args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.communicate()
retcode = proc.returncode
return retcode == 0 # zero success, non-zero failure
def download(url: str,
filename: str,
skip_cert_verify: bool = True,
headers: Dict[str, str] = None) -> None:
"""
Downloads a URL to a file.
Args:
url:
URL to download from
filename:
file to save to
skip_cert_verify:
skip SSL certificate check?
headers:
request headers (if not specified, a default will be used that
mimics Mozilla 5.0 to avoid certain HTTP 403 errors)
"""
headers = {
'User-Agent': 'Mozilla/5.0'
} if headers is None else headers
log.info("Downloading from {} to {}", url, filename)
# urllib.request.urlretrieve(url, filename)
# ... sometimes fails (e.g. downloading
# https://www.openssl.org/source/openssl-1.1.0g.tar.gz under Windows) with:
# ssl.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:777) # noqa
# ... due to this certificate root problem (probably because OpenSSL
# [used by Python] doesn't play entirely by the same rules as others?):
# https://stackoverflow.com/questions/27804710
# So:
# Patching this by faking a browser request by adding User-Agent to request
# headers, using this as example:
# https://stackoverflow.com/questions/42863240/how-to-get-round-the-http-error-403-forbidden-with-urllib-request-using-python # noqa
ctx = ssl.create_default_context() # type: ssl.SSLContext
if skip_cert_verify:
log.debug("Skipping SSL certificate check for " + url)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
page = urllib.request.Request(url, headers=headers)
with urllib.request.urlopen(page, context=ctx) as u, \
open(filename, 'wb') as f:
f.write(u.read())
def gen_binary_files_from_urls(
urls: Iterable[str],
on_disk: bool = False,
show_info: bool = True) -> Generator[BinaryIO, None, None]:
"""
Generate binary files from a series of URLs (one per URL).
Args:
urls: iterable of URLs
on_disk: if ``True``, yields files that are on disk (permitting
random access); if ``False``, yields in-memory files (which will
not permit random access)
show_info: show progress to the log?
Yields:
files, each of type :class:`BinaryIO`
"""
for url in urls:
if on_disk:
# Necessary for e.g. zip processing (random access)
with tempfile.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, "tempfile")
download(url=url, filename=filename)
with open(filename, 'rb') as f:
yield f
else:
if show_info:
log.info("Reading from URL: {}", url)
with urllib.request.urlopen(url) as f:
yield f
if show_info:
log.info("... finished reading from URL: {}", url)
|
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
print(find_packages(exclude=['contrib', 'docs', 'tests']))
setup(
name='pipelines',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.0.1-BETA',
description='A ',
long_description=long_description,
# The project's main homepage.
url='https://github.com/InformaticsMatters/pipelines/',
# Author details
author='Tim Dudgeon',
# Choose your license
license='Apache 2.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: Apache Software License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=["pipelines.dimorphite", "pipelines.dmpk", "pipelines.docking", "pipelines.rdkit", "pipelines.xchem"],
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'sample=sample:main',
],
},
)
|
import csv
header = ['percent', 'sex', 'sSize']
d1 = ['58.35', 'F', '537']
d2 = ['41.65', 'M', '537']
d3 = ['7', 'F', '1023']
d4 = ['12', 'M', '1023']
d5 = ['10.2', 'F', '5013']
d6 = ['19.2', 'M', '5013']
d7 = ['18', 'F', '9215']
d8 = ['26', 'M', '9215']
d9 = ['10', 'F', '15154']
d10 = ['15', 'M', '15154']
print "writing file"
userData = [header, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10]
with open('cannausers.csv', 'w', newline='') as cFile:
    writeCSV = csv.writer(cFile)
    writeCSV.writerows(userData)
print("created cannausers.csv")
|
import sys
import get_dictionary
def unkify(tokens, words_dict):
final = []
for token in tokens:
# only process the train singletons and unknown words
if len(token.rstrip()) == 0:
final.append('UNK')
elif not(token.rstrip() in words_dict):
numCaps = 0
hasDigit = False
hasDash = False
hasLower = False
for char in token.rstrip():
if char.isdigit():
hasDigit = True
elif char == '-':
hasDash = True
elif char.isalpha():
if char.islower():
hasLower = True
elif char.isupper():
numCaps += 1
result = 'UNK'
lower = token.rstrip().lower()
ch0 = token.rstrip()[0]
if ch0.isupper():
if numCaps == 1:
result = result + '-INITC'
if lower in words_dict:
result = result + '-KNOWNLC'
else:
result = result + '-CAPS'
elif not(ch0.isalpha()) and numCaps > 0:
result = result + '-CAPS'
elif hasLower:
result = result + '-LC'
if hasDigit:
result = result + '-NUM'
if hasDash:
result = result + '-DASH'
if lower[-1] == 's' and len(lower) >= 3:
ch2 = lower[-2]
if not(ch2 == 's') and not(ch2 == 'i') and not(ch2 == 'u'):
result = result + '-s'
elif len(lower) >= 5 and not(hasDash) and not(hasDigit and numCaps > 0):
if lower[-2:] == 'ed':
result = result + '-ed'
elif lower[-3:] == 'ing':
result = result + '-ing'
elif lower[-3:] == 'ion':
result = result + '-ion'
elif lower[-2:] == 'er':
result = result + '-er'
elif lower[-3:] == 'est':
result = result + '-est'
elif lower[-2:] == 'ly':
result = result + '-ly'
elif lower[-3:] == 'ity':
result = result + '-ity'
elif lower[-1] == 'y':
result = result + '-y'
elif lower[-2:] == 'al':
result = result + '-al'
final.append(result)
else:
final.append(token.rstrip())
return final
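# Examples of the rules above, assuming an empty training dictionary:
#   unkify(['Graphene'], {})  -> ['UNK-INITC']    (capitalized, one capital)
#   unkify(['run-down'], {})  -> ['UNK-LC-DASH']  (lowercase with a dash)
#   unkify(['1990s'], {})     -> ['UNK-LC-NUM-s'] (digits, plural-like suffix)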
def is_next_open_bracket(line, start_idx):
for char in line[(start_idx + 1):]:
if char == '(':
return True
elif char == ')':
return False
raise IndexError('Bracket possibly not balanced, open bracket not followed by closed bracket')
def get_between_brackets(line, start_idx):
output = []
for char in line[(start_idx + 1):]:
if char == ')':
break
assert not(char == '(')
output.append(char)
return ''.join(output)
def get_tags_tokens_lowercase(line):
output = []
#print 'curr line', line_strip
line_strip = line.rstrip()
#print 'length of the sentence', len(line_strip)
for i in range(len(line_strip)):
if i == 0:
assert line_strip[i] == '('
if line_strip[i] == '(' and not(is_next_open_bracket(line_strip, i)): # fulfilling this condition means this is a terminal symbol
output.append(get_between_brackets(line_strip, i))
#print 'output:',output
output_tags = []
output_tokens = []
output_lowercase = []
for terminal in output:
terminal_split = terminal.split()
assert len(terminal_split) == 2 # each terminal contains a POS tag and word
output_tags.append(terminal_split[0])
output_tokens.append(terminal_split[1])
output_lowercase.append(terminal_split[1].lower())
return [output_tags, output_tokens, output_lowercase]
def get_nonterminal(line, start_idx):
assert line[start_idx] == '(' # make sure it's an open bracket
output = []
for char in line[(start_idx + 1):]:
if char == ' ':
break
assert not(char == '(') and not(char == ')')
output.append(char)
return ''.join(output)
def get_actions(line):
output_actions = []
line_strip = line.rstrip()
i = 0
max_idx = (len(line_strip) - 1)
while i <= max_idx:
assert line_strip[i] == '(' or line_strip[i] == ')'
if line_strip[i] == '(':
if is_next_open_bracket(line_strip, i): # open non-terminal
curr_NT = get_nonterminal(line_strip, i)
output_actions.append('NT(' + curr_NT + ')')
i += 1
while line_strip[i] != '(': # get the next open bracket, which may be a terminal or another non-terminal
i += 1
else: # it's a terminal symbol
output_actions.append('SHIFT')
while line_strip[i] != ')':
i += 1
i += 1
while line_strip[i] != ')' and line_strip[i] != '(':
i += 1
else:
output_actions.append('REDUCE')
if i == max_idx:
break
i += 1
while line_strip[i] != ')' and line_strip[i] != '(':
i += 1
assert i == max_idx
return output_actions
def main():
if len(sys.argv) != 3:
        raise ValueError('Program only takes two arguments: train file and dev file (for vocabulary mapping purposes)')
train_file = open(sys.argv[1], 'r')
lines = train_file.readlines()
train_file.close()
dev_file = open(sys.argv[2], 'r')
dev_lines = dev_file.readlines()
dev_file.close()
words_list = get_dictionary.get_dict(lines)
line_ctr = 0
    # generate the oracle for the dev file
for line in dev_lines:
line_ctr += 1
        # check that the parentheses are balanced
        if line.count('(') != line.count(')'):
            raise ValueError('Unbalanced number of parentheses in line ' + str(line_ctr))
        # first line: the bracketed tree itself
        print('# ' + line.rstrip())
        tags, tokens, lowercase = get_tags_tokens_lowercase(line)
        assert len(tags) == len(tokens)
        assert len(tokens) == len(lowercase)
        print(' '.join(tags))
        print(' '.join(tokens))
        print(' '.join(lowercase))
        unkified = unkify(tokens, words_list)
        print(' '.join(unkified))
        output_actions = get_actions(line)
        for action in output_actions:
            print(action)
        print('')
if __name__ == "__main__":
main()
|
"""Tests for Blenderbot Tokenizers, including common tests for BlenderbotSmallTokenizer."""
import unittest
from transformers import BlenderbotTokenizer, BlenderbotTokenizerFast
from transformers.file_utils import cached_property
class Blenderbot3BTokenizerTests(unittest.TestCase):
@cached_property
def tokenizer_3b(self):
return BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
@cached_property
def rust_tokenizer_3b(self):
return BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
def test_encode_decode_cycle(self):
tok = self.tokenizer_3b
src_text = " I am a small frog."
encoded = tok([src_text], padding=False, truncation=False)["input_ids"]
decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
assert src_text == decoded
def test_encode_decode_cycle_rust_tokenizer(self):
tok = self.rust_tokenizer_3b
src_text = " I am a small frog."
encoded = tok([src_text], padding=False, truncation=False)["input_ids"]
decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
assert src_text == decoded
def test_3B_tokenization_same_as_parlai(self):
assert self.tokenizer_3b.add_prefix_space
assert self.tokenizer_3b([" Sam", "Sam"]).input_ids == [[5502, 2], [5502, 2]]
def test_3B_tokenization_same_as_parlai_rust_tokenizer(self):
assert self.rust_tokenizer_3b.add_prefix_space
assert self.rust_tokenizer_3b([" Sam", "Sam"]).input_ids == [[5502, 2], [5502, 2]]
|
import logging
import os.path
import math
from io import StringIO
from pptx.shapes.graphfrm import GraphicFrame
from pptx.chart.data import ChartData, XyChartData
from pptx.enum.chart import XL_CHART_TYPE as ct
from pptx.chart.chart import Chart
import pandas as pd
import numpy as np
from six import string_types, text_type
import pptx_template.pyel as pyel
import pptx_template.text as txt
import pptx_template.pptx_util as util
log = logging.getLogger()
def _nan_to_none(x):
# log.debug(u" type of x:%s is:%s" % (x, type(x)))
if isinstance(x, np.generic):
result = None if np.isnan(x) else x.item()
elif isinstance(x, string_types):
result = _to_unicode(x)
elif math.isnan(x):
result = None
else:
result = x
return result
def _to_unicode(s):
    # six.text_type is unicode on Python 2 and str on Python 3.
    return s if isinstance(s, text_type) else s.decode('utf-8')
def _build_xy_chart_data(csv, xy_transpose, number_format):
chart_data = XyChartData()
for i in range(1, csv.columns.size):
        # Work around a UnicodeDecodeError in downstream processing on Python
        # 2.7 when the name contains Japanese; the name is never actually used
        # inside the pptx.
series = chart_data.add_series(u"column%s" % i, number_format=_normalize_number_format(number_format))
xy_col = csv.iloc[:, [0, i]]
for (_, row) in xy_col.iterrows():
if xy_transpose:
y, x = _nan_to_none(row[0]), _nan_to_none(row[1])
else:
x, y = _nan_to_none(row[0]), _nan_to_none(row[1])
log.debug(u" Adding xy %s,%s" % (x, y))
series.add_data_point(x, y)
return chart_data
def _build_chart_data(csv, number_format):
chart_data = ChartData()
categories = [_nan_to_none(x) or '' for x in csv.iloc[:,0].values]
log.debug(u" Setting categories with values:%s" % categories)
chart_data.categories = categories
for i in range(1, csv.columns.size):
col = csv.iloc[:, i]
values = [_nan_to_none(x) for x in col.values]
name = _to_unicode(col.name)
log.debug(u" Adding series:%s values:%s" % (name, values))
        # Ideally number_format would be inherited from the existing chart's
        # settings, but python-pptx v0.6.17 has no API to read an existing
        # chart's chart_data; a new chart_data must be built and passed to
        # chart.replace_data(). So number_format is taken from the model's
        # options instead.
chart_data.add_series(name, values, _normalize_number_format(number_format))
return chart_data
def _normalize_number_format(number_format):
    """
    Inside pptx.chart a '\\' ends up being treated as '\', so escape it once more.
    """
    return number_format.replace('\\', '\\\\') if number_format is not None else number_format
def _is_xy_chart(chart):
xy_charts = [ct.XY_SCATTER_LINES, ct.XY_SCATTER_LINES_NO_MARKERS, ct.XY_SCATTER, ct.XY_SCATTER_SMOOTH, ct.XY_SCATTER_SMOOTH_NO_MARKERS]
return chart.chart_type in xy_charts
def _set_value_axis(chart, chart_id, chart_setting):
max = chart_setting.get('value_axis_max')
min = chart_setting.get('value_axis_min')
if max or min:
util.set_value_axis(chart, max = max, min = min)
def _load_csv_into_dataframe(chart_id, chart_setting):
if 'body' in chart_setting:
csv_body = chart_setting.get('body')
return pd.read_csv(StringIO(csv_body), index_col=False)
elif 'tsv_body' in chart_setting:
tsv_body = chart_setting.get('tsv_body')
return pd.read_csv(StringIO(tsv_body), delimiter='\t', index_col=False)
else:
csv_file_name = chart_setting.get('file_name')
if not csv_file_name:
for ext in ['csv', 'tsv']:
csv_file_name = "%s.%s" % (chart_id, ext)
if os.path.isfile(csv_file_name):
break
else:
raise ValueError(u"File not found: csv or tsv for %s" % chart_id)
log.debug(u" Loading from csv file: %s" % csv_file_name)
delimiter = '\t' if csv_file_name.endswith('.tsv') else ','
return pd.read_csv(csv_file_name, delimiter=delimiter, index_col=False)
def _replace_chart_data_with_csv(chart, chart_id, chart_setting):
"""
    Load data from the specified CSV into a single chart.
"""
log.debug(chart_setting)
csv = _load_csv_into_dataframe(chart_id, chart_setting)
log.debug(u" Loaded Data:\n%s" % csv)
number_format = chart_setting.get("number_format")
xy_transpose = chart_setting.get("xy_transpose")
if _is_xy_chart(chart):
log.info(u"Setting csv/tsv into XY chart_id: %s" % chart_id)
chart_data = _build_xy_chart_data(csv, xy_transpose, number_format)
else:
log.info(u"Setting csv/tsv into chart_id: %s" % chart_id)
chart_data = _build_chart_data(csv, number_format)
chart.replace_data(chart_data)
log.debug(u" Completed chart data replacement.")
return
def load_data_into_chart(chart, model):
log.debug(u"model:%s" % model)
    # Find the string wrapped in {} in the chart title and use it as the key
    # linking the chart to its settings.
if not chart.has_title or not chart.chart_title.has_text_frame:
return
    # Get the chart ID
title_frame = chart.chart_title.text_frame
chart_id = txt.search_first_el(title_frame.text)
if not chart_id:
return
    # Remove the chart ID from the chart title
txt.replace_el_in_text_frame_with_str(title_frame, chart_id, '')
    # Replace any EL expressions in the chart title
txt.replace_all_els_in_text_frame(title_frame, model)
chart_setting = pyel.eval_el(chart_id, model)
log.debug(u" Found chart_id: %s, chart_setting: %s" % (chart_id, chart_setting))
    # Load the data into the chart
_replace_chart_data_with_csv(chart, chart_id, chart_setting)
    # Set the value axis maximum and minimum
_set_value_axis(chart, chart_id, chart_setting)
def select_all_chart_shapes(slide):
    # shape_type 3 corresponds to MSO_SHAPE_TYPE.CHART
    return [s.chart for s in slide.shapes if isinstance(s, GraphicFrame) and s.shape_type == 3]
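# Usage sketch (hypothetical model dict; each chart's title embeds its chart
# ID as an EL expression such as {charts.sales}):
#
#     for chart in select_all_chart_shapes(slide):
#         load_data_into_chart(chart, model)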
|
import logging
from base64 import b64encode
from winrm.exceptions import WinRMOperationTimeoutError
from airflow.configuration import conf
from airflow.contrib.hooks.winrm_hook import WinRMHook
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
logging.getLogger('urllib3.connectionpool').setLevel(logging.ERROR)
class WinRMOperator(BaseOperator):
"""
WinRMOperator to execute commands on given remote host using the winrm_hook.
:param winrm_hook: predefined ssh_hook to use for remote execution
:type winrm_hook: airflow.contrib.hooks.winrm_hook.WinRMHook
:param ssh_conn_id: connection id from airflow Connections
:type ssh_conn_id: str
:param remote_host: remote host to connect
:type remote_host: str
:param command: command to execute on remote host. (templated)
:type command: str
:param timeout: timeout for executing the command.
:type timeout: int
"""
template_fields = ('command',)
@apply_defaults
def __init__(self,
winrm_hook=None,
ssh_conn_id=None,
remote_host=None,
command=None,
timeout=10,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.winrm_hook = winrm_hook
self.ssh_conn_id = ssh_conn_id
self.remote_host = remote_host
self.command = command
self.timeout = timeout
def execute(self, context):
if self.ssh_conn_id and not self.winrm_hook:
self.log.info("Hook not found, creating...")
self.winrm_hook = WinRMHook(ssh_conn_id=self.ssh_conn_id)
if not self.winrm_hook:
raise AirflowException("Cannot operate without winrm_hook or ssh_conn_id.")
if self.remote_host is not None:
self.winrm_hook.remote_host = self.remote_host
if not self.command:
raise AirflowException("No command specified so nothing to execute here.")
winrm_client = self.winrm_hook.get_conn()
try:
self.log.info("Running command: '%s'...", self.command)
command_id = self.winrm_hook.winrm_protocol.run_command(
winrm_client,
self.command
)
# See: https://github.com/diyan/pywinrm/blob/master/winrm/protocol.py
stdout_buffer = []
stderr_buffer = []
command_done = False
while not command_done:
try:
stdout, stderr, return_code, command_done = \
self.winrm_hook.winrm_protocol._raw_get_command_output(
winrm_client,
command_id
)
# Only buffer stdout if we need to so that we minimize memory usage.
if self.do_xcom_push:
stdout_buffer.append(stdout)
stderr_buffer.append(stderr)
for line in stdout.decode('utf-8').splitlines():
self.log.info(line)
for line in stderr.decode('utf-8').splitlines():
self.log.warning(line)
except WinRMOperationTimeoutError:
# this is an expected error when waiting for a
# long-running process, just silently retry
pass
self.winrm_hook.winrm_protocol.cleanup_command(winrm_client, command_id)
self.winrm_hook.winrm_protocol.close_shell(winrm_client)
except Exception as e:
raise AirflowException("WinRM operator error: {0}".format(str(e)))
if return_code == 0:
# returning output if do_xcom_push is set
enable_pickling = conf.getboolean(
'core', 'enable_xcom_pickling'
)
if enable_pickling:
return stdout_buffer
else:
return b64encode(b''.join(stdout_buffer)).decode('utf-8')
else:
error_msg = "Error running cmd: {0}, return code: {1}, error: {2}".format(
self.command,
return_code,
b''.join(stderr_buffer).decode('utf-8')
)
raise AirflowException(error_msg)
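# Usage sketch (hypothetical DAG and connection id):
#
#     winrm_task = WinRMOperator(
#         task_id='run_dir',
#         command='dir C:\\Temp',
#         ssh_conn_id='winrm_default',
#         timeout=30,
#     )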
|
import collections
import six
from heat.engine.cfn import functions as cfn_funcs
from heat.engine import function
from heat.engine import parameters
from heat.engine import rsrc_defn
from heat.engine import template
# `_` below is assumed to be oslo-incubator's gettext helper, as used by
# heat modules of this era.
from heat.openstack.common.gettextutils import _
_RESOURCE_KEYS = (
RES_TYPE, RES_PROPERTIES, RES_METADATA, RES_DEPENDS_ON,
RES_DELETION_POLICY, RES_UPDATE_POLICY, RES_DESCRIPTION,
) = (
'Type', 'Properties', 'Metadata', 'DependsOn',
'DeletionPolicy', 'UpdatePolicy', 'Description',
)
class CfnTemplate(template.Template):
'''A stack template.'''
SECTIONS = (VERSION, ALTERNATE_VERSION, DESCRIPTION, MAPPINGS,
PARAMETERS, RESOURCES, OUTPUTS) = \
('AWSTemplateFormatVersion', 'HeatTemplateFormatVersion',
'Description', 'Mappings', 'Parameters', 'Resources', 'Outputs'
)
SECTIONS_NO_DIRECT_ACCESS = set([PARAMETERS, VERSION, ALTERNATE_VERSION])
functions = {
'Fn::FindInMap': cfn_funcs.FindInMap,
'Fn::GetAZs': cfn_funcs.GetAZs,
'Ref': cfn_funcs.Ref,
'Fn::GetAtt': cfn_funcs.GetAtt,
'Fn::Select': cfn_funcs.Select,
'Fn::Join': cfn_funcs.Join,
'Fn::Base64': cfn_funcs.Base64,
}
def __getitem__(self, section):
'''Get the relevant section in the template.'''
if section not in self.SECTIONS:
raise KeyError(_('"%s" is not a valid template section') % section)
if section in self.SECTIONS_NO_DIRECT_ACCESS:
raise KeyError(
_('Section %s can not be accessed directly.') % section)
if section == self.DESCRIPTION:
default = 'No description'
else:
default = {}
# if a section is None (empty yaml section) return {}
# to be consistent with an empty json section.
return self.t.get(section) or default
def param_schemata(self):
params = self.t.get(self.PARAMETERS) or {}
return dict((name, parameters.Schema.from_dict(name, schema))
for name, schema in six.iteritems(params))
def parameters(self, stack_identifier, user_params):
return parameters.Parameters(stack_identifier, self,
user_params=user_params)
def resource_definitions(self, stack):
def rsrc_defn_item(name, snippet):
data = self.parse(stack, snippet)
def get_check_type(key, valid_types, typename, default=None):
if key in data:
field = data[key]
if not isinstance(field, valid_types):
args = {'name': name, 'key': key, 'typename': typename}
                    msg = _('Resource %(name)s %(key)s type '
                            'must be %(typename)s') % args
raise TypeError(msg)
return field
else:
return default
            resource_type = get_check_type(RES_TYPE, six.string_types, 'string')
if resource_type is None:
args = {'name': name, 'type_key': RES_TYPE}
msg = _('Resource %(name)s is missing "%(type_key)s"') % args
raise KeyError(msg)
properties = get_check_type(RES_PROPERTIES,
(collections.Mapping,
function.Function),
'object')
metadata = get_check_type(RES_METADATA,
(collections.Mapping,
function.Function),
'object')
depends = get_check_type(RES_DEPENDS_ON,
collections.Sequence,
'list or string',
default=None)
            if isinstance(depends, six.string_types):
depends = [depends]
            deletion_policy = get_check_type(RES_DELETION_POLICY,
                                             six.string_types,
                                             'string')
update_policy = get_check_type(RES_UPDATE_POLICY,
(collections.Mapping,
function.Function),
'object')
            description = get_check_type(RES_DESCRIPTION,
                                         six.string_types,
                                         'string',
                                         default='')
defn = rsrc_defn.ResourceDefinition(name, resource_type,
properties, metadata,
depends,
deletion_policy,
update_policy,
description=description)
return name, defn
resources = self.t.get(self.RESOURCES) or {}
return dict(rsrc_defn_item(name, data)
for name, data in resources.items())
def add_resource(self, definition, name=None):
if name is None:
name = definition.name
hot_tmpl = definition.render_hot()
HOT_TO_CFN_ATTRS = {'type': RES_TYPE,
'properties': RES_PROPERTIES,
'metadata': RES_METADATA,
'depends_on': RES_DEPENDS_ON,
'deletion_policy': RES_DELETION_POLICY,
'update_policy': RES_UPDATE_POLICY}
cfn_tmpl = dict((HOT_TO_CFN_ATTRS[k], v) for k, v in hot_tmpl.items())
if len(cfn_tmpl.get(RES_DEPENDS_ON, [])) == 1:
cfn_tmpl[RES_DEPENDS_ON] = cfn_tmpl[RES_DEPENDS_ON][0]
if self.t.get(self.RESOURCES) is None:
self.t[self.RESOURCES] = {}
self.t[self.RESOURCES][name] = cfn_tmpl
class HeatTemplate(CfnTemplate):
functions = {
'Fn::FindInMap': cfn_funcs.FindInMap,
'Fn::GetAZs': cfn_funcs.GetAZs,
'Ref': cfn_funcs.Ref,
'Fn::GetAtt': cfn_funcs.GetAtt,
'Fn::Select': cfn_funcs.Select,
'Fn::Join': cfn_funcs.Join,
'Fn::Split': cfn_funcs.Split,
'Fn::Replace': cfn_funcs.Replace,
'Fn::Base64': cfn_funcs.Base64,
'Fn::MemberListToMap': cfn_funcs.MemberListToMap,
'Fn::ResourceFacade': cfn_funcs.ResourceFacade,
}
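# Sketch of the section-access rules implemented in __getitem__ above
# (assuming `tmpl` is a parsed CfnTemplate instance):
#
#     tmpl[CfnTemplate.DESCRIPTION]  # 'No description' when absent
#     tmpl[CfnTemplate.PARAMETERS]   # KeyError: no direct access
#     tmpl['Bogus']                  # KeyError: not a valid section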
|
import TownLoader
import MMStreet
from toontown.suit import Suit
class MMTownLoader(TownLoader.TownLoader):
def __init__(self, hood, parentFSM, doneEvent):
TownLoader.TownLoader.__init__(self, hood, parentFSM, doneEvent)
self.streetClass = MMStreet.MMStreet
self.musicFile = 'phase_6/audio/bgm/MM_SZ.ogg'
self.activityMusicFile = 'phase_6/audio/bgm/MM_SZ_activity.ogg'
self.townStorageDNAFile = 'phase_6/dna/storage_MM_town.pdna'
def load(self, zoneId):
TownLoader.TownLoader.load(self, zoneId)
Suit.loadSuits(2)
dnaFile = 'phase_6/dna/minnies_melody_land_' + str(self.canonicalBranchZone) + '.pdna'
self.createHood(dnaFile)
def unload(self):
Suit.unloadSuits(2)
TownLoader.TownLoader.unload(self)
|
import itertools
from hazelcast.future import combine_futures, ImmediateFuture
from hazelcast.protocol.codec import map_add_entry_listener_codec, map_add_entry_listener_to_key_codec, \
map_add_entry_listener_with_predicate_codec, map_add_entry_listener_to_key_with_predicate_codec, \
map_add_index_codec, map_clear_codec, map_contains_key_codec, map_contains_value_codec, map_delete_codec, \
map_entry_set_codec, map_entries_with_predicate_codec, map_evict_codec, map_evict_all_codec, map_flush_codec, \
map_force_unlock_codec, map_get_codec, map_get_all_codec, map_get_entry_view_codec, map_is_empty_codec, \
map_is_locked_codec, map_key_set_codec, map_key_set_with_predicate_codec, map_load_all_codec, \
map_load_given_keys_codec, map_lock_codec, map_put_codec, map_put_all_codec, map_put_if_absent_codec, \
map_put_transient_codec, map_size_codec, map_remove_codec, map_remove_if_same_codec, \
map_remove_entry_listener_codec, map_replace_codec, map_replace_if_same_codec, map_set_codec, map_try_lock_codec, \
map_try_put_codec, map_try_remove_codec, map_unlock_codec, map_values_codec, map_values_with_predicate_codec, \
map_add_interceptor_codec, map_execute_on_all_keys_codec, map_execute_on_key_codec, map_execute_on_keys_codec, \
map_execute_with_predicate_codec
from hazelcast.proxy.base import Proxy, EntryEvent, EntryEventType, get_entry_listener_flags
from hazelcast.util import check_not_none, thread_id, to_millis
class Map(Proxy):
def add_entry_listener(self, include_value=False, key=None, predicate=None, added=None, removed=None, updated=None,
evicted=None, evict_all=None, clear_all=None, merged=None, expired=None):
flags = get_entry_listener_flags(added=added, removed=removed, updated=updated,
evicted=evicted, evict_all=evict_all, clear_all=clear_all, merged=merged,
expired=expired)
if key and predicate:
key_data = self._to_data(key)
predicate_data = self._to_data(predicate)
request = map_add_entry_listener_to_key_with_predicate_codec.encode_request(self.name, key_data,
predicate_data, include_value,
flags, False)
elif key and not predicate:
key_data = self._to_data(key)
request = map_add_entry_listener_to_key_codec.encode_request(self.name, key_data, include_value, flags,
False)
elif not key and predicate:
predicate = self._to_data(predicate)
request = map_add_entry_listener_with_predicate_codec.encode_request(self.name, predicate, include_value,
flags, False)
else:
request = map_add_entry_listener_codec.encode_request(self.name, include_value, flags, False)
def handle_event_entry(**_kwargs):
event = EntryEvent(self._to_object, **_kwargs)
if event.event_type == EntryEventType.added:
added(event)
elif event.event_type == EntryEventType.removed:
removed(event)
elif event.event_type == EntryEventType.updated:
updated(event)
elif event.event_type == EntryEventType.evicted:
evicted(event)
elif event.event_type == EntryEventType.evict_all:
evict_all(event)
elif event.event_type == EntryEventType.clear_all:
clear_all(event)
elif event.event_type == EntryEventType.merged:
merged(event)
elif event.event_type == EntryEventType.expired:
expired(event)
return self._start_listening(request,
lambda m: map_add_entry_listener_codec.handle(m,
handle_event_entry),
lambda r: map_add_entry_listener_codec.decode_response(r)[
'response'])
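    # Usage sketch (hypothetical callback): listen for additions only, with
    # values included in the events.
    #
    #     def on_added(event):
    #         print(event.key, event.value)
    #
    #     my_map.add_entry_listener(include_value=True, added=on_added)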
def add_index(self, attribute, ordered=False):
return self._encode_invoke(map_add_index_codec, attribute=attribute, ordered=ordered)
def add_interceptor(self, interceptor):
return self._encode_invoke(map_add_interceptor_codec, interceptor=self._to_data(interceptor))
def clear(self):
return self._encode_invoke(map_clear_codec)
def contains_key(self, key):
"""
:param key:
:return:
"""
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_contains_key_codec, key_data,
key=key_data, thread_id=thread_id())
def contains_value(self, value):
check_not_none(value, "value can't be None")
value_data = self._to_data(value)
return self._encode_invoke(map_contains_value_codec, value=value_data)
def delete(self, key):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_delete_codec, key_data, key=key_data,
thread_id=thread_id())
def entry_set(self, predicate=None):
if predicate:
predicate_data = self._to_data(predicate)
return self._encode_invoke(map_entries_with_predicate_codec, predicate=predicate_data)
else:
return self._encode_invoke(map_entry_set_codec)
def evict(self, key):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_evict_codec, key_data, key=key_data,
thread_id=thread_id())
def evict_all(self):
return self._encode_invoke(map_evict_all_codec)
def execute_on_entries(self, entry_processor, predicate=None):
if predicate:
return self._encode_invoke(map_execute_with_predicate_codec, entry_processor=self._to_data(entry_processor),
predicate=self._to_data(predicate))
return self._encode_invoke(map_execute_on_all_keys_codec, entry_processor=self._to_data(entry_processor))
def execute_on_key(self, key, entry_processor):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_execute_on_key_codec, key_data, key=key_data,
entry_processor=self._to_data(entry_processor), thread_id=thread_id())
def execute_on_keys(self, keys, entry_processor):
key_list = []
for key in keys:
check_not_none(key, "key can't be None")
key_list.append(self._to_data(key))
return self._encode_invoke(map_execute_on_keys_codec, entry_processor=self._to_data(entry_processor),
keys=key_list)
def flush(self):
return self._encode_invoke(map_flush_codec)
def force_unlock(self, key):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_force_unlock_codec, key_data, key=key_data)
def get(self, key):
"""
:param key:
:return:
"""
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_get_codec, key_data, key=key_data, thread_id=thread_id())
def get_all(self, keys):
check_not_none(keys, "keys can't be None")
if not keys:
return ImmediateFuture({})
partition_service = self._client.partition_service
partition_to_keys = {}
for key in keys:
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
partition_id = partition_service.get_partition_id(key_data)
try:
partition_to_keys[partition_id].append(key_data)
except KeyError:
partition_to_keys[partition_id] = [key_data]
futures = []
        for partition_id, key_list in partition_to_keys.items():
future = self._encode_invoke_on_partition(map_get_all_codec, partition_id, keys=key_list)
futures.append(future)
def merge(f):
return dict(itertools.chain(*f.result()))
return combine_futures(*futures).continue_with(merge)
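    # get_all fans out one request per partition and merges the per-partition
    # results, e.g. my_map.get_all(['k1', 'k2']).result() -> {'k1': v1, 'k2': v2}.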
def get_entry_view(self, key):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_get_entry_view_codec, key_data, key=key_data,
thread_id=thread_id())
def is_empty(self):
return self._encode_invoke(map_is_empty_codec)
def is_locked(self, key):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_is_locked_codec, key_data, key=key_data)
def key_set(self, predicate=None):
if predicate:
predicate_data = self._to_data(predicate)
return self._encode_invoke(map_key_set_with_predicate_codec, predicate=predicate_data)
else:
return self._encode_invoke(map_key_set_codec)
def load_all(self, keys=None, replace_existing_values=True):
if keys:
            key_data_list = list(map(self._to_data, keys))
return self._encode_invoke(map_load_given_keys_codec, keys=key_data_list,
replace_existing_values=replace_existing_values)
else:
return self._encode_invoke(map_load_all_codec,
replace_existing_values=replace_existing_values)
def lock(self, key, ttl=-1):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_lock_codec, key_data, key=key_data, thread_id=thread_id(),
ttl=to_millis(ttl))
def put(self, key, value, ttl=-1):
"""
:param key:
:param value:
:param ttl:
:return:
"""
check_not_none(key, "key can't be None")
check_not_none(value, "value can't be None")
key_data = self._to_data(key)
value_data = self._to_data(value)
return self._encode_invoke_on_key(map_put_codec, key_data, key=key_data, value=value_data,
thread_id=thread_id(),
ttl=to_millis(ttl))
def put_all(self, map):
check_not_none(map, "map can't be None")
if not map:
return ImmediateFuture(None)
partition_service = self._client.partition_service
partition_map = {}
        for key, value in map.items():  # items() keeps this Python 3 compatible
check_not_none(key, "key can't be None")
check_not_none(value, "value can't be None")
entry = (self._to_data(key), self._to_data(value))
partition_id = partition_service.get_partition_id(entry[0])
try:
partition_map[partition_id].append(entry)
except KeyError:
partition_map[partition_id] = [entry]
futures = []
        for partition_id, entry_list in partition_map.items():
future = self._encode_invoke_on_partition(map_put_all_codec, partition_id,
entries=dict(entry_list))
futures.append(future)
return combine_futures(*futures)
def put_if_absent(self, key, value, ttl=-1):
check_not_none(key, "key can't be None")
check_not_none(value, "value can't be None")
key_data = self._to_data(key)
value_data = self._to_data(value)
return self._encode_invoke_on_key(map_put_if_absent_codec, key_data, key=key_data,
value=value_data, thread_id=thread_id(), ttl=to_millis(ttl))
def put_transient(self, key, value, ttl=-1):
check_not_none(key, "key can't be None")
check_not_none(value, "value can't be None")
key_data = self._to_data(key)
value_data = self._to_data(value)
return self._encode_invoke_on_key(map_put_transient_codec, key_data, key=key_data,
value=value_data, thread_id=thread_id(), ttl=to_millis(ttl))
def remove(self, key):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_remove_codec, key_data, key=key_data,
thread_id=thread_id())
def remove_if_same(self, key, value):
check_not_none(key, "key can't be None")
check_not_none(value, "value can't be None")
key_data = self._to_data(key)
value_data = self._to_data(value)
return self._encode_invoke_on_key(map_remove_if_same_codec, key_data, key=key_data,
value=value_data, thread_id=thread_id())
def remove_entry_listener(self, registration_id):
return self._stop_listening(registration_id,
lambda i: map_remove_entry_listener_codec.encode_request(self.name, i))
def replace(self, key, value):
check_not_none(key, "key can't be None")
check_not_none(value, "value can't be None")
key_data = self._to_data(key)
value_data = self._to_data(value)
return self._encode_invoke_on_key(map_replace_codec, key_data, key=key_data, value=value_data,
thread_id=thread_id())
def replace_if_same(self, key, old_value, new_value):
check_not_none(key, "key can't be None")
check_not_none(old_value, "old_value can't be None")
check_not_none(new_value, "new_value can't be None")
key_data = self._to_data(key)
old_value_data = self._to_data(old_value)
new_value_data = self._to_data(new_value)
return self._encode_invoke_on_key(map_replace_if_same_codec, key_data, key=key_data,
test_value=old_value_data,
value=new_value_data, thread_id=thread_id())
def set(self, key, value, ttl=-1):
"""
:param key:
:param value:
:param ttl:
:return:
"""
check_not_none(key, "key can't be None")
check_not_none(value, "value can't be None")
key_data = self._to_data(key)
value_data = self._to_data(value)
return self._encode_invoke_on_key(map_set_codec, key_data, key=key_data, value=value_data,
thread_id=thread_id(),
ttl=to_millis(ttl))
def size(self):
return self._encode_invoke(map_size_codec)
def try_lock(self, key, ttl=-1, timeout=0):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_try_lock_codec, key_data, key=key_data,
thread_id=thread_id(), lease=to_millis(ttl), timeout=to_millis(timeout))
def try_put(self, key, value, timeout=0):
check_not_none(key, "key can't be None")
check_not_none(value, "value can't be None")
key_data = self._to_data(key)
value_data = self._to_data(value)
return self._encode_invoke_on_key(map_try_put_codec, key_data, key=key_data, value=value_data,
thread_id=thread_id(), timeout=to_millis(timeout))
def try_remove(self, key, timeout=0):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_try_remove_codec, key_data, key=key_data,
thread_id=thread_id(), timeout=to_millis(timeout))
def unlock(self, key):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_unlock_codec, key_data, key=key_data,
thread_id=thread_id())
def values(self, predicate=None):
if predicate:
predicate_data = self._to_data(predicate)
return self._encode_invoke(map_values_with_predicate_codec, predicate=predicate_data)
else:
return self._encode_invoke(map_values_codec)
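
# --- Illustrative sketch (not part of the proxy above): get_all and put_all
# batch keys per partition and merge the per-partition futures. The helper
# below mimics only the grouping step with a plain hash-based partitioner;
# partition_count and the hash are stand-ins for the client's real
# partition_service, chosen for this demo.
def group_keys_by_partition(keys, partition_count=271):
    partition_to_keys = {}
    for key in keys:
        partition_id = hash(key) % partition_count
        partition_to_keys.setdefault(partition_id, []).append(key)
    return partition_to_keys

# Each resulting list would be sent with one _encode_invoke_on_partition call
# and the futures combined, e.g. combine_futures(*futures).continue_with(merge).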
|
import string
import copy
def explicit_element_decl_with_conf(i, words, element, name_subgraph, group, type_element):
comma=[]
config=[]
word=words[i+1]
index=string.find(word, '(')
for w in word.split(','):
if string.find(w,'(')!=-1 and string.find(w,')')==-1:
config.append(w[string.find(w,'(')+1:len(w)])
elif string.find(w,'(')!=-1 and string.find(w,')')!=-1:
config.append(w[string.find(w,'(')+1:len(w)-1])
elif string.find(w,')')!=-1:
config.append(w[0:len(w)-1])
else:
config.append(w)
if name_subgraph != '' and name_subgraph[len(name_subgraph)-1] != '.':
name_subgraph = name_subgraph+'.'
if group[len(group)-1] == '.':
group = group[0:len(group)-1]
    if words[i-1][0] == '[':
        # strip the leading "[num]" port prefix from the name token,
        # mirroring explicit_element_decl_without_conf below
        bracket_index = string.find(words[i-1], ']')
        name_word = words[i-1][bracket_index+1:]
    else:
        name_word = words[i-1]
    element[len(element)]=({'element':word[0:index], 'name':name_subgraph+name_word, 'config':config,'group':[group], 'node_type': type_element})
def explicit_element_decl_without_conf(i, words, element, name_subgraph, group, type_element):
word = ''
if name_subgraph != '' and name_subgraph[len(name_subgraph)-1] != '.':
name_subgraph = name_subgraph+'.'
if group[len(group)-1] == '.':
group = group[0:len(group)-1]
if words[i-1][0] == '[':
index = string.find(words[i-1], ']')
word = words[i-1][index+1:]
else:
word = words[i-1]
element[len(element)]=({'element':words[i+1], 'name':name_subgraph+word, 'config':[],'group':[group], 'node_type': type_element})
def implicit_element_decl_with_conf(i, words,element, name_subgraph, group, words2):
config=[]
word=words[i]
index=string.find(word, '(')
for w in word.split(','):
if string.find(w,'(')!=-1 and string.find(w,')')==-1:
config.append(w[string.find(w,'(')+1:len(w)])
elif string.find(w,'(')!=-1 and string.find(w,')')!=-1:
config.append(w[string.find(w,'(')+1:len(w)-1])
        elif string.find(w,')')!=-1:  # find() returns -1 when absent, which is truthy
config.append(w[0:len(w)-1])
else:
config.append(w)
name=nameGenerator(element, word[0:index])
if name_subgraph != '' and name_subgraph[len(name_subgraph)-1] != '.':
name_subgraph = name_subgraph+'.'
element[len(element)]=({'element':word[0:index], 'name':name_subgraph+name, 'config':config,'group':[group], 'node_type':'element'})
words2[i] = name_subgraph+name
def implicit_element_decl_without_conf(i,words,element, name_subgraph, group, words2):
name=nameGenerator(element, words[i])
if name_subgraph != '' and name_subgraph[len(name_subgraph)-1] != '.':
name_subgraph = name_subgraph+'.'
element[len(element)]=({'element':words[i], 'name':name_subgraph+name, 'config':[],'group':[group], 'node_type': 'element'})
words2[i] = name_subgraph+name
def subgraph_element_name(line, compound_element, element, group):
name=nameGenerator(element, 'subgraph')
element[len(element)]=({'element':'Compound_Element', 'name':name, 'config':[],'group':[group], 'node_type': 'compound_element'})
compound_element[len(compound_element)] = ({'name':name, 'compound':line})
return name
def rename_class_element(words, words1,words3, name_ele, name):
    for i in range(0,len(words1)): # rename the explicit elements of the line
if i >= len(words1):
continue
if words1[i] != '::' and words1[i] != '->' and string.find(words[i],'@') == -1 and string.find(words1[i], 'input') == -1 and string.find(words1[i], 'output') == -1:
if string.find(words1[i], '[') != -1:
start = string.find(words1[i], '[')
stop = string.find(words1[i], ']')
if start == 0:
name_element = words1[i][stop:]
else:
name_element = words1[i][0:start]
words1[i] = name_ele+'.'+name_element
else:
words1[i] = name_ele+'.'+words[i]
try:
index = words1.index('::')
del words1[index+1]
counter = len(name_ele)
if name_ele[counter-1] == '.':
words1[index-1] = name_ele + words1[index-1]
else:
words1[index-1] = name_ele + '.' + words1[index-1]
del words1[index]
except ValueError:
break
def rename_compound_element(words3, compound, element_renamed):
    for i in range(0,len(words3)): # rename the compound's elements contained in words3
try:
index = words3.index('::')
del words3[index+1]
words3[index-1] = compound[1]['name']+'.'+ words3[index-1]
del words3[index]
except ValueError:
break
compound[1]['compound']=words3
    for i in range(0,len(words3)): # rename previously declared elements that
        for e in element_renamed.items(): # still carry their original name
if words3[i] == e[1]['origin_name']:
words3[i] = e[1]['new_name']
elif string.find(words3[i], '[')!=-1:
start = string.find(words3[i], '[')
stop = string.find(words3[i], ']')
if start == 0:
name = words3[i][stop+1:]
elif stop == len(words3[i])-1:
name = words3[i][0:start]
if name == e[1]['origin_name']:
words3[i] = e[1]['new_name']
def nameGenerator(element, type_element): # default name is class@num
implicit_name = False
for e in element.items():
if string.find(e[1]['name'],'@')!=-1 and string.find(e[1]['name'],'.')==-1:
index = string.find(e[1]['name'],'@')
num = int(e[1]['name'][index+1:])
implicit_name = True
if implicit_name :
name = type_element+'@'+str(num+1)
else:
name = type_element+'@0'
return name
def load_list(line, words):
conf=False
port=False
word2=''
word3=''
line_old=' ['
line_new='['
line=line.replace(line_old,line_new)
line_old=['::','->',' ;']
line_new=[' :: ',' -> ',';']
    for i in range(0,len(line_old)): # handles explicit element declarations,
        line=line.replace(line_old[i],line_new[i]) # e.g.: name::element or name :: element
for word in line.split():
if conf:
if word[len(word)-1]==')' or word[len(word)-2]==')':
word=word2+' '+word
conf=False
else:
word2=word2+' '+word
continue
        if string.find(word,'(')!=-1 and string.find(word,')')==-1: # joins the pieces of one element's config
conf=True
word2=word
continue
        elif word[len(word)-1]==']' and word[0]=='[' and words[len(words)-1] == '->': # handles the input-port declaration style,
            word3=word # e.g.: [num]port or [num] port
port=True
continue
elif port:
word=word3+''+word
port=False
if word[len(word)-1]==';':
word=word[0:len(word)-1]
words.append(word)
    return words
def handle_edgeslevel(connection):
index = 0
for c in connection.items():
target_level = '0'
source_level = '0'
for w in range(0,len(c[1]['target'])):
if c[1]['target'][w] == '.':
index = w
target_level = c[1]['target'][0:index]
for w in range(0,len(c[1]['source'])):
if c[1]['source'][w] == '.':
index = w
source_level = c[1]['source'][0:index]
if source_level == target_level and source_level != '0' and target_level != '0':
c[1]['group'].append(source_level)
elif source_level == '0' and target_level == '0':
c[1]['group'].append('click')
else:
c[1]['group'].append('Null')
connection2 = connection.copy()
for c in connection.items():
if c[1]['group'] != 'click':
for c1 in connection2.items():
if c1[1]['target'] == c[1]['group']:
c[1]['depth'] = c1[1]['depth']+1
def check_element(check, element_name, words):
word = words
if string.find(words, '[') == 0:
index = string.find(words, ']')
word = words[index+1:]
elif string.find(words,']') == len(words)-1:
index = string.find(words,'[')
word = words[0:index]
start = 0
for i in range(0,len(element_name)):
if element_name[i]=='.':
start = i + 1
if word == element_name[start:]:
check = True
return check
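
# --- Illustrative driver (assumption: the helpers above tokenize lines of a
# Click-style configuration). The sample line is invented for this demo.
if __name__ == "__main__":
    tokens = []
    load_list("counter :: Counter(2, 3) -> sink;", tokens)
    print tokens  # ['counter', '::', 'Counter(2, 3)', '->', 'sink']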
|
from tapiriik.services import *
from .service_record import ServiceRecord
from tapiriik.database import db, cachedb
from bson.objectid import ObjectId
class Service:
    # These options are used as the fallback for all service records' configurations
_globalConfigurationDefaults = {
"sync_private": False,
"allow_activity_flow_exception_bypass_via_self": False
}
def Init():
Service._serviceMappings = {x.ID: x for x in Service.List()}
for svc in Service.List():
if svc.IDAliases:
Service._serviceMappings.update({x: svc for x in svc.IDAliases})
def FromID(id):
if id in Service._serviceMappings:
return Service._serviceMappings[id]
        raise ValueError("Unknown service ID: %s" % id)
def List():
return [RunKeeper, Strava, GarminConnect, SportTracks, Dropbox, TrainingPeaks, RideWithGPS, Endomondo, Motivato, NikePlus, VeloHero, TrainerRoad] + PRIVATE_SERVICES
def PreferredDownloadPriorityList():
        # Ideally, we'd make an informed decision based on whatever features the activity had
        # ...but that would require either a) downloading it from every service or b) storing a lot more activity metadata
        # So, I think this will do for now
return [
TrainerRoad, # Special case, since TR has a lot more data in some very specific areas
GarminConnect, # The reference
            SportTracks,  # Pretty much equivalent to GC, no temperature (not that GC temperature works all that well now, but I digress)
            TrainingPeaks,  # No separate run cadence, but has temperature
Dropbox, # Equivalent to any of the above
RideWithGPS, # Uses TCX for everything, so same as Dropbox
VeloHero, # PWX export, no temperature
Strava, # No laps
Endomondo, # No laps, no cadence
RunKeeper, # No laps, no cadence, no power
Motivato,
NikePlus
] + PRIVATE_SERVICES
def WebInit():
from tapiriik.settings import WEB_ROOT
from django.core.urlresolvers import reverse
for itm in Service.List():
itm.WebInit()
itm.UserDisconnectURL = WEB_ROOT + reverse("auth_disconnect", kwargs={"service": itm.ID})
def GetServiceRecordWithAuthDetails(service, authDetails):
return ServiceRecord(db.connections.find_one({"Service": service.ID, "Authorization": authDetails}))
def GetServiceRecordByID(uid):
return ServiceRecord(db.connections.find_one({"_id": ObjectId(uid)}))
def EnsureServiceRecordWithAuth(service, uid, authDetails, extendedAuthDetails=None, persistExtendedAuthDetails=False):
from tapiriik.auth.credential_storage import CredentialStore
if persistExtendedAuthDetails and not service.RequiresExtendedAuthorizationDetails:
raise ValueError("Attempting to persist extended auth details on service that doesn't use them")
# think this entire block could be replaced with an upsert...
serviceRecord = ServiceRecord(db.connections.find_one({"ExternalID": uid, "Service": service.ID}))
# Coming out of CredentialStorage these are objects that can't be stuffed into mongodb right away
# Should really figure out how to mangle pymongo into doing the serialization for me...
extendedAuthDetailsForStorage = CredentialStore.FlattenShadowedCredentials(extendedAuthDetails) if extendedAuthDetails else None
if serviceRecord is None:
db.connections.insert({"ExternalID": uid, "Service": service.ID, "SynchronizedActivities": [], "Authorization": authDetails, "ExtendedAuthorization": extendedAuthDetailsForStorage if persistExtendedAuthDetails else None})
serviceRecord = ServiceRecord(db.connections.find_one({"ExternalID": uid, "Service": service.ID}))
serviceRecord.ExtendedAuthorization = extendedAuthDetails # So SubscribeToPartialSyncTrigger can use it (we don't save the whole record after this point)
if service.PartialSyncTriggerRequiresPolling:
service.SubscribeToPartialSyncTrigger(serviceRecord) # The subscription is attached more to the remote account than to the local one, so we subscribe/unsubscribe here rather than in User.ConnectService, etc.
elif serviceRecord.Authorization != authDetails or (hasattr(serviceRecord, "ExtendedAuthorization") and serviceRecord.ExtendedAuthorization != extendedAuthDetailsForStorage):
db.connections.update({"ExternalID": uid, "Service": service.ID}, {"$set": {"Authorization": authDetails, "ExtendedAuthorization": extendedAuthDetailsForStorage if persistExtendedAuthDetails else None}})
# if not persisted, these details are stored in the cache db so they don't get backed up
if service.RequiresExtendedAuthorizationDetails:
if not persistExtendedAuthDetails:
cachedb.extendedAuthDetails.update({"ID": serviceRecord._id}, {"ID": serviceRecord._id, "ExtendedAuthorization": extendedAuthDetailsForStorage}, upsert=True)
else:
cachedb.extendedAuthDetails.remove({"ID": serviceRecord._id})
return serviceRecord
def PersistExtendedAuthDetails(serviceRecord):
if not serviceRecord.HasExtendedAuthorizationDetails():
raise ValueError("No extended auth details to persist")
if serviceRecord.ExtendedAuthorization:
# Already persisted, nothing to do
return
extAuthRecord = cachedb.extendedAuthDetails.find_one({"ID": serviceRecord._id})
if not extAuthRecord:
raise ValueError("Service record claims to have extended auth, facts suggest otherwise")
else:
extAuth = extAuthRecord["ExtendedAuthorization"]
db.connections.update({"_id": serviceRecord._id}, {"$set": {"ExtendedAuthorization": extAuth}})
cachedb.extendedAuthDetails.remove({"ID": serviceRecord._id})
def DeleteServiceRecord(serviceRecord):
svc = serviceRecord.Service
svc.DeleteCachedData(serviceRecord)
if svc.PartialSyncTriggerRequiresPolling and serviceRecord.PartialSyncTriggerSubscribed:
svc.UnsubscribeFromPartialSyncTrigger(serviceRecord)
svc.RevokeAuthorization(serviceRecord)
cachedb.extendedAuthDetails.remove({"ID": serviceRecord._id})
db.connections.remove({"_id": serviceRecord._id})
Service.Init()
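
# --- Illustrative sketch (not part of tapiriik itself): how the registry is
# typically consumed once Service.Init() has run at import time. The "strava"
# ID is an assumption for this demo; real IDs come from each service class.
def _demo_lookup():
    strava = Service.FromID("strava")  # resolves via Service._serviceMappings
    return strava in Service.PreferredDownloadPriorityList()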
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.system_availability_managers import AvailabilityManagerHighTemperatureTurnOn
log = logging.getLogger(__name__)
class TestAvailabilityManagerHighTemperatureTurnOn(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_availabilitymanagerhightemperatureturnon(self):
pyidf.validation_level = ValidationLevel.error
obj = AvailabilityManagerHighTemperatureTurnOn()
# alpha
var_name = "Name"
obj.name = var_name
# node
var_sensor_node_name = "node|Sensor Node Name"
obj.sensor_node_name = var_sensor_node_name
# real
var_temperature = 3.3
obj.temperature = var_temperature
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.availabilitymanagerhightemperatureturnons[0].name, var_name)
self.assertEqual(idf2.availabilitymanagerhightemperatureturnons[0].sensor_node_name, var_sensor_node_name)
self.assertAlmostEqual(idf2.availabilitymanagerhightemperatureturnons[0].temperature, var_temperature)
|
import contextlib
import os
import fixtures
import mock
import webob.exc
from neutron.common import constants
from neutron.common.test_lib import test_config
from neutron.common import topics
from neutron import context
from neutron.db import db_base_plugin_v2
from neutron import manager
from neutron.plugins.nec.common import exceptions as nexc
from neutron.plugins.nec.db import api as ndb
from neutron.plugins.nec import nec_plugin
from neutron.tests.unit.nec import fake_ofc_manager
from neutron.tests.unit import test_db_plugin as test_plugin
PLUGIN_NAME = 'neutron.plugins.nec.nec_plugin.NECPluginV2'
OFC_MANAGER = 'neutron.plugins.nec.nec_plugin.ofc_manager.OFCManager'
NOTIFIER = 'neutron.plugins.nec.nec_plugin.NECPluginV2AgentNotifierApi'
NEC_PLUGIN_INI = """
[DEFAULT]
api_extensions_path = neutron/plugins/nec/extensions
[OFC]
driver = neutron.tests.unit.nec.stub_ofc_driver.StubOFCDriver
enable_packet_filter = False
"""
class NecPluginV2TestCaseBase(object):
_nec_ini = NEC_PLUGIN_INI
def _set_nec_ini(self):
self.nec_ini_file = self.useFixture(fixtures.TempDir()).join("nec.ini")
with open(self.nec_ini_file, 'w') as f:
f.write(self._nec_ini)
if 'config_files' in test_config.keys():
for c in test_config['config_files']:
if c.rfind("/nec.ini") > -1:
test_config['config_files'].remove(c)
test_config['config_files'].append(self.nec_ini_file)
else:
test_config['config_files'] = [self.nec_ini_file]
def _clean_nec_ini(self):
test_config['config_files'].remove(self.nec_ini_file)
os.remove(self.nec_ini_file)
self.nec_ini_file = None
def patch_remote_calls(self, use_stop=False):
self.plugin_notifier_p = mock.patch(NOTIFIER)
self.ofc_manager_p = mock.patch(OFC_MANAGER)
self.plugin_notifier_p.start()
self.ofc_manager_p.start()
# When using mock.patch.stopall, we need to ensure
# stop is not used anywhere in a single test.
# In Neutron several tests use stop for each patched object,
# so we need to take care of both cases.
if use_stop:
self.addCleanup(self.plugin_notifier_p.stop)
self.addCleanup(self.ofc_manager_p.stop)
def setup_nec_plugin_base(self, use_stop_all=True,
use_stop_each=False):
# If use_stop_each is set, use_stop_all cannot be set.
if use_stop_all and not use_stop_each:
self.addCleanup(mock.patch.stopall)
self._set_nec_ini()
self.addCleanup(self._clean_nec_ini)
self.patch_remote_calls(use_stop_each)
class NecPluginV2TestCase(NecPluginV2TestCaseBase,
test_plugin.NeutronDbPluginV2TestCase):
_plugin_name = PLUGIN_NAME
def rpcapi_update_ports(self, agent_id='nec-q-agent.fake',
datapath_id="0xabc", added=[], removed=[]):
kwargs = {'topic': topics.AGENT,
'agent_id': agent_id,
'datapath_id': datapath_id,
'port_added': added, 'port_removed': removed}
self.callback_nec.update_ports(self.context, **kwargs)
def setUp(self, plugin=None, ext_mgr=None):
self.addCleanup(mock.patch.stopall)
self._set_nec_ini()
self.addCleanup(self._clean_nec_ini)
plugin = plugin or self._plugin_name
super(NecPluginV2TestCase, self).setUp(plugin, ext_mgr=ext_mgr)
self.plugin = manager.NeutronManager.get_plugin()
self.plugin.ofc = fake_ofc_manager.patch_ofc_manager()
self.ofc = self.plugin.ofc
self.callback_nec = nec_plugin.NECPluginV2RPCCallbacks(self.plugin)
self.context = context.get_admin_context()
self.net_create_status = 'ACTIVE'
self.port_create_status = 'DOWN'
class TestNecBasicGet(test_plugin.TestBasicGet, NecPluginV2TestCase):
pass
class TestNecV2HTTPResponse(test_plugin.TestV2HTTPResponse,
NecPluginV2TestCase):
pass
class TestNecPortsV2(test_plugin.TestPortsV2, NecPluginV2TestCase):
def test_delete_ports(self):
with self.subnet() as subnet:
with contextlib.nested(
self.port(subnet=subnet, device_owner='test-owner',
no_delete=True),
self.port(subnet=subnet, device_owner='test-owner',
no_delete=True),
self.port(subnet=subnet, device_owner='other-owner'),
) as (p1, p2, p3):
network_id = subnet['subnet']['network_id']
filters = {'network_id': [network_id],
'device_owner': ['test-owner']}
self.plugin.delete_ports(self.context, filters)
self._show('ports', p1['port']['id'],
expected_code=webob.exc.HTTPNotFound.code)
self._show('ports', p2['port']['id'],
expected_code=webob.exc.HTTPNotFound.code)
self._show('ports', p3['port']['id'],
expected_code=webob.exc.HTTPOk.code)
class TestNecNetworksV2(test_plugin.TestNetworksV2, NecPluginV2TestCase):
pass
class TestNecPortsV2Callback(NecPluginV2TestCase):
def _get_portinfo(self, port_id):
return ndb.get_portinfo(self.context.session, port_id)
def test_portinfo_create(self):
with self.port() as port:
port_id = port['port']['id']
sport = self.plugin.get_port(self.context, port_id)
self.assertEqual(sport['status'], 'DOWN')
self.assertEqual(self.ofc.create_ofc_port.call_count, 0)
self.assertIsNone(self._get_portinfo(port_id))
portinfo = {'id': port_id, 'port_no': 123}
self.rpcapi_update_ports(added=[portinfo])
sport = self.plugin.get_port(self.context, port_id)
self.assertEqual(sport['status'], 'ACTIVE')
self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
self.assertIsNotNone(self._get_portinfo(port_id))
expected = [
mock.call.exists_ofc_port(mock.ANY, port_id),
mock.call.create_ofc_port(mock.ANY, port_id, mock.ANY),
]
self.ofc.assert_has_calls(expected)
def test_portinfo_delete_before_port_deletion(self):
self._test_portinfo_delete()
def test_portinfo_delete_after_port_deletion(self):
self._test_portinfo_delete(portinfo_delete_first=False)
def _test_portinfo_delete(self, portinfo_delete_first=True):
with self.port() as port:
port_id = port['port']['id']
portinfo = {'id': port_id, 'port_no': 456}
self.assertEqual(self.ofc.create_ofc_port.call_count, 0)
self.assertIsNone(self._get_portinfo(port_id))
self.rpcapi_update_ports(added=[portinfo])
self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
self.assertEqual(self.ofc.delete_ofc_port.call_count, 0)
self.assertIsNotNone(self._get_portinfo(port_id))
            # Before the port is deleted, a switch-port-removed message is sent.
if portinfo_delete_first:
self.rpcapi_update_ports(removed=[port_id])
self.assertEqual(self.ofc.delete_ofc_port.call_count, 1)
self.assertIsNone(self._get_portinfo(port_id))
        # The port and its portinfo are expected to be deleted when exiting the with-clause.
self.assertEqual(self.ofc.delete_ofc_port.call_count, 1)
self.assertIsNone(self._get_portinfo(port_id))
if not portinfo_delete_first:
self.rpcapi_update_ports(removed=[port_id])
# Ensure port deletion is called once.
self.assertEqual(self.ofc.delete_ofc_port.call_count, 1)
self.assertIsNone(self._get_portinfo(port_id))
expected = [
mock.call.exists_ofc_port(mock.ANY, port_id),
mock.call.create_ofc_port(mock.ANY, port_id, mock.ANY),
mock.call.exists_ofc_port(mock.ANY, port_id),
mock.call.delete_ofc_port(mock.ANY, port_id, mock.ANY),
]
self.ofc.assert_has_calls(expected)
def test_portinfo_added_unknown_port(self):
portinfo = {'id': 'dummy-p1', 'port_no': 123}
self.rpcapi_update_ports(added=[portinfo])
self.assertIsNone(ndb.get_portinfo(self.context.session,
'dummy-p1'))
self.assertEqual(self.ofc.exists_ofc_port.call_count, 0)
self.assertEqual(self.ofc.create_ofc_port.call_count, 0)
def _test_portinfo_change(self, portinfo_change_first=True):
with self.port() as port:
port_id = port['port']['id']
self.assertEqual(self.ofc.create_ofc_port.call_count, 0)
portinfo = {'id': port_id, 'port_no': 123}
self.rpcapi_update_ports(added=[portinfo])
self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
self.assertEqual(self.ofc.delete_ofc_port.call_count, 0)
self.assertEqual(ndb.get_portinfo(self.context.session,
port_id).port_no, 123)
if portinfo_change_first:
portinfo = {'id': port_id, 'port_no': 456}
self.rpcapi_update_ports(added=[portinfo])
# OFC port is recreated.
self.assertEqual(self.ofc.create_ofc_port.call_count, 2)
self.assertEqual(self.ofc.delete_ofc_port.call_count, 1)
self.assertEqual(ndb.get_portinfo(self.context.session,
port_id).port_no, 456)
if not portinfo_change_first:
            # The port is expected to be deleted when exiting the with-clause.
self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
self.assertEqual(self.ofc.delete_ofc_port.call_count, 1)
portinfo = {'id': port_id, 'port_no': 456}
self.rpcapi_update_ports(added=[portinfo])
# No OFC operations are expected.
self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
self.assertEqual(self.ofc.delete_ofc_port.call_count, 1)
self.assertIsNone(ndb.get_portinfo(self.context.session, port_id))
def test_portinfo_change(self):
self._test_portinfo_change()
def test_portinfo_change_for_nonexisting_port(self):
self._test_portinfo_change(portinfo_change_first=False)
def test_port_migration(self):
agent_id_a, datapath_id_a, port_no_a = 'nec-q-agent.aa', '0xaaa', 10
agent_id_b, datapath_id_b, port_no_b = 'nec-q-agent.bb', '0xbbb', 11
with self.port() as port:
port_id = port['port']['id']
sport = self.plugin.get_port(self.context, port_id)
self.assertEqual(sport['status'], 'DOWN')
portinfo_a = {'id': port_id, 'port_no': port_no_a}
self.rpcapi_update_ports(agent_id=agent_id_a,
datapath_id=datapath_id_a,
added=[portinfo_a])
portinfo_b = {'id': port_id, 'port_no': port_no_b}
self.rpcapi_update_ports(agent_id=agent_id_b,
datapath_id=datapath_id_b,
added=[portinfo_b])
self.rpcapi_update_ports(agent_id=agent_id_a,
datapath_id=datapath_id_a,
removed=[port_id])
sport = self.plugin.get_port(self.context, port_id)
self.assertEqual(sport['status'], 'ACTIVE')
self.assertTrue(self.ofc.ofc_ports[port_id])
expected = [
mock.call.exists_ofc_port(mock.ANY, port_id),
mock.call.create_ofc_port(mock.ANY, port_id, mock.ANY),
mock.call.exists_ofc_port(mock.ANY, port_id),
mock.call.delete_ofc_port(mock.ANY, port_id, mock.ANY),
mock.call.exists_ofc_port(mock.ANY, port_id),
mock.call.create_ofc_port(mock.ANY, port_id, mock.ANY),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(2, self.ofc.create_ofc_port.call_count)
self.assertEqual(1, self.ofc.delete_ofc_port.call_count)
def test_portinfo_readd(self):
with self.port() as port:
port_id = port['port']['id']
self.plugin.get_port(self.context, port_id)
portinfo = {'id': port_id, 'port_no': 123}
self.rpcapi_update_ports(added=[portinfo])
sport = self.plugin.get_port(self.context, port_id)
self.assertEqual(sport['status'], 'ACTIVE')
self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
self.assertEqual(self.ofc.delete_ofc_port.call_count, 0)
self.assertIsNotNone(self._get_portinfo(port_id))
portinfo = {'id': port_id, 'port_no': 123}
self.rpcapi_update_ports(added=[portinfo])
sport = self.plugin.get_port(self.context, port_id)
self.assertEqual(sport['status'], 'ACTIVE')
self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
self.assertEqual(self.ofc.delete_ofc_port.call_count, 0)
self.assertIsNotNone(self._get_portinfo(port_id))
class TestNecPluginDbTest(NecPluginV2TestCase):
def test_update_resource(self):
with self.network() as network:
self.assertEqual("ACTIVE", network['network']['status'])
net_id = network['network']['id']
for status in ["DOWN", "BUILD", "ERROR", "ACTIVE"]:
self.plugin._update_resource_status(
self.context, 'network', net_id,
getattr(constants, 'NET_STATUS_%s' % status))
n = self.plugin._get_network(self.context, net_id)
self.assertEqual(status, n.status)
class TestNecPluginOfcManager(NecPluginV2TestCase):
def setUp(self):
super(TestNecPluginOfcManager, self).setUp()
self.ofc = self.plugin.ofc
def _create_resource(self, resource, data):
collection = resource + 's'
data = {resource: data}
req = self.new_create_request(collection, data)
res = self.deserialize(self.fmt, req.get_response(self.api))
return res[resource]
def _update_resource(self, resource, id, data):
collection = resource + 's'
data = {resource: data}
req = self.new_update_request(collection, data, id)
res = self.deserialize(self.fmt, req.get_response(self.api))
return res[resource]
def _show_resource(self, resource, id):
collection = resource + 's'
req = self.new_show_request(collection, id)
res = self.deserialize(self.fmt, req.get_response(self.api))
return res[resource]
def _list_resource(self, resource):
collection = resource + 's'
req = self.new_list_request(collection)
res = req.get_response(self.api)
return res[collection]
def _delete_resource(self, resource, id):
collection = resource + 's'
req = self.new_delete_request(collection, id)
res = req.get_response(self.api)
return res.status_int
def test_create_network(self):
net = None
ctx = mock.ANY
with self.network() as network:
net = network['network']
self.assertEqual(network['network']['status'], 'ACTIVE')
expected = [
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_network(ctx, self._tenant_id, net['id'],
net['name']),
mock.call.delete_ofc_network(ctx, net['id'], mock.ANY),
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.delete_ofc_tenant(ctx, self._tenant_id)
]
self.ofc.assert_has_calls(expected)
def test_create_network_with_admin_state_down(self):
net = None
ctx = mock.ANY
with self.network(admin_state_up=False) as network:
net = network['network']
self.assertEqual(network['network']['status'], 'DOWN')
expected = [
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_network(ctx, self._tenant_id, net['id'],
net['name']),
mock.call.delete_ofc_network(ctx, net['id'], mock.ANY),
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.delete_ofc_tenant(ctx, self._tenant_id)
]
self.ofc.assert_has_calls(expected)
def test_create_two_network(self):
nets = []
ctx = mock.ANY
with self.network() as net1:
nets.append(net1['network'])
self.assertEqual(net1['network']['status'], 'ACTIVE')
with self.network() as net2:
nets.append(net2['network'])
self.assertEqual(net2['network']['status'], 'ACTIVE')
expected = [
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_network(ctx, self._tenant_id, nets[0]['id'],
nets[0]['name']),
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_network(ctx, self._tenant_id, nets[1]['id'],
nets[1]['name']),
mock.call.delete_ofc_network(ctx, nets[1]['id'], mock.ANY),
mock.call.delete_ofc_network(ctx, nets[0]['id'], mock.ANY),
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.delete_ofc_tenant(ctx, self._tenant_id)
]
self.ofc.assert_has_calls(expected)
def test_create_network_fail(self):
self.ofc.create_ofc_network.side_effect = nexc.OFCException(
reason='hoge')
net = None
ctx = mock.ANY
        # NOTE: We don't delete the network through the API, but the db will be
        # cleaned up in tearDown(). When OFCManager fails to create a network on
        # OFC, it does not keep an ofc_network entry and will fail to delete the
        # network from OFC. Network deletion is out of scope for this test.
with self.network(do_delete=False) as network:
net = network['network']
self.assertEqual(net['status'], 'ERROR')
net_ref = self._show('networks', net['id'])
self.assertEqual(net_ref['network']['status'], 'ERROR')
expected = [
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_network(ctx, self._tenant_id, net['id'],
net['name'])
]
self.ofc.assert_has_calls(expected)
def test_update_network(self):
net = None
ctx = mock.ANY
with self.network() as network:
net = network['network']
self.assertEqual(network['network']['status'], 'ACTIVE')
net_ref = self._show('networks', net['id'])
self.assertEqual(net_ref['network']['status'], 'ACTIVE')
# Set admin_state_up to False
res = self._update_resource('network', net['id'],
{'admin_state_up': False})
self.assertFalse(res['admin_state_up'])
self.assertEqual(res['status'], 'DOWN')
net_ref = self._show('networks', net['id'])
self.assertEqual(net_ref['network']['status'], 'DOWN')
# Set admin_state_up to True
res = self._update_resource('network', net['id'],
{'admin_state_up': True})
self.assertTrue(res['admin_state_up'])
self.assertEqual(res['status'], 'ACTIVE')
net_ref = self._show('networks', net['id'])
self.assertEqual(net_ref['network']['status'], 'ACTIVE')
expected = [
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_network(ctx, self._tenant_id, net['id'],
net['name']),
mock.call.delete_ofc_network(ctx, net['id'], mock.ANY),
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.delete_ofc_tenant(ctx, self._tenant_id)
]
self.ofc.assert_has_calls(expected)
def test_create_port_no_ofc_creation(self):
net = None
p1 = None
ctx = mock.ANY
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
p1 = port['port']
net_id = port['port']['network_id']
net = self._show_resource('network', net_id)
self.assertEqual(net['status'], 'ACTIVE')
self.assertEqual(p1['status'], 'DOWN')
p1_ref = self._show('ports', p1['id'])
self.assertEqual(p1_ref['port']['status'], 'DOWN')
expected = [
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_network(ctx, self._tenant_id, net['id'],
net['name']),
mock.call.exists_ofc_port(ctx, p1['id']),
mock.call.delete_ofc_network(ctx, net['id'], mock.ANY),
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.delete_ofc_tenant(ctx, self._tenant_id)
]
self.ofc.assert_has_calls(expected)
def test_create_port_with_ofc_creation(self):
net = None
p1 = None
ctx = mock.ANY
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
p1 = port['port']
net_id = port['port']['network_id']
net = self._show_resource('network', net_id)
self.assertEqual(net['status'], 'ACTIVE')
self.assertEqual(p1['status'], 'DOWN')
p1_ref = self._show('ports', p1['id'])
self.assertEqual(p1_ref['port']['status'], 'DOWN')
# Check the port is not created on OFC
self.assertFalse(self.ofc.create_ofc_port.call_count)
# Register portinfo, then the port is created on OFC
portinfo = {'id': p1['id'], 'port_no': 123}
self.rpcapi_update_ports(added=[portinfo])
self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
p1_ref = self._show('ports', p1['id'])
self.assertEqual(p1_ref['port']['status'], 'ACTIVE')
expected = [
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_network(ctx, self._tenant_id, net['id'],
net['name']),
mock.call.exists_ofc_port(ctx, p1['id']),
mock.call.create_ofc_port(ctx, p1['id'], mock.ANY),
mock.call.exists_ofc_port(ctx, p1['id']),
mock.call.delete_ofc_port(ctx, p1['id'], mock.ANY),
mock.call.delete_ofc_network(ctx, net['id'], mock.ANY),
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.delete_ofc_tenant(ctx, self._tenant_id)
]
self.ofc.assert_has_calls(expected)
def test_delete_network_with_dhcp_port(self):
ctx = mock.ANY
with self.network() as network:
with self.subnet(network=network):
net = network['network']
p = self._create_resource('port',
{'network_id': net['id'],
'tenant_id': net['tenant_id'],
'device_owner': 'network:dhcp',
'device_id': 'dhcp-port1'})
# Make sure that the port is created on OFC.
portinfo = {'id': p['id'], 'port_no': 123}
self.rpcapi_update_ports(added=[portinfo])
# In a case of dhcp port, the port is deleted automatically
# when delete_network.
expected = [
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_network(ctx, self._tenant_id,
net['id'], net['name']),
mock.call.exists_ofc_port(ctx, p['id']),
mock.call.create_ofc_port(ctx, p['id'], mock.ANY),
mock.call.exists_ofc_port(ctx, p['id']),
mock.call.delete_ofc_port(ctx, p['id'], mock.ANY),
mock.call.delete_ofc_network(ctx, net['id'], mock.ANY),
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.delete_ofc_tenant(ctx, self._tenant_id)
]
self.ofc.assert_has_calls(expected)
def test_delete_network_with_ofc_deletion_failure(self):
self.ofc.set_raise_exc('delete_ofc_network',
nexc.OFCException(reason='hoge'))
with self.network() as net:
net_id = net['network']['id']
self._delete('networks', net_id,
expected_code=webob.exc.HTTPInternalServerError.code)
net_ref = self._show('networks', net_id)
self.assertEqual(net_ref['network']['status'], 'ERROR')
self.ofc.set_raise_exc('delete_ofc_network', None)
ctx = mock.ANY
tenant = mock.ANY
net_name = mock.ANY
net = mock.ANY
expected = [
mock.call.create_ofc_network(ctx, tenant, net_id, net_name),
mock.call.delete_ofc_network(ctx, net_id, net),
mock.call.delete_ofc_network(ctx, net_id, net),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(self.ofc.delete_ofc_network.call_count, 2)
def test_delete_network_with_deactivating_auto_delete_port_failure(self):
self.ofc.set_raise_exc('delete_ofc_port',
nexc.OFCException(reason='hoge'))
with self.network(do_delete=False) as net:
net_id = net['network']['id']
device_owner = db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS[0]
port = self._make_port(self.fmt, net_id, device_owner=device_owner)
port_id = port['port']['id']
portinfo = {'id': port_id, 'port_no': 123}
self.rpcapi_update_ports(added=[portinfo])
self._delete('networks', net_id,
expected_code=webob.exc.HTTPInternalServerError.code)
net_ref = self._show('networks', net_id)
self.assertEqual(net_ref['network']['status'], 'ACTIVE')
port_ref = self._show('ports', port_id)
self.assertEqual(port_ref['port']['status'], 'ERROR')
self.ofc.set_raise_exc('delete_ofc_port', None)
self._delete('networks', net_id)
ctx = mock.ANY
tenant = mock.ANY
net_name = mock.ANY
net = mock.ANY
port = mock.ANY
expected = [
mock.call.create_ofc_network(ctx, tenant, net_id, net_name),
mock.call.exists_ofc_port(ctx, port_id),
mock.call.create_ofc_port(ctx, port_id, port),
mock.call.exists_ofc_port(ctx, port_id),
mock.call.delete_ofc_port(ctx, port_id, port),
mock.call.exists_ofc_port(ctx, port_id),
mock.call.delete_ofc_port(ctx, port_id, port),
mock.call.delete_ofc_network(ctx, net_id, net)
]
self.ofc.assert_has_calls(expected)
self.assertEqual(self.ofc.delete_ofc_network.call_count, 1)
def test_update_port(self):
self._test_update_port_with_admin_state(resource='port')
def test_update_network_with_ofc_port(self):
self._test_update_port_with_admin_state(resource='network')
def _test_update_port_with_admin_state(self, resource='port'):
net = None
p1 = None
ctx = mock.ANY
if resource == 'network':
net_ini_admin_state = False
port_ini_admin_state = True
else:
net_ini_admin_state = True
port_ini_admin_state = False
with self.network(admin_state_up=net_ini_admin_state) as network:
with self.subnet(network=network) as subnet:
with self.port(subnet=subnet,
admin_state_up=port_ini_admin_state) as port:
p1 = port['port']
net_id = port['port']['network_id']
res_id = net_id if resource == 'network' else p1['id']
self.assertEqual(p1['status'], 'DOWN')
net = self._show_resource('network', net_id)
# Check the port is not created on OFC
self.assertFalse(self.ofc.create_ofc_port.call_count)
# Register portinfo, then the port is created on OFC
portinfo = {'id': p1['id'], 'port_no': 123}
self.rpcapi_update_ports(added=[portinfo])
self.assertFalse(self.ofc.create_ofc_port.call_count)
res = self._update_resource(resource, res_id,
{'admin_state_up': True})
self.assertEqual(res['status'], 'ACTIVE')
self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
self.assertFalse(self.ofc.delete_ofc_port.call_count)
res = self._update_resource(resource, res_id,
{'admin_state_up': False})
self.assertEqual(res['status'], 'DOWN')
self.assertEqual(self.ofc.delete_ofc_port.call_count, 1)
expected = [
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_tenant(ctx, self._tenant_id),
mock.call.create_ofc_network(ctx, self._tenant_id, net['id'],
net['name']),
mock.call.exists_ofc_port(ctx, p1['id']),
mock.call.create_ofc_port(ctx, p1['id'], mock.ANY),
mock.call.exists_ofc_port(ctx, p1['id']),
mock.call.delete_ofc_port(ctx, p1['id'], mock.ANY),
mock.call.exists_ofc_port(ctx, p1['id']),
mock.call.delete_ofc_network(ctx, net['id'], mock.ANY),
mock.call.exists_ofc_tenant(ctx, self._tenant_id),
mock.call.delete_ofc_tenant(ctx, self._tenant_id)
]
self.ofc.assert_has_calls(expected)
def test_update_port_with_ofc_creation_failure(self):
with self.port(admin_state_up=False) as port:
port_id = port['port']['id']
portinfo = {'id': port_id, 'port_no': 123}
self.rpcapi_update_ports(added=[portinfo])
self.ofc.set_raise_exc('create_ofc_port',
nexc.OFCException(reason='hoge'))
body = {'port': {'admin_state_up': True}}
res = self._update('ports', port_id, body)
self.assertEqual(res['port']['status'], 'ERROR')
port_ref = self._show('ports', port_id)
self.assertEqual(port_ref['port']['status'], 'ERROR')
body = {'port': {'admin_state_up': False}}
res = self._update('ports', port_id, body)
self.assertEqual(res['port']['status'], 'ERROR')
port_ref = self._show('ports', port_id)
self.assertEqual(port_ref['port']['status'], 'ERROR')
self.ofc.set_raise_exc('create_ofc_port', None)
body = {'port': {'admin_state_up': True}}
res = self._update('ports', port_id, body)
self.assertEqual(res['port']['status'], 'ACTIVE')
port_ref = self._show('ports', port_id)
self.assertEqual(port_ref['port']['status'], 'ACTIVE')
ctx = mock.ANY
port = mock.ANY
expected = [
mock.call.exists_ofc_port(ctx, port_id),
mock.call.create_ofc_port(ctx, port_id, port),
mock.call.exists_ofc_port(ctx, port_id),
mock.call.exists_ofc_port(ctx, port_id),
mock.call.create_ofc_port(ctx, port_id, port),
mock.call.exists_ofc_port(ctx, port_id),
mock.call.delete_ofc_port(ctx, port_id, port),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(self.ofc.create_ofc_port.call_count, 2)
def test_update_port_with_ofc_deletion_failure(self):
with self.port() as port:
port_id = port['port']['id']
portinfo = {'id': port_id, 'port_no': 123}
self.rpcapi_update_ports(added=[portinfo])
self.ofc.set_raise_exc('delete_ofc_port',
nexc.OFCException(reason='hoge'))
body = {'port': {'admin_state_up': False}}
res = self._update('ports', port_id, body)
self.assertEqual(res['port']['status'], 'ERROR')
port_ref = self._show('ports', port_id)
self.assertEqual(port_ref['port']['status'], 'ERROR')
body = {'port': {'admin_state_up': True}}
res = self._update('ports', port_id, body)
self.assertEqual(res['port']['status'], 'ERROR')
port_ref = self._show('ports', port_id)
self.assertEqual(port_ref['port']['status'], 'ERROR')
self.ofc.set_raise_exc('delete_ofc_port', None)
body = {'port': {'admin_state_up': False}}
res = self._update('ports', port_id, body)
self.assertEqual(res['port']['status'], 'DOWN')
port_ref = self._show('ports', port_id)
self.assertEqual(port_ref['port']['status'], 'DOWN')
ctx = mock.ANY
port = mock.ANY
expected = [
mock.call.exists_ofc_port(ctx, port_id),
mock.call.create_ofc_port(ctx, port_id, port),
mock.call.exists_ofc_port(ctx, port_id),
mock.call.delete_ofc_port(ctx, port_id, port),
mock.call.exists_ofc_port(ctx, port_id),
mock.call.exists_ofc_port(ctx, port_id),
mock.call.delete_ofc_port(ctx, port_id, port),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(self.ofc.delete_ofc_port.call_count, 2)
def test_delete_port_with_ofc_deletion_failure(self):
self.ofc.set_raise_exc('delete_ofc_port',
nexc.OFCException(reason='hoge'))
with self.port() as port:
port_id = port['port']['id']
portinfo = {'id': port_id, 'port_no': 123}
self.rpcapi_update_ports(added=[portinfo])
self._delete('ports', port_id,
expected_code=webob.exc.HTTPInternalServerError.code)
port_ref = self._show('ports', port_id)
self.assertEqual(port_ref['port']['status'], 'ERROR')
self.ofc.set_raise_exc('delete_ofc_port', None)
ctx = mock.ANY
port = mock.ANY
expected = [
mock.call.exists_ofc_port(ctx, port_id),
mock.call.create_ofc_port(ctx, port_id, port),
mock.call.exists_ofc_port(ctx, port_id),
mock.call.delete_ofc_port(ctx, port_id, port),
mock.call.exists_ofc_port(ctx, port_id),
mock.call.delete_ofc_port(ctx, port_id, port)
]
self.ofc.assert_has_calls(expected)
self.assertEqual(self.ofc.delete_ofc_port.call_count, 2)
def _test_delete_port_for_disappeared_ofc_port(self, raised_exc):
self.ofc.set_raise_exc('delete_ofc_port', raised_exc)
with self.port(no_delete=True) as port:
port_id = port['port']['id']
portinfo = {'id': port_id, 'port_no': 123}
self.rpcapi_update_ports(added=[portinfo])
self._delete('ports', port_id)
            # Check that the port is deleted from the neutron db. NotFound for
            # the neutron port itself should be handled by the caller. This is
            # consistent with ML2 behavior, but it may need to be revisited.
self._show('ports', port_id,
expected_code=webob.exc.HTTPNotFound.code)
ctx = mock.ANY
port = mock.ANY
expected = [
mock.call.exists_ofc_port(ctx, port_id),
mock.call.create_ofc_port(ctx, port_id, port),
mock.call.exists_ofc_port(ctx, port_id),
mock.call.delete_ofc_port(ctx, port_id, port),
]
self.ofc.assert_has_calls(expected)
self.assertEqual(self.ofc.delete_ofc_port.call_count, 1)
def test_delete_port_for_nonexist_ofc_port(self):
self._test_delete_port_for_disappeared_ofc_port(
nexc.OFCResourceNotFound(resource='ofc_port'))
def test_delete_port_for_noofcmap_ofc_port(self):
self._test_delete_port_for_disappeared_ofc_port(
nexc.OFCMappingNotFound(resource='port', neutron_id='port1'))
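
# --- Illustrative sketch (standalone, not part of the NEC tests above): the
# patcher start/cleanup pattern that patch_remote_calls relies on. The patched
# target (os.getcwd) is an arbitrary stand-in chosen for this demo.
import unittest

class PatcherLifecycleExample(unittest.TestCase):
    def test_patch_lifecycle(self):
        patcher = mock.patch("os.getcwd", return_value="/tmp")
        patcher.start()
        # Register the matching stop, or rely on a single mock.patch.stopall
        # cleanup -- but not both in one test, which is exactly what the
        # comments in patch_remote_calls warn about.
        self.addCleanup(patcher.stop)
        self.assertEqual(os.getcwd(), "/tmp")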
|
import requests
url = "https://maps.googleapis.com/maps/api/distancematrix/json?origins=Seattle%2C%20WA&destinations=North%20Fork%2C%20WA&avoid=highways&units=imperial&arrival_time=1614709737&traffic_model=pessimistic&mode=transit&transit_mode=bus&transit_routing_preference=less_walking&key=YOUR_API_KEY"
payload={}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text)
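
# --- Illustrative alternative (same request; assumes only the requests
# library's standard params API): letting requests encode the query string is
# less error-prone than hand-assembling the URL. YOUR_API_KEY is the
# placeholder from the snippet above and must be replaced with a real key.
params = {
    "origins": "Seattle, WA",
    "destinations": "North Fork, WA",
    "avoid": "highways",
    "units": "imperial",
    "arrival_time": "1614709737",
    "traffic_model": "pessimistic",
    "mode": "transit",
    "transit_mode": "bus",
    "transit_routing_preference": "less_walking",
    "key": "YOUR_API_KEY",
}
alt_response = requests.get("https://maps.googleapis.com/maps/api/distancematrix/json", params=params)
print(alt_response.text)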
|
import socket
import struct
import json
import time
import sys
import re
ZABBIX_SERVER = "127.0.0.1"
ZABBIX_PORT = 10051
class ZSend:
def __init__(self, server=ZABBIX_SERVER, port=ZABBIX_PORT, verbose=False):
self.zserver = server
self.zport = port
self.verbose = verbose
self.list = []
self.inittime = int(round(time.time()))
self.clock_flag = False
def add_data(self, host, key, value, clock=None):
obj = {
'host': host,
'key': key,
'value': value,
}
if clock:
obj['clock'] = clock
self.clock_flag = True
self.list.append(obj)
def print_vals(self):
for elem in self.list:
print( u'{0}'.format(elem) )
print( u'Count: {0}'.format(len(self.list)) )
def build_all(self):
send_data = {
"request": "sender data",
"data": [],
}
if self.clock_flag:
send_data['clock'] = self.inittime
send_data['data'] = self.list
return json.dumps(send_data)
def build_single(self, data):
send_data = {
"request": "sender data",
"data": [],
}
if 'clock' in data:
send_data['clock'] = self.inittime
send_data['data'].append(data)
return json.dumps(send_data)
def send(self, mydata):
socket.setdefaulttimeout(5)
data_length = len(mydata)
data_header = '{0}{1}'.format(struct.pack('i', data_length), '\0\0\0\0')
data_to_send = 'ZBXD\1{0}{1}'.format(data_header, mydata)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.zserver, self.zport))
sock.send(data_to_send)
except Exception as err:
err_message = u'Error talking to server: {0}\n'.format(err)
sys.stderr.write(err_message)
return 255, err_message
response_header = sock.recv(5)
        if response_header != 'ZBXD\1':
err_message = u'Invalid response from server. Malformed data?\n---\n{0}\n---\n'.format(mydata)
sys.stderr.write(err_message)
return 254, err_message
response_data_header = sock.recv(8)
response_data_header = response_data_header[:4]
response_len = struct.unpack('i', response_data_header)[0]
response_raw = sock.recv(response_len)
sock.close()
response = json.loads(response_raw)
        match = re.match(r'^.*failed.+?(\d+).*$', response['info'].lower() if 'info' in response else '')
if match is None:
err_message = u'Unable to parse server response - \n{0}\n'.format(response)
sys.stderr.write(err_message)
return 2, response
else:
fails = int(match.group(1))
if fails > 0:
                if self.verbose:
err_message = u'Failures reported by zabbix when sending:\n{0}\n'.format(mydata)
sys.stderr.write(err_message)
return 1, response
return 0, response
def bulk_send(self):
data = self.build_all()
result = self.send(data)
return result
def iter_send(self):
retarray = []
for i in self.list:
(retcode, retstring) = self.send(self.build_single(i))
retarray.append((retcode, i))
return retarray
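
# --- Illustrative usage (assumption: host "demo-host" has a trapper item with
# key "demo.key" on the target Zabbix server).
if __name__ == "__main__":
    z = ZSend(server="127.0.0.1", port=10051, verbose=True)
    z.add_data("demo-host", "demo.key", 42)
    z.add_data("demo-host", "demo.key", 43, clock=int(time.time()))
    retcode, info = z.bulk_send()  # one "sender data" request for all queued values
    print( u'{0} {1}'.format(retcode, info) )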
|
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
if __name__ == "__main__":
APP = os.path.abspath(os.path.dirname(__file__))
sys.path.append(APP)
os.environ['DJANGO_SETTINGS_MODULE'] = 'example.test_settings'
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(["transmogrify"])
sys.exit(bool(failures))
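
# --- Usage note (assumption: this script sits next to an "example" package
# providing example.test_settings): run it directly with the interpreter.
# A non-zero exit status signals test failures, which CI systems pick up.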
|
import re
import sys
import os
def get_root_dir():
script = os.path.realpath(__file__)
return os.path.normpath(os.path.join(os.path.dirname(script), "../.."))
root_dir = get_root_dir()
source_directories = [
os.path.join(root_dir, "Src"),
os.path.join(root_dir, "Src", "DLR", "Src"),
]
exclude_directories = [
os.path.join(root_dir, "Src", "DLR"),
os.path.join(root_dir, "Src", "StdLib"),
]
START = "#region Generated %s"
END = "#endregion"
PREFIX = r"^([ \t]*)"
class ConditionWriter:
def __init__(self, cw):
self.cw = cw
self.first = True
def condition(self, text=None, **kw):
if self.first:
self.first = False
self.cw.enter_block(text, **kw)
else:
self.cw.else_block(text, **kw)
def close(self):
if not self.first:
self.cw.exit_block()
class CodeWriter:
def __init__(self, indent=0):
self.lines = []
self.__indent = indent
self.kws = {}
def begin_generated(self, generator):
self.writeline()
self.writeline("// *** BEGIN GENERATED CODE ***")
filename = os.path.basename(generator.__code__.co_filename)
self.writeline("// generated by function: " + generator.__name__ + " from: " + filename)
self.writeline()
def end_generated(self):
self.writeline()
self.writeline("// *** END GENERATED CODE ***")
self.writeline()
def indent(self): self.__indent += 1
def dedent(self): self.__indent -= 1
def writeline(self, text=None):
if text is None or text.strip() == "":
self.lines.append("")
else:
self.lines.append(" "*self.__indent + text)
def write(self, template, **kw):
if kw or self.kws:
kw1 = self.kws.copy()
kw1.update(kw)
#print kw
template = template % kw1
for l in template.split('\n'):
self.writeline(l)
def enter_block(self, text=None, **kw):
if text is not None:
self.write(text + " {", **kw)
self.indent()
def else_block(self, text=None, **kw):
self.dedent()
if text:
self.writeline("} else " + (text % kw) + " {")
else:
self.writeline("} else {")
self.indent()
def case_block(self, text=None, **kw):
self.enter_block(text, **kw)
self.indent()
def case_label(self, text=None, **kw):
self.write(text, **kw)
self.indent()
def exit_case_block(self):
self.exit_block()
self.dedent()
def catch_block(self, text=None, **kw):
self.dedent()
if text:
self.writeline("} catch " + (text % kw) + " {")
else:
self.writeline("} catch {")
self.indent()
def finally_block(self):
self.dedent()
self.writeline("} finally {")
self.indent()
def exit_block(self, text=None, **kw):
self.dedent()
if text:
self.writeline("} " + text, **kw)
else:
self.writeline('}')
def text(self):
return '\n'.join(self.lines)
def conditions(self):
return ConditionWriter(self)
class CodeGenerator:
def __init__(self, name, generator):
self.generator = generator
self.generators = []
self.replacer = BlockReplacer(name)
def do_file(self, filename):
g = FileGenerator(filename, self.generator, self.replacer)
if g.has_match:
self.generators.append(g)
def do_generate(self):
if not self.generators:
raise Exception("didn't find a match for %s" % self.replacer.name)
result = []
for g in self.generators:
result.append(g.generate())
return result
def do_dir(self, dirname):
if dirname in exclude_directories:
return
for file in os.listdir(dirname):
filename = os.path.join(dirname, file)
if os.path.isdir(filename):
self.do_dir(filename)
elif filename.endswith(".cs") and not file == "StandardTestStrings.cs": # TODO: fix encoding of StandardTestStrings.cs
self.do_file(filename)
def doit(self):
for src_dir in source_directories:
self.do_dir(src_dir)
for g in self.generators:
g.collect_info()
return self.do_generate()
class BlockReplacer:
def __init__(self, name):
self.start = START % name
        self.end = END  # unlike START, END has no %s placeholder
#self.block_pat = re.compile(PREFIX+self.start+".*?"+self.end,
# re.DOTALL|re.MULTILINE)
self.name = name
def match(self, text):
#m = self.block_pat.search(text)
#if m is None: return None
#indent = m.group(1)
#return indent
startIndex = text.find(self.start)
if startIndex != -1:
origStart = startIndex
# go to the beginning of the line on which self.start appears
startIndex = text.rfind('\n', 0, startIndex) + 1
# Some simple parsing logic that allows us to use regions within
# our generated code
if self.end == '#endregion':
start = origStart + len(self.start)
regionIndex = -1
endregionIndex = -1
count = 0 # number of '#region' encountered minus '#endregion'
while True:
regionIndex = text.find('#region', start)
endregionIndex = text.find('#endregion', start)
if (endregionIndex >= 0 and
(endregionIndex < regionIndex or regionIndex == -1)):
if count == 0:
endIndex = endregionIndex
break
else:
count -= 1
start = endregionIndex + len("#endregion")
continue
if (regionIndex >= 0 and
(regionIndex < endregionIndex or endregionIndex == -1)):
count += 1
start = regionIndex + len("#region")
continue
if regionIndex == -1 and endregionIndex == -1:
                        # occurrences of '#region' outnumber '#endregion'
endIndex = -1
break
else:
endIndex = text.find(self.end, startIndex)
if endIndex != -1:
indent = text[startIndex:origStart]
return (indent, startIndex, endIndex+len(self.end))
return None
def replace(self, cw, text, indent):
code = cw.lines
code.insert(0, self.start)
code.append(self.end)
def should_indent(line):
if not line: return False
if line.startswith("#region"): return True
if line.startswith("#endregion"): return True
if line.startswith("#"): return False
return True
#code_text = '\n' + indent
code_text = indent[0]
delim = False
for line in code:
if delim:
code_text += "\n"
if should_indent(line):
code_text += indent[0]
code_text += line
delim = True
#return self.block_pat.sub(code_text, text)
#indicies = self.match(text)
res = text[0:indent[1]] + code_text + text[indent[2]:len(text)]
return res
def save_file(name, text):
f = open(name, 'w', encoding='latin-1')
f.write(text)
f.close()
def texts_are_equivalent(texta, textb):
"""Compares two program texts by removing all identation and
blank lines first."""
def normalized_lines(text):
for l in text.splitlines():
l = l.strip()
if l:
yield l
texta = "\n".join(normalized_lines(texta))
textb = "\n".join(normalized_lines(textb))
return texta == textb
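# Example: indentation and blank lines are ignored, so
#   texts_are_equivalent("foo {\n    bar;\n}\n\n", "foo {\nbar;\n}")  # -> True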
class FileGenerator:
def __init__(self, filename, generator, replacer):
self.filename = filename
self.generator = generator
self.replacer = replacer
with open(filename, encoding='latin-1') as thefile:
self.text = thefile.read()
self.indent = self.replacer.match(self.text)
self.has_match = self.indent is not None
def collect_info(self):
pass
def generate(self):
print("generate", end=' ')
if sys.argv.count('checkonly') > 0:
print("(check-only)", end=' ')
print(self.filename, "...", end=' ')
cw = CodeWriter()
cw.text = self.replacer.replace(CodeWriter(), self.text, self.indent)
cw.begin_generated(self.generator)
self.generator(cw)
cw.end_generated()
new_text = self.replacer.replace(cw, self.text, self.indent)
if not texts_are_equivalent(self.text, new_text):
if sys.argv.count('checkonly') > 0:
print("different!")
name = self.filename + ".diff"
print(" generated file saved as: " + name)
save_file(name, new_text)
return False
else:
print("updated")
save_file(self.filename, new_text)
else:
print("ok")
return True
def generate(*g):
result = []
for name, func in g:
run = CodeGenerator(name, func).doit()
result.extend(run)
return result
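# Wiring sketch (gen_ops is a hypothetical generator callable; each generator
# receives the CodeWriter defined above and emits lines via writeline()):
#   def gen_ops(cw):
#       for op in ("Add", "Sub"):
#           cw.writeline("// generated stub for %s" % op)
#   generate(("generated ops", gen_ops))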
|
import inspect
import types
from context import *
from exceptions import YaqlExecutionException, NoFunctionRegisteredException
import yaql
class Expression(object):
class Callable(object):
def __init__(self, wrapped_object, context):
self.wrapped_object = wrapped_object
self.yaql_context = context
def evaluate(self, data=None, context=None):
if not context:
context = Context(yaql.create_context())
if data:
context.set_data(data)
f = self.create_callable(context)
# noinspection PyCallingNonCallable
return f()
def create_callable(self, context):
pass
class Function(Expression):
def __init__(self, name, obj, *args):
self.name = name
self.object = obj
self.args = args
class Callable(Expression.Callable):
def __init__(self, wrapped, context, function_name, obj, args):
super(Function.Callable, self).__init__(wrapped, None)
self.function_name = function_name
self.obj = obj
self.args = args
if obj:
self.obj_wrapper = self.obj.create_callable(context)
if self.obj_wrapper.yaql_context:
self.yaql_context = Context(self.obj_wrapper.yaql_context)
else:
self.yaql_context = context
else:
self.yaql_context = context
self.obj_wrapper = None
def __call__(self, *f_params):
args_to_pass = []
context = self.yaql_context
if f_params:
param_context = self._find_param_context()
param_context.set_data(f_params[0])
for i in range(0, len(f_params)):
param_context.set_data(f_params[i], '$' + str(i + 1))
this = None
if self.obj_wrapper:
this = self.obj_wrapper()
args_to_pass.append(lambda: this)
for arg in self.args:
argContext = Context(context)
wrapped_arg = arg.create_callable(argContext)
args_to_pass.append(wrapped_arg)
numArg = len(args_to_pass)
fs = []
if not self.function_name.startswith('#'):
resolvers = self.yaql_context.get_functions('#resolve', 2)
if resolvers:
try:
fs = self._try_invoke(
resolvers,
[self.function_name, this], context) or []
if not isinstance(fs, types.ListType):
fs = [fs]
except YaqlExecutionException:
fs = []
fs.extend(self.yaql_context.get_functions(
self.function_name, numArg))
if not fs:
raise NoFunctionRegisteredException(self.function_name, numArg)
try:
return self._try_invoke(fs, args_to_pass, context)
except YaqlExecutionException:
raise YaqlExecutionException(
'Unable to run ' + self.function_name)
def _try_invoke(self, funcs, args, context):
for func in funcs:
try:
args_to_pass = pre_process_args(func, args)
if hasattr(func, 'is_context_aware'):
return func(context, *args_to_pass)
else:
return func(*args_to_pass)
except YaqlExecutionException:
continue
raise YaqlExecutionException()
def _find_param_context(self):
context = self.yaql_context
wrapper = self.obj_wrapper
while wrapper:
context = wrapper.yaql_context
wrapper = getattr(wrapper, 'obj_wrapper', None)
return context
def create_callable(self, context):
return Function.Callable(self, context, self.name, self.object,
self.args)
class BinaryOperator(Function):
def __init__(self, op, obj1, obj2):
super(BinaryOperator, self).__init__("#operator_" + op, None, obj1,
obj2)
class UnaryOperator(Function):
def __init__(self, op, obj):
super(UnaryOperator, self).__init__("#operator_" + op, obj)
class Att(Function):
def __init__(self, obj, att):
super(Att, self).__init__('#operator_.', obj, att)
class Filter(Function):
def __init__(self, obj, expression):
super(Filter, self).__init__("where", obj, expression)
class Tuple(Function):
def __init__(self, left, right):
super(Tuple, self).__init__('tuple', None, left, right)
@staticmethod
def create_tuple(left, right):
if isinstance(left, Tuple):
new_args = list(left.args)
new_args.append(right)
left.args = tuple(new_args)
return left
else:
return Tuple(left, right)
class Wrap(Function):
def __init__(self, content):
super(Wrap, self).__init__('#wrap', None, content)
class GetContextValue(Function):
def __init__(self, path):
super(GetContextValue, self).__init__("#get_context_data", None,
path)
self.path = path
def __str__(self):
return self.path
class Constant(Expression):
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
class Callable(Expression.Callable):
def __init__(self, wrapped, value):
super(Constant.Callable, self).__init__(wrapped, None)
self.value = value
# noinspection PyUnusedLocal
def __call__(self, *args):
return self.value
def create_callable(self, context):
return Constant.Callable(self, self.value)
def pre_process_args(func, args):
result = args[:]
if hasattr(func, 'arg_requirements'):
if hasattr(func, 'is_context_aware'):
ca = func.context_aware
att_map = ca.map_args(args)
else:
att_map = {}
arg_names = inspect.getargspec(func).args
for i, arg_name in enumerate(arg_names):
att_map[arg_name] = args[i]
for arg_name in func.arg_requirements:
arg_func = att_map[arg_name]
try:
arg_val = arg_func()
            except Exception:
raise YaqlExecutionException(
"Unable to evaluate argument {0}".format(arg_name))
arg_type = func.arg_requirements[arg_name].arg_type
custom_validator = func.arg_requirements[arg_name].custom_validator
ok = True
if arg_type:
ok = ok and isinstance(arg_val, arg_type)
if type(arg_val) == types.BooleanType:
ok = ok and type(arg_val) == arg_type
if custom_validator:
ok = ok and custom_validator(arg_val)
if not ok:
raise YaqlExecutionException(
"Argument {0} is invalid".format(arg_name))
result[args.index(arg_func)] = arg_val
return tuple(result)
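# Minimal usage sketch (assumes the classic yaql API, in which yaql.parse()
# builds an Expression tree out of the node classes defined above):
#   expression = yaql.parse('$.a + $.b')
#   print expression.evaluate({'a': 1, 'b': 2})  # -> 3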
|
import sys
from contextlib import redirect_stdout, redirect_stderr
from datetime import datetime
import copy
import io
import logging
import glob
import os
import pickle
import platform
import pandas as pd
from ray.tune.utils.util import Tee
from six import string_types
import shutil
import tempfile
import time
import uuid
import ray
from ray.util.debug import log_once
from ray.tune.logger import UnifiedLogger
from ray.tune.result import (
DEFAULT_RESULTS_DIR, TIME_THIS_ITER_S, TIMESTEPS_THIS_ITER, DONE,
TIMESTEPS_TOTAL, EPISODES_THIS_ITER, EPISODES_TOTAL, TRAINING_ITERATION,
RESULT_DUPLICATE, TRIAL_INFO, STDOUT_FILE, STDERR_FILE)
from ray.tune.utils import UtilMonitor
logger = logging.getLogger(__name__)
SETUP_TIME_THRESHOLD = 10
class TrainableUtil:
@staticmethod
def process_checkpoint(checkpoint, parent_dir, trainable_state):
saved_as_dict = False
if isinstance(checkpoint, string_types):
if not checkpoint.startswith(parent_dir):
raise ValueError(
"The returned checkpoint path must be within the "
"given checkpoint dir {}: {}".format(
parent_dir, checkpoint))
checkpoint_path = checkpoint
if os.path.isdir(checkpoint_path):
# Add trailing slash to prevent tune metadata from
# being written outside the directory.
checkpoint_path = os.path.join(checkpoint_path, "")
elif isinstance(checkpoint, dict):
saved_as_dict = True
checkpoint_path = os.path.join(parent_dir, "checkpoint")
with open(checkpoint_path, "wb") as f:
pickle.dump(checkpoint, f)
else:
raise ValueError("Returned unexpected type {}. "
"Expected str or dict.".format(type(checkpoint)))
with open(checkpoint_path + ".tune_metadata", "wb") as f:
trainable_state["saved_as_dict"] = saved_as_dict
pickle.dump(trainable_state, f)
return checkpoint_path
@staticmethod
def pickle_checkpoint(checkpoint_path):
"""Pickles checkpoint data."""
checkpoint_dir = TrainableUtil.find_checkpoint_dir(checkpoint_path)
data = {}
for basedir, _, file_names in os.walk(checkpoint_dir):
for file_name in file_names:
path = os.path.join(basedir, file_name)
with open(path, "rb") as f:
data[os.path.relpath(path, checkpoint_dir)] = f.read()
# Use normpath so that a directory path isn't mapped to empty string.
name = os.path.relpath(
os.path.normpath(checkpoint_path), checkpoint_dir)
name += os.path.sep if os.path.isdir(checkpoint_path) else ""
data_dict = pickle.dumps({
"checkpoint_name": name,
"data": data,
})
return data_dict
@staticmethod
def checkpoint_to_object(checkpoint_path):
data_dict = TrainableUtil.pickle_checkpoint(checkpoint_path)
out = io.BytesIO()
if len(data_dict) > 10e6: # getting pretty large
logger.info("Checkpoint size is {} bytes".format(len(data_dict)))
out.write(data_dict)
return out.getvalue()
@staticmethod
def find_checkpoint_dir(checkpoint_path):
"""Returns the directory containing the checkpoint path.
Raises:
FileNotFoundError if the directory is not found.
"""
if not os.path.exists(checkpoint_path):
raise FileNotFoundError("Path does not exist", checkpoint_path)
if os.path.isdir(checkpoint_path):
checkpoint_dir = checkpoint_path
else:
checkpoint_dir = os.path.dirname(checkpoint_path)
while checkpoint_dir != os.path.dirname(checkpoint_dir):
if os.path.exists(os.path.join(checkpoint_dir, ".is_checkpoint")):
break
checkpoint_dir = os.path.dirname(checkpoint_dir)
else:
raise FileNotFoundError("Checkpoint directory not found for {}"
.format(checkpoint_path))
return checkpoint_dir
@staticmethod
def make_checkpoint_dir(checkpoint_dir, index, override=False):
"""Creates a checkpoint directory within the provided path.
Args:
checkpoint_dir (str): Path to checkpoint directory.
index (str): A subdirectory will be created
at the checkpoint directory named 'checkpoint_{index}'.
override (bool): Deletes checkpoint_dir before creating
a new one.
"""
suffix = "checkpoint"
if index is not None:
suffix += "_{}".format(index)
checkpoint_dir = os.path.join(checkpoint_dir, suffix)
if override and os.path.exists(checkpoint_dir):
shutil.rmtree(checkpoint_dir)
os.makedirs(checkpoint_dir, exist_ok=True)
# Drop marker in directory to identify it as a checkpoint dir.
open(os.path.join(checkpoint_dir, ".is_checkpoint"), "a").close()
return checkpoint_dir
@staticmethod
def create_from_pickle(obj, tmpdir):
info = pickle.loads(obj)
data = info["data"]
checkpoint_path = os.path.join(tmpdir, info["checkpoint_name"])
for relpath_name, file_contents in data.items():
path = os.path.join(tmpdir, relpath_name)
# This may be a subdirectory, hence not just using tmpdir
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "wb") as f:
f.write(file_contents)
return checkpoint_path
@staticmethod
def get_checkpoints_paths(logdir):
""" Finds the checkpoints within a specific folder.
Returns a pandas DataFrame of training iterations and checkpoint
paths within a specific folder.
Raises:
FileNotFoundError if the directory is not found.
"""
marker_paths = glob.glob(
os.path.join(logdir, "checkpoint_*/.is_checkpoint"))
iter_chkpt_pairs = []
for marker_path in marker_paths:
chkpt_dir = os.path.dirname(marker_path)
metadata_file = glob.glob(
os.path.join(chkpt_dir, "*.tune_metadata"))
if len(metadata_file) != 1:
raise ValueError(
"{} has zero or more than one tune_metadata.".format(
chkpt_dir))
chkpt_path = metadata_file[0][:-len(".tune_metadata")]
chkpt_iter = int(chkpt_dir[chkpt_dir.rfind("_") + 1:])
iter_chkpt_pairs.append([chkpt_iter, chkpt_path])
chkpt_df = pd.DataFrame(
iter_chkpt_pairs, columns=["training_iteration", "chkpt_path"])
return chkpt_df
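    # Illustrative on-disk layout produced/consumed by the helpers above
    # (names are examples only):
    #   <logdir>/checkpoint_3/.is_checkpoint
    #   <logdir>/checkpoint_3/checkpoint                 (dict checkpoints)
    #   <logdir>/checkpoint_3/checkpoint.tune_metadata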
class Trainable:
"""Abstract class for trainable models, functions, etc.
A call to ``train()`` on a trainable will execute one logical iteration of
training. As a rule of thumb, the execution time of one train call should
be large enough to avoid overheads (i.e. more than a few seconds), but
short enough to report progress periodically (i.e. at most a few minutes).
Calling ``save()`` should save the training state of a trainable to disk,
and ``restore(path)`` should restore a trainable to the given state.
Generally you only need to implement ``setup``, ``step``,
``save_checkpoint``, and ``load_checkpoint`` when subclassing Trainable.
Other implementation methods that may be helpful to override are
``log_result``, ``reset_config``, ``cleanup``, and ``_export_model``.
When using Tune, Tune will convert this class into a Ray actor, which
runs on a separate process. Tune will also change the current working
directory of this process to ``self.logdir``.
"""
def __init__(self, config=None, logger_creator=None):
"""Initialize an Trainable.
Sets up logging and points ``self.logdir`` to a directory in which
training outputs should be placed.
        Subclasses should prefer defining ``setup()`` instead of
        overriding ``__init__()`` directly.
Args:
config (dict): Trainable-specific configuration data. By default
will be saved as ``self.config``.
logger_creator (func): Function that creates a ray.tune.Logger
object. If unspecified, a default logger is created.
"""
self._experiment_id = uuid.uuid4().hex
self.config = config or {}
trial_info = self.config.pop(TRIAL_INFO, None)
self._result_logger = self._logdir = None
self._create_logger(self.config, logger_creator)
self._stdout_context = self._stdout_fp = self._stdout_stream = None
self._stderr_context = self._stderr_fp = self._stderr_stream = None
self._stderr_logging_handler = None
stdout_file = self.config.pop(STDOUT_FILE, None)
stderr_file = self.config.pop(STDERR_FILE, None)
self._open_logfiles(stdout_file, stderr_file)
self._iteration = 0
self._time_total = 0.0
self._timesteps_total = None
self._episodes_total = None
self._time_since_restore = 0.0
self._timesteps_since_restore = 0
self._iterations_since_restore = 0
self._restored = False
self._trial_info = trial_info
start_time = time.time()
self.setup(copy.deepcopy(self.config))
setup_time = time.time() - start_time
if setup_time > SETUP_TIME_THRESHOLD:
logger.info("Trainable.setup took {:.3f} seconds. If your "
"trainable is slow to initialize, consider setting "
"reuse_actors=True to reduce actor creation "
"overheads.".format(setup_time))
self._local_ip = self.get_current_ip()
log_sys_usage = self.config.get("log_sys_usage", False)
self._monitor = UtilMonitor(start=log_sys_usage)
@classmethod
def default_resource_request(cls, config):
"""Provides a static resource requirement for the given configuration.
This can be overridden by sub-classes to set the correct trial resource
allocation, so the user does not need to.
.. code-block:: python
@classmethod
def default_resource_request(cls, config):
return Resources(
cpu=0,
gpu=0,
extra_cpu=config["workers"],
extra_gpu=int(config["use_gpu"]) * config["workers"])
Returns:
Resources: A Resources object consumed by Tune for queueing.
"""
return None
@classmethod
def resource_help(cls, config):
"""Returns a help string for configuring this trainable's resources.
Args:
config (dict): The Trainer's config dict.
"""
return ""
def get_current_ip(self):
self._local_ip = ray.services.get_node_ip_address()
return self._local_ip
def train(self):
"""Runs one logical iteration of training.
Calls ``step()`` internally. Subclasses should override ``step()``
instead to return results.
This method automatically fills the following fields in the result:
`done` (bool): training is terminated. Filled only if not provided.
`time_this_iter_s` (float): Time in seconds this iteration
            took to run. This may be overridden to replace the
            system-computed time difference.
`time_total_s` (float): Accumulated time in seconds for this
entire experiment.
`experiment_id` (str): Unique string identifier
for this experiment. This id is preserved
across checkpoint / restore calls.
`training_iteration` (int): The index of this
training iteration, e.g. call to train(). This is incremented
after `step()` is called.
`pid` (str): The pid of the training process.
`date` (str): A formatted date of when the result was processed.
`timestamp` (str): A UNIX timestamp of when the result
was processed.
`hostname` (str): Hostname of the machine hosting the training
process.
`node_ip` (str): Node ip of the machine hosting the training
process.
Returns:
A dict that describes training progress.
"""
start = time.time()
result = self.step()
assert isinstance(result, dict), "step() needs to return a dict."
# We do not modify internal state nor update this result if duplicate.
if RESULT_DUPLICATE in result:
return result
result = result.copy()
self._iteration += 1
self._iterations_since_restore += 1
if result.get(TIME_THIS_ITER_S) is not None:
time_this_iter = result[TIME_THIS_ITER_S]
else:
time_this_iter = time.time() - start
self._time_total += time_this_iter
self._time_since_restore += time_this_iter
result.setdefault(DONE, False)
# self._timesteps_total should only be tracked if increments provided
if result.get(TIMESTEPS_THIS_ITER) is not None:
if self._timesteps_total is None:
self._timesteps_total = 0
self._timesteps_total += result[TIMESTEPS_THIS_ITER]
self._timesteps_since_restore += result[TIMESTEPS_THIS_ITER]
# self._episodes_total should only be tracked if increments provided
if result.get(EPISODES_THIS_ITER) is not None:
if self._episodes_total is None:
self._episodes_total = 0
self._episodes_total += result[EPISODES_THIS_ITER]
# self._timesteps_total should not override user-provided total
result.setdefault(TIMESTEPS_TOTAL, self._timesteps_total)
result.setdefault(EPISODES_TOTAL, self._episodes_total)
result.setdefault(TRAINING_ITERATION, self._iteration)
# Provides auto-filled neg_mean_loss for avoiding regressions
if result.get("mean_loss"):
result.setdefault("neg_mean_loss", -result["mean_loss"])
now = datetime.today()
result.update(
experiment_id=self._experiment_id,
date=now.strftime("%Y-%m-%d_%H-%M-%S"),
timestamp=int(time.mktime(now.timetuple())),
time_this_iter_s=time_this_iter,
time_total_s=self._time_total,
pid=os.getpid(),
hostname=platform.node(),
node_ip=self._local_ip,
config=self.config,
time_since_restore=self._time_since_restore,
timesteps_since_restore=self._timesteps_since_restore,
iterations_since_restore=self._iterations_since_restore)
monitor_data = self._monitor.get_data()
if monitor_data:
result.update(monitor_data)
self.log_result(result)
if self._stdout_context:
self._stdout_stream.flush()
if self._stderr_context:
self._stderr_stream.flush()
return result
def get_state(self):
return {
"experiment_id": self._experiment_id,
"iteration": self._iteration,
"timesteps_total": self._timesteps_total,
"time_total": self._time_total,
"episodes_total": self._episodes_total,
"ray_version": ray.__version__,
}
def save(self, checkpoint_dir=None):
"""Saves the current model state to a checkpoint.
        Subclasses should override ``save_checkpoint()`` instead to save state.
This method dumps additional metadata alongside the saved path.
Args:
checkpoint_dir (str): Optional dir to place the checkpoint.
Returns:
str: Checkpoint path or prefix that may be passed to restore().
"""
checkpoint_dir = TrainableUtil.make_checkpoint_dir(
checkpoint_dir or self.logdir, index=self.iteration)
checkpoint = self.save_checkpoint(checkpoint_dir)
trainable_state = self.get_state()
checkpoint_path = TrainableUtil.process_checkpoint(
checkpoint,
parent_dir=checkpoint_dir,
trainable_state=trainable_state)
return checkpoint_path
def save_to_object(self):
"""Saves the current model state to a Python object.
It also saves to disk but does not return the checkpoint path.
Returns:
Object holding checkpoint data.
"""
tmpdir = tempfile.mkdtemp("save_to_object", dir=self.logdir)
checkpoint_path = self.save(tmpdir)
# Save all files in subtree and delete the tmpdir.
obj = TrainableUtil.checkpoint_to_object(checkpoint_path)
shutil.rmtree(tmpdir)
return obj
def restore(self, checkpoint_path):
"""Restores training state from a given model checkpoint.
These checkpoints are returned from calls to save().
        Subclasses should override ``load_checkpoint()`` instead to restore state.
This method restores additional metadata saved with the checkpoint.
"""
with open(checkpoint_path + ".tune_metadata", "rb") as f:
metadata = pickle.load(f)
self._experiment_id = metadata["experiment_id"]
self._iteration = metadata["iteration"]
self._timesteps_total = metadata["timesteps_total"]
self._time_total = metadata["time_total"]
self._episodes_total = metadata["episodes_total"]
saved_as_dict = metadata["saved_as_dict"]
if saved_as_dict:
with open(checkpoint_path, "rb") as loaded_state:
checkpoint_dict = pickle.load(loaded_state)
checkpoint_dict.update(tune_checkpoint_path=checkpoint_path)
self.load_checkpoint(checkpoint_dict)
else:
self.load_checkpoint(checkpoint_path)
self._time_since_restore = 0.0
self._timesteps_since_restore = 0
self._iterations_since_restore = 0
self._restored = True
logger.info("Restored on %s from checkpoint: %s",
self.get_current_ip(), checkpoint_path)
state = {
"_iteration": self._iteration,
"_timesteps_total": self._timesteps_total,
"_time_total": self._time_total,
"_episodes_total": self._episodes_total,
}
logger.info("Current state after restoring: %s", state)
def restore_from_object(self, obj):
"""Restores training state from a checkpoint object.
These checkpoints are returned from calls to save_to_object().
"""
tmpdir = tempfile.mkdtemp("restore_from_object", dir=self.logdir)
checkpoint_path = TrainableUtil.create_from_pickle(obj, tmpdir)
self.restore(checkpoint_path)
shutil.rmtree(tmpdir)
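    # Pause/resume sketch: a checkpoint serialized by save_to_object() can
    # be shipped to another process and restored there:
    #   obj = trainable.save_to_object()
    #   ...  # e.g. transfer obj via the Ray object store
    #   trainable.restore_from_object(obj)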
def delete_checkpoint(self, checkpoint_path):
"""Deletes local copy of checkpoint.
Args:
checkpoint_path (str): Path to checkpoint.
"""
try:
checkpoint_dir = TrainableUtil.find_checkpoint_dir(checkpoint_path)
except FileNotFoundError:
# The checkpoint won't exist locally if the
# trial was rescheduled to another worker.
logger.debug("Checkpoint not found during garbage collection.")
return
if os.path.exists(checkpoint_dir):
shutil.rmtree(checkpoint_dir)
def export_model(self, export_formats, export_dir=None):
"""Exports model based on export_formats.
Subclasses should override _export_model() to actually
export model to local directory.
Args:
export_formats (Union[list,str]): Format or list of (str) formats
that should be exported.
export_dir (str): Optional dir to place the exported model.
Defaults to self.logdir.
Returns:
A dict that maps ExportFormats to successfully exported models.
"""
if isinstance(export_formats, str):
export_formats = [export_formats]
export_dir = export_dir or self.logdir
return self._export_model(export_formats, export_dir)
def reset(self, new_config, logger_creator=None):
"""Resets trial for use with new config.
Subclasses should override reset_config() to actually
reset actor behavior for the new config."""
self.config = new_config
self._result_logger.flush()
self._result_logger.close()
self._create_logger(new_config.copy(), logger_creator)
stdout_file = new_config.pop(STDOUT_FILE, None)
stderr_file = new_config.pop(STDERR_FILE, None)
self._close_logfiles()
self._open_logfiles(stdout_file, stderr_file)
return self.reset_config(new_config)
def reset_config(self, new_config):
"""Resets configuration without restarting the trial.
This method is optional, but can be implemented to speed up algorithms
such as PBT, and to allow performance optimizations such as running
experiments with reuse_actors=True.
Args:
new_config (dict): Updated hyperparameter configuration
for the trainable.
Returns:
True if reset was successful else False.
"""
return False
def _create_logger(self, config, logger_creator=None):
"""Create logger from logger creator.
Sets _logdir and _result_logger.
"""
if logger_creator:
self._result_logger = logger_creator(config)
self._logdir = self._result_logger.logdir
else:
logdir_prefix = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
ray.utils.try_to_create_directory(DEFAULT_RESULTS_DIR)
self._logdir = tempfile.mkdtemp(
prefix=logdir_prefix, dir=DEFAULT_RESULTS_DIR)
self._result_logger = UnifiedLogger(
config, self._logdir, loggers=None)
def _open_logfiles(self, stdout_file, stderr_file):
"""Create loggers. Open stdout and stderr logfiles."""
if stdout_file:
stdout_path = os.path.expanduser(
os.path.join(self._logdir, stdout_file))
self._stdout_fp = open(stdout_path, "a+")
self._stdout_stream = Tee(sys.stdout, self._stdout_fp)
self._stdout_context = redirect_stdout(self._stdout_stream)
self._stdout_context.__enter__()
if stderr_file:
stderr_path = os.path.expanduser(
os.path.join(self._logdir, stderr_file))
self._stderr_fp = open(stderr_path, "a+")
self._stderr_stream = Tee(sys.stderr, self._stderr_fp)
self._stderr_context = redirect_stderr(self._stderr_stream)
self._stderr_context.__enter__()
# Add logging handler to root ray logger
formatter = logging.Formatter("[%(levelname)s %(asctime)s] "
"%(filename)s: %(lineno)d "
"%(message)s")
self._stderr_logging_handler = logging.StreamHandler(
self._stderr_fp)
self._stderr_logging_handler.setFormatter(formatter)
ray.logger.addHandler(self._stderr_logging_handler)
def _close_logfiles(self):
"""Close stdout and stderr logfiles."""
if self._stderr_logging_handler:
ray.logger.removeHandler(self._stderr_logging_handler)
if self._stdout_context:
self._stdout_stream.flush()
self._stdout_context.__exit__(None, None, None)
self._stdout_fp.close()
self._stdout_context = None
if self._stderr_context:
self._stderr_stream.flush()
self._stderr_context.__exit__(None, None, None)
self._stderr_fp.close()
self._stderr_context = None
def stop(self):
"""Releases all resources used by this trainable.
Calls ``Trainable.cleanup`` internally. Subclasses should override
``Trainable.cleanup`` for custom cleanup procedures.
"""
self._result_logger.flush()
self._result_logger.close()
self.cleanup()
self._close_logfiles()
@property
def logdir(self):
"""Directory of the results and checkpoints for this Trainable.
Tune will automatically sync this folder with the driver if execution
is distributed.
Note that the current working directory will also be changed to this.
"""
return os.path.join(self._logdir, "")
@property
def trial_name(self):
"""Trial name for the corresponding trial of this Trainable.
This is not set if not using Tune.
.. code-block:: python
name = self.trial_name
"""
if self._trial_info:
return self._trial_info.trial_name
else:
return "default"
@property
def trial_id(self):
"""Trial ID for the corresponding trial of this Trainable.
This is not set if not using Tune.
.. code-block:: python
trial_id = self.trial_id
"""
if self._trial_info:
return self._trial_info.trial_id
else:
return "default"
@property
def iteration(self):
"""Current training iteration.
This value is automatically incremented every time `train()` is called
and is automatically inserted into the training result dict.
"""
return self._iteration
@property
def training_iteration(self):
"""Current training iteration (same as `self.iteration`).
This value is automatically incremented every time `train()` is called
and is automatically inserted into the training result dict.
"""
return self._iteration
def get_config(self):
"""Returns configuration passed in by Tune."""
return self.config
def step(self):
"""Subclasses should override this to implement train().
The return value will be automatically passed to the loggers. Users
can also return `tune.result.DONE` or `tune.result.SHOULD_CHECKPOINT`
as a key to manually trigger termination or checkpointing of this
trial. Note that manual checkpointing only works when subclassing
Trainables.
.. versionadded:: 0.8.7
Returns:
A dict that describes training progress.
"""
result = self._train()
if self._is_overriden("_train") and log_once("_train"):
logger.warning(
"Trainable._train is deprecated and will be removed in "
"a future version of Ray. Override Trainable.step instead.")
return result
def _train(self):
"""This method is deprecated. Override 'Trainable.step' instead.
.. versionchanged:: 0.8.7
"""
raise NotImplementedError
def save_checkpoint(self, tmp_checkpoint_dir):
"""Subclasses should override this to implement ``save()``.
Warning:
Do not rely on absolute paths in the implementation of
``Trainable.save_checkpoint`` and ``Trainable.load_checkpoint``.
Use ``validate_save_restore`` to catch ``Trainable.save_checkpoint``/
``Trainable.load_checkpoint`` errors before execution.
>>> from ray.tune.utils import validate_save_restore
>>> validate_save_restore(MyTrainableClass)
>>> validate_save_restore(MyTrainableClass, use_object_store=True)
.. versionadded:: 0.8.7
Args:
tmp_checkpoint_dir (str): The directory where the checkpoint
file must be stored. In a Tune run, if the trial is paused,
the provided path may be temporary and moved.
Returns:
A dict or string. If string, the return value is expected to be
prefixed by `tmp_checkpoint_dir`. If dict, the return value will
be automatically serialized by Tune and
passed to ``Trainable.load_checkpoint()``.
Examples:
>>> print(trainable1.save_checkpoint("/tmp/checkpoint_1"))
"/tmp/checkpoint_1/my_checkpoint_file"
>>> print(trainable2.save_checkpoint("/tmp/checkpoint_2"))
{"some": "data"}
>>> trainable.save_checkpoint("/tmp/bad_example")
"/tmp/NEW_CHECKPOINT_PATH/my_checkpoint_file" # This will error.
"""
checkpoint = self._save(tmp_checkpoint_dir)
if self._is_overriden("_save") and log_once("_save"):
logger.warning(
"Trainable._save is deprecated and will be removed in a "
"future version of Ray. Override "
"Trainable.save_checkpoint instead.")
return checkpoint
def _save(self, tmp_checkpoint_dir):
"""This method is deprecated. Override 'save_checkpoint' instead.
.. versionchanged:: 0.8.7
"""
raise NotImplementedError
def load_checkpoint(self, checkpoint):
"""Subclasses should override this to implement restore().
Warning:
In this method, do not rely on absolute paths. The absolute
path of the checkpoint_dir used in ``Trainable.save_checkpoint``
may be changed.
If ``Trainable.save_checkpoint`` returned a prefixed string, the
prefix of the checkpoint string returned by
``Trainable.save_checkpoint`` may be changed.
This is because trial pausing depends on temporary directories.
The directory structure under the checkpoint_dir provided to
``Trainable.save_checkpoint`` is preserved.
See the example below.
.. code-block:: python
class Example(Trainable):
def save_checkpoint(self, checkpoint_path):
print(checkpoint_path)
return os.path.join(checkpoint_path, "my/check/point")
def load_checkpoint(self, checkpoint):
print(checkpoint)
>>> trainer = Example()
>>> obj = trainer.save_to_object() # This is used when PAUSED.
<logdir>/tmpc8k_c_6hsave_to_object/checkpoint_0/my/check/point
>>> trainer.restore_from_object(obj) # Note the different prefix.
<logdir>/tmpb87b5axfrestore_from_object/checkpoint_0/my/check/point
.. versionadded:: 0.8.7
Args:
checkpoint (str|dict): If dict, the return value is as
returned by `save_checkpoint`. If a string, then it is
a checkpoint path that may have a different prefix than that
returned by `save_checkpoint`. The directory structure
underneath the `checkpoint_dir` `save_checkpoint` is preserved.
"""
self._restore(checkpoint)
if self._is_overriden("_restore") and log_once("_restore"):
logger.warning(
"Trainable._restore is deprecated and will be removed in a "
"future version of Ray. Override Trainable.load_checkpoint "
"instead.")
def _restore(self, checkpoint):
"""This method is deprecated. Override 'load_checkpoint' instead.
.. versionchanged:: 0.8.7
"""
raise NotImplementedError
def setup(self, config):
"""Subclasses should override this for custom initialization.
.. versionadded:: 0.8.7
Args:
config (dict): Hyperparameters and other configs given.
Copy of `self.config`.
"""
self._setup(config)
if self._is_overriden("_setup") and log_once("_setup"):
logger.warning(
"Trainable._setup is deprecated and will be removed in "
"a future version of Ray. Override Trainable.setup instead.")
def _setup(self, config):
"""This method is deprecated. Override 'setup' instead.
.. versionchanged:: 0.8.7
"""
pass
def log_result(self, result):
"""Subclasses can optionally override this to customize logging.
The logging here is done on the worker process rather than
the driver. You may want to turn off driver logging via the
``loggers`` parameter in ``tune.run`` when overriding this function.
.. versionadded:: 0.8.7
Args:
result (dict): Training result returned by step().
"""
self._log_result(result)
if self._is_overriden("_log_result") and log_once("_log_result"):
logger.warning(
"Trainable._log_result is deprecated and will be removed in "
"a future version of Ray. Override "
"Trainable.log_result instead.")
def _log_result(self, result):
"""This method is deprecated. Override 'log_result' instead.
.. versionchanged:: 0.8.7
"""
self._result_logger.on_result(result)
def cleanup(self):
"""Subclasses should override this for any cleanup on stop.
        If any Ray actors are launched in the Trainable (e.g., with an RLlib
        trainer), be sure to kill the Ray actor process here.
You can kill a Ray actor by calling `actor.__ray_terminate__.remote()`
on the actor.
.. versionadded:: 0.8.7
"""
self._stop()
if self._is_overriden("_stop") and log_once("trainable.cleanup"):
logger.warning(
"Trainable._stop is deprecated and will be removed in "
"a future version of Ray. Override Trainable.cleanup instead.")
def _stop(self):
"""This method is deprecated. Override 'cleanup' instead.
.. versionchanged:: 0.8.7
"""
pass
def _export_model(self, export_formats, export_dir):
"""Subclasses should override this to export model.
Args:
export_formats (list): List of formats that should be exported.
export_dir (str): Directory to place exported models.
Return:
A dict that maps ExportFormats to successfully exported models.
"""
return {}
def _is_overriden(self, key):
return getattr(self, key).__code__ != getattr(Trainable, key).__code__
|
import csv
import itertools
from pyomo.environ import value
import __main__ as main
interactive_session = not hasattr(main, '__file__')
csv.register_dialect("ampl-tab",
delimiter="\t",
lineterminator="\n",
doublequote=False, escapechar="\\",
quotechar='"', quoting=csv.QUOTE_MINIMAL,
skipinitialspace = False
)
def write_table(model, *indexes, **kwargs):
    # Named options are pulled from **kwargs because the function also takes
    # an open-ended list of positional index sets (*indexes); in Python 3
    # these could instead be keyword-only arguments after *indexes.
output_file = kwargs["output_file"]
headings = kwargs["headings"]
values = kwargs["values"]
    # create a master indexing set: the cross product of the index sets
    # (indexes is a tuple of iterables, even if only one set was specified)
idx = itertools.product(*indexes)
with open(output_file, 'wb') as f:
w = csv.writer(f, dialect="ampl-tab")
# write header row
w.writerow(list(headings))
# write the data
w.writerows(
tuple(value(v) for v in values(model, *x))
for x in idx
)
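# Usage sketch ("model", PERIODS and DispatchCost are hypothetical; the
# "ampl-tab" dialect above writes tab-separated .tab files):
#   write_table(model, model.PERIODS,
#               output_file="dispatch.tab",
#               headings=("period", "cost"),
#               values=lambda m, p: (p, m.DispatchCost[p]))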
|
import itertools
from logger import *
from playbook import *
from sudoku import *
class NakedGroup(Strategy):
__metaclass__ = StrategyMeta
"""
NAKED-GROUP, including single, pair, triple, quad, etc. Naked in
this context refers to all the remaining hints in the group of
nodes. A naked group has the same number of hints among them as
the number of nodes in the group.
For example, a naked single refers to the degenerate case where a
node has but one hint left such that the value of the node is known.
A naked pair, also known as a conjugate pair, is a set of two nodes
with two hints that belong to at least one lot. A naked triple is
any group of three nodes that contain in total three hints. Etc. etc.
The hints carried within a naked group are exclusive to the nodes
within the said group. In other words, they mustn't appear elsewhere
in the lot. Hence, we can eliminate the hints beyond the group.
"""
def __init__(self):
Strategy.__init__(self, "NAKED-GROUP")
"""
Find all naked groups in the given lot. Since we are using the
naked group strategy to eliminate hints from nodes outside the
group, the size of the naked groups we are looking for should be
less than the total number of incomplete nodes.
"""
def find_naked_groups(self, lot):
groups = []
nodes = set(lot.get_incomplete())
for i in range(1, len(nodes)):
# Find all naked groups of size i.
for candidate in [set(x) for x in itertools.combinations(nodes, i)]:
# Skip candidate with nodes that are members of another group.
if not candidate <= nodes:
continue
# Naked group has number of hints equal to the size of the group.
hints = self.all_hints(candidate)
if len(hints) < len(candidate):
raise LogicException(self)
if len(hints) > len(candidate):
continue
groups.append((hints, candidate))
                # Remove the candidate nodes so they won't form another group.
nodes -= candidate
return groups
"""
Find and process naked groups within the given lot. Once a naked
group is found, its hints are eliminated elsewhere in the lot.
"""
def naked_group(self, plan, lot):
status = False
for hints, group in self.find_naked_groups(lot):
nodes = lot.other_nodes(group)
if self.test_purge(nodes, hints):
reason = {"hints": hints, "group": group}
self.purge_hints(plan, nodes, hints, reason)
status = True
return status
"""
Process naked groups across all lots.
"""
def run(self, plan):
return any([self.naked_group(plan, lot)
for lot in plan.get_sudoku().get_lots()])
|
import logging
import requests
import json
import esgfpid.utils
import esgfpid.solr.tasks.filehandles_same_dataset
import esgfpid.solr.tasks.all_versions_of_dataset
import esgfpid.solr.serverconnector
import esgfpid.defaults
import esgfpid.exceptions
from esgfpid.utils import loginfo, logdebug, logtrace, logerror, logwarn
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
'''
This class provides the solr facade for the rest of
the library.
All requests to solr are addressed to an instance of
this class.
It redirects the calls for the various tasks to the
responsible submodules, which create solr queries and
can parse results (:py:mod:`~esgfpid.solr.tasks`)
The actual interaction with solr server (sending of
a ready-made query, receiving the response) is handled
by another class
(:py:class:`~esgfpid.solr.serverconnector.SolrServerConnector`).
'''
class SolrInteractor(object):
# Constructor:
'''
:param switched_off: Mandatory. Boolean.
:param prefix: Mandatory if not switched off.
:param solr_url: Mandatory if not switched off.
:param https_verify: Mandatory if not switched off.
:param disable_insecure_request_warning: Mandatory if not switched off.
'''
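    # Construction sketch (values are placeholders):
    #   solr = SolrInteractor(
    #       switched_off=False,
    #       prefix='21.14100',
    #       solr_url='https://solr.example.org/solr',
    #       https_verify=True,
    #       disable_insecure_request_warning=False)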
def __init__(self, **args):
mandatory_args = [
'switched_off',
'prefix',
'solr_url',
'https_verify',
'disable_insecure_request_warning'
]
esgfpid.utils.check_presence_of_mandatory_args(args, ['switched_off'])
if args['switched_off'] == True:
logdebug(LOGGER, 'Initializing solr module without access..')
self.__init_without_access()
logdebug(LOGGER, 'Initializing solr module without access.. done')
else:
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
logdebug(LOGGER, 'Initializing solr module..')
self.__init_with_access(args)
logdebug(LOGGER, 'Initializing solr module.. done')
def __init_without_access(self):
self.__switched_on = False
self.__prefix = None
self.__solr_server_connector = None
def __init_with_access(self, args):
self.__switched_on = True
self.__check_presence_of_args(args)
self.__prefix = args['prefix']
self.__make_server_connector(args)
def __check_presence_of_args(self, args):
mandatory_args = ['solr_url', 'prefix', 'https_verify',
'disable_insecure_request_warning', 'switched_off']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
esgfpid.utils.check_noneness_of_mandatory_args(args, mandatory_args)
def __make_server_connector(self, args):
self.__solr_server_connector = esgfpid.solr.serverconnector.SolrServerConnector(
solr_url = args['solr_url'],
            https_verify = args['https_verify'],
disable_insecure_request_warning = args['disable_insecure_request_warning']
)
# Getter
'''
State getter.
:returns: True if the solr module is switched off, i.e.
it either received a switch-off flag from the library
or had no solr URL passed. False if not switched off.
'''
def is_switched_off(self):
return not self.__switched_on
# Methods called by tasks:
def send_query(self, query):
''' This method is called by the tasks. It is redirected to the submodule.'''
if self.__switched_on:
return self.__solr_server_connector.send_query(query)
else:
msg = 'Not sending query'
LOGGER.debug(msg)
raise esgfpid.exceptions.SolrSwitchedOff(msg)
def make_solr_base_query(self):
query_dict = {}
query_dict['distrib'] = esgfpid.defaults.SOLR_QUERY_DISTRIB
query_dict['format'] = 'application/solr+json'
query_dict['limit'] = 0 # As we don't want all the details of the found files/datasets!
return query_dict
#####################
### Various tasks ###
#####################
# Task 1
def retrieve_file_handles_of_same_dataset(self, **args):
'''
:return: List of handles, or empty list. Should never return None.
:raise: SolrSwitchedOff
:raise SolrError: If both strategies to find file handles failed.
'''
mandatory_args = ['drs_id', 'version_number', 'data_node']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
LOGGER.debug('Looking for files of dataset "%s", version "%s".',
args['drs_id'], str(args['version_number']))
if self.__switched_on:
return self.__retrieve_file_handles_of_same_dataset(**args)
else:
msg = 'Cannot retrieve handles of files of the same dataset.'
raise esgfpid.exceptions.SolrSwitchedOff(msg)
def __retrieve_file_handles_of_same_dataset(self, **args):
finder = esgfpid.solr.tasks.filehandles_same_dataset.FindFilesOfSameDatasetVersion(self)
args['prefix'] = self.__prefix
file_handles = finder.retrieve_file_handles_of_same_dataset(**args)
return file_handles
# Task 2
def retrieve_datasethandles_or_versionnumbers_of_allversions(self, drs_id):
LOGGER.debug('Looking for dataset handles or version numbers of '+
'dataset "%s".', drs_id)
if self.__switched_on:
return self.__retrieve_datasethandles_or_versionnumbers_of_allversions(drs_id)
else:
msg = 'Cannot retrieve handles or version numbers of all versions of the dataset.'
raise esgfpid.exceptions.SolrSwitchedOff(msg)
def __retrieve_datasethandles_or_versionnumbers_of_allversions(self, drs_id):
finder = esgfpid.solr.tasks.all_versions_of_dataset.FindVersionsOfSameDataset(self)
result_dict = finder.retrieve_dataset_handles_or_version_numbers_of_all_versions(drs_id, self.__prefix)
return result_dict
|
import os
import sys
import string
import time
import datetime
import MySQLdb
import winrm
import logging
import logging.config
logging.config.fileConfig("etc/logger.ini")
logger = logging.getLogger("check_os")
path='./include'
sys.path.insert(0,path)
import functions as func
import alert_os as alert
import alert_main as mail
import thread
from multiprocessing import Process
dbhost = func.get_config('monitor_server','host')
dbport = func.get_config('monitor_server','port')
dbuser = func.get_config('monitor_server','user')
dbpasswd = func.get_config('monitor_server','passwd')
dbname = func.get_config('monitor_server','dbname')
def check_os_snmp_linux(ip,port,filter_os_disk,tags):
try :
community="public"
# get hostname
command="""/usr/bin/snmpwalk -v1 -c %s %s SNMPv2-MIB::sysName.0|awk '{print $NF}' """ %(community, ip)
res_file=os.popen(command)
hostname=res_file.read()
if hostname != "":
# get kernel
command="""/usr/bin/snmpwalk -v1 -c %s %s SNMPv2-MIB::sysDescr.0|awk '{print $4 " " $6 " " $15}' """ %(community, ip)
res_file=os.popen(command)
kernel=res_file.read().replace('\n','')
# get system_date
command="""/usr/bin/snmpwalk -v1 -c %s %s HOST-RESOURCES-MIB::hrSystemDate.0|cut -d '=' -f2|cut -d ' ' -f3 """ %(community, ip)
date_file=os.popen(command)
system_date=date_file.read()
# get system_uptime
command="""/usr/bin/snmpwalk -v1 -c %s %s HOST-RESOURCES-MIB::hrSystemUptime.0|cut -d ')' -f2 """ %(community, ip)
uptime_file=os.popen(command)
system_uptime=uptime_file.read()
# get process
command="""/usr/bin/snmpwalk -v1 -c %s %s HOST-RESOURCES-MIB::hrSystemProcesses.0|cut -d ' ' -f4 """ %(community, ip)
uptime_file=os.popen(command)
process=uptime_file.read()
if process !="":
process=int(process)
# get load_1
command="""/usr/bin/snmpwalk -v1 -c %s %s UCD-SNMP-MIB::laLoad.1 | awk '{print $NF}' """ %(community, ip)
uptime_file=os.popen(command)
load_1=uptime_file.read()
if load_1 !="":
load_1=("%.2f" %float(load_1))
# get load_5
command="""/usr/bin/snmpwalk -v1 -c %s %s UCD-SNMP-MIB::laLoad.2 | awk '{print $NF}' """ %(community, ip)
uptime_file=os.popen(command)
load_5=uptime_file.read()
if load_5 !="":
load_5=("%.2f" %float(load_5))
# get load_15
command="""/usr/bin/snmpwalk -v1 -c %s %s UCD-SNMP-MIB::laLoad.3 | awk '{print $NF}' """ %(community, ip)
uptime_file=os.popen(command)
load_15=uptime_file.read()
if load_15 !="":
load_15=("%.2f" %float(load_15))
# get cpu_user_time
command="""/usr/bin/snmpwalk -v1 -c %s %s UCD-SNMP-MIB::ssCpuUser.0 |awk '{print $NF}' """ %(community, ip)
uptime_file=os.popen(command)
cpu_user_time=uptime_file.read()
if cpu_user_time !="":
cpu_user_time=int(cpu_user_time)
# get cpu_system_time
command="""/usr/bin/snmpwalk -v1 -c %s %s UCD-SNMP-MIB::ssCpuSystem.0 |awk '{print $NF}' """ %(community, ip)
uptime_file=os.popen(command)
cpu_system_time=uptime_file.read()
if cpu_system_time !="":
cpu_system_time=int(cpu_system_time)
# get cpu_idle_time
# command="""/usr/bin/snmpwalk -v1 -c %s %s UCD-SNMP-MIB::ssCpuIdle.0 |awk '{print $NF}' """ %(community, ip)
# uptime_file=os.popen(command)
# cpu_idle_time=uptime_file.read()
cpu_idle_time = -1
if cpu_user_time !="" and cpu_system_time != "":
cpu_idle_time=100 - int(cpu_user_time) - int(cpu_system_time)
# get swap_total
command="""/usr/bin/snmpwalk -v1 -c %s %s UCD-SNMP-MIB::memTotalSwap.0 |cut -d= -f2 |awk -F ' ' '{print $2}' """ %(community, ip)
uptime_file=os.popen(command)
swap_total=uptime_file.read()
if swap_total !="":
swap_total=int(swap_total)
# get swap_avail
command="""/usr/bin/snmpwalk -v1 -c %s %s UCD-SNMP-MIB::memAvailSwap.0 |cut -d= -f2 |awk -F ' ' '{print $2}' """ %(community, ip)
uptime_file=os.popen(command)
swap_avail = uptime_file.read()
if swap_avail !="":
swap_avail=int(swap_avail)
# get mem_total
command="""/usr/bin/snmpwalk -v1 -c %s %s UCD-SNMP-MIB::memTotalReal.0 |cut -d= -f2 |awk -F ' ' '{print $2}' """ %(community, ip)
uptime_file=os.popen(command)
mem_total=uptime_file.read()
if mem_total !="":
mem_total=int(mem_total)
# get mem_avail
command="""/usr/bin/snmpwalk -v1 -c %s %s UCD-SNMP-MIB::memAvailReal.0 |cut -d= -f2 |awk -F ' ' '{print $2}' """ %(community, ip)
uptime_file=os.popen(command)
mem_avail=uptime_file.read()
if mem_avail !="":
mem_avail=int(mem_avail)
# get mem_free
command="""/usr/bin/snmpwalk -v1 -c %s %s UCD-SNMP-MIB::memTotalFree.0 |cut -d= -f2 |awk -F ' ' '{print $2}' """ %(community, ip)
uptime_file=os.popen(command)
mem_free=uptime_file.read()
if mem_free !="":
mem_free=int(mem_free)
# get mem_shared
command="""/usr/bin/snmpwalk -v1 -c %s %s UCD-SNMP-MIB::memShared.0 |cut -d= -f2 |awk -F ' ' '{print $2}' """ %(community, ip)
uptime_file=os.popen(command)
mem_shared=uptime_file.read()
if mem_shared !="":
mem_shared=int(mem_shared)
# get mem_buffered
command="""/usr/bin/snmpwalk -v1 -c %s %s UCD-SNMP-MIB::memBuffer.0 |cut -d= -f2 |awk -F ' ' '{print $2}' """ %(community, ip)
uptime_file=os.popen(command)
mem_buffered = uptime_file.read()
if mem_buffered !="":
mem_buffered=int(mem_buffered)
# get mem_cached
command="""/usr/bin/snmpwalk -v1 -c %s %s UCD-SNMP-MIB::memCached.0 |cut -d= -f2 |awk -F ' ' '{print $2}' """ %(community, ip)
uptime_file=os.popen(command)
mem_cached = uptime_file.read()
if mem_cached !="":
mem_cached=int(mem_cached)
# calculate mem_available
mem_available = -1
if mem_total != "" and mem_free != "" and swap_avail != "":
mem_available=int(mem_total) - (int(mem_free) - int(swap_avail))
# get mem_usage_rate
mem_usage_rate = ""
if mem_total !="" and mem_total !="0":
mem_usage_rate = int(mem_available)*100/int(mem_total)
#logger.error("mem_usage_rate: %s"%(mem_usage_rate))
#print mem_usage_rate
#print hostname
#print kernel
#print system_date
#print system_uptime
#print process
#print load_1
#print load_5
#print load_15
# disk usage
command=""
if filter_os_disk=="":
command="""/usr/bin/snmpdf -v1 -c %s %s |grep -E "/"|grep -vE "/boot"|grep -vE "DVD" """ %(community, ip)
else:
command="""/usr/bin/snmpdf -v1 -c %s %s |grep -E "/"|grep -vE "/boot"|grep -vE "DVD" |grep -vE "%s" """ %(community, ip, filter_os_disk)
#print command
disk_all=os.popen(command)
result=disk_all.readlines()
if result:
func.mysql_exec("insert into os_disk_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from os_disk where ip = '%s';" %(ip),'')
func.mysql_exec("delete from os_disk where ip = '%s';" %(ip),'')
for i in range(len(result)):
line=result[i].split()
mounted=line[0]
total_size=line[1]
used_size=line[2]
avail_size=line[3]
used_rate=line[4][:-1]
print mounted, total_size, used_size, avail_size, used_rate
##################### insert data to mysql server#############################
sql = "insert into os_disk(ip,tags,mounted,total_size,used_size,avail_size,used_rate) values(%s,%s,%s,%s,%s,%s,%s);"
param = (ip, tags, mounted, total_size, used_size, avail_size, used_rate)
func.mysql_exec(sql,param)
#disk io begin
disk_io_reads_total=0
disk_io_writes_total=0
print "get disk io table begin:"
command="""/usr/bin/snmptable -v1 -c %s %s diskIOTable |grep -ivE "ram|loop|md|SNMP table|diskIOIndex" | grep -v '^$' """ %(community, ip)
res_file_1=os.popen(command)
print "wait for 5 seconds..."
time.sleep(5)
res_file_2=os.popen(command)
print "get disk io table end."
res_tab_1=res_file_1.readlines()
res_tab_2=res_file_2.readlines()
if res_tab_2:
func.mysql_exec("insert into os_diskio_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from os_diskio where ip = '%s';" %(ip),'')
func.mysql_exec("delete from os_diskio where ip = '%s';" %(ip),'')
for i in range(len(res_tab_2)):
line_2=res_tab_2[i].split()
fdisk_id_2=line_2[0]
fdisk_name_2=line_2[1]
fdisk_io_reads_2=line_2[4]
fdisk_io_writes_2=line_2[5]
print fdisk_id_2, fdisk_name_2, fdisk_io_reads_2, fdisk_io_writes_2
if res_tab_1:
for j in range(len(res_tab_1)):
line_1=res_tab_1[j].split()
fdisk_id_1=line_1[0]
fdisk_name_1=line_1[1]
fdisk_io_reads_1=line_1[4]
fdisk_io_writes_1=line_1[5]
#print fdisk_id_1
if fdisk_id_2==fdisk_id_1:
fdisk_io_reads=(int(fdisk_io_reads_2) - int(fdisk_io_reads_1))/5
fdisk_io_writes=(int(fdisk_io_writes_2) - int(fdisk_io_writes_1))/5
print fdisk_id_1, fdisk_io_reads, fdisk_io_writes
disk_io_reads_total = disk_io_reads_total + fdisk_io_reads
disk_io_writes_total = disk_io_writes_total + fdisk_io_writes
##################### insert data to mysql server#############################
sql = "insert into os_diskio(ip,tags,fdisk,disk_io_reads,disk_io_writes) values(%s,%s,%s,%s,%s);"
param = (ip, tags, fdisk_name_1, fdisk_io_reads, fdisk_io_writes)
func.mysql_exec(sql,param)
break
#disk io end
#net begin
net_in_bytes_total=0
net_out_bytes_total=0
print "get network begin:"
command="""/usr/bin/snmpwalk -v1 -c %s %s IF-MIB::ifDescr | grep -ivE "lo|sit0" """ %(community, ip)
res_net_file=os.popen(command)
net_str=res_net_file.readlines()
if net_str:
func.mysql_exec("insert into os_net_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from os_net where ip = '%s';" %(ip),'')
func.mysql_exec("delete from os_net where ip = '%s';" %(ip),'')
for i in range(len(net_str)):
line_2=net_str[i].split()
net_desc_id=line_2[0].split('.')[1]
net_desc=line_2[3]
#print net_desc_id
command="""/usr/bin/snmpwalk -v1 -c %s %s IF-MIB::ifInOctets.%s | awk '{print $NF}' """ %(community, ip, net_desc_id)
net_in_file_1=os.popen(command)
net_in_bytes_1 = net_in_file_1.readlines()[0]
#print net_in_bytes_1
command="""/usr/bin/snmpwalk -v1 -c %s %s IF-MIB::ifOutOctets.%s | awk '{print $NF}' """ %(community, ip, net_desc_id)
net_out_file_1=os.popen(command)
net_out_bytes_1 = net_out_file_1.readlines()[0]
#print net_out_bytes_1
time.sleep(1)
command="""/usr/bin/snmpwalk -v1 -c %s %s IF-MIB::ifInOctets.%s | awk '{print $NF}' """ %(community, ip, net_desc_id)
net_in_file_2=os.popen(command)
net_in_bytes_2 = net_in_file_2.readlines()[0]
#print net_in_bytes_2
command="""/usr/bin/snmpwalk -v1 -c %s %s IF-MIB::ifOutOctets.%s | awk '{print $NF}' """ %(community, ip, net_desc_id)
net_out_file_2=os.popen(command)
net_out_bytes_2 = net_out_file_2.readlines()[0]
#print net_out_bytes_2
net_in_bytes=int(net_in_bytes_2) - int(net_in_bytes_1)
net_out_bytes=int(net_out_bytes_2) - int(net_out_bytes_1)
print net_desc, net_in_bytes, net_out_bytes
net_in_bytes_total = net_in_bytes_total + net_in_bytes
net_out_bytes_total = net_out_bytes_total + net_out_bytes
##################### insert data to mysql server#############################
sql = "insert into os_net(ip,tags,if_descr,in_bytes,out_bytes) values(%s,%s,%s,%s,%s);"
param = (ip, tags, net_desc, net_in_bytes, net_out_bytes)
func.mysql_exec(sql,param)
#net end
##################### insert data to mysql server#############################
func.mysql_exec("insert into os_status_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from os_status where ip = '%s';" %(ip),'')
func.mysql_exec("delete from os_status where ip = '%s';" %(ip),'')
sql = "insert into os_status(ip,connect,tags,hostname,kernel,system_date,system_uptime,process,load_1,load_5,load_15,cpu_user_time,cpu_system_time,cpu_idle_time,swap_total,swap_avail,mem_total,mem_avail,mem_free,mem_shared,mem_buffered,mem_cached,mem_usage_rate,mem_available,disk_io_reads_total,disk_io_writes_total,net_in_bytes_total,net_out_bytes_total) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
param = (ip,1,tags, hostname, kernel, system_date,system_uptime,process,load_1,load_5,load_15,cpu_user_time,cpu_system_time,cpu_idle_time,swap_total,swap_avail,mem_total,mem_avail,mem_free,mem_shared,mem_buffered,mem_cached,mem_usage_rate,mem_available,disk_io_reads_total,disk_io_writes_total,net_in_bytes_total,net_out_bytes_total)
func.mysql_exec(sql,param)
else:
func.mysql_exec("insert into os_status_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from os_status where ip = '%s';" %(ip),'')
func.mysql_exec("delete from os_status where ip = '%s';" %(ip),'')
func.mysql_exec("insert into os_net_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from os_net where ip = '%s';" %(ip),'')
func.mysql_exec("delete from os_net where ip = '%s';" %(ip),'')
func.mysql_exec("insert into os_disk_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from os_disk where ip = '%s';" %(ip),'')
func.mysql_exec("delete from os_disk where ip = '%s';" %(ip),'')
func.mysql_exec("insert into os_diskio_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from os_diskio where ip = '%s';" %(ip),'')
func.mysql_exec("delete from os_diskio where ip = '%s';" %(ip),'')
sql = "insert into os_status(ip,connect,tags) values(%s,%s,%s)"
param = (ip,0,tags)
func.mysql_exec(sql,param)
# generate OS alert
alert.gen_alert_os_status(ip)
alert.gen_alert_os_disk(ip)
alert.gen_alert_os_network(ip)
mail.send_alert_mail(0, ip)
except Exception, e:
print e.message
logger.error("%s:%s statspack error: %s"%(ip,port,e))
finally:
pass
def check_os_snmp_aix(ip,port,filter_os_disk,tags):
try :
community="public"
# get hostname
command="""/usr/bin/snmpwalk -v1 -c %s %s 1.3.6.1.2.1.1.5.0 | awk '{print $NF}' """ %(community, ip)
res_file=os.popen(command)
hostname=res_file.read()
if hostname != "":
# get kernel
command="""/usr/bin/snmpwalk -v1 -c %s %s 1.3.6.1.2.1.1.1.0 | grep "AIX version" | awk '{print $7}' """ %(community, ip)
res_file=os.popen(command)
kernel=res_file.read().replace('\n','')
# get system_date
command="""/usr/bin/snmpwalk -v1 -c %s %s 1.3.6.1.4.1.2.6.191.1.3.1.0 | awk -F '"' '{print $2}' """ %(community, ip)
date_file=os.popen(command)
system_date=date_file.read()
# get system_uptime
#command="""/usr/bin/snmpwalk -v1 -c %s %s SNMPv2-MIB::sysUpTime.0 | awk -F ')' '{print $2}' | awk '$1=$1' """ %(community, ip)
command="""/usr/bin/snmpwalk -v1 -c %s %s HOST-RESOURCES-MIB::hrSystemUptime | awk '{print $NF}' """ %(community, ip)
uptime_file=os.popen(command)
ticks=uptime_file.read()
if ticks !="":
system_uptime=datetime.timedelta(microseconds = int(ticks) * 10000)
# get process
command="""/usr/bin/snmpwalk -v1 -c %s %s 1.3.6.1.4.1.2.6.191.7.1.0 | awk '{print $4}' """ %(community, ip)
uptime_file=os.popen(command)
process=uptime_file.read()
if process !="":
process=int(process)
# get load_1
load_1=-1
# get load_5
load_5=-1
# get load_15
load_15=-1
# get cpu_user_time
cpu_user_time=-1
# get cpu_system_time
cpu_system_time=-1
# get cpu_usage
command="""/usr/bin/snmpwalk -v1 -c %s %s 1.3.6.1.4.1.2.6.191.1.2.1.0 | awk '{print $4}' """ %(community, ip)
cpu_usage_file=os.popen(command)
cpu_usage=cpu_usage_file.read()
if cpu_usage !="":
cpu_usage=int(cpu_usage)
			# get cpu_idle_time (default to -1 so it is always defined,
			# even when cpu_usage could not be read)
			cpu_idle_time=-1
			if cpu_usage !="":
				cpu_idle_time= 100 - cpu_usage
# get page usage
command="""/usr/bin/snmpwalk -v1 -c %s %s 1.3.6.1.4.1.2.6.191.2.4.2.1.5.1 | awk '{print $4}' """ %(community, ip)
page_file=os.popen(command)
page_usage_rate=page_file.read()
if page_usage_rate !="":
page_usage_rate=int(page_usage_rate)
# get page total
command="""/usr/bin/snmpwalk -v1 -c %s %s 1.3.6.1.4.1.2.6.191.2.4.2.1.4.1 | awk '{print $4}' """ %(community, ip)
page_total_file=os.popen(command)
swap_total=page_total_file.read()
if swap_total !="":
swap_total=int(swap_total)
# get swap_avail
swap_avail = -1
if page_usage_rate !="" and swap_total !="":
swap_avail=swap_total * (100 - page_usage_rate) / 100
# get mem_total
command="""/usr/bin/snmpwalk -v1 -c %s %s 1.3.6.1.4.1.2.6.191.9.4.1.1.4.1 | awk '{print $4}' """ %(community, ip)
mem_total_file=os.popen(command)
mem_total=mem_total_file.read()
if mem_total !="":
mem_total=int(mem_total)*1024
# get mem_avail
mem_avail= -1
# get mem_free
mem_free=-1
# get mem_shared
mem_shared=-1
# get mem_buffered
mem_buffered =-1
# get mem_cached
mem_cached = -1
# calculate mem_available
mem_available = -1
			# get mem_usage_rate (mem_available is not collected via SNMP on
			# AIX, so fall back to the -1 sentinel instead of computing a
			# meaningless ratio; also guard against a zero mem_total)
			mem_usage_rate = -1
			if mem_total != "" and int(mem_total) != 0 and mem_available != -1:
				mem_usage_rate = int(mem_available)*100/int(mem_total)
#print mem_usage_rate
#print hostname
#print kernel
#print system_date
#print system_uptime
#print process
#print load_1
#print load_5
#print load_15
# disk usage
command=""
if filter_os_disk=="":
command="""/usr/bin/snmpdf -v1 -c %s %s |grep -E "/"|grep -vE "/boot"|grep -vE "DVD" """ %(community, ip)
else:
command="""/usr/bin/snmpdf -v1 -c %s %s |grep -E "/"|grep -vE "/boot"|grep -vE "DVD" |grep -vE "%s" """ %(community, ip, filter_os_disk)
#print command
disk_all=os.popen(command)
result=disk_all.readlines()
if result:
func.mysql_exec("begin;",'')
func.mysql_exec("insert into os_disk_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from os_disk where ip = '%s';" %(ip),'')
func.mysql_exec("delete from os_disk where ip = '%s';" %(ip),'')
for i in range(len(result)):
line=result[i].split()
mounted=line[0]
total_size=line[1]
used_size=line[2]
avail_size=line[3]
used_rate=line[4][:-1]
print mounted, total_size, used_size, avail_size, used_rate
##################### insert data to mysql server#############################
sql = "insert into os_disk(ip,tags,mounted,total_size,used_size,avail_size,used_rate) values(%s,%s,%s,%s,%s,%s,%s);"
param = (ip, tags, mounted, total_size, used_size, avail_size, used_rate)
func.mysql_exec(sql,param)
func.mysql_exec("commit;",'')
#disk io begin
#disk io end
#net begin
net_in_bytes_total=0
net_out_bytes_total=0
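			# Sample ifInOctets/ifOutOctets twice, one second apart; the delta
			# approximates each interface's throughput in bytes per second.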
print "get network begin:"
command="""/usr/bin/snmpwalk -v1 -c %s %s IF-MIB::ifDescr | grep -ivE "lo|ent" | awk -F ';' '{print $1}' """ %(community, ip)
res_net_file=os.popen(command)
net_str=res_net_file.readlines()
if net_str:
func.mysql_exec("begin;",'')
func.mysql_exec("insert into os_net_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from os_net where ip = '%s';" %(ip),'')
func.mysql_exec("delete from os_net where ip = '%s';" %(ip),'')
for i in range(len(net_str)):
line_2=net_str[i].split()
net_desc_id=line_2[0].split('.')[1]
net_desc=line_2[3]
#print net_desc_id
command="""/usr/bin/snmpwalk -v1 -c %s %s IF-MIB::ifInOctets.%s | awk '{print $NF}' """ %(community, ip, net_desc_id)
net_in_file_1=os.popen(command)
net_in_bytes_1 = net_in_file_1.readlines()[0]
#print net_in_bytes_1
command="""/usr/bin/snmpwalk -v1 -c %s %s IF-MIB::ifOutOctets.%s | awk '{print $NF}' """ %(community, ip, net_desc_id)
net_out_file_1=os.popen(command)
net_out_bytes_1 = net_out_file_1.readlines()[0]
#print net_out_bytes_1
time.sleep(1)
command="""/usr/bin/snmpwalk -v1 -c %s %s IF-MIB::ifInOctets.%s | awk '{print $NF}' """ %(community, ip, net_desc_id)
net_in_file_2=os.popen(command)
net_in_bytes_2 = net_in_file_2.readlines()[0]
#print net_in_bytes_2
command="""/usr/bin/snmpwalk -v1 -c %s %s IF-MIB::ifOutOctets.%s | awk '{print $NF}' """ %(community, ip, net_desc_id)
net_out_file_2=os.popen(command)
net_out_bytes_2 = net_out_file_2.readlines()[0]
#print net_out_bytes_2
net_in_bytes=int(net_in_bytes_2) - int(net_in_bytes_1)
net_out_bytes=int(net_out_bytes_2) - int(net_out_bytes_1)
print net_desc, net_in_bytes, net_out_bytes
net_in_bytes_total = net_in_bytes_total + net_in_bytes
net_out_bytes_total = net_out_bytes_total + net_out_bytes
##################### insert data to mysql server#############################
sql = "insert into os_net(ip,tags,if_descr,in_bytes,out_bytes) values(%s,%s,%s,%s,%s);"
param = (ip, tags, net_desc, net_in_bytes, net_out_bytes)
func.mysql_exec(sql,param)
func.mysql_exec("commit;",'')
#net end
##################### insert data to mysql server#############################
disk_io_reads_total = -1
disk_io_writes_total = -1
func.mysql_exec("begin;",'')
func.mysql_exec("insert into os_status_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from os_status where ip = '%s';" %(ip),'')
func.mysql_exec("delete from os_status where ip = '%s';" %(ip),'')
sql = "insert into os_status(ip,connect,tags,hostname,kernel,system_date,system_uptime,process,load_1,load_5,load_15,cpu_user_time,cpu_system_time,cpu_idle_time,swap_total,swap_avail,mem_total,mem_avail,mem_free,mem_shared,mem_buffered,mem_cached,mem_usage_rate,mem_available,disk_io_reads_total,disk_io_writes_total,net_in_bytes_total,net_out_bytes_total) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
param = (ip,1,tags, hostname, kernel, system_date,system_uptime,process,load_1,load_5,load_15,cpu_user_time,cpu_system_time,cpu_idle_time,swap_total,swap_avail,mem_total,mem_avail,mem_free,mem_shared,mem_buffered,mem_cached,mem_usage_rate,mem_available,disk_io_reads_total,disk_io_writes_total,net_in_bytes_total,net_out_bytes_total)
func.mysql_exec(sql,param)
func.mysql_exec("commit;",'')
else:
func.mysql_exec("begin;",'')
func.mysql_exec("insert into os_status_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from os_status where ip = '%s';" %(ip),'')
func.mysql_exec("delete from os_status where ip = '%s';" %(ip),'')
sql = "insert into os_status(ip,connect,tags) values(%s,%s,%s)"
param = (ip,0,tags)
func.mysql_exec(sql,param)
func.mysql_exec("commit;",'')
# generate OS alert
alert.gen_alert_os_status(ip)
alert.gen_alert_os_disk(ip)
alert.gen_alert_os_network(ip)
mail.send_alert_mail(0, ip)
	except Exception as e:
		print(e)
logger.error("%s:%s statspack error: %s"%(ip,port,e))
func.mysql_exec("rollback;",'')
finally:
pass
def check_os_winrm(ip, port, username, password, filter_os_disk, tags):
try :
# get winrm session
url = 'http://%s:%s/wsman' %(ip,port)
win = winrm.Session(url,auth=(username,password))
r = win.run_cmd('wmic os get CSName /format:list')
outstr = str(r.std_out.decode()).replace("\r","").replace("\n","")
hostname = outstr.replace("CSName=","")
print(hostname)
# get kernel version
r = win.run_cmd('wmic os get caption /format:list')
outstr = str(r.std_out.decode()).replace("\r","").replace("\n","")
kernel = outstr.replace("Caption=","")
print(kernel)
# get system_date
r = win.run_cmd('wmic os get LocalDateTime /format:list')
outstr = str(r.std_out.decode()).replace("\r","").replace("\n","")
system_date = outstr.replace("LocalDateTime=","")[0:14]
print(system_date)
# get system_uptime
r = win.run_cmd('wmic os get LastBootUpTime /format:list')
outstr = str(r.std_out.decode()).replace("\r","").replace("\n","")
system_uptime = outstr.replace("LastBootUpTime=","")[0:14]
print(system_uptime)
# get Process
r = win.run_cmd('wmic process get CommandLine /format:list')
outstr = str(r.std_out.decode())
process = len(outstr.split("CommandLine="))-1
print(process)
load_1 = -1
load_5 = -1
load_15 = -1
cpu_user_time = -1
cpu_system_time = -1
# get cpu_idle_time
swap_total = -1
swap_avail = -1
# get mem_total
# get mem_avail
# get mem_free
# get mem_shared
# get mem_buffered
# get mem_cached
# calculate mem_available
# get mem_usage_rate
# get CPU Idle Percent
r = win.run_cmd('wmic path Win32_PerfFormattedData_PerfOS_Processor where Name="_Total" get PercentIdleTime /format:list')
outstr = str(r.std_out.decode()).replace("\r","").replace("\n","")
cpu_idle_time = outstr.replace("PercentIdleTime=","")
print(cpu_idle_time)
# get FreePhysicalMemory
r = win.run_cmd('wmic os get FreePhysicalMemory /format:list')
outstr = str(r.std_out.decode()).replace("\r","").replace("\n","")
mem_free = outstr.replace("FreePhysicalMemory=","")
print(mem_free)
# get TotalVisibleMemorySize
r = win.run_cmd('wmic os get TotalVisibleMemorySize /format:list')
outstr = str(r.std_out.decode()).replace("\r","").replace("\n","")
mem_total = outstr.replace("TotalVisibleMemorySize=","")
print(mem_total)
mem_avail = -1
mem_shared = -1
mem_buffered = -1
mem_cached = -1
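		# Note: 'mem_available' here actually holds used memory (total - free),
		# so mem_usage_rate below is the used-memory percentage.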
mem_available = int(mem_total) - int(mem_free)
mem_usage_rate = int(mem_available)*100/int(mem_total)
#disk usage
r = win.run_cmd('wmic LogicalDisk where DriveType=3 get DeviceID /format:list')
outstr = str(r.std_out.decode()).replace("\r","")
list_drive = outstr.split("\n")
for drive in list_drive:
if drive.find("DeviceID=")>=0:
func.mysql_exec("insert into os_disk_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from os_disk where ip = '%s';" %(ip),'')
func.mysql_exec("delete from os_disk where ip = '%s';" %(ip),'')
break
for drive in list_drive:
if drive.find("DeviceID=")>=0:
disk = drive.replace("DeviceID=","")
print disk
rr = win.run_cmd('wmic LogicalDisk where DeviceID="%s" get FreeSpace /format:list' %(disk))
outstr = str(rr.std_out.decode()).replace("\r","").replace("\n","")
free_space = outstr.replace("FreeSpace=","")
free_space = int(free_space) / 1024
print free_space
rr = win.run_cmd('wmic LogicalDisk where DeviceID="%s" get Size /format:list' %(disk))
outstr = str(rr.std_out.decode()).replace("\r","").replace("\n","")
total_size = outstr.replace("Size=","")
total_size = int(total_size) / 1024
print total_size
used_size = int(total_size) - int(free_space)
used_rate = used_size * 100 / int(total_size)
##################### insert data to mysql server#############################
sql = "insert into os_disk(ip,tags,mounted,total_size,used_size,avail_size,used_rate) values(%s,%s,%s,%s,%s,%s,%s);"
param = (ip, tags, disk, total_size, used_size, free_space, used_rate)
func.mysql_exec(sql,param)
#disk io begin
disk_io_reads_total=0
disk_io_writes_total=0
r = win.run_cmd('wmic LogicalDisk where DriveType=3 get DeviceID /format:list')
outstr = str(r.std_out.decode()).replace("\r","")
list_disk = outstr.split("\n")
for i in list_disk:
if i.find("DeviceID=")>=0:
func.mysql_exec("insert into os_diskio_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from os_diskio where ip = '%s';" %(ip),'')
func.mysql_exec("delete from os_diskio where ip = '%s';" %(ip),'')
for i in list_disk:
if i.find("DeviceID=")>=0:
disk = i.replace("DeviceID=","")
print disk
rr = win.run_cmd('wmic path Win32_PerfFormattedData_PerfDisk_LogicalDisk where Name="%s" get DiskReadBytesPersec /format:list' %(disk))
outstr = str(rr.std_out.decode()).replace("\r","").replace("\n","")
io_reads = outstr.replace("DiskReadBytesPersec=","")
print(io_reads)
rr = win.run_cmd('wmic path Win32_PerfFormattedData_PerfDisk_LogicalDisk where Name="%s" get DiskWriteBytesPersec /format:list' %(disk))
outstr = str(rr.std_out.decode()).replace("\r","").replace("\n","")
io_writes = outstr.replace("DiskWriteBytesPersec=","")
print(io_writes)
disk_io_reads_total = disk_io_reads_total + int(io_reads)
disk_io_writes_total = disk_io_writes_total + int(io_writes)
##################### insert data to mysql server#############################
sql = "insert into os_diskio(ip,tags,fdisk,disk_io_reads,disk_io_writes) values(%s,%s,%s,%s,%s);"
param = (ip, tags, disk, io_reads, io_writes)
func.mysql_exec(sql,param)
#disk io end
#net begin
net_in_bytes_total=0
net_out_bytes_total=0
r = win.run_cmd('wmic path Win32_PerfFormattedData_Tcpip_NetworkInterface get Name /format:list')
		outstr = str(r.std_out.decode()).replace("\r","")
list_nic = outstr.split("\n")
for i in list_nic:
if i.find("Name=")>=0 and i.find("Ethernet") > 0:
func.mysql_exec("insert into os_net_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from os_net where ip = '%s';" %(ip),'')
func.mysql_exec("delete from os_net where ip = '%s';" %(ip),'')
for i in list_nic:
if i.find("Name=")>=0 and i.find("Ethernet") > 0:
nic = i.replace("Name=","")
print nic
rr = win.run_cmd('wmic path Win32_PerfFormattedData_Tcpip_NetworkInterface where Name="%s" get BytesReceivedPersec /format:list' %(nic))
outstr = str(rr.std_out.decode()).replace("\r","").replace("\n","")
net_in_bytes = outstr.replace("BytesReceivedPersec=","")
print(net_in_bytes)
rr = win.run_cmd('wmic path Win32_PerfFormattedData_Tcpip_NetworkInterface where Name="%s" get BytesSentPersec /format:list' %(nic))
outstr = str(rr.std_out.decode()).replace("\r","").replace("\n","")
net_out_bytes = outstr.replace("BytesSentPersec=","")
print(net_out_bytes)
net_in_bytes_total = net_in_bytes_total + int(net_in_bytes)
net_out_bytes_total = net_out_bytes_total + int(net_out_bytes)
##################### insert data to mysql server#############################
sql = "insert into os_net(ip,tags,if_descr,in_bytes,out_bytes) values(%s,%s,%s,%s,%s);"
param = (ip, tags, nic, net_in_bytes, net_out_bytes)
func.mysql_exec(sql,param)
#net end
##################### insert data to mysql server#############################
func.mysql_exec("insert into os_status_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from os_status where ip = '%s';" %(ip),'')
func.mysql_exec("delete from os_status where ip = '%s';" %(ip),'')
sql = "insert into os_status(ip,connect,tags,hostname,kernel,system_date,system_uptime,process,load_1,load_5,load_15,cpu_user_time,cpu_system_time,cpu_idle_time,swap_total,swap_avail,mem_total,mem_avail,mem_free,mem_shared,mem_buffered,mem_cached,mem_usage_rate,mem_available,disk_io_reads_total,disk_io_writes_total,net_in_bytes_total,net_out_bytes_total) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
param = (ip,1,tags, hostname, kernel, system_date,system_uptime,process,load_1,load_5,load_15,cpu_user_time,cpu_system_time,cpu_idle_time,swap_total,swap_avail,mem_total,mem_avail,mem_free,mem_shared,mem_buffered,mem_cached,mem_usage_rate,mem_available,disk_io_reads_total,disk_io_writes_total,net_in_bytes_total,net_out_bytes_total)
func.mysql_exec(sql,param)
# generate OS alert
alert.gen_alert_os_status(ip)
alert.gen_alert_os_disk(ip)
alert.gen_alert_os_network(ip)
mail.send_alert_mail(0, ip)
	except Exception as e:
		print(e)
logger.error("%s:%s statspack error: %s"%(ip,port,e))
func.mysql_exec("insert into os_status_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from os_status where ip = '%s';" %(ip),'')
func.mysql_exec("delete from os_status where ip = '%s';" %(ip),'')
func.mysql_exec("insert into os_net_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from os_net where ip = '%s';" %(ip),'')
func.mysql_exec("delete from os_net where ip = '%s';" %(ip),'')
func.mysql_exec("insert into os_disk_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from os_disk where ip = '%s';" %(ip),'')
func.mysql_exec("delete from os_disk where ip = '%s';" %(ip),'')
func.mysql_exec("insert into os_diskio_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from os_diskio where ip = '%s';" %(ip),'')
func.mysql_exec("delete from os_diskio where ip = '%s';" %(ip),'')
sql = "insert into os_status(ip,connect,tags) values(%s,%s,%s)"
param = (ip,0,tags)
func.mysql_exec(sql,param)
alert.gen_alert_os_status(ip)
finally:
pass
def clean_invalid_os_status():
try:
func.mysql_exec("insert into os_status_his SELECT *,sysdate() from os_status where ip not in(select host from db_cfg_os);",'')
func.mysql_exec('delete from os_status where ip not in(select host from db_cfg_os);','')
func.mysql_exec("insert into os_disk_his SELECT *,sysdate() from os_disk where ip not in(select host from db_cfg_os);",'')
func.mysql_exec('delete from os_disk where ip not in(select host from db_cfg_os);','')
func.mysql_exec("insert into os_diskio_his SELECT *,sysdate() from os_diskio where ip not in(select host from db_cfg_os);",'')
func.mysql_exec('delete from os_diskio where ip not in(select host from db_cfg_os);','')
func.mysql_exec("insert into os_net_his SELECT *,sysdate() from os_net where ip not in(select host from db_cfg_os);",'')
func.mysql_exec('delete from os_net where ip not in(select host from db_cfg_os);','')
func.mysql_exec("delete from db_status where db_type = 'os' and host not in(select host from db_cfg_os);",'')
	except Exception as e:
logger.error(e)
finally:
pass
def main():
#get os servers list
servers=func.mysql_query("select host,host_type,protocol,port,username,password,filter_os_disk,tags from db_cfg_os where is_delete=0 and monitor=1 ;")
logger.info("check os controller started.")
if servers:
func.update_check_time('os')
plist = []
for row in servers:
host=row[0]
host_type=row[1]
protocol=row[2]
port=row[3]
username=row[4]
password=row[5]
filter_os_disk=row[6]
tags=row[7]
			if host != '':
#thread.start_new_thread(check_os, (host,community,filter_os_disk,tags))
#time.sleep(1)
if protocol == 'snmp':
if host_type == 0:
p = Process(target = check_os_snmp_linux, args=(host,port,filter_os_disk,tags))
plist.append(p)
p.start()
elif host_type == 1:
p = Process(target = check_os_snmp_aix, args=(host,port,filter_os_disk,tags))
plist.append(p)
p.start()
elif protocol == 'winrm':
p = Process(target = check_os_winrm, args=(host,port,username,password,filter_os_disk,tags))
plist.append(p)
p.start()
for p in plist:
p.join()
else:
logger.warning("check os: not found any servers")
logger.info("check os controller finished.")
logger.info("Clean invalid os status start.")
clean_invalid_os_status()
logger.info("Clean invalid os status finished.")
if __name__=='__main__':
main()
|
import json
import urllib
from tempest.common.rest_client import RestClient
class HostsClientJSON(RestClient):
def __init__(self, config, username, password, auth_url, tenant_name=None):
super(HostsClientJSON, self).__init__(config, username, password,
auth_url, tenant_name)
self.service = self.config.compute.catalog_type
def list_hosts(self, params=None):
"""Lists all hosts."""
url = 'os-hosts'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['hosts']
def show_host_detail(self, hostname):
"""Show detail information for the host."""
resp, body = self.get("os-hosts/%s" % str(hostname))
body = json.loads(body)
return resp, body['host']
def update_host(self, hostname, **kwargs):
"""Update a host."""
request_body = {
'status': None,
'maintenance_mode': None,
}
request_body.update(**kwargs)
request_body = json.dumps(request_body)
resp, body = self.put("os-hosts/%s" % str(hostname), request_body,
self.headers)
body = json.loads(body)
return resp, body
def startup_host(self, hostname):
"""Startup a host."""
resp, body = self.get("os-hosts/%s/startup" % str(hostname))
body = json.loads(body)
return resp, body['host']
def shutdown_host(self, hostname):
"""Shutdown a host."""
resp, body = self.get("os-hosts/%s/shutdown" % str(hostname))
body = json.loads(body)
return resp, body['host']
def reboot_host(self, hostname):
"""reboot a host."""
resp, body = self.get("os-hosts/%s/reboot" % str(hostname))
body = json.loads(body)
return resp, body['host']
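# Usage sketch (illustrative only; the config object and credentials come
# from the surrounding tempest configuration):
#
#   client = HostsClientJSON(config, 'admin', 'secret', auth_url)
#   resp, hosts = client.list_hosts({'zone': 'nova'})
#   resp, detail = client.show_host_detail(hosts[0]['host_name'])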
|
"""Utilities and helper functions."""
import contextlib
import errno
import inspect
import os
import pyclbr
import shutil
import socket
import sys
import tempfile
from eventlet import pools
import netaddr
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
from oslo_utils import timeutils
import paramiko
import retrying
import six
from manila.db import api as db_api
from manila import exception
from manila.i18n import _
CONF = cfg.CONF
LOG = log.getLogger(__name__)
synchronized = lockutils.synchronized_with_prefix('manila-')
def _get_root_helper():
return 'sudo manila-rootwrap %s' % CONF.rootwrap_config
def execute(*cmd, **kwargs):
"""Convenience wrapper around oslo's execute() function."""
if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
kwargs['root_helper'] = _get_root_helper()
return processutils.execute(*cmd, **kwargs)
def trycmd(*args, **kwargs):
"""Convenience wrapper around oslo's trycmd() function."""
if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
kwargs['root_helper'] = _get_root_helper()
return processutils.trycmd(*args, **kwargs)
class SSHPool(pools.Pool):
"""A simple eventlet pool to hold ssh connections."""
def __init__(self, ip, port, conn_timeout, login, password=None,
privatekey=None, *args, **kwargs):
self.ip = ip
self.port = port
self.login = login
self.password = password
self.conn_timeout = conn_timeout if conn_timeout else None
self.path_to_private_key = privatekey
super(SSHPool, self).__init__(*args, **kwargs)
def create(self):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
look_for_keys = True
if self.path_to_private_key:
self.path_to_private_key = os.path.expanduser(
self.path_to_private_key)
look_for_keys = False
elif self.password:
look_for_keys = False
try:
ssh.connect(self.ip,
port=self.port,
username=self.login,
password=self.password,
key_filename=self.path_to_private_key,
look_for_keys=look_for_keys,
timeout=self.conn_timeout)
            # Paramiko by default sets the socket timeout to 0.1 seconds,
            # ignoring what we set through the sshclient. This doesn't help
            # with keeping long-lived connections, so we bypass it by
            # overriding the timeout after the transport is initialized: we
            # set the socket timeout to None and enable keepalives so that
            # the server keeps the connection open. All this does is send a
            # keepalive packet every ssh_conn_timeout seconds.
if self.conn_timeout:
transport = ssh.get_transport()
transport.sock.settimeout(None)
transport.set_keepalive(self.conn_timeout)
return ssh
except Exception as e:
msg = _("Check whether private key or password are correctly "
"set. Error connecting via ssh: %s") % e
LOG.error(msg)
raise exception.SSHException(msg)
def get(self):
"""Return an item from the pool, when one is available.
This may cause the calling greenthread to block. Check if a
connection is active before returning it. For dead connections
create and return a new connection.
"""
if self.free_items:
conn = self.free_items.popleft()
if conn:
if conn.get_transport().is_active():
return conn
else:
conn.close()
return self.create()
if self.current_size < self.max_size:
created = self.create()
self.current_size += 1
return created
return self.channel.get()
    def remove(self, ssh):
        """Close an ssh client and remove it from free_items."""
        ssh.close()
        # The previous implementation set ssh to None before the membership
        # test, so the connection was never removed; deque.remove() drops it
        # from the free list by value.
        if ssh in self.free_items:
            self.free_items.remove(ssh)
        if self.current_size > 0:
            self.current_size -= 1
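# Usage sketch (illustrative; the host, credentials and max_size below are
# made-up values): pool and reuse SSH connections across calls.
#
#   pool = SSHPool('10.0.0.5', 22, conn_timeout=30, login='admin',
#                  password='secret', max_size=4)
#   ssh = pool.get()
#   try:
#       stdin, stdout, stderr = ssh.exec_command('uptime')
#   finally:
#       pool.put(ssh)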
class LazyPluggable(object):
"""A pluggable backend loaded lazily based on some value."""
def __init__(self, pivot, **backends):
self.__backends = backends
self.__pivot = pivot
self.__backend = None
def __get_backend(self):
if not self.__backend:
backend_name = CONF[self.__pivot]
if backend_name not in self.__backends:
raise exception.Error(_('Invalid backend: %s') % backend_name)
backend = self.__backends[backend_name]
if isinstance(backend, tuple):
name = backend[0]
fromlist = backend[1]
else:
name = backend
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
LOG.debug('backend %s', self.__backend)
return self.__backend
def __getattr__(self, key):
backend = self.__get_backend()
return getattr(backend, key)
def delete_if_exists(pathname):
"""Delete a file, but ignore file not found error."""
try:
os.unlink(pathname)
except OSError as e:
if e.errno == errno.ENOENT:
return
else:
raise
def get_from_path(items, path):
"""Returns a list of items matching the specified path.
Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item
in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the
intermediate results are lists it will treat each list item individually.
    A 'None' in items or any child expressions is ignored; this function
    will not raise because of a None (anywhere) in items. The returned
    list will contain no None values.
"""
if path is None:
raise exception.Error('Invalid mini_xpath')
(first_token, sep, remainder) = path.partition('/')
if first_token == '':
raise exception.Error('Invalid mini_xpath')
results = []
if items is None:
return results
if not isinstance(items, list):
# Wrap single objects in a list
items = [items]
for item in items:
if item is None:
continue
get_method = getattr(item, 'get', None)
if get_method is None:
continue
child = get_method(first_token)
if child is None:
continue
if isinstance(child, list):
# Flatten intermediate lists
for x in child:
results.append(x)
else:
results.append(child)
if not sep:
# No more tokens
return results
else:
return get_from_path(results, remainder)
def is_ipv6_configured():
"""Check if system contain IPv6 capable network interface.
:rtype: bool
:raises: IOError
"""
try:
fd = open('/proc/net/if_inet6')
except IOError as e:
if e.errno != errno.ENOENT:
raise
result = False
else:
result = bool(fd.read(32))
fd.close()
return result
def is_eventlet_bug105():
"""Check if eventlet support IPv6 addresses.
See https://bitbucket.org/eventlet/eventlet/issue/105
:rtype: bool
"""
try:
mod = sys.modules['eventlet.support.greendns']
except KeyError:
return False
try:
connect_data = mod.getaddrinfo('::1', 80)
except socket.gaierror:
return True
fail = [x for x in connect_data if x[0] != socket.AF_INET6]
return bool(fail)
def monkey_patch():
"""Patch decorator.
If the Flags.monkey_patch set as True,
this function patches a decorator
for all functions in specified modules.
You can set decorators for each modules
using CONF.monkey_patch_modules.
The format is "Module path:Decorator function".
Example: 'manila.api.ec2.cloud:' \
manila.openstack.common.notifier.api.notify_decorator'
Parameters of the decorator is as follows.
(See manila.openstack.common.notifier.api.notify_decorator)
name - name of the function
function - object of the function
"""
    # If CONF.monkey_patch is not True, this function does nothing.
if not CONF.monkey_patch:
return
# Get list of modules and decorators
for module_and_decorator in CONF.monkey_patch_modules:
module, decorator_name = module_and_decorator.split(':')
# import decorator function
decorator = importutils.import_class(decorator_name)
__import__(module)
# Retrieve module information using pyclbr
module_data = pyclbr.readmodule_ex(module)
for key in module_data.keys():
# set the decorator for the class methods
if isinstance(module_data[key], pyclbr.Class):
clz = importutils.import_class("%s.%s" % (module, key))
# NOTE(vponomaryov): we need to distinguish class methods types
# for py2 and py3, because the concept of 'unbound methods' has
# been removed from the python3.x
if six.PY3:
member_type = inspect.isfunction
else:
member_type = inspect.ismethod
for method, func in inspect.getmembers(clz, member_type):
setattr(
clz, method,
decorator("%s.%s.%s" % (module, key, method), func))
# set the decorator for the function
if isinstance(module_data[key], pyclbr.Function):
func = importutils.import_class("%s.%s" % (module, key))
setattr(sys.modules[module], key,
decorator("%s.%s" % (module, key), func))
def read_cached_file(filename, cache_info, reload_func=None):
"""Read from a file if it has been modified.
:param cache_info: dictionary to hold opaque cache.
:param reload_func: optional function to be called with data when
file is reloaded due to a modification.
:returns: data from file
"""
mtime = os.path.getmtime(filename)
if not cache_info or mtime != cache_info.get('mtime'):
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime
if reload_func:
reload_func(cache_info['data'])
return cache_info['data']
def file_open(*args, **kwargs):
"""Open file
see built-in file() documentation for more details
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return file(*args, **kwargs)
def service_is_up(service):
"""Check whether a service is up based on last heartbeat."""
last_heartbeat = service['updated_at'] or service['created_at']
# Timestamps in DB are UTC.
elapsed = timeutils.total_seconds(timeutils.utcnow() - last_heartbeat)
return abs(elapsed) <= CONF.service_down_time
def validate_service_host(context, host):
service = db_api.service_get_by_host_and_topic(context, host,
'manila-share')
if not service_is_up(service):
raise exception.ServiceIsDown(service=service['host'])
return service
def read_file_as_root(file_path):
"""Secure helper to read file as root."""
try:
out, _err = execute('cat', file_path, run_as_root=True)
return out
except exception.ProcessExecutionError:
raise exception.FileNotFound(file_path=file_path)
@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
"""Temporarily chown a path.
:params owner_uid: UID of temporary owner (defaults to current user)
"""
if owner_uid is None:
owner_uid = os.getuid()
orig_uid = os.stat(path).st_uid
if orig_uid != owner_uid:
execute('chown', owner_uid, path, run_as_root=True)
try:
yield
finally:
if orig_uid != owner_uid:
execute('chown', orig_uid, path, run_as_root=True)
@contextlib.contextmanager
def tempdir(**kwargs):
tmpdir = tempfile.mkdtemp(**kwargs)
try:
yield tmpdir
finally:
try:
shutil.rmtree(tmpdir)
except OSError as e:
LOG.debug('Could not remove tmpdir: %s', six.text_type(e))
def walk_class_hierarchy(clazz, encountered=None):
"""Walk class hierarchy, yielding most derived classes first."""
if not encountered:
encountered = []
for subclass in clazz.__subclasses__():
if subclass not in encountered:
encountered.append(subclass)
# drill down to leaves first
for subsubclass in walk_class_hierarchy(subclass, encountered):
yield subsubclass
yield subclass
def ensure_tree(path):
"""Create a directory (and any ancestor directories required)
:param path: Directory to create
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
if not os.path.isdir(path):
raise
else:
raise
def cidr_to_netmask(cidr):
"""Convert cidr to netmask."""
try:
network = netaddr.IPNetwork(cidr)
return str(network.netmask)
except netaddr.AddrFormatError:
raise exception.InvalidInput(_("Invalid cidr supplied %s") % cidr)
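# Example: cidr_to_netmask('192.168.1.0/24') returns '255.255.255.0'.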
def is_valid_ip_address(ip_address, ip_version):
if int(ip_version) == 4:
return netaddr.valid_ipv4(ip_address)
elif int(ip_version) == 6:
return netaddr.valid_ipv6(ip_address)
else:
raise exception.ManilaException(
_("Provided improper IP version '%s'.") % ip_version)
class IsAMatcher(object):
def __init__(self, expected_value=None):
self.expected_value = expected_value
def __eq__(self, actual_value):
return isinstance(actual_value, self.expected_value)
def retry(exception, interval=1, retries=10, backoff_rate=2):
"""A wrapper around retrying library.
    This decorator logs each attempt and validates the 'retries' input
    parameter. The time interval between retries is calculated as:
        interval * backoff_rate ^ previous_attempt_number
    :param exception: expected exception type. When the wrapped function
                      raises an exception of this type, the function
                      execution is retried.
    :param interval: base interval used to calculate the time between
                     retries
    :param retries: number of retries
    :param backoff_rate: multiplier used to calculate the time between
                         retries
    """
def _retry_on_exception(e):
return isinstance(e, exception)
def _backoff_sleep(previous_attempt_number, delay_since_first_attempt_ms):
exp = backoff_rate ** previous_attempt_number
wait_for = max(0, interval * exp)
LOG.debug("Sleeping for %s seconds", wait_for)
return wait_for * 1000.0
def _print_stop(previous_attempt_number, delay_since_first_attempt_ms):
delay_since_first_attempt = delay_since_first_attempt_ms / 1000.0
LOG.debug("Failed attempt %s", previous_attempt_number)
LOG.debug("Have been at this for %s seconds",
delay_since_first_attempt)
return previous_attempt_number == retries
if retries < 1:
raise ValueError(_('Retries must be greater than or '
'equal to 1 (received: %s).') % retries)
def _decorator(f):
@six.wraps(f)
def _wrapper(*args, **kwargs):
r = retrying.Retrying(retry_on_exception=_retry_on_exception,
wait_func=_backoff_sleep,
stop_func=_print_stop)
return r.call(f, *args, **kwargs)
return _wrapper
return _decorator
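# Usage sketch (illustrative): retry a flaky call up to 3 times, waiting
# interval * backoff_rate ** previous_attempt_number seconds between tries.
#
#   @retry(exception.ManilaException, interval=1, retries=3)
#   def flaky_operation():
#       ...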
|
from google.cloud import vmmigration_v1
async def sample_get_clone_job():
# Create a client
client = vmmigration_v1.VmMigrationAsyncClient()
# Initialize request argument(s)
request = vmmigration_v1.GetCloneJobRequest(
name="name_value",
)
# Make the request
response = await client.get_clone_job(request=request)
# Handle the response
print(response)
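# The coroutine above must be driven by an event loop; a minimal sketch,
# assuming application-default credentials are configured:
#
#   import asyncio
#   asyncio.run(sample_get_clone_job())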
|
"""Tests for tensorflow.ops.resource_variable_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import gc
import os
import pickle
import re
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum
from tensorflow.python.training import saver
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
@test_util.with_control_flow_v2
class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def tearDown(self):
gc.collect()
# This will only contain uncollectable garbage, i.e. reference cycles
# involving objects with __del__ defined.
self.assertEqual(0, len(gc.garbage))
@test_util.run_deprecated_v1
def testHandleDtypeShapeMatch(self):
with self.cached_session():
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
with self.assertRaises(ValueError):
resource_variable_ops.assign_variable_op(
handle, constant_op.constant(0.0, dtype=dtypes.float32)).run()
with self.assertRaises(ValueError):
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
[0],
dtype=dtypes.int32)).run()
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
0,
dtype=dtypes.int32)).run()
@test_util.run_gpu_only
def testGPUInt64(self):
with context.eager_mode(), context.device("gpu:0"):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.int64)
self.assertAllEqual(1, v.numpy())
def testEagerNameNotIdentity(self):
with context.eager_mode():
v0 = resource_variable_ops.ResourceVariable(1.0, name="a")
v1 = resource_variable_ops.ResourceVariable(2.0, name="a")
self.assertAllEqual(v0.numpy(), 1.0)
self.assertAllEqual(v1.numpy(), 2.0)
def testEagerNameNotNeeded(self):
with context.eager_mode():
v0 = resource_variable_ops.ResourceVariable(1.0)
self.assertAllEqual(v0.numpy(), 1.0)
def testReadVariableDtypeMismatchEager(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
resource_variable_ops.assign_variable_op(handle, 1)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Trying to read variable with wrong dtype. "
"Expected float got int32."):
_ = resource_variable_ops.read_variable_op(handle, dtype=dtypes.float32)
def testEagerInitializedValue(self):
with context.eager_mode():
variable = resource_variable_ops.ResourceVariable(1.0, name="eager-init")
self.assertAllEqual(variable.numpy(), 1.0)
self.assertAllEqual(variable.initialized_value().numpy(), 1.0)
def testEagerBool(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(False, name="bool_test")
self.assertAllEqual(bool(v), False)
def testEagerDeepCopy(self):
with context.eager_mode():
init_value = np.ones((4, 4, 4))
variable = resource_variable_ops.ResourceVariable(init_value,
name="init")
copied_variable = copy.deepcopy(variable)
copied_variable.assign(4 * np.ones((4, 4, 4)))
# Copying the variable should create a new underlying tensor with distinct
# values.
self.assertFalse(np.allclose(variable.numpy(), copied_variable.numpy()))
@test_util.run_deprecated_v1
def testGraphDeepCopy(self):
with self.cached_session():
init_value = np.ones((4, 4, 4))
variable = resource_variable_ops.ResourceVariable(init_value,
name="init")
with self.assertRaises(NotImplementedError):
copy.deepcopy(variable)
@test_util.run_in_graph_and_eager_modes
def testStridedSliceAssign(self):
v = resource_variable_ops.ResourceVariable([1.0, 2.0])
self.evaluate(variables.global_variables_initializer())
self.evaluate(v[0].assign(2.0))
self.assertAllEqual(self.evaluate(v), [2.0, 2.0])
@test_util.run_in_graph_and_eager_modes
def testVariableShape(self):
v = resource_variable_ops.ResourceVariable([1., 1.])
self.assertAllEqual(
tensor_util.constant_value(
resource_variable_ops.variable_shape(v.handle)),
[2])
@test_util.run_deprecated_v1
def testDifferentAssignGraph(self):
with ops.Graph().as_default():
v = resource_variable_ops.ResourceVariable(1.0)
ops.reset_default_graph()
v.assign(2.0) # Note: this fails if we run convert_to_tensor on not the
# variable graph.
@test_util.run_deprecated_v1
def testFetchHandle(self):
with self.cached_session():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
self.assertGreater(len(handle.eval()), 0)
@test_util.run_deprecated_v1
def testCachedValueReadBeforeWrite(self):
with self.cached_session() as sess:
v = resource_variable_ops.ResourceVariable(0.0, caching_device="cpu:0")
self.evaluate(v.initializer)
value, _ = sess.run([v, v.assign_add(1.0)])
self.assertAllEqual(value, 0.0)
def testAssignVariableDtypeMismatchEager(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([1]))
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Trying to assign variable with wrong "
"dtype. Expected int32 got float."):
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([1.], dtype=dtypes.float32))
def testUnprintableHandle(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
self.assertIn("<unprintable>", str(handle))
self.assertIn("<unprintable>", repr(handle))
@test_util.run_in_graph_and_eager_modes
def testDtypeSurvivesIdentity(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
id_handle = array_ops.identity(handle)
self.evaluate(resource_variable_ops.assign_variable_op(
id_handle, constant_op.constant(0, dtype=dtypes.int32)))
def testUnreadOpName(self):
v = resource_variable_ops.ResourceVariable(1.0)
self.assertNotEqual(v.name, v.assign_add(1.0).name)
@test_util.run_in_graph_and_eager_modes
def testCreateRead(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
value = self.evaluate(
resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
self.assertAllEqual(1, value)
@test_util.run_in_graph_and_eager_modes
def testManyAssigns(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
create = resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32))
with ops.control_dependencies([create]):
first_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
with ops.control_dependencies([first_read]):
write = resource_variable_ops.assign_variable_op(
handle, constant_op.constant(2, dtype=dtypes.int32))
with ops.control_dependencies([write]):
second_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
f, s = self.evaluate([first_read, second_read])
self.assertEqual(f, 1)
self.assertEqual(s, 2)
@test_util.run_in_graph_and_eager_modes
def testAssignAdd(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
self.evaluate(resource_variable_ops.assign_add_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
read = self.evaluate(
resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
self.assertEqual(read, 2)
@test_util.run_in_graph_and_eager_modes
def testScatterAdd(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_add(
handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes
def testGradientGatherNd(self):
v = resource_variable_ops.ResourceVariable(
np.random.uniform(size=[2, 2]), dtype=dtypes.float32)
with backprop.GradientTape() as tape:
l = array_ops.gather_nd(v, [[1, 1]])
l = math_ops.reduce_sum(l)
grads = tape.gradient(l, v)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate(grads), [[0., 0.], [0., 1.]])
@test_util.run_in_graph_and_eager_modes
def testGradientGatherNdIndexedSlices(self):
v = resource_variable_ops.ResourceVariable(
np.random.uniform(size=[2, 2]), dtype=dtypes.float32)
with backprop.GradientTape() as tape:
l = array_ops.gather_nd(v, [[1], [1]])
l = math_ops.reduce_sum(l)
grads = tape.gradient(l, v)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate(grads.values), [[1., 1.], [1., 1.]])
@test_util.run_in_graph_and_eager_modes
def testScatterSub(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_sub(
handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[-1]])
@test_util.run_in_graph_and_eager_modes
def testScatterMul(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_mul(
handle, [0], constant_op.constant([[5]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[5]])
def testEagerPickle(self):
with context.eager_mode():
tmp_dir = self.get_temp_dir()
fname = os.path.join(tmp_dir, "var.pickle")
with open(fname, "wb") as f:
v = resource_variable_ops.ResourceVariable(
10.0,
dtype=dtypes.float16,
name="v")
pickle.dump(v, f)
with open(fname, "rb") as f:
new_v = pickle.load(f)
self.assertEqual(new_v.name, v.name)
self.assertEqual(new_v.shape, v.shape)
self.assertEqual(new_v.dtype, v.dtype)
self.assertEqual(new_v.trainable, v.trainable)
self.assertAllEqual(new_v.numpy(), v.numpy())
@test_util.run_in_graph_and_eager_modes
def testScatterDiv(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_div(
handle, [0], constant_op.constant([[3]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[2]])
def testUseResource(self):
v = variables.VariableV1(1.0, use_resource=True)
    self.assertIsInstance(v, resource_variable_ops.ResourceVariable)
def testEagerNoUseResource(self):
with context.eager_mode():
v = variables.Variable(1.0)
      self.assertIsInstance(v, resource_variable_ops.ResourceVariable)
@test_util.run_in_graph_and_eager_modes
def testScatterMin(self):
with ops.device("cpu:0"):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
[[6]],
dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_min(handle, [0],
constant_op.constant(
[[3]],
dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
def testMetagraph(self):
with ops.Graph().as_default():
with variable_scope.variable_scope("foo", use_resource=True):
a = variable_scope.get_variable("a", initializer=10.0)
momentum.MomentumOptimizer(
learning_rate=0.001, momentum=0.1).minimize(
a,
colocate_gradients_with_ops=True,
global_step=training_util.get_or_create_global_step())
graph = ops.get_default_graph()
meta_graph_def = saver.export_meta_graph(graph=graph)
with ops.Graph().as_default():
saver.import_meta_graph(meta_graph_def, import_scope="")
meta_graph_two = saver.export_meta_graph(graph=graph)
self.assertEqual(meta_graph_def, meta_graph_two)
@test_util.run_in_graph_and_eager_modes
def testScatterMax(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_max(
handle, [0], constant_op.constant([[3]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[6]])
@test_util.run_in_graph_and_eager_modes
def testScatterAddScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_add(
handle, [0], constant_op.constant(2, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes
def testScatterSubScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_sub(
handle, [0], constant_op.constant(2, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[-1]])
@test_util.run_in_graph_and_eager_modes
def testScatterMulScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_mul(
handle, [0], constant_op.constant(5, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[5]])
@test_util.run_in_graph_and_eager_modes
def testScatterDivScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_div(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[2]])
@test_util.run_in_graph_and_eager_modes
def testScatterMinScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_min(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes
def testScatterMaxScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_max(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[6]])
@test_util.run_in_graph_and_eager_modes
def testScatterAddVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 1.5], name="add")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_add(ops.IndexedSlices(indices=[1], values=[2.5])))
self.assertAllEqual([0.0, 4.0], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterSubVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 2.5], name="sub")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_sub(ops.IndexedSlices(indices=[1], values=[1.5])))
self.assertAllEqual([0.0, 1.0], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterMaxVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 4.0], name="max1")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_max(ops.IndexedSlices(indices=[1], values=[5.0])))
self.assertAllEqual([0.0, 5.0], self.evaluate(v))
v = resource_variable_ops.ResourceVariable([0.0, 3.5], name="max2")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_max(ops.IndexedSlices(indices=[1], values=[2.0])))
self.assertAllEqual([0.0, 3.5], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterMinVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 4.0], name="min1")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_min(ops.IndexedSlices(indices=[1], values=[5.0])))
self.assertAllEqual([0.0, 4.0], self.evaluate(v))
v = resource_variable_ops.ResourceVariable([0.0, 3.5], name="min2")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_min(ops.IndexedSlices(indices=[1], values=[2.0])))
self.assertAllEqual([0.0, 2.0], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterMulVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 4.0], name="mul")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_mul(ops.IndexedSlices(indices=[1], values=[3.0])))
self.assertAllEqual([0.0, 12.0], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterDivVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 6.0], name="div")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_div(ops.IndexedSlices(indices=[1], values=[2.0])))
self.assertAllEqual([0.0, 3.0], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterUpdateVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 6.0], name="update")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_update(ops.IndexedSlices(indices=[1], values=[3.0])))
self.assertAllEqual([0.0, 3.0], self.evaluate(v))
@test_util.run_deprecated_v1
def testScatterUpdateString(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.string, shape=[1, 1])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant([["a"]], dtype=dtypes.string)))
self.evaluate(resource_variable_ops.resource_scatter_update(
handle, [0], constant_op.constant([["b"]], dtype=dtypes.string)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.string)
self.assertEqual(compat.as_bytes(self.evaluate(read)[0][0]),
compat.as_bytes("b"))
@test_util.run_deprecated_v1
def testScatterUpdateStringScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.string, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
[["a"]],
dtype=dtypes.string)))
self.evaluate(
resource_variable_ops.resource_scatter_update(handle, [0],
constant_op.constant(
"b",
dtype=dtypes.string)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.string)
self.assertEqual(
compat.as_bytes(self.evaluate(read)[0][0]), compat.as_bytes("b"))
# TODO(alive): get this to work in Eager mode.
def testGPU(self):
with test_util.use_gpu():
abc = variable_scope.get_variable(
"abc",
shape=[1],
initializer=init_ops.ones_initializer(),
use_resource=True)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(
self.evaluate(
resource_variable_ops.var_is_initialized_op(abc.handle)),
True)
def testScatterBool(self):
with context.eager_mode():
ref = resource_variable_ops.ResourceVariable(
[False, True, False], trainable=False)
indices = math_ops.range(3)
updates = constant_op.constant([True, True, True])
state_ops.scatter_update(ref, indices, updates)
self.assertAllEqual(ref.read_value(), [True, True, True])
@test_util.run_in_graph_and_eager_modes
def testConstraintArg(self):
constraint = lambda x: x
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, constraint=constraint, name="var0")
self.assertEqual(v.constraint, constraint)
constraint = 0
with self.assertRaises(ValueError):
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, constraint=constraint, name="var1")
# TODO(alive): how should this work in Eager mode?
@test_util.run_deprecated_v1
def testInitFn(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, dtype=dtypes.float32)
self.assertEqual(v.handle.op.colocation_groups(),
v.initializer.inputs[1].op.colocation_groups())
def testHandleNumpy(self):
with context.eager_mode():
with self.assertRaises(ValueError):
resource_variable_ops.ResourceVariable(
1.0, name="handle-numpy").handle.numpy()
def testCountUpTo(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(0, name="upto")
self.assertAllEqual(v.count_up_to(1), 0)
with self.assertRaises(errors.OutOfRangeError):
v.count_up_to(1)
def testCountUpToFunction(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(0, name="upto")
self.assertAllEqual(state_ops.count_up_to(v, 1), 0)
with self.assertRaises(errors.OutOfRangeError):
state_ops.count_up_to(v, 1)
@test_util.run_in_graph_and_eager_modes
def testInitFnDtype(self):
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, dtype=dtypes.float32, name="var0")
self.assertEqual(dtypes.float32, v.value().dtype)
@test_util.run_in_graph_and_eager_modes
def testInitFnNoDtype(self):
v = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
name="var2")
self.assertEqual(dtypes.int32, v.value().dtype)
@test_util.run_in_graph_and_eager_modes
def testInitializeAllVariables(self):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.float32,
name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
def testOperatorOverload(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(2.0, self.evaluate(v + v))
@test_util.run_in_graph_and_eager_modes
def testAssignMethod(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign(2.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign(3.0, read_value=True)
self.assertEqual(3.0, self.evaluate(assign_with_read))
assign_without_read = v.assign(4.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(4.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
def testLoad(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
v.load(2.0)
self.assertEqual(2.0, self.evaluate(v.value()))
def testShapePassedToGradient(self):
with ops.Graph().as_default():
@custom_gradient.custom_gradient
def differentiable_scatter_update(handle, indices, values):
with ops.control_dependencies([
resource_variable_ops.resource_scatter_update(
handle, indices, values)]):
new_handle = array_ops.identity(handle)
def grad(dresult):
self.assertIsNotNone(
tensor_util.constant_value(dresult.dense_shape))
return [dresult, None, None]
return new_handle, grad
var = variable_scope.get_variable(
"foo", shape=[20], initializer=init_ops.zeros_initializer,
dtype=dtypes.float64, use_resource=True)
indices = math_ops.range(10)
updates = math_ops.range(9, -1, -1, dtype=dtypes.float64)
new_handle = differentiable_scatter_update(var.handle, indices, updates)
gathered = resource_variable_ops.resource_gather(
new_handle, indices, dtype=var.dtype)
gradients_impl.gradients([gathered], [updates])
def testToFromProtoCachedValue(self):
with ops.Graph().as_default():
v_def = resource_variable_ops.ResourceVariable(
initial_value=constant_op.constant(3.0)).to_proto()
v_prime = resource_variable_ops.ResourceVariable(variable_def=v_def)
      self.assertIsNone(getattr(v_prime, "_cached_value", None))
other_v_def = resource_variable_ops.ResourceVariable(
caching_device="cpu:0",
initial_value=constant_op.constant(3.0)).to_proto()
other_v_prime = resource_variable_ops.ResourceVariable(
variable_def=other_v_def)
      self.assertIsNotNone(other_v_prime._cached_value)
def testVariableDefInitializedInstances(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v_def = resource_variable_ops.ResourceVariable(
initial_value=constant_op.constant(3.0)).to_proto()
with ops.Graph().as_default(), self.cached_session() as sess:
# v describes a VariableDef-based variable without an initial value.
v = resource_variable_ops.ResourceVariable(variable_def=v_def)
self.assertEqual(3.0, self.evaluate(v.initialized_value()))
# initialized_value should not rerun the initializer_op if the variable
# has already been initialized elsewhere.
self.evaluate(v.assign(1.0))
self.assertEqual(1.0, v.initialized_value().eval())
v_def.ClearField("initial_value_name")
with ops.Graph().as_default(), self.cached_session() as sess:
# Restoring a legacy VariableDef proto that does not have
# initial_value_name set should still work.
v = resource_variable_ops.ResourceVariable(variable_def=v_def)
# We should also be able to re-export the variable to a new meta graph.
self.assertProtoEquals(v_def, v.to_proto())
# But attempts to use initialized_value will result in errors.
with self.assertRaises(ValueError):
self.evaluate(v.initialized_value())
def testTrainableInProto(self):
with ops.Graph().as_default():
non_trainable_variable = resource_variable_ops.ResourceVariable(
trainable=False,
initial_value=constant_op.constant(10.0))
      self.assertFalse(
          resource_variable_ops.ResourceVariable(
              variable_def=non_trainable_variable.to_proto())
          .trainable)
trainable_variable = resource_variable_ops.ResourceVariable(
trainable=True,
initial_value=constant_op.constant(10.0))
      self.assertTrue(
          resource_variable_ops.ResourceVariable(
              variable_def=trainable_variable.to_proto())
          .trainable)
@test_util.run_in_graph_and_eager_modes
def testSparseRead(self):
init_value = np.reshape(np.arange(np.power(4, 3)), (4, 4, 4))
v = resource_variable_ops.ResourceVariable(
constant_op.constant(init_value, dtype=dtypes.int32), name="var3")
self.evaluate(variables.global_variables_initializer())
value = self.evaluate(v.sparse_read([0, 3, 1, 2]))
self.assertAllEqual(init_value[[0, 3, 1, 2], ...], value)
@test_util.run_in_graph_and_eager_modes
def testGatherNd(self):
init_value = np.reshape(np.arange(np.power(4, 3)), (4, 4, 4))
v = resource_variable_ops.ResourceVariable(
constant_op.constant(init_value, dtype=dtypes.int32), name="var3")
self.evaluate(variables.global_variables_initializer())
value_op = v.gather_nd([[0, 0], [1, 2], [3, 3]])
self.assertAllEqual([3, 4], value_op.shape)
value = self.evaluate(value_op)
self.assertAllEqual([[0, 1, 2, 3], [24, 25, 26, 27], [60, 61, 62, 63]],
value)
value_op = v.gather_nd([[0, 0, 0], [1, 2, 3], [3, 3, 3]])
self.assertAllEqual([3], value_op.shape)
value = self.evaluate(value_op)
self.assertAllEqual([0, 27, 63], value)
@test_util.run_deprecated_v1
def testToFromProto(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
      self.assertEqual(2, math_ops.add(w, 1).eval())
      self.assertEqual(v._handle, w._handle)
      self.assertEqual(v._graph_element, w._graph_element)
@test_util.run_in_graph_and_eager_modes
def testAssignAddMethod(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign_add(1.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign_add(1.0, read_value=True)
self.assertEqual(3.0, self.evaluate(assign_with_read))
assign_without_read = v.assign_add(1.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(4.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
def testAssignSubMethod(self):
v = resource_variable_ops.ResourceVariable(3.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign_sub(1.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign_sub(1.0, read_value=True)
self.assertEqual(1.0, self.evaluate(assign_with_read))
assign_without_read = v.assign_sub(1.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(0.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testDestroyResource(self):
v = resource_variable_ops.ResourceVariable(3.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(3.0, self.evaluate(v.value()))
self.evaluate(resource_variable_ops.destroy_resource_op(v.handle))
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(v.value())
# Handle to a resource not actually created.
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
# Should raise no exception
self.evaluate(resource_variable_ops.destroy_resource_op(
handle, ignore_lookup_error=True))
@test_util.run_deprecated_v1
def testAssignDifferentShapes(self):
with self.cached_session() as sess, variable_scope.variable_scope(
"foo", use_resource=True):
var = variable_scope.get_variable("x", shape=[1, 1], dtype=dtypes.float32)
placeholder = array_ops.placeholder(dtypes.float32)
assign = var.assign(placeholder)
sess.run(
[assign],
feed_dict={placeholder: np.zeros(shape=[2, 2], dtype=np.float32)})
def testAssignDifferentShapesEagerNotAllowed(self):
with context.eager_mode():
with variable_scope.variable_scope("foo"):
var = variable_scope.get_variable("x", shape=[1, 1],
dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError,
"Shapes.*and.*are incompatible"):
assign = var.assign(np.zeros(shape=[2, 2]))
self.evaluate(assign)
@test_util.disable_xla("XLA doesn't allow changing shape at assignment, as "
"dictated by tf2xla/xla_resource.cc:SetTypeAndShape")
@test_util.run_in_graph_and_eager_modes
def testAssignDifferentShapesAllowed(self):
var = resource_variable_ops.ResourceVariable(
initial_value=np.zeros(shape=[1, 1]),
shape=tensor_shape.TensorShape(None))
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(np.zeros(shape=[1, 1]), var.read_value())
self.evaluate(var.assign(np.zeros(shape=[2, 2])))
self.assertAllEqual(np.zeros(shape=[2, 2]), var.read_value())
@test_util.run_deprecated_v1
def testDtypeAfterFromProto(self):
v = resource_variable_ops.ResourceVariable(2.0)
w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
self.assertIsInstance(w.dtype, dtypes.DType)
self.assertEqual(v.dtype, w.dtype)
# TODO(alive): get caching to work in eager mode.
@test_util.run_deprecated_v1
def testCachingDevice(self):
with ops.device("/job:server/task:1"):
v = resource_variable_ops.ResourceVariable(
2.0, caching_device="/job:localhost")
self.assertEqual("/job:localhost", v.value().device)
with self.assertRaises(ValueError):
_ = v.value().op.get_attr("_class")
with ops.colocate_with(v.op):
w = resource_variable_ops.ResourceVariable(
2.0, caching_device="/job:localhost")
self.assertEqual("/job:localhost", w.value().device)
with self.assertRaises(ValueError):
_ = w.value().op.get_attr("_class")
@test_util.run_deprecated_v1
def testSharedName(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(300.0, name="var4")
self.evaluate(variables.global_variables_initializer())
w = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="var4",
# Needed in Eager since we get a unique container name by default.
container=ops.get_default_graph()._container)
w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
self.assertEqual(300.0, self.evaluate(w_read))
x = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="var5",
container=ops.get_default_graph()._container)
with self.assertRaisesOpError("Resource .*/var5/.* does not exist"):
resource_variable_ops.read_variable_op(x, v.dtype.base_dtype).eval()
@test_util.run_deprecated_v1
def testSharedNameWithNamescope(self):
with self.cached_session():
with ops.name_scope("foo"):
v = resource_variable_ops.ResourceVariable(300.0, name="var6")
self.assertEqual("foo/var6", v._shared_name) # pylint: disable=protected-access
self.assertEqual("foo/var6:0", v.name)
self.evaluate(variables.global_variables_initializer())
w = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="foo/var6",
# Needed in Eager since we get a unique container name by default.
container=ops.get_default_graph()._container)
w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
self.assertEqual(300.0, self.evaluate(w_read))
@test_util.run_in_graph_and_eager_modes
def testShape(self):
v = resource_variable_ops.ResourceVariable(
name="var4", initial_value=array_ops.ones(shape=[10, 20, 35]))
self.assertEqual("(10, 20, 35)", str(v.shape))
self.assertEqual("(10, 20, 35)", str(v.get_shape()))
self.assertEqual("(10, 20, 35)", str(v.value().shape))
self.assertEqual("(3, 20, 35)", str(v.sparse_read([0, 1, 2]).shape))
if not context.executing_eagerly():
self.assertEqual(
"<unknown>",
str(v.sparse_read(array_ops.placeholder(dtypes.int32)).shape))
@test_util.run_deprecated_v1
def testSetInitialValue(self):
with self.cached_session():
# Initialize variable with a value different from the initial value passed
# in the constructor.
v = resource_variable_ops.ResourceVariable(2.0)
v.initializer.run(feed_dict={v.initial_value: 3.0})
self.assertEqual(3.0, v.value().eval())
@test_util.run_v1_only("b/120545219")
def testControlFlowInitialization(self):
"""Expects an error if an initializer is in a control-flow scope."""
def cond(i, _):
return i < 10
def body(i, _):
zero = array_ops.zeros([], dtype=dtypes.int32)
v = resource_variable_ops.ResourceVariable(initial_value=zero)
return (i + 1, v.read_value())
with self.assertRaisesRegexp(ValueError, "initializer"):
control_flow_ops.while_loop(cond, body, [0, 0])
def testVariableEager(self):
with context.eager_mode():
init = array_ops.ones(shape=[10, 20, 35], dtype=dtypes.int32)
constraint = lambda x: x
with ops.name_scope("foo"):
v = resource_variable_ops.ResourceVariable(
name="var7",
initial_value=init,
caching_device="cpu:0",
constraint=constraint)
# Test properties
self.assertEqual(dtypes.int32, v.dtype)
self.assertEqual("foo/var7:0", v.name)
self.assertAllEqual([10, 20, 35], v.shape.as_list())
      self.assertIsInstance(v.handle, ops.EagerTensor)
self.assertEqual(constraint, v.constraint)
self.assertAllEqual(init.numpy(), v.read_value().numpy())
self.assertAllEqual(init.numpy(), v.value().numpy())
# Callable init.
callable_init = lambda: init * 2
v2 = resource_variable_ops.ResourceVariable(
initial_value=callable_init, name="var7")
self.assertEqual("var7:0", v2.name)
self.assertAllEqual(2 * init.numpy(), v2.read_value().numpy())
# Test assign_add.
new_v2_val = v2.assign_add(v.read_value())
self.assertAllEqual(v.read_value().numpy() * 3, new_v2_val.numpy())
# Test assign_sub.
new_v2_val = v2.assign_sub(v.read_value())
self.assertAllEqual(v.read_value().numpy() * 2, new_v2_val.numpy())
# Test assign.
v2.assign(v.read_value())
self.assertAllEqual(v.read_value().numpy(), v2.read_value().numpy())
# Test load
v2.load(2 * v.read_value())
self.assertAllEqual(2 * v.read_value().numpy(), v2.read_value().numpy())
# Test convert_to_tensor
t = ops.convert_to_tensor(v)
self.assertAllEqual(t.numpy(), v.read_value().numpy())
# Test operations
self.assertAllEqual((v * 2).numpy(), (v + v).numpy())
def testContainerEager(self):
with context.eager_mode():
v1 = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
name="same")
with ops.container("different"):
v2 = resource_variable_ops.ResourceVariable(initial_value=lambda: 0,
name="same")
v2.assign(2)
self.assertEqual(1, v1.read_value().numpy())
self.assertEqual(2, v2.read_value().numpy())
def testDestruction(self):
with context.eager_mode():
var = resource_variable_ops.ResourceVariable(initial_value=1.0,
name="var8")
var_handle = var._handle
del var
with self.assertRaisesRegexp(errors.NotFoundError,
r"Resource .* does not exist."):
resource_variable_ops.destroy_resource_op(var_handle,
ignore_lookup_error=False)
def testScatterUpdate(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="update")
state_ops.scatter_update(v, [1], [3.0])
self.assertAllEqual([1.0, 3.0], v.numpy())
def testScatterAddStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="add")
state_ops.scatter_add(v, [1], [3])
self.assertAllEqual([1.0, 5.0], v.numpy())
def testScatterSubStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="sub")
state_ops.scatter_sub(v, [1], [3])
self.assertAllEqual([1.0, -1.0], v.numpy())
def testScatterUpdateVariant(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([
list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=[])
])
v.scatter_update(
ops.IndexedSlices(
list_ops.tensor_list_from_tensor([1., 2.], element_shape=[]), 0))
self.assertAllEqual(
list_ops.tensor_list_get_item(v[0], 0, element_dtype=dtypes.float32),
1.)
def testGroupDoesntForceRead(self):
with ops.Graph().as_default():
v = resource_variable_ops.ResourceVariable(1.0)
assign = v.assign_add(1.0)
g = control_flow_ops.group([assign])
self.assertEqual(g.control_inputs[0].type, "AssignAddVariableOp")
def testScatterNdAddStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(
[1, 2, 3, 4, 5, 6, 7, 8], dtype=dtypes.float32, name="add")
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
expected = np.array([1, 13, 3, 14, 14, 6, 7, 20])
state_ops.scatter_nd_add(v, indices, updates)
self.assertAllClose(expected, v.numpy())
@test_util.run_in_graph_and_eager_modes
def testUnreadVariableInsideFunction(self):
v = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def assign():
v.assign(1.0)
graph = assign.get_concrete_function().graph
self.assertTrue(all(x.type != "ReadVariableOp"
for x in graph.get_operations()))
def testScatterNdSubStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(
[1, 2, 3, 4, 5, 6, 7, 8], dtype=dtypes.float32, name="sub")
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
expected = np.array([1, -9, 3, -6, -4, 6, 7, -4])
state_ops.scatter_nd_sub(v, indices, updates)
self.assertAllClose(expected, v.numpy())
def testScatterUpdateCast(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="update")
state_ops.scatter_update(v, [1], [3])
self.assertAllEqual([1.0, 3.0], v.numpy())
@test_util.run_in_graph_and_eager_modes
def testScatterUpdateInvalidArgs(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3], name="update")
# The exact error and message differ between graph construction (where the
# error is realized during shape inference at graph construction time) and
# eager execution (where the error is realized during kernel execution).
with self.assertRaisesRegexp(Exception, r"shape.*2.*3"):
state_ops.scatter_update(v, [0, 1], [0, 1, 2])
@test_util.run_in_graph_and_eager_modes
def testAssignIncompatibleShape(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3])
self.evaluate(v.initializer)
pattern = re.compile("shapes must be equal", re.IGNORECASE)
with self.assertRaisesRegexp(Exception, pattern):
self.assertAllEqual(self.evaluate(v.assign_add(1)), [1, 2, 3, 4])
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testCopyToGraphUninitialized(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3])
copy_to_graph = ops.Graph()
with copy_to_graph.as_default(): # Intentionally testing v1 behavior
copied = resource_variable_ops.copy_to_graph_uninitialized(v)
self.assertEqual(v.name, copied.name)
self.assertIsNone(copied.initializer)
def create_variant_shape_and_type_data(self):
variant_shape_and_type_data = (
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData())
variant_shape_and_type_data.is_set = True
stored_shape = tensor_shape.TensorShape([None, 4]).as_proto()
stored_dtype = dtypes.float32.as_datatype_enum
# NOTE(ebrevdo): shape_and_type lacks append() in some versions of protobuf.
variant_shape_and_type_data.shape_and_type.extend([
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleShapeAndType(
shape=stored_shape, dtype=stored_dtype)])
return variant_shape_and_type_data
@def_function.function
def create_constant_variant(self, value):
value = constant_op.constant(
tensor_pb2.TensorProto(
dtype=dtypes.variant.as_datatype_enum,
tensor_shape=tensor_shape.TensorShape([]).as_proto(),
variant_val=[
tensor_pb2.VariantTensorDataProto(
# Match registration in variant_op_registry.cc
type_name=b"int",
metadata=np.array(value, dtype=np.int32).tobytes())
]))
return value
# TODO(ebrevdo): Add run_in_graph_and_eager_modes once we can create
# EagerTensor constants with TensorProto inputs.
@test_util.run_in_graph_and_eager_modes()
def testVariantInitializer(self):
variant_shape_and_type_data = self.create_variant_shape_and_type_data()
value = self.create_constant_variant(3)
initializer = array_ops.fill([3], value)
resource_variable_ops._set_handle_shapes_and_types( # pylint: disable=protected-access
initializer, variant_shape_and_type_data,
graph_mode=not context.executing_eagerly())
v = resource_variable_ops.ResourceVariable(initializer)
read = array_ops.identity(v)
read_variant_shape_and_type = (
resource_variable_ops.get_eager_safe_handle_data(read))
self.assertEqual(
read_variant_shape_and_type, variant_shape_and_type_data)
gather = v.sparse_read([0])
gather_variant_shape_and_type = (
resource_variable_ops.get_eager_safe_handle_data(gather))
self.assertEqual(
gather_variant_shape_and_type, variant_shape_and_type_data)
# Make sure initializer runs.
if not context.executing_eagerly():
self.evaluate(v.initializer)
self.evaluate(read.op)
self.evaluate(gather.op)
@parameterized.parameters([
# batch_dims=0 (equivalent to tf.gather)
dict( # 2D indices
batch_dims=0,
params=[6, 7, 8, 9],
indices=[[2, 1], [0, 3]],
expected=[[8, 7], [6, 9]]),
dict( # 3D indices
batch_dims=0,
params=[6, 7, 8, 9],
indices=[[[3, 1], [2, 0]], [[0, 3], [2, 2]]],
expected=[[[9, 7], [8, 6]], [[6, 9], [8, 8]]]),
dict( # 4D indices
batch_dims=0,
params=[8, 9],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[8, 9], [9, 8]], [[8, 8], [9, 9]]],
[[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
# batch_dims=indices.shape.ndims - 1 (equivalent to
# tf.compat.v1.batch_gather)
dict( # 2D indices (1 batch dim)
batch_dims=1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[2, 1], [0, 3]],
expected=[[12, 11], [20, 23]]),
dict( # 3D indices (2 batch dims)
batch_dims=2,
params=[[[100, 101], [110, 111]], [[200, 201], [210, 211]]],
indices=[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
expected=[[[100, 101], [111, 110]], [[200, 200], [211, 211]]]),
# 0 < batch_dims < indices.shape.ndims - 1
dict( # 3D indices (1 batch dim)
batch_dims=1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[[3, 1], [2, 0]], [[0, 3], [2, 2]]],
expected=[[[13, 11], [12, 10]], [[20, 23], [22, 22]]]),
dict( # 4D indices (1 batch dim)
batch_dims=1,
params=[[6, 7], [8, 9]],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[6, 7], [7, 6]], [[6, 6], [7, 7]]],
[[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
dict( # 4D indices (2 batch dims)
batch_dims=2,
params=[[[2, 3], [4, 5]], [[6, 7], [8, 9]]],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[2, 3], [3, 2]], [[4, 4], [5, 5]]],
[[[7, 7], [6, 6]], [[8, 9], [9, 8]]]]),
])
@test_util.run_in_graph_and_eager_modes
def testGatherWithBatchDims(self, params, indices, batch_dims, expected):
var = resource_variable_ops.ResourceVariable(params, name="var0")
with ops.control_dependencies([var.initializer]):
result = resource_variable_ops.resource_gather(
var.handle, indices, dtype=var.dtype, batch_dims=batch_dims)
self.assertAllEqual(expected, result)
@parameterized.parameters([
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=0,
output_shape=[2, 3, 8, 9, 10, 3, 4, 5, 6, 7]
# = indices.shape + params.shape[1:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=1,
output_shape=[2, 3, 8, 9, 10, 4, 5, 6, 7]
# = params.shape[:1] + indices.shape[1:] + params.shape[2:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
output_shape=[2, 3, 8, 9, 10, 5, 6, 7]
# = params.shape[:2] + indices.shape[2:] + params.shape[3:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 4, 9, 10],
batch_dims=3,
output_shape=[2, 3, 4, 9, 10, 6, 7]
# = params.shape[:3] + indices.shape[3:] + params.shape[4:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 4, 5, 10],
batch_dims=4,
output_shape=[2, 3, 4, 5, 10, 7]
# = params.shape[:4] + indices.shape[4:] + params.shape[5:]
),
])
@test_util.run_in_graph_and_eager_modes
def testGatherWithBatchDimsMatchesTensor(self, params_shape, indices_shape,
batch_dims, output_shape):
"""Checks that gather with batch_dims returns the correct shape."""
# Generate a `params` tensor with the indicated shape.
params_size = np.prod(params_shape)
params = np.reshape(np.arange(params_size, dtype=np.int32), params_shape)
# Generate an `indices` tensor with the indicated shape, where each index
# is within the appropriate range.
indices_size = np.prod(indices_shape)
indices = np.reshape(np.arange(indices_size, dtype=np.int32), indices_shape)
indices = indices % params_shape[batch_dims]
var = resource_variable_ops.ResourceVariable(params, name="var0")
with ops.control_dependencies([var.initializer]):
expected = array_ops.gather(
var.read_value(), indices, batch_dims=batch_dims)
result = resource_variable_ops.resource_gather(
var.handle, indices, dtype=var.dtype, batch_dims=batch_dims)
self.assertAllEqual(output_shape, result.shape.as_list())
self.assertAllEqual(expected, result)
if __name__ == "__main__":
test.main()
|
"""Turns arbitrary objects into tf.CompositeTensor."""
import contextlib
import functools
import threading
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util import tf_inspect
__all__ = [
'auto_composite_tensor',
'AutoCompositeTensor',
'is_deferred_assertion_context',
]
_DEFERRED_ASSERTION_CONTEXT = threading.local()
_DEFERRED_ASSERTION_CONTEXT.is_deferred = False
def is_deferred_assertion_context():
return getattr(_DEFERRED_ASSERTION_CONTEXT, 'is_deferred', False)
@contextlib.contextmanager
def _deferred_assertion_context(is_deferred=True):
was_deferred = getattr(_DEFERRED_ASSERTION_CONTEXT, 'is_deferred', False)
_DEFERRED_ASSERTION_CONTEXT.is_deferred = is_deferred
try:
yield
finally:
_DEFERRED_ASSERTION_CONTEXT.is_deferred = was_deferred
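# A minimal usage sketch of `_deferred_assertion_context` (illustrative):
#
#   with _deferred_assertion_context():
#     assert is_deferred_assertion_context()    # True inside the block.
#   assert not is_deferred_assertion_context()  # Restored on exit.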
_registry = {} # Mapping from (python pkg, class name) -> class.
_SENTINEL = object()
_AUTO_COMPOSITE_TENSOR_VERSION = 3
_sig_cache = {}
def _cached_signature(f):
if f not in _sig_cache:
_sig_cache[f] = tf_inspect.signature(f)
return _sig_cache[f]
def _extract_init_kwargs(obj, omit_kwargs=(), limit_to=None,
prefer_static_value=()):
"""Extract constructor kwargs to reconstruct `obj`."""
  # If `obj` inherits its constructor from `AutoCompositeTensor` (which
  # inherits its constructor from `object`), return an empty dictionary to
  # avoid triggering the error below due to *args and **kwargs in the
  # constructor.
if type(obj).__init__ is AutoCompositeTensor.__init__:
return {}
sig = _cached_signature(type(obj).__init__)
if any(v.kind in (tf_inspect.Parameter.VAR_KEYWORD,
tf_inspect.Parameter.VAR_POSITIONAL)
for v in sig.parameters.values()):
raise ValueError(
'*args and **kwargs are not supported. Found `{}`'.format(sig))
keys = [p for p in sig.parameters if p != 'self' and p not in omit_kwargs]
if limit_to is not None:
keys = [k for k in keys if k in limit_to]
kwargs = {}
not_found = object()
for k in keys:
src1 = getattr(obj, k, not_found)
if src1 is not not_found:
kwargs[k] = src1
else:
src2 = getattr(obj, '_' + k, not_found)
if src2 is not not_found:
kwargs[k] = src2
else:
src3 = getattr(obj, 'parameters', {}).get(k, not_found)
if src3 is not not_found:
kwargs[k] = src3
else:
raise ValueError(
f'Could not determine an appropriate value for field `{k}` in'
f' object `{obj}`. Looked for \n'
f' 1. an attr called `{k}`,\n'
f' 2. an attr called `_{k}`,\n'
f' 3. an entry in `obj.parameters` with key "{k}".')
if k in prefer_static_value and kwargs[k] is not None:
if tf.is_tensor(kwargs[k]):
static_val = tf.get_static_value(kwargs[k])
if static_val is not None:
kwargs[k] = static_val
if isinstance(kwargs[k], (np.ndarray, np.generic)):
# Generally, these are shapes or int, but may be other parameters such as
# `power` for `tfb.PowerTransform`.
kwargs[k] = kwargs[k].tolist()
return kwargs
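# Illustrative sketch of `_extract_init_kwargs`'s lookup order (`Box` is a
# hypothetical class, not part of this module):
#
#   class Box:
#     def __init__(self, size):
#       self._size = size
#
#   _extract_init_kwargs(Box(3))  # -> {'size': 3}; no `size` attr exists, so
#                                 # the `_size` fallback supplies the value.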
def _extract_type_spec_recursively(value):
"""Return (collection of) TypeSpec(s) for `value` if it includes `Tensor`s.
If `value` is a `Tensor` or `CompositeTensor`, return its `TypeSpec`. If
`value` is a collection containing `Tensor` values, recursively supplant them
  with their respective `TypeSpec`s in a collection of parallel structure.
  If `value` is none of the above, return it unchanged.
Args:
value: a Python `object` to (possibly) turn into a (collection of)
`tf.TypeSpec`(s).
Returns:
spec: the `TypeSpec` or collection of `TypeSpec`s corresponding to `value`
or `value`, if no `Tensor`s are found.
"""
if isinstance(value, composite_tensor.CompositeTensor):
return value._type_spec # pylint: disable=protected-access
if isinstance(value, tf.Variable):
return resource_variable_ops.VariableSpec(
value.shape, dtype=value.dtype, trainable=value.trainable)
if tf.is_tensor(value):
return tf.TensorSpec(value.shape, value.dtype)
if tf.nest.is_nested(value):
specs = tf.nest.map_structure(_extract_type_spec_recursively, value)
was_tensor = tf.nest.flatten(
tf.nest.map_structure(lambda a, b: a is not b, value, specs))
has_tensors = any(was_tensor)
has_only_tensors = all(was_tensor)
if has_tensors:
if has_tensors != has_only_tensors:
raise NotImplementedError(
'Found `{}` with both Tensor and non-Tensor parts: {}'.format(
type(value), value))
return specs
return value
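# Illustrative sketch of `_extract_type_spec_recursively`: a nested structure
# whose leaves are all Tensors maps to a parallel structure of TypeSpecs, e.g.
#
#   _extract_type_spec_recursively({'a': tf.constant([1., 2.])})
#   # -> {'a': tf.TensorSpec(shape=(2,), dtype=tf.float32)}
#
# while a Tensor-free value (e.g. the string 'abc') is returned unchanged.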
class _AutoCompositeTensorTypeSpec(type_spec.BatchableTypeSpec):
"""A tf.TypeSpec for `AutoCompositeTensor` objects."""
__slots__ = ('_param_specs', '_non_tensor_params', '_omit_kwargs',
'_prefer_static_value', '_callable_params', '_serializable',
'_comparable')
def __init__(self, param_specs, non_tensor_params, omit_kwargs,
prefer_static_value, non_identifying_kwargs,
callable_params=None):
"""Initializes a new `_AutoCompositeTensorTypeSpec`.
Args:
param_specs: Python `dict` of `tf.TypeSpec` instances that describe
kwargs to the `AutoCompositeTensor`'s constructor that are `Tensor`-like
or `CompositeTensor` subclasses.
non_tensor_params: Python `dict` containing non-`Tensor` and non-
`CompositeTensor` kwargs to the `AutoCompositeTensor`'s constructor.
omit_kwargs: Python `tuple` of strings corresponding to the names of
kwargs to the `AutoCompositeTensor`'s constructor that should be omitted
from the `_AutoCompositeTensorTypeSpec`'s serialization, equality/
compatibility checks, and rebuilding of the `AutoCompositeTensor` from
`Tensor` components.
prefer_static_value: Python `tuple` of strings corresponding to the names
        of `Tensor`-like kwargs to the `AutoCompositeTensor`'s constructor that
may be stored as static values, if known. These are typically shapes or
axis values.
non_identifying_kwargs: Python `tuple` of strings corresponding to the
        names of kwargs to the `AutoCompositeTensor`'s constructor whose values
are not relevant to the unique identification of the
`_AutoCompositeTensorTypeSpec` instance. Equality/comparison checks and
`__hash__` do not depend on these kwargs.
callable_params: Python `dict` of callable kwargs to the
`AutoCompositeTensor`'s constructor that do not subclass
`CompositeTensor`, or `None`. If `callable_params` is a non-empty
`dict`, then serialization of the `_AutoCompositeTensorTypeSpec` is not
supported. Defaults to `None`, which is converted to an empty `dict`.
"""
self._param_specs = param_specs
self._non_tensor_params = non_tensor_params
self._omit_kwargs = omit_kwargs
self._prefer_static_value = prefer_static_value
self._non_identifying_kwargs = non_identifying_kwargs
self._callable_params = {} if callable_params is None else callable_params
self._serializable = (
_AUTO_COMPOSITE_TENSOR_VERSION,
self._param_specs,
self._non_tensor_params,
self._omit_kwargs,
self._prefer_static_value,
self._non_identifying_kwargs)
def remove_kwargs(d):
return {k: v for k, v in d.items()
if k not in self._non_identifying_kwargs}
self._comparable = (
_AUTO_COMPOSITE_TENSOR_VERSION,
remove_kwargs(self._param_specs),
remove_kwargs(self._non_tensor_params),
self._omit_kwargs,
self._prefer_static_value,
self._non_identifying_kwargs,
tf.nest.map_structure(id, remove_kwargs(self._callable_params)))
@classmethod
def from_instance(cls, instance, omit_kwargs=(), non_identifying_kwargs=()):
cls_value_type = cls.value_type.fget(None)
if type(instance) is not cls_value_type: # pylint: disable=unidiomatic-typecheck
raise ValueError(f'`{type(instance).__name__}` has inherited the '
f'`_type_spec` of `{cls_value_type.__name__}`. It '
f'should define its own, either directly, or by '
f'applying `auto_composite_tensor` to '
f'`{type(instance).__name__}.`')
prefer_static_value = tuple(
getattr(instance, '_composite_tensor_shape_params', ()))
kwargs = _extract_init_kwargs(instance, omit_kwargs=omit_kwargs,
prefer_static_value=prefer_static_value)
non_tensor_params = {}
param_specs = {}
callable_params = {}
for k, v in list(kwargs.items()):
# If v contains no Tensors, this will just be v
type_spec_or_v = _extract_type_spec_recursively(v)
if type_spec_or_v is not v:
param_specs[k] = type_spec_or_v
elif callable(v):
callable_params[k] = v
else:
non_tensor_params[k] = v
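    # Illustrative example of the partitioning above: extracted kwargs
    # {'x': tf.constant(1.), 'fn': math.log, 'name': 'foo'} would yield
    # param_specs={'x': tf.TensorSpec([], tf.float32)},
    # callable_params={'fn': math.log}, and non_tensor_params={'name': 'foo'}.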
# Construct the spec.
return cls(param_specs=param_specs,
non_tensor_params=non_tensor_params,
omit_kwargs=omit_kwargs,
prefer_static_value=prefer_static_value,
non_identifying_kwargs=non_identifying_kwargs,
callable_params=callable_params)
def _to_components(self, obj):
return _extract_init_kwargs(obj, limit_to=list(self._param_specs))
def _from_components(self, components):
kwargs = dict(
self._non_tensor_params, **self._callable_params, **components)
with _deferred_assertion_context():
return self.value_type(**kwargs)
@property
def _component_specs(self):
return self._param_specs
def _serialize(self):
if self._callable_params:
raise ValueError(
f'Cannot serialize object with callable parameters that are not '
f'`CompositeTensor`s: {self._callable_params.keys()}.')
return self._serializable
@classmethod
def _deserialize(cls, encoded):
version = encoded[0]
if version == 1:
encoded = encoded + ((),)
version = 2
if version == 2:
encoded = encoded + ((),)
version = 3
if version != _AUTO_COMPOSITE_TENSOR_VERSION:
raise ValueError(f'Expected version {_AUTO_COMPOSITE_TENSOR_VERSION},'
f' but got {version}.')
return cls(*encoded[1:])
def is_subtype_of(self, other):
"""Returns True if `self` is subtype of `other`.
Args:
other: A `TypeSpec`.
"""
# pylint: disable=protected-access
if type(self) is not type(
other) or self._callable_params != other._callable_params:
return False
try:
tf.nest.assert_same_structure(self._comparable[:-1],
other._comparable[:-1])
except (TypeError, ValueError):
return False
self_elements = tf.nest.flatten(self._comparable[:-1])
other_elements = tf.nest.flatten(other._comparable[:-1])
def is_subtype_or_equal(a, b):
try:
return a.is_subtype_of(b)
except AttributeError:
return a == b
return all(
is_subtype_or_equal(self_element, other_element)
for (self_element, other_element) in zip(self_elements, other_elements))
def most_specific_common_supertype(self, others):
"""Returns the most specific supertype of `self` and `others`.
Args:
others: A Sequence of `TypeSpec`.
    Returns:
      The most specific supertype, or `None` if no supertype exists.
"""
# pylint: disable=protected-access
if not all(
type(self) is type(other) and
self._callable_params == other._callable_params for other in others):
return None
try:
for other in others:
tf.nest.assert_same_structure(self._comparable[:-1],
other._comparable[:-1])
except (TypeError, ValueError):
return None
self_elements = tf.nest.flatten(self._comparable[:-1])
others_elements = [
tf.nest.flatten(other._comparable[:-1]) for other in others
]
def common_supertype_or_equal(a, bs):
try:
return a.most_specific_common_supertype(bs)
except AttributeError:
return a if all(a == b for b in bs) else None
common_elements = [None] * len(self_elements)
for i, self_element in enumerate(self_elements):
common_elements[i] = common_supertype_or_equal(
self_element,
[other_elements[i] for other_elements in others_elements])
if self_element is not None and common_elements[i] is None:
return None
common_comparable = tf.nest.pack_sequence_as(self._comparable[:-1],
common_elements)
return type(self)(*common_comparable[1:], self._callable_params)
# TODO(b/221472813): Delete this once default is deprecated.
def most_specific_compatible_type(self, other):
"""Returns the most specific TypeSpec compatible with `self` and `other`.
Deprecated. Use most_specific_common_supertype instead.
Args:
other: A `TypeSpec`.
Raises:
ValueError: If there is no TypeSpec that is compatible with both `self`
and `other`.
ValueError: If the `_callable_params` attributes of `self` and `other` are
not equal.
"""
if type(self) is not type(other):
raise ValueError(
f'No TypeSpec is compatible with both {self} and {other}.')
# pylint: disable=protected-access
if self._callable_params != other._callable_params:
raise ValueError(f'Callable parameters must be identical. Saw '
f'{self._callable_params} and {other._callable_params}.')
merged = self._TypeSpec__most_specific_compatible_type_serialization(
self._comparable[:-1], other._comparable[:-1])
# pylint: enable=protected-access
return type(self)(*merged[1:], self._callable_params)
def is_compatible_with(self, spec_or_value):
"""Returns true if `spec_or_value` is compatible with this TypeSpec."""
if not isinstance(spec_or_value, tf.TypeSpec):
spec_or_value = type_spec.type_spec_from_value(spec_or_value)
if type(self) is not type(spec_or_value):
return False
return self._TypeSpec__is_compatible(
self._comparable, spec_or_value._comparable) # pylint: disable=protected-access
def _copy(self, **overrides):
kwargs = {
'param_specs': self._param_specs,
'non_tensor_params': self._non_tensor_params,
'omit_kwargs': self._omit_kwargs,
'prefer_static_value': self._prefer_static_value,
'non_identifying_kwargs': self._non_identifying_kwargs,
'callable_params': self._callable_params}
kwargs.update(overrides)
return type(self)(**kwargs)
def _with_tensor_ranks_only(self):
"""Returns a TypeSpec compatible with `self`, with tensor shapes relaxed.
Returns:
A `TypeSpec` that is compatible with `self`, where any `TensorShape`
information has been relaxed to include only tensor rank (and not
the dimension sizes for individual axes).
"""
def relax(value):
if isinstance(value, tf.TypeSpec):
return value._with_tensor_ranks_only() # pylint: disable=protected-access
elif (isinstance(value, tf.TensorShape) and
value.rank is not None):
return tf.TensorShape([None] * value.rank)
else:
return value
return self._copy(
param_specs=tf.nest.map_structure(relax, self._param_specs))
def _without_tensor_names(self):
"""Returns a TypeSpec compatible with `self`, with tensor names removed.
Returns:
A `TypeSpec` that is compatible with `self`, where the name of any
`TensorSpec` is set to `None`.
"""
def rename(value):
if isinstance(value, tf.TypeSpec):
return value._without_tensor_names() # pylint: disable=protected-access
else:
return value
return self._copy(
param_specs=tf.nest.map_structure(rename, self._param_specs))
def __get_cmp_key(self):
return (type(self), self._TypeSpec__make_cmp_key(self._comparable))
def __repr__(self):
return '%s%r' % (
type(self).__name__, self._serializable + (self._callable_params,))
def __reduce__(self):
if self._callable_params:
raise ValueError(
f'Cannot serialize object with callable parameters that are not '
f'`CompositeTensor`s: {self._callable_params.keys()}.')
return super(_AutoCompositeTensorTypeSpec, self).__reduce__()
def __eq__(self, other):
return (type(other) is type(self) and
self.__get_cmp_key() == other.__get_cmp_key()) # pylint: disable=protected-access
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.__get_cmp_key())
def _batch(self, batch_size):
"""Returns a TypeSpec representing a batch of objects with this TypeSpec."""
# This method recursively adds a batch dimension to all parameter Tensors.
# Note that this may result in parameter shapes that do not broadcast. You
# may wish to first call
# `dist = dist._broadcast_parameters_with_batch_shape(tf.ones_like(
# `dist.batch_shape_tensor()))` to ensure that the parameters of a
# Distribution or analogous object will continue to broadcast after
# batching.
return self._copy(
param_specs=tf.nest.map_structure(
lambda spec: spec._batch(batch_size), # pylint: disable=protected-access
self._param_specs))
def _unbatch(self):
"""Returns a TypeSpec representing a single element of this TypeSpec."""
return self._copy(
param_specs=tf.nest.map_structure(
lambda spec: spec._unbatch(), # pylint: disable=protected-access
self._param_specs))
class AutoCompositeTensor(composite_tensor.CompositeTensor):
"""Recommended base class for `@auto_composite_tensor`-ified classes.
See details in `tfp.experimental.auto_composite_tensor` description.
"""
@property
def _type_spec(self):
# This property will be overwritten by the `@auto_composite_tensor`
# decorator. However, we need it so that a valid subclass of the `ABCMeta`
# class `CompositeTensor` can be constructed and passed to the
# `@auto_composite_tensor` decorator
pass
def type_spec_register(name, allow_overwrite=True):
"""Decorator used to register a unique name for a TypeSpec subclass.
Unlike TensorFlow's `type_spec.register`, this function allows a new
`TypeSpec` to be registered with a `name` that already appears in the
registry (overwriting the `TypeSpec` already registered with that name). This
allows for re-definition of `AutoCompositeTensor` subclasses in test
  environments and IPython.
Args:
name: The name of the type spec. Must have the form
`"{project_name}.{type_name}"`. E.g. `"my_project.MyTypeSpec"`.
allow_overwrite: `bool`, if `True` then the entry in the `TypeSpec` registry
keyed by `name` will be overwritten if it exists. If `False`, then
behavior is the same as `type_spec.register`.
Returns:
A class decorator that registers the decorated class with the given name.
"""
# pylint: disable=protected-access
if allow_overwrite and name in type_spec._NAME_TO_TYPE_SPEC:
type_spec._TYPE_SPEC_TO_NAME.pop(
type_spec._NAME_TO_TYPE_SPEC.pop(name))
return type_spec.register(name)
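# A minimal usage sketch for `type_spec_register` (hypothetical names):
#
#   @type_spec_register('my_project.MyClass_ACTTypeSpec')
#   class _MyTypeSpec(_AutoCompositeTensorTypeSpec):
#     @property
#     def value_type(self):
#       return MyClass  # Hypothetical value class.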
def auto_composite_tensor(
cls=None, omit_kwargs=(), non_identifying_kwargs=(), module_name=None):
"""Automagically generate `CompositeTensor` behavior for `cls`.
`CompositeTensor` objects are able to pass in and out of `tf.function` and
`tf.while_loop`, or serve as part of the signature of a TF saved model.
The contract of `auto_composite_tensor` is that all __init__ args and kwargs
must have corresponding public or private attributes (or properties). Each of
these attributes is inspected (recursively) to determine whether it is (or
contains) `Tensor`s or non-`Tensor` metadata. Nested (`list`, `tuple`, `dict`,
etc) attributes are supported, but must either contain *only* `Tensor`s (or
lists, etc, thereof), or *no* `Tensor`s. E.g.,
- object.attribute = [1., 2., 'abc'] # valid
- object.attribute = [tf.constant(1.), [tf.constant(2.)]] # valid
- object.attribute = ['abc', tf.constant(1.)] # invalid
If the attribute is a callable, serialization of the `TypeSpec`, and therefore
interoperability with `tf.saved_model`, is not currently supported. As a
workaround, callables that do not contain or close over `Tensor`s may be
expressed as functors that subclass `AutoCompositeTensor` and used in place of
the original callable arg:
```python
@auto_composite_tensor(module_name='my.module')
class F(AutoCompositeTensor):
def __call__(self, *args, **kwargs):
return original_callable(*args, **kwargs)
```
Callable objects that do contain or close over `Tensor`s should either
(1) subclass `AutoCompositeTensor`, with the `Tensor`s passed to the
constructor, (2) subclass `CompositeTensor` and implement their own
`TypeSpec`, or (3) have a conversion function registered with
`type_spec.register_type_spec_from_value_converter`.
  If the object has a `_composite_tensor_shape_params` field (presumed to
have `tuple` of `str` value), the flattening code will use
`tf.get_static_value` to attempt to preserve shapes as static metadata, for
fields whose name matches a name specified in that field. Preserving static
values can be important to correctly propagating shapes through a loop.
Note that the Distribution and Bijector base classes provide a
  default implementation of `_composite_tensor_shape_params`, populated by
`parameter_properties` annotations.
If the decorated class `A` does not subclass `CompositeTensor`, a *new class*
will be generated, which mixes in `A` and `CompositeTensor`.
To avoid this extra class in the class hierarchy, we suggest inheriting from
`auto_composite_tensor.AutoCompositeTensor`, which inherits from
`CompositeTensor` and implants a trivial `_type_spec` @property. The
`@auto_composite_tensor` decorator will then overwrite this trivial
`_type_spec` @property. The trivial one is necessary because `_type_spec` is
an abstract property of `CompositeTensor`, and a valid class instance must be
created before the decorator can execute -- without the trivial `_type_spec`
property present, `ABCMeta` will throw an error! The user may thus do any of
the following:
#### `AutoCompositeTensor` base class (recommended)
```python
@tfp.experimental.auto_composite_tensor
class MyClass(tfp.experimental.AutoCompositeTensor):
...
mc = MyClass()
type(mc)
# ==> MyClass
```
#### No `CompositeTensor` base class (ok, but changes expected types)
```python
@tfp.experimental.auto_composite_tensor
class MyClass(object):
...
mc = MyClass()
type(mc)
# ==> MyClass_AutoCompositeTensor
```
#### `CompositeTensor` base class, requiring trivial `_type_spec`
```python
from tensorflow.python.framework import composite_tensor
@tfp.experimental.auto_composite_tensor
class MyClass(composite_tensor.CompositeTensor):
@property
def _type_spec(self): # will be overwritten by @auto_composite_tensor
pass
...
mc = MyClass()
type(mc)
# ==> MyClass
```
## Full usage example
```python
@tfp.experimental.auto_composite_tensor(omit_kwargs=('name',))
class Adder(tfp.experimental.AutoCompositeTensor):
def __init__(self, x, y, name=None):
with tf.name_scope(name or 'Adder') as name:
self._x = tf.convert_to_tensor(x)
self._y = tf.convert_to_tensor(y)
self._name = name
def xpy(self):
return self._x + self._y
def body(obj):
return Adder(obj.xpy(), 1.),
result, = tf.while_loop(
cond=lambda _: True,
body=body,
loop_vars=(Adder(1., 1.),),
maximum_iterations=3)
result.xpy() # => 5.
```
Args:
cls: The class for which to create a CompositeTensor subclass.
omit_kwargs: Optional sequence of kwarg names to be omitted from the spec.
non_identifying_kwargs: Optional sequence of kwarg names to be omitted from
equality/comparison checks and the `__hash__` method of the spec.
module_name: The module name with which to register the `TypeSpec`. If
`None`, defaults to `cls.__module__`.
Returns:
composite_tensor_subclass: A subclass of `cls` and TF CompositeTensor.
"""
if cls is None:
return functools.partial(auto_composite_tensor,
omit_kwargs=omit_kwargs,
non_identifying_kwargs=non_identifying_kwargs,
module_name=module_name)
if module_name is None:
module_name = cls.__module__
type_spec_class_name = f'{cls.__name__}_ACTTypeSpec'
type_spec_name = f'{module_name}.{type_spec_class_name}'
# If the declared class is already a CompositeTensor subclass, we can avoid
# affecting the actual type of the returned class. Otherwise, we need to
# explicitly mix in the CT type, and hence create and return a newly
# synthesized type.
if issubclass(cls, composite_tensor.CompositeTensor):
@type_spec_register(type_spec_name)
class _AlreadyCTTypeSpec(_AutoCompositeTensorTypeSpec):
@property
def value_type(self):
return cls
_AlreadyCTTypeSpec.__name__ = type_spec_class_name
def _type_spec(obj):
return _AlreadyCTTypeSpec.from_instance(
obj, omit_kwargs, non_identifying_kwargs)
cls._type_spec = property(_type_spec) # pylint: disable=protected-access
return cls
clsid = (cls.__module__, cls.__name__, omit_kwargs,
non_identifying_kwargs)
# Check for subclass if retrieving from the _registry, in case the user
# has redefined the class (e.g. in a REPL/notebook).
if clsid in _registry and issubclass(_registry[clsid], cls):
return _registry[clsid]
class _GeneratedCTTypeSpec(_AutoCompositeTensorTypeSpec):
@property
def value_type(self):
return _registry[clsid]
_GeneratedCTTypeSpec.__name__ = type_spec_class_name
class _AutoCompositeTensor(cls, composite_tensor.CompositeTensor):
"""A per-`cls` subclass of `CompositeTensor`."""
@property
def _type_spec(self):
return _GeneratedCTTypeSpec.from_instance(
self, omit_kwargs, non_identifying_kwargs)
_AutoCompositeTensor.__name__ = cls.__name__
_registry[clsid] = _AutoCompositeTensor
type_spec_register(type_spec_name)(_GeneratedCTTypeSpec)
return _AutoCompositeTensor
|
import contextlib
try:
import unittest.mock as mock
except ImportError:
import mock
from tornado import gen
from tornado.httpclient import AsyncHTTPClient, HTTPError
from tornado.httputil import HTTPHeaders
class MockClient(object):
def __init__(self, ioloop):
self.ioloop = ioloop
self.mocked_urls = {}
self.ioloop.make_current()
self.client = AsyncHTTPClient()
self.original_fetch = self.client.fetch
def mock_url(self, url, method="GET"):
mock_response = MockResponse(url)
base_url = url.split("?")[0]
self.mocked_urls.setdefault(base_url, []).append(
(method, mock_response))
return mock_response
@contextlib.contextmanager
def patch(self):
        # This is perhaps a bit sketchy -- it relies on AsyncHTTPClient()
        # returning a singleton instance, so patching this client's fetch
        # also patches the client obtained elsewhere in the code under test.
with mock.patch.object(self.client, "fetch", self.fetch):
yield
@gen.coroutine
def fetch(self, url, *args, **kwargs):
base_url = url.split("?")[0]
if base_url not in self.mocked_urls:
response = yield self.original_fetch(url, *args, **kwargs)
raise gen.Return(response)
responses = self.mocked_urls[base_url]
if len(responses) == 0:
raise MissingMockResponse(
"URL requested too many times: {}".format(base_url))
expected_method, response = responses.pop(0)
request_method = kwargs.get("method", "GET")
if expected_method != request_method:
responses.insert(0, (expected_method, response))
response = MockResponse(url)
response.body = \
"Method mismatch ({}) for mocked URL: {} {}".format(
request_method, expected_method, base_url)
response.code = 405
if response.code > 399 and kwargs.get("raise_error", True):
raise HTTPError(
code=response.code, message="Mock error: ({}) {}".format(
response.code, response.body), response=response)
raise gen.Return(response)
class MockResponse(object):
def __init__(self, url):
self.code = 200
self._body = ""
self._headers = HTTPHeaders()
self._custom_headers = {}
@property
def body(self):
return self._body
@body.setter
def body(self, value):
self._body = value
@property
def headers(self):
        # Header values must be strings, so stringify the computed length.
        self._headers.setdefault("Content-Length", str(len(self._body)))
return self._headers
class MissingMockResponse(Exception):
pass
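# A minimal usage sketch for MockClient (URL and payload are illustrative;
# assumes a coroutine test body with an IOLoop instance `ioloop`):
#
#   client = MockClient(ioloop)
#   mocked = client.mock_url("http://example.com/api")
#   mocked.body = '{"ok": true}'
#   with client.patch():
#       response = yield client.client.fetch("http://example.com/api")
#   # response.body == '{"ok": true}'; fetching the same URL again raises
#   # MissingMockResponse, since each mocked response is consumed once.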
|
"""Haiku types."""
import typing
from typing import Any, Callable, Mapping, Sequence
import jax.numpy as jnp
try:
# Using PyType's experimental support for forward references.
Module = typing._ForwardRef("haiku.Module") # pylint: disable=protected-access
except AttributeError:
Module = Any
Initializer = Callable[[Sequence[int], Any], jnp.ndarray]
Params = Mapping[str, Mapping[str, jnp.ndarray]]
State = Mapping[str, Mapping[str, jnp.ndarray]]
PRNGKey = jnp.ndarray # pylint: disable=invalid-name
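# A minimal sketch of a callable matching the `Initializer` signature above
# (illustrative only):
#
#   def zeros_initializer(shape: Sequence[int], dtype: Any) -> jnp.ndarray:
#     return jnp.zeros(shape, dtype)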
|
import ctpy.data as data
import ctpy.math as m
from bson.objectid import ObjectId
import logging as log
from collections import defaultdict
import numpy as np
from slatkin import montecarlo # https://github.com/mmadsen/slatkin-exact-tools
import itertools
import pprint as pp
class ClassificationStatsPerSimrun:
def __init__(self, simconfig):
self.simconfig = simconfig
self.simrun_param_cache = None
self.param_cached = False
def process_simulation_run(self, simrun_id):
"""
Process the individual samples for a single simulation run, calculating any statistics that
require aggregation over the simulation run, saving the results to the database. This requires
that we process each "replication" separately, so that we do not mix them together.
:return:
"""
#log.debug("Starting analysis of simulation run %s", simrun_id)
combinations = [
range(0,self.simconfig.REPLICATIONS_PER_PARAM_SET),
self._get_classification_ids()
]
for par in itertools.product(*combinations):
repl = par[0]
classif = par[1]
log.debug("Processing combination %s - %s", repl, classif)
records = self._get_samples_for_simulation_run(simrun_id, repl, classif)
if records.count() < 1:
log.info("No samples in the database for simulation run: %s", simrun_id)
return
else:
log.info("Samples found for simulation run: %s", simrun_id)
class_time_cache = {}
# classification info
for s in records:
#log.debug("starting analysis of record")
#log.debug("record: %s", s)
# Only do this once - keep one of the records around so we can snag parameters from it later,
# outside the processing loop
if self.param_cached is False:
self.simrun_param_cache = s
self.param_cached = True
gen = s.simulation_time
for indiv in s.sample:
class_id = indiv.classid
# if we find an instance of the class showing up earlier than the cached time, record the earlier time
if class_id not in class_time_cache:
class_time_cache[class_id] = gen
else:
if class_time_cache[class_id] > gen:
class_time_cache[class_id] = gen
# the cache now contains the earliest time of appearance of all classes in the simulation run
#log.debug("cache: %s", class_time_cache)
class_time_appeared = [dict(classid=k,time=v) for k,v in class_time_cache.items()]
#log.debug("%s", class_time_appeared)
# calculate other stats...
#stats = self._innovation_interval_stats(class_time_cache)
# save the results
# TODO: Removed interval statistics 9/8/2013 in 1.0.2. Need to rethink the strategy given lots of zeros and sampling intervals
s = self.simrun_param_cache
data.storePerSimrunStatsPostclassification(s.classification_id,s.classification_type,s.classification_dim,
s.classification_coarseness,s.replication,s.sample_size,
s.population_size,s.mutation_rate,s.simulation_run_id,class_time_appeared,
None,None)
def _get_samples_for_simulation_run(self, simrun_id, replication, classification):
"""
:return:
"""
return data.IndividualSampleClassified.m.find(dict(simulation_run_id=simrun_id,replication=replication,classification_id=classification))
def _get_classification_ids(self):
res = data.ClassificationData.m.find().all()
id_list = []
for classification in res:
id_list.append(classification["_id"])
return id_list
def _innovation_interval_stats(self,cache):
"""
Takes a dict of class_id and generation of first appearance, and calculates the
intervals between class appearances
:return: tuple of mean, sd
"""
vals = sorted(cache.values())
log.debug("num class time values: %s", len(vals))
sorted_times = np.array(vals)
log.debug("num class time values: %s", len(sorted_times))
#log.debug("sorted times: %s", sorted_times)
intervals = np.diff(sorted_times)
log.debug("num intervals: %s intervals: %s", len(intervals), intervals)
mean = np.mean(intervals)
# ddof = 1 is required to give the standard statistical definition, with n-1 in the denominator
sd = np.std(intervals, ddof=1)
log.debug("mean: %s sd: %s", mean, sd)
return (mean,sd)
class ClassificationStatsPerSample:
# speed things up by caching mode definitions so we hit the DB a minimal number of times
mode_definition_cache = dict()
classification_dimension_cache = dict()
def __init__(self, simconfig, classification, save_identified_indiv=True):
self.simconfig = simconfig
self.classification = classification
self.class_id = classification["_id"]
self.dimensionality = classification["dimensions"]
self.coarseness = classification["mean_coarseness"]
self.class_type = classification["classification_type"]
self.save_indiv = save_identified_indiv
self.classification_size = self._calc_num_classes()
#log.debug("initializing ClassIdentifier for classification %s", self.class_id)
#log.debug(" Saving identified individuals, in addition to stats? %s", self.save_indiv)
def identify_individual_samples(self):
"""
Identify the individuals sampled from each generation of a simulation run to the appropriate class from the focal classification.
Each "sample" is a record from one generation of one replication of one
simulation run, at a given sample size and dimensionality. Within each
sample record is a list of sampled individual genotypes. This list is
what we iterate over to identify via the classification, and then we
calculate various stats, and store the resulting stats. If the flag for
saving raw individuals (after classification identification) is set, we
also store the list of individuals and the classes to which their genotypes identify.
:return: None
"""
log.info("Starting identification of individuals to classification %s", self.class_id)
# these are caches, to be used in other methods
global mode_counts_by_dimension, class_counts
records = self._get_individual_cursor_for_dimensionality(self.dimensionality)
#log.debug("record length: %s", len(records))
for s in records:
#log.debug("Starting analysis of record")
classified_indiv = []
mode_counts_by_dimension = {}
class_counts = defaultdict(int)
for dim_num in range(0, self.dimensionality):
# initialize a cache
mode_counts_by_dimension[dim_num] = defaultdict(int)
for indiv in s.sample:
ident_class = self._identify_genotype_to_class(indiv.genotype)
ident_indiv = dict(id=indiv.id,classid=ident_class)
#log.debug("identified to class: %s", ident_class)
classified_indiv.append(ident_indiv)
class_counts[ident_class] += 1
class_freq = [float(count)/float(s.sample_size) for class_id, count in class_counts.items() ]
shannon_entropy = m.diversity_shannon_entropy(class_freq)
class_iqv = m.diversity_iqv(class_freq)
slatkin_result = self._slatkin_neutrality_for_classes(class_counts.values())
#log.debug("class freq: %s shannon entropy: %s iqv: %s", class_freq, shannon_entropy, class_iqv)
stats = self._calc_postclassification_stats(s)
#log.debug("class richness %s", len(class_counts))
data.storePerGenerationStatsPostclassification(s.simulation_time,ObjectId(self.class_id),self.class_type,self.dimensionality,
self.coarseness,self.classification_size,s.replication,s.sample_size,s.population_size,s.mutation_rate,
s.simulation_run_id,stats["mode_richness_list"],stats["class_richness"],
stats["mode_iqv"],stats["mode_entropy"],class_iqv,shannon_entropy,stats["design_space_occupation"],None,slatkin_result)
if self.save_indiv:
data.storeIndividualSampleClassified(s.simulation_time,ObjectId(self.class_id),self.class_type,self.dimensionality,
self.coarseness,s.replication,s.sample_size,s.population_size,
s.mutation_rate, s.simulation_run_id, classified_indiv)
# private analytic methods
def _calc_postclassification_stats(self,s):
mode_richness_list = []
mode_evenness_iqv_list = []
mode_evenness_entropy_list = []
for dim, dim_dict in sorted(mode_counts_by_dimension.items()):
mode_richness_list.append(len(dim_dict))
            mode_freq = [float(count)/float(s.sample_size) for trait, count in dim_dict.items()]
mode_evenness_iqv_list.append(m.diversity_iqv(mode_freq))
mode_evenness_entropy_list.append(m.diversity_shannon_entropy(mode_freq))
#log.debug("mode_richness_list %s", mode_richness_list )
results = {}
results["mode_richness_list"] = mode_richness_list
results["class_richness"] = len(class_counts)
results["design_space_occupation"] = float(len(class_counts)) / float(self.classification_size)
results["mode_iqv"] = mode_evenness_iqv_list
results["mode_entropy"] = mode_evenness_entropy_list
return results
def _calc_num_classes(self):
# given that each dimension has the same coarseness, of course....
return self.coarseness ** self.dimensionality
def _slatkin_neutrality_for_classes(self, class_counts):
(prob, theta) = montecarlo(self.simconfig.SLATKIN_MONTECARLO_REPLICATES, class_counts, len(class_counts))
return prob
# private methods
def _get_individual_cursor_for_dimensionality(self, dimensionality):
"""
Returns all records in the "fulldataset" collection that have the same dimensionality as the classification
we're processing. In order to prevent the mongodb timeout from occurring, we process the records in a batch
size of 1000, forcing the pymongo driver to go back to the database before the server times out the cursor.
This is helpful for datasets with a very large number of samples.
:param dimensionality:
:return: returns a Ming/pymongo cursor for the result set, in batches
"""
sample_cursor = data.IndividualSampleFullDataset.m.find(dict(dimensionality=dimensionality),dict(timeout=False))
return sample_cursor
def _get_and_cache_mode_definition(self, mode_id):
if mode_id in self.mode_definition_cache:
return self.mode_definition_cache[mode_id]
else:
mode_defn = data.ClassificationModeDefinitions.m.find(dict(_id=mode_id)).one()
self.mode_definition_cache[mode_id] = mode_defn["boundary_map"]
return mode_defn["boundary_map"]
def _get_and_cache_dimensions_for_classification(self, class_id):
if class_id in self.classification_dimension_cache:
return self.classification_dimension_cache[class_id]
else:
classification = data.ClassificationData.m.find(dict(_id=class_id)).one()
dimension_list = classification["modes_for_dimensions"]
self.classification_dimension_cache[class_id] = dimension_list
return dimension_list
def _identify_genotype_to_class(self, genotype):
"""
:param genotype:
:return:
"""
mode_dimension_id_list = self.classification["modes_for_dimensions"]
identified_modes = []
for dim_num in range(0, self.dimensionality):
dimension_id = mode_dimension_id_list[dim_num]
mode_boundaries = self._get_and_cache_mode_definition(dimension_id)
trait_for_dim = genotype[dim_num]
# increment the cached trait count for stats
mode_counts_by_dimension[dim_num][trait_for_dim] += 1
#log.debug("mode_boundaries: %s", mode_boundaries)
for mode_num in range(0, self.coarseness):
mode_defn = mode_boundaries[mode_num]
lower = mode_defn["lower"]
upper = mode_defn["upper"]
if lower <= trait_for_dim < upper:
identified_modes.append(mode_num)
break
        # at the end of looping through the dimensions, we ought to have an ordered
        # list of the modes to which each allele identified, given the mode boundaries
#log.debug("identified modes: %s", identified_modes)
        return '-'.join([str(num) for num in identified_modes])
def update_with_slatkin_test(simconfig, s):
"""
Takes an IndividualSampleClassified object, calculates the slatkin exact test for it,
and updates the existing pergenerationstats_postclassification object.
:param sample:
:return:
"""
class_counts = defaultdict(int)
for indiv in s.sample:
class_counts[indiv.classid] += 1
counts = class_counts.values()
(prob, theta) = montecarlo(simconfig.SLATKIN_MONTECARLO_REPLICATES, counts, len(counts))
# find the proper pergeneration object for this sample, and add the slatkin result
#
record = data.PerGenerationStatsPostclassification.m.find(dict(
classification_id=s.classification_id,
simulation_run_id=s.simulation_run_id,
simulation_time=s.simulation_time,
replication=s.replication,
sample_size=s.sample_size)).one()
data.updateFieldPerGenerationStatsPostclassification(record._id, "class_neutrality_slatkin", prob )
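# Illustrative sketch (not part of the original module): the counting step of
# update_with_slatkin_test on a hypothetical in-memory sample, without the
# database. Assumes defaultdict is imported as the functions above do; the
# sample dicts below stand in for IndividualSampleClassified records.
def _demo_class_counts(sample):
    counts = defaultdict(int)
    for indiv in sample:
        counts[indiv["classid"]] += 1
    return list(counts.values())
# e.g. _demo_class_counts([{"classid": "0-1"}, {"classid": "0-1"}, {"classid": "2-0"}])
# returns [2, 1], the count vector that would be handed to the Slatkin test.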
|
from youtrack.connection import Connection
# Delete every user from the YouTrack instance except 'root' and 'guest'.
connection = Connection('some url', 'root', 'root')
for user in connection.getUsers():
    print("checking user:", user.login)
    if (user.login != 'root') and (user.login != 'guest'):
        connection._reqXml('DELETE', '/admin/user/' + user.login, '')
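# A more cautious variant (illustrative, not part of the original script):
# collect the candidate logins first and only delete when dry_run is False.
# Uses only the Connection methods seen above.
def delete_non_admin_users(connection, dry_run=True):
    victims = [u.login for u in connection.getUsers()
               if u.login not in ('root', 'guest')]
    for login in victims:
        if dry_run:
            print("would delete:", login)
        else:
            connection._reqXml('DELETE', '/admin/user/' + login, '')
    return victims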
|
import cmd
import subprocess
class ShellEnabled(cmd.Cmd):
last_output = ''
def do_shell(self, line):
"Run a shell command"
print("running shell command:", line)
sub_cmd = subprocess.Popen(line,
shell=True,
stdout=subprocess.PIPE)
output = sub_cmd.communicate()[0].decode('utf-8')
print(output)
self.last_output = output
def do_echo(self, line):
"""Print the input, replacing '$out' with
the output of the last shell command.
"""
# Obviously not robust
print(line.replace('$out', self.last_output))
def do_EOF(self, line):
return True
if __name__ == '__main__':
ShellEnabled().cmdloop()
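# Example session (illustrative; actual output depends on the local shell):
#   (Cmd) shell echo hello
#   running shell command: echo hello
#   hello
#   (Cmd) echo the command said: $out
#   the command said: hello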
|
"""``tornado.web`` 模块提供了一个简单的带有异步功能的web框架,来使其可以承载数以万计的开放连接,并适用于 `long polling <http://en.wikipedia.org/wiki/Push_technology#Long_polling>`_ 场景.
这是一个简单的 "Hello, world" 的样例应用:
.. testcode::
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
if __name__ == "__main__":
application = tornado.web.Application([
(r"/", MainHandler),
])
application.listen(8888)
tornado.ioloop.IOLoop.current().start()
.. testoutput::
:hide:
See the :doc:`guide` for additional information.
Thread-safety notes
-------------------
In general, methods on `RequestHandler` and elsewhere in Tornado are not
thread-safe. In particular, methods such as `~RequestHandler.write()`,
`~RequestHandler.finish()`, and `~RequestHandler.flush()` must only be
called from the main thread. If you use multiple threads it is important
to use `.IOLoop.add_callback` to transfer control back to the main thread
before finishing the request.
"""
from __future__ import (absolute_import, division,
print_function, with_statement)
import base64
import binascii
import datetime
import email.utils
import functools
import gzip
import hashlib
import hmac
import mimetypes
import numbers
import os.path
import re
import stat
import sys
import threading
import time
import tornado
import traceback
import types
from io import BytesIO
from tornado.concurrent import Future, is_future
from tornado import escape
from tornado import gen
from tornado import httputil
from tornado import iostream
from tornado import locale
from tornado.log import access_log, app_log, gen_log
from tornado import stack_context
from tornado import template
from tornado.escape import utf8, _unicode
from tornado.util import (import_object, ObjectDict, raise_exc_info,
unicode_type, _websocket_mask)
from tornado.httputil import split_host_and_port
try:
import Cookie # py2
except ImportError:
import http.cookies as Cookie # py3
try:
import urlparse # py2
except ImportError:
import urllib.parse as urlparse # py3
try:
from urllib import urlencode # py2
except ImportError:
from urllib.parse import urlencode # py3
MIN_SUPPORTED_SIGNED_VALUE_VERSION = 1
"""The oldest signed value version supported by this version of Tornado.
Signed values older than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
MAX_SUPPORTED_SIGNED_VALUE_VERSION = 2
"""The newest signed value version supported by this version of Tornado.
Signed values newer than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_VERSION = 2
"""The signed value version produced by `.RequestHandler.create_signed_value`.
May be overridden by passing a ``version`` keyword argument.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_MIN_VERSION = 1
"""The oldest signed value accepted by `.RequestHandler.get_secure_cookie`.
May be overridden by passing a ``min_version`` keyword argument.
.. versionadded:: 3.2.1
"""
class RequestHandler(object):
"""Subclass this class and define `get()` or `post()` to make a handler.
    If you want to support more methods than the standard GET/HEAD/POST, you
    should override the class variable ``SUPPORTED_METHODS`` in your
    `RequestHandler` subclass, which by default contains "GET", "HEAD",
    "POST", "DELETE", "PATCH", "PUT", and "OPTIONS".
"""
SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT",
"OPTIONS")
_template_loaders = {} # {path: template.BaseLoader}
_template_loader_lock = threading.Lock()
_remove_control_chars_regex = re.compile(r"[\x00-\x08\x0e-\x1f]")
def __init__(self, application, request, **kwargs):
super(RequestHandler, self).__init__()
self.application = application
self.request = request
self._headers_written = False
self._finished = False
self._auto_finish = True
self._transforms = None # will be set in _execute
self._prepared_future = None
self.path_args = None
self.path_kwargs = None
self.ui = ObjectDict((n, self._ui_method(m)) for n, m in
application.ui_methods.items())
# UIModules are available as both `modules` and `_tt_modules` in the
# template namespace. Historically only `modules` was available
# but could be clobbered by user additions to the namespace.
# The template {% module %} directive looks in `_tt_modules` to avoid
# possible conflicts.
self.ui["_tt_modules"] = _UIModuleNamespace(self,
application.ui_modules)
self.ui["modules"] = self.ui["_tt_modules"]
self.clear()
self.request.connection.set_close_callback(self.on_connection_close)
self.initialize(**kwargs)
def initialize(self):
"""子类的初始化函数,会在任何HTTP方法之前最先被调用。
URL规范的第三个字典类型参数将会作为参数传给initialize() 函数。
一般该函数可以用来做一些初始化工作。
Example::
class ProfileHandler(RequestHandler):
                # the 'database' argument comes from dict(database=database)
                # in the Application below
def initialize(self, database):
self.database = database
def get(self, username):
...
app = Application([
(r'/user/(.*)', ProfileHandler, dict(database=database)),
])
"""
pass
@property
def settings(self):
""" `self.application.settings <Application.settings>` 的别名."""
return self.application.settings
def head(self, *args, **kwargs):
""" 需要在子类中具体实现,为实现的方法被访问到时会产生HTTP 405(Method not allowed)错误。"""
raise HTTPError(405)
def get(self, *args, **kwargs):
""" 需要在子类中具体实现,为实现的方法被访问到时会产生HTTP 405(Method not allowed)错误。"""
raise HTTPError(405)
def post(self, *args, **kwargs):
""" 需要在子类中具体实现,为实现的方法被访问到时会产生HTTP 405(Method not allowed)错误。"""
raise HTTPError(405)
def delete(self, *args, **kwargs):
""" 需要在子类中具体实现,为实现的方法被访问到时会产生HTTP 405(Method not allowed)错误。"""
raise HTTPError(405)
def patch(self, *args, **kwargs):
""" 需要在子类中具体实现,为实现的方法被访问到时会产生HTTP 405(Method not allowed)错误。"""
raise HTTPError(405)
def put(self, *args, **kwargs):
""" 需要在子类中具体实现,为实现的方法被访问到时会产生HTTP 405(Method not allowed)错误。"""
raise HTTPError(405)
def options(self, *args, **kwargs):
""" 需要在子类中具体实现,为实现的方法被访问到时会产生HTTP 405(Method not allowed)错误。"""
raise HTTPError(405)
def prepare(self):
"""Called at the beginning of a request before `get`/`post`/etc.
        It is called after `initialize`, and before `get`/`post`/etc.
Override this method to perform common initialization regardless
of the request method.
Asynchronous support: Decorate this method with `.gen.coroutine`
or `.return_future` to make it asynchronous (the
`asynchronous` decorator cannot be used on `prepare`).
If this method returns a `.Future` execution will not proceed
until the `.Future` is done.
.. versionadded:: 3.1
Asynchronous support.
"""
pass
def on_finish(self):
"""Called after the end of a request.
Override this method to perform cleanup, logging, etc.
This method is a counterpart to `prepare`. ``on_finish`` may
not produce any output, as it is called after the response
has been sent to the client.
"""
pass
def on_connection_close(self):
"""Called in async handlers if the client closed the connection.
Override this to clean up resources associated with
long-lived connections. Note that this method is called only if
the connection was closed during asynchronous processing; if you
need to do cleanup after every request override `on_finish`
instead.
Proxies may keep a connection open for a time (perhaps
indefinitely) after the client has gone away, so this method
may not be called promptly after the end user closes their
connection.
"""
if _has_stream_request_body(self.__class__):
if not self.request.body.done():
self.request.body.set_exception(iostream.StreamClosedError())
self.request.body.exception()
def clear(self):
"""Resets all headers and content for this response."""
self._headers = httputil.HTTPHeaders({
"Server": "TornadoServer/%s" % tornado.version,
"Content-Type": "text/html; charset=UTF-8",
"Date": httputil.format_timestamp(time.time()),
})
self.set_default_headers()
self._write_buffer = []
self._status_code = 200
self._reason = httputil.responses[200]
def set_default_headers(self):
"""Override this to set HTTP headers at the beginning of the request.
For example, this is the place to set a custom ``Server`` header.
Note that setting such headers in the normal flow of request
processing may not do what you want, since headers may be reset
during error handling.
"""
pass
def set_status(self, status_code, reason=None):
"""Sets the status code for our response.
:arg int status_code: Response status code. If ``reason`` is ``None``,
it must be present in `httplib.responses <http.client.responses>`.
:arg string reason: Human-readable reason phrase describing the status
code. If ``None``, it will be filled in from
`httplib.responses <http.client.responses>`.
"""
self._status_code = status_code
if reason is not None:
self._reason = escape.native_str(reason)
else:
try:
self._reason = httputil.responses[status_code]
except KeyError:
raise ValueError("unknown status code %d", status_code)
def get_status(self):
"""Returns the status code for our response."""
return self._status_code
def set_header(self, name, value):
"""Sets the given response header name and value.
If a datetime is given, we automatically format it according to the
HTTP specification. If the value is not a string, we convert it to
a string. All header values are then encoded as UTF-8.
"""
self._headers[name] = self._convert_header_value(value)
def add_header(self, name, value):
"""Adds the given response header and value.
Unlike `set_header`, `add_header` may be called multiple times
to return multiple values for the same header.
"""
self._headers.add(name, self._convert_header_value(value))
def clear_header(self, name):
"""Clears an outgoing header, undoing a previous `set_header` call.
Note that this method does not apply to multi-valued headers
set by `add_header`.
"""
if name in self._headers:
del self._headers[name]
_INVALID_HEADER_CHAR_RE = re.compile(br"[\x00-\x1f]")
def _convert_header_value(self, value):
if isinstance(value, bytes):
pass
elif isinstance(value, unicode_type):
value = value.encode('utf-8')
elif isinstance(value, numbers.Integral):
# return immediately since we know the converted value will be safe
return str(value)
elif isinstance(value, datetime.datetime):
return httputil.format_timestamp(value)
else:
raise TypeError("Unsupported header value %r" % value)
# If \n is allowed into the header, it is possible to inject
# additional headers or split the request. Also cap length to
# prevent obviously erroneous values.
if (len(value) > 4000 or
RequestHandler._INVALID_HEADER_CHAR_RE.search(value)):
raise ValueError("Unsafe header value %r", value)
return value
_ARG_DEFAULT = []
def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
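        Example (illustrative)::
            # inside a handler method:
            q = self.get_argument("q")             # required; error if missing
            page = self.get_argument("page", "1")  # optional, defaults to "1"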
"""
return self._get_argument(name, default, self.request.arguments, strip)
def get_arguments(self, name, strip=True):
"""Returns a list of the arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
"""
# Make sure `get_arguments` isn't accidentally being called with a
# positional argument that's assumed to be a default (like in
# `get_argument`.)
assert isinstance(strip, bool)
return self._get_arguments(name, self.request.arguments, strip)
def get_body_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name
from the request body.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
.. versionadded:: 3.2
"""
return self._get_argument(name, default, self.request.body_arguments,
strip)
def get_body_arguments(self, name, strip=True):
"""Returns a list of the body arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
.. versionadded:: 3.2
"""
return self._get_arguments(name, self.request.body_arguments, strip)
def get_query_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name
from the request query string.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
.. versionadded:: 3.2
"""
return self._get_argument(name, default,
self.request.query_arguments, strip)
def get_query_arguments(self, name, strip=True):
"""Returns a list of the query arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
.. versionadded:: 3.2
"""
return self._get_arguments(name, self.request.query_arguments, strip)
def _get_argument(self, name, default, source, strip=True):
args = self._get_arguments(name, source, strip=strip)
if not args:
if default is self._ARG_DEFAULT:
raise MissingArgumentError(name)
return default
return args[-1]
def _get_arguments(self, name, source, strip=True):
values = []
for v in source.get(name, []):
v = self.decode_argument(v, name=name)
if isinstance(v, unicode_type):
# Get rid of any weird control chars (unless decoding gave
# us bytes, in which case leave it alone)
v = RequestHandler._remove_control_chars_regex.sub(" ", v)
if strip:
v = v.strip()
values.append(v)
return values
def decode_argument(self, value, name=None):
"""Decodes an argument from the request.
The argument has been percent-decoded and is now a byte string.
By default, this method decodes the argument as utf-8 and returns
a unicode string, but this may be overridden in subclasses.
This method is used as a filter for both `get_argument()` and for
values extracted from the url and passed to `get()`/`post()`/etc.
The name of the argument is provided if known, but may be None
(e.g. for unnamed groups in the url regex).
"""
try:
return _unicode(value)
except UnicodeDecodeError:
raise HTTPError(400, "Invalid unicode in %s: %r" %
(name or "url", value[:40]))
@property
def cookies(self):
"""An alias for
`self.request.cookies <.httputil.HTTPServerRequest.cookies>`."""
return self.request.cookies
def get_cookie(self, name, default=None):
"""Gets the value of the cookie with the given name, else default."""
if self.request.cookies is not None and name in self.request.cookies:
return self.request.cookies[name].value
return default
def set_cookie(self, name, value, domain=None, expires=None, path="/",
expires_days=None, **kwargs):
"""Sets the given cookie name/value with the given options.
Additional keyword arguments are set on the Cookie.Morsel
directly.
See http://docs.python.org/library/cookie.html#morsel-objects
for available attributes.
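        Example (illustrative)::
            self.set_cookie("theme", "dark", expires_days=30, httponly=True)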
"""
# The cookie library only accepts type str, in both python 2 and 3
name = escape.native_str(name)
value = escape.native_str(value)
if re.search(r"[\x00-\x20]", name + value):
# Don't let us accidentally inject bad stuff
raise ValueError("Invalid cookie %r: %r" % (name, value))
if not hasattr(self, "_new_cookie"):
self._new_cookie = Cookie.SimpleCookie()
if name in self._new_cookie:
del self._new_cookie[name]
self._new_cookie[name] = value
morsel = self._new_cookie[name]
if domain:
morsel["domain"] = domain
if expires_days is not None and not expires:
expires = datetime.datetime.utcnow() + datetime.timedelta(
days=expires_days)
if expires:
morsel["expires"] = httputil.format_timestamp(expires)
if path:
morsel["path"] = path
for k, v in kwargs.items():
if k == 'max_age':
k = 'max-age'
# skip falsy values for httponly and secure flags because
# SimpleCookie sets them regardless
if k in ['httponly', 'secure'] and not v:
continue
morsel[k] = v
def clear_cookie(self, name, path="/", domain=None):
"""Deletes the cookie with the given name.
Due to limitations of the cookie protocol, you must pass the same
path and domain to clear a cookie as were used when that cookie
was set (but there is no way to find out on the server side
which values were used for a given cookie).
"""
expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
self.set_cookie(name, value="", path=path, expires=expires,
domain=domain)
def clear_all_cookies(self, path="/", domain=None):
"""Deletes all the cookies the user sent with this request.
See `clear_cookie` for more information on the path and domain
parameters.
.. versionchanged:: 3.2
Added the ``path`` and ``domain`` parameters.
"""
for name in self.request.cookies:
self.clear_cookie(name, path=path, domain=domain)
def set_secure_cookie(self, name, value, expires_days=30, version=None,
**kwargs):
"""Signs and timestamps a cookie so it cannot be forged.
You must specify the ``cookie_secret`` setting in your Application
to use this method. It should be a long, random sequence of bytes
to be used as the HMAC secret for the signature.
To read a cookie set with this method, use `get_secure_cookie()`.
Note that the ``expires_days`` parameter sets the lifetime of the
cookie in the browser, but is independent of the ``max_age_days``
parameter to `get_secure_cookie`.
Secure cookies may contain arbitrary byte values, not just unicode
strings (unlike regular cookies)
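        Example (illustrative; assumes the application was constructed with a
        ``cookie_secret`` setting)::
            self.set_secure_cookie("user", "alice")
            # later, in another request:
            user = self.get_secure_cookie("user")  # b"alice", or None if invalid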
.. versionchanged:: 3.2.1
Added the ``version`` argument. Introduced cookie version 2
and made it the default.
"""
self.set_cookie(name, self.create_signed_value(name, value,
version=version),
expires_days=expires_days, **kwargs)
def create_signed_value(self, name, value, version=None):
"""Signs and timestamps a string so it cannot be forged.
Normally used via set_secure_cookie, but provided as a separate
method for non-cookie uses. To decode a value not stored
as a cookie use the optional value argument to get_secure_cookie.
.. versionchanged:: 3.2.1
Added the ``version`` argument. Introduced cookie version 2
and made it the default.
"""
self.require_setting("cookie_secret", "secure cookies")
return create_signed_value(self.application.settings["cookie_secret"],
name, value, version=version)
def get_secure_cookie(self, name, value=None, max_age_days=31,
min_version=None):
"""Returns the given signed cookie if it validates, or None.
The decoded cookie value is returned as a byte string (unlike
`get_cookie`).
.. versionchanged:: 3.2.1
Added the ``min_version`` argument. Introduced cookie version 2;
both versions 1 and 2 are accepted by default.
"""
self.require_setting("cookie_secret", "secure cookies")
if value is None:
value = self.get_cookie(name)
return decode_signed_value(self.application.settings["cookie_secret"],
name, value, max_age_days=max_age_days,
min_version=min_version)
def redirect(self, url, permanent=False, status=None):
"""Sends a redirect to the given (optionally relative) URL.
If the ``status`` argument is specified, that value is used as the
HTTP status code; otherwise either 301 (permanent) or 302
(temporary) is chosen based on the ``permanent`` argument.
The default is 302 (temporary).
"""
if self._headers_written:
raise Exception("Cannot redirect after headers have been written")
if status is None:
status = 301 if permanent else 302
else:
assert isinstance(status, int) and 300 <= status <= 399
self.set_status(status)
self.set_header("Location", utf8(url))
self.finish()
def write(self, chunk):
"""Writes the given chunk to the output buffer.
To write the output to the network, use the flush() method below.
If the given chunk is a dictionary, we write it as JSON and set
the Content-Type of the response to be ``application/json``.
(if you want to send JSON as a different ``Content-Type``, call
set_header *after* calling write()).
Note that lists are not converted to JSON because of a potential
cross-site security vulnerability. All JSON output should be
wrapped in a dictionary. More details at
http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and
https://github.com/facebook/tornado/issues/1009
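        Example (illustrative)::
            self.write({"status": "ok"})  # written as JSON with an
                                          # application/json Content-Type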
"""
if self._finished:
raise RuntimeError("Cannot write() after finish()")
if not isinstance(chunk, (bytes, unicode_type, dict)):
message = "write() only accepts bytes, unicode, and dict objects"
if isinstance(chunk, list):
message += ". Lists not accepted for security reasons; see http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write"
raise TypeError(message)
if isinstance(chunk, dict):
chunk = escape.json_encode(chunk)
self.set_header("Content-Type", "application/json; charset=UTF-8")
chunk = utf8(chunk)
self._write_buffer.append(chunk)
def render(self, template_name, **kwargs):
"""Renders the template with the given arguments as the response."""
html = self.render_string(template_name, **kwargs)
# Insert the additional JS and CSS added by the modules on the page
js_embed = []
js_files = []
css_embed = []
css_files = []
html_heads = []
html_bodies = []
for module in getattr(self, "_active_modules", {}).values():
embed_part = module.embedded_javascript()
if embed_part:
js_embed.append(utf8(embed_part))
file_part = module.javascript_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes)):
js_files.append(file_part)
else:
js_files.extend(file_part)
embed_part = module.embedded_css()
if embed_part:
css_embed.append(utf8(embed_part))
file_part = module.css_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes)):
css_files.append(file_part)
else:
css_files.extend(file_part)
head_part = module.html_head()
if head_part:
html_heads.append(utf8(head_part))
body_part = module.html_body()
if body_part:
html_bodies.append(utf8(body_part))
def is_absolute(path):
return any(path.startswith(x) for x in ["/", "http:", "https:"])
if js_files:
# Maintain order of JavaScript files given by modules
paths = []
unique_paths = set()
for path in js_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
js = ''.join('<script src="' + escape.xhtml_escape(p) +
'" type="text/javascript"></script>'
for p in paths)
sloc = html.rindex(b'</body>')
html = html[:sloc] + utf8(js) + b'\n' + html[sloc:]
if js_embed:
js = b'<script type="text/javascript">\n//<![CDATA[\n' + \
b'\n'.join(js_embed) + b'\n//]]>\n</script>'
sloc = html.rindex(b'</body>')
html = html[:sloc] + js + b'\n' + html[sloc:]
if css_files:
paths = []
unique_paths = set()
for path in css_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
css = ''.join('<link href="' + escape.xhtml_escape(p) + '" '
'type="text/css" rel="stylesheet"/>'
for p in paths)
hloc = html.index(b'</head>')
html = html[:hloc] + utf8(css) + b'\n' + html[hloc:]
if css_embed:
css = b'<style type="text/css">\n' + b'\n'.join(css_embed) + \
b'\n</style>'
hloc = html.index(b'</head>')
html = html[:hloc] + css + b'\n' + html[hloc:]
if html_heads:
hloc = html.index(b'</head>')
html = html[:hloc] + b''.join(html_heads) + b'\n' + html[hloc:]
if html_bodies:
hloc = html.index(b'</body>')
html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
self.finish(html)
def render_string(self, template_name, **kwargs):
"""Generate the given template with the given arguments.
We return the generated byte string (in utf8). To generate and
write a template as a response, use render() above.
"""
# If no template_path is specified, use the path of the calling file
template_path = self.get_template_path()
if not template_path:
frame = sys._getframe(0)
web_file = frame.f_code.co_filename
while frame.f_code.co_filename == web_file:
frame = frame.f_back
template_path = os.path.dirname(frame.f_code.co_filename)
with RequestHandler._template_loader_lock:
if template_path not in RequestHandler._template_loaders:
loader = self.create_template_loader(template_path)
RequestHandler._template_loaders[template_path] = loader
else:
loader = RequestHandler._template_loaders[template_path]
t = loader.load(template_name)
namespace = self.get_template_namespace()
namespace.update(kwargs)
return t.generate(**namespace)
def get_template_namespace(self):
"""Returns a dictionary to be used as the default template namespace.
May be overridden by subclasses to add or modify values.
The results of this method will be combined with additional
defaults in the `tornado.template` module and keyword arguments
to `render` or `render_string`.
"""
namespace = dict(
handler=self,
request=self.request,
current_user=self.current_user,
locale=self.locale,
_=self.locale.translate,
pgettext=self.locale.pgettext,
static_url=self.static_url,
xsrf_form_html=self.xsrf_form_html,
reverse_url=self.reverse_url
)
namespace.update(self.ui)
return namespace
def create_template_loader(self, template_path):
"""Returns a new template loader for the given path.
May be overridden by subclasses. By default returns a
directory-based loader on the given path, using the
``autoescape`` application setting. If a ``template_loader``
application setting is supplied, uses that instead.
"""
settings = self.application.settings
if "template_loader" in settings:
return settings["template_loader"]
kwargs = {}
if "autoescape" in settings:
# autoescape=None means "no escaping", so we have to be sure
# to only pass this kwarg if the user asked for it.
kwargs["autoescape"] = settings["autoescape"]
return template.Loader(template_path, **kwargs)
def flush(self, include_footers=False, callback=None):
"""Flushes the current output buffer to the network.
The ``callback`` argument, if given, can be used for flow control:
it will be run when all flushed data has been written to the socket.
Note that only one flush callback can be outstanding at a time;
if another flush occurs before the previous flush's callback
has been run, the previous callback will be discarded.
.. versionchanged:: 4.0
Now returns a `.Future` if no callback is given.
"""
chunk = b"".join(self._write_buffer)
self._write_buffer = []
if not self._headers_written:
self._headers_written = True
for transform in self._transforms:
self._status_code, self._headers, chunk = \
transform.transform_first_chunk(
self._status_code, self._headers,
chunk, include_footers)
# Ignore the chunk and only write the headers for HEAD requests
if self.request.method == "HEAD":
chunk = None
# Finalize the cookie headers (which have been stored in a side
# object so an outgoing cookie could be overwritten before it
# is sent).
if hasattr(self, "_new_cookie"):
for cookie in self._new_cookie.values():
self.add_header("Set-Cookie", cookie.OutputString(None))
start_line = httputil.ResponseStartLine('',
self._status_code,
self._reason)
return self.request.connection.write_headers(
start_line, self._headers, chunk, callback=callback)
else:
for transform in self._transforms:
chunk = transform.transform_chunk(chunk, include_footers)
# Ignore the chunk and only write the headers for HEAD requests
if self.request.method != "HEAD":
return self.request.connection.write(chunk, callback=callback)
else:
future = Future()
future.set_result(None)
return future
def finish(self, chunk=None):
"""Finishes this response, ending the HTTP request."""
if self._finished:
raise RuntimeError("finish() called twice")
if chunk is not None:
self.write(chunk)
# Automatically support ETags and add the Content-Length header if
# we have not flushed any content yet.
if not self._headers_written:
if (self._status_code == 200 and
self.request.method in ("GET", "HEAD") and
"Etag" not in self._headers):
self.set_etag_header()
if self.check_etag_header():
self._write_buffer = []
self.set_status(304)
if self._status_code == 304:
assert not self._write_buffer, "Cannot send body with 304"
self._clear_headers_for_304()
elif "Content-Length" not in self._headers:
content_length = sum(len(part) for part in self._write_buffer)
self.set_header("Content-Length", content_length)
if hasattr(self.request, "connection"):
# Now that the request is finished, clear the callback we
# set on the HTTPConnection (which would otherwise prevent the
# garbage collection of the RequestHandler when there
# are keepalive connections)
self.request.connection.set_close_callback(None)
self.flush(include_footers=True)
self.request.finish()
self._log()
self._finished = True
self.on_finish()
# Break up a reference cycle between this handler and the
# _ui_module closures to allow for faster GC on CPython.
self.ui = None
def send_error(self, status_code=500, **kwargs):
"""Sends the given HTTP error code to the browser.
If `flush()` has already been called, it is not possible to send
an error, so this method will simply terminate the response.
If output has been written but not yet flushed, it will be discarded
and replaced with the error page.
Override `write_error()` to customize the error page that is returned.
Additional keyword arguments are passed through to `write_error`.
"""
if self._headers_written:
gen_log.error("Cannot send error response after headers written")
if not self._finished:
# If we get an error between writing headers and finishing,
# we are unlikely to be able to finish due to a
# Content-Length mismatch. Try anyway to release the
# socket.
try:
self.finish()
except Exception:
gen_log.error("Failed to flush partial response",
exc_info=True)
return
self.clear()
reason = kwargs.get('reason')
if 'exc_info' in kwargs:
exception = kwargs['exc_info'][1]
if isinstance(exception, HTTPError) and exception.reason:
reason = exception.reason
self.set_status(status_code, reason=reason)
try:
self.write_error(status_code, **kwargs)
except Exception:
app_log.error("Uncaught exception in write_error", exc_info=True)
if not self._finished:
self.finish()
def write_error(self, status_code, **kwargs):
"""Override to implement custom error pages.
``write_error`` may call `write`, `render`, `set_header`, etc
to produce output as usual.
If this error was caused by an uncaught exception (including
HTTPError), an ``exc_info`` triple will be available as
``kwargs["exc_info"]``. Note that this exception may not be
the "current" exception for purposes of methods like
``sys.exc_info()`` or ``traceback.format_exc``.
"""
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
# in debug mode, try to send a traceback
self.set_header('Content-Type', 'text/plain')
for line in traceback.format_exception(*kwargs["exc_info"]):
self.write(line)
self.finish()
else:
self.finish("<html><title>%(code)d: %(message)s</title>"
"<body>%(code)d: %(message)s</body></html>" % {
"code": status_code,
"message": self._reason,
})
@property
def locale(self):
"""The locale for the current session.
Determined by either `get_user_locale`, which you can override to
set the locale based on, e.g., a user preference stored in a
database, or `get_browser_locale`, which uses the ``Accept-Language``
header.
.. versionchanged: 4.1
Added a property setter.
"""
if not hasattr(self, "_locale"):
self._locale = self.get_user_locale()
if not self._locale:
self._locale = self.get_browser_locale()
assert self._locale
return self._locale
@locale.setter
def locale(self, value):
self._locale = value
def get_user_locale(self):
"""Override to determine the locale from the authenticated user.
If None is returned, we fall back to `get_browser_locale()`.
This method should return a `tornado.locale.Locale` object,
most likely obtained via a call like ``tornado.locale.get("en")``
"""
return None
def get_browser_locale(self, default="en_US"):
"""Determines the user's locale from ``Accept-Language`` header.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
"""
if "Accept-Language" in self.request.headers:
languages = self.request.headers["Accept-Language"].split(",")
locales = []
for language in languages:
parts = language.strip().split(";")
if len(parts) > 1 and parts[1].startswith("q="):
try:
score = float(parts[1][2:])
except (ValueError, TypeError):
score = 0.0
else:
score = 1.0
locales.append((parts[0], score))
if locales:
locales.sort(key=lambda pair: pair[1], reverse=True)
codes = [l[0] for l in locales]
return locale.get(*codes)
return locale.get(default)
@property
def current_user(self):
"""The authenticated user for this request.
This is a cached version of `get_current_user`, which you can
override to set the user based on, e.g., a cookie. If that
method is not overridden, this method always returns None.
We lazy-load the current user the first time this method is called
and cache the result after that.
"""
if not hasattr(self, "_current_user"):
self._current_user = self.get_current_user()
return self._current_user
@current_user.setter
def current_user(self, value):
self._current_user = value
def get_current_user(self):
"""Override to determine the current user from, e.g., a cookie."""
return None
def get_login_url(self):
"""Override to customize the login URL based on the request.
By default, we use the ``login_url`` application setting.
"""
self.require_setting("login_url", "@tornado.web.authenticated")
return self.application.settings["login_url"]
def get_template_path(self):
"""Override to customize template path for each handler.
By default, we use the ``template_path`` application setting.
Return None to load templates relative to the calling file.
"""
return self.application.settings.get("template_path")
@property
def xsrf_token(self):
"""The XSRF-prevention token for the current user/session.
To prevent cross-site request forgery, we set an '_xsrf' cookie
and include the same '_xsrf' value as an argument with all POST
requests. If the two do not match, we reject the form submission
as a potential forgery.
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
.. versionchanged:: 3.2.2
           The xsrf token will now have a random mask applied in every
request, which makes it safe to include the token in pages
that are compressed. See http://breachattack.com for more
information on the issue fixed by this change. Old (version 1)
cookies will be converted to version 2 when this method is called
unless the ``xsrf_cookie_version`` `Application` setting is
set to 1.
"""
if not hasattr(self, "_xsrf_token"):
version, token, timestamp = self._get_raw_xsrf_token()
output_version = self.settings.get("xsrf_cookie_version", 2)
if output_version == 1:
self._xsrf_token = binascii.b2a_hex(token)
elif output_version == 2:
mask = os.urandom(4)
self._xsrf_token = b"|".join([
b"2",
binascii.b2a_hex(mask),
binascii.b2a_hex(_websocket_mask(mask, token)),
utf8(str(int(timestamp)))])
else:
raise ValueError("unknown xsrf cookie version %d",
output_version)
if version is None:
expires_days = 30 if self.current_user else None
self.set_cookie("_xsrf", self._xsrf_token,
expires_days=expires_days)
return self._xsrf_token
def _get_raw_xsrf_token(self):
"""Read or generate the xsrf token in its raw form.
The raw_xsrf_token is a tuple containing:
* version: the version of the cookie from which this token was read,
or None if we generated a new token in this request.
* token: the raw token data; random (non-ascii) bytes.
* timestamp: the time this token was generated (will not be accurate
for version 1 cookies)
"""
if not hasattr(self, '_raw_xsrf_token'):
cookie = self.get_cookie("_xsrf")
if cookie:
version, token, timestamp = self._decode_xsrf_token(cookie)
else:
version, token, timestamp = None, None, None
if token is None:
version = None
token = os.urandom(16)
timestamp = time.time()
self._raw_xsrf_token = (version, token, timestamp)
return self._raw_xsrf_token
def _decode_xsrf_token(self, cookie):
"""Convert a cookie string into a the tuple form returned by
_get_raw_xsrf_token.
"""
try:
m = _signed_value_version_re.match(utf8(cookie))
if m:
version = int(m.group(1))
if version == 2:
_, mask, masked_token, timestamp = cookie.split("|")
mask = binascii.a2b_hex(utf8(mask))
token = _websocket_mask(
mask, binascii.a2b_hex(utf8(masked_token)))
timestamp = int(timestamp)
return version, token, timestamp
else:
# Treat unknown versions as not present instead of failing.
raise Exception("Unknown xsrf cookie version")
else:
version = 1
try:
token = binascii.a2b_hex(utf8(cookie))
except (binascii.Error, TypeError):
token = utf8(cookie)
# We don't have a usable timestamp in older versions.
timestamp = int(time.time())
return (version, token, timestamp)
except Exception:
# Catch exceptions and return nothing instead of failing.
gen_log.debug("Uncaught exception in _decode_xsrf_token",
exc_info=True)
return None, None, None
def check_xsrf_cookie(self):
"""Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.
To prevent cross-site request forgery, we set an ``_xsrf``
cookie and include the same value as a non-cookie
field with all ``POST`` requests. If the two do not match, we
reject the form submission as a potential forgery.
The ``_xsrf`` value may be set as either a form field named ``_xsrf``
or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
(the latter is accepted for compatibility with Django).
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
Prior to release 1.1.1, this check was ignored if the HTTP header
``X-Requested-With: XMLHTTPRequest`` was present. This exception
has been shown to be insecure and has been removed. For more
information please see
http://www.djangoproject.com/weblog/2011/feb/08/security/
http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
.. versionchanged:: 3.2.2
Added support for cookie version 2. Both versions 1 and 2 are
supported.
"""
token = (self.get_argument("_xsrf", None) or
self.request.headers.get("X-Xsrftoken") or
self.request.headers.get("X-Csrftoken"))
if not token:
raise HTTPError(403, "'_xsrf' argument missing from POST")
_, token, _ = self._decode_xsrf_token(token)
_, expected_token, _ = self._get_raw_xsrf_token()
if not _time_independent_equals(utf8(token), utf8(expected_token)):
raise HTTPError(403, "XSRF cookie does not match POST argument")
def xsrf_form_html(self):
"""An HTML ``<input/>`` element to be included with all POST forms.
It defines the ``_xsrf`` input value, which we check on all POST
requests to prevent cross-site request forgery. If you have set
the ``xsrf_cookies`` application setting, you must include this
HTML within all of your HTML forms.
In a template, this method should be called with ``{% module
xsrf_form_html() %}``
See `check_xsrf_cookie()` above for more information.
"""
return '<input type="hidden" name="_xsrf" value="' + \
escape.xhtml_escape(self.xsrf_token) + '"/>'
def static_url(self, path, include_host=None, **kwargs):
"""Returns a static URL for the given relative static file path.
This method requires you set the ``static_path`` setting in your
application (which specifies the root directory of your static
files).
This method returns a versioned url (by default appending
``?v=<signature>``), which allows the static files to be
cached indefinitely. This can be disabled by passing
``include_version=False`` (in the default implementation;
other static file implementations are not required to support
this, but they may support other options).
By default this method returns URLs relative to the current
host, but if ``include_host`` is true the URL returned will be
absolute. If this handler has an ``include_host`` attribute,
that value will be used as the default for all `static_url`
calls that do not pass ``include_host`` as a keyword argument.
"""
self.require_setting("static_path", "static_url")
get_url = self.settings.get("static_handler_class",
StaticFileHandler).make_static_url
if include_host is None:
include_host = getattr(self, "include_host", False)
if include_host:
base = self.request.protocol + "://" + self.request.host
else:
base = ""
return base + get_url(self.settings, path, **kwargs)
def require_setting(self, name, feature="this feature"):
"""Raises an exception if the given app setting is not defined."""
if not self.application.settings.get(name):
raise Exception("You must define the '%s' setting in your "
"application to use %s" % (name, feature))
def reverse_url(self, name, *args):
"""Alias for `Application.reverse_url`."""
return self.application.reverse_url(name, *args)
def compute_etag(self):
"""Computes the etag header to be used for this request.
By default uses a hash of the content written so far.
May be overridden to provide custom etag implementations,
or may return None to disable tornado's default etag support.
"""
hasher = hashlib.sha1()
for part in self._write_buffer:
hasher.update(part)
return '"%s"' % hasher.hexdigest()
def set_etag_header(self):
"""Sets the response's Etag header using ``self.compute_etag()``.
Note: no header will be set if ``compute_etag()`` returns ``None``.
This method is called automatically when the request is finished.
"""
etag = self.compute_etag()
if etag is not None:
self.set_header("Etag", etag)
def check_etag_header(self):
"""Checks the ``Etag`` header against requests's ``If-None-Match``.
Returns ``True`` if the request's Etag matches and a 304 should be
returned. For example::
self.set_etag_header()
if self.check_etag_header():
self.set_status(304)
return
This method is called automatically when the request is finished,
but may be called earlier for applications that override
`compute_etag` and want to do an early check for ``If-None-Match``
before completing the request. The ``Etag`` header should be set
(perhaps with `set_etag_header`) before calling this method.
"""
computed_etag = utf8(self._headers.get("Etag", ""))
# Find all weak and strong etag values from If-None-Match header
# because RFC 7232 allows multiple etag values in a single header.
etags = re.findall(
br'\*|(?:W/)?"[^"]*"',
utf8(self.request.headers.get("If-None-Match", ""))
)
if not computed_etag or not etags:
return False
match = False
if etags[0] == b'*':
match = True
else:
# Use a weak comparison when comparing entity-tags.
val = lambda x: x[2:] if x.startswith(b'W/') else x
for etag in etags:
if val(etag) == val(computed_etag):
match = True
break
return match
def _stack_context_handle_exception(self, type, value, traceback):
try:
# For historical reasons _handle_request_exception only takes
# the exception value instead of the full triple,
# so re-raise the exception to ensure that it's in
# sys.exc_info()
raise_exc_info((type, value, traceback))
except Exception:
self._handle_request_exception(value)
return True
@gen.coroutine
def _execute(self, transforms, *args, **kwargs):
"""Executes this request with the given output transforms."""
self._transforms = transforms
try:
if self.request.method not in self.SUPPORTED_METHODS:
raise HTTPError(405)
self.path_args = [self.decode_argument(arg) for arg in args]
self.path_kwargs = dict((k, self.decode_argument(v, name=k))
for (k, v) in kwargs.items())
# If XSRF cookies are turned on, reject form submissions without
# the proper cookie
if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
self.application.settings.get("xsrf_cookies"):
self.check_xsrf_cookie()
result = self.prepare()
if is_future(result):
result = yield result
if result is not None:
raise TypeError("Expected None, got %r" % result)
if self._prepared_future is not None:
# Tell the Application we've finished with prepare()
# and are ready for the body to arrive.
self._prepared_future.set_result(None)
if self._finished:
return
if _has_stream_request_body(self.__class__):
# In streaming mode request.body is a Future that signals
# the body has been completely received. The Future has no
# result; the data has been passed to self.data_received
# instead.
try:
yield self.request.body
except iostream.StreamClosedError:
return
method = getattr(self, self.request.method.lower())
result = method(*self.path_args, **self.path_kwargs)
if is_future(result):
result = yield result
if result is not None:
raise TypeError("Expected None, got %r" % result)
if self._auto_finish and not self._finished:
self.finish()
except Exception as e:
try:
self._handle_request_exception(e)
except Exception:
app_log.error("Exception in exception handler", exc_info=True)
if (self._prepared_future is not None and
not self._prepared_future.done()):
# In case we failed before setting _prepared_future, do it
# now (to unblock the HTTP server). Note that this is not
# in a finally block to avoid GC issues prior to Python 3.4.
self._prepared_future.set_result(None)
def data_received(self, chunk):
"""Implement this method to handle streamed request data.
Requires the `.stream_request_body` decorator.
"""
raise NotImplementedError()
def _log(self):
"""Logs the current request.
Sort of deprecated since this functionality was moved to the
Application, but left in place for the benefit of existing apps
that have overridden this method.
"""
self.application.log_request(self)
def _request_summary(self):
return "%s %s (%s)" % (self.request.method, self.request.uri,
self.request.remote_ip)
def _handle_request_exception(self, e):
if isinstance(e, Finish):
# Not an error; just finish the request without logging.
if not self._finished:
self.finish()
return
try:
self.log_exception(*sys.exc_info())
except Exception:
# An error here should still get a best-effort send_error()
# to avoid leaking the connection.
app_log.error("Error in exception logger", exc_info=True)
if self._finished:
# Extra errors after the request has been finished should
# be logged, but there is no reason to continue to try and
# send a response.
return
if isinstance(e, HTTPError):
if e.status_code not in httputil.responses and not e.reason:
gen_log.error("Bad HTTP status code: %d", e.status_code)
self.send_error(500, exc_info=sys.exc_info())
else:
self.send_error(e.status_code, exc_info=sys.exc_info())
else:
self.send_error(500, exc_info=sys.exc_info())
def log_exception(self, typ, value, tb):
"""Override to customize logging of uncaught exceptions.
By default logs instances of `HTTPError` as warnings without
stack traces (on the ``tornado.general`` logger), and all
other exceptions as errors with stack traces (on the
``tornado.application`` logger).
.. versionadded:: 3.1
"""
if isinstance(value, HTTPError):
if value.log_message:
format = "%d %s: " + value.log_message
args = ([value.status_code, self._request_summary()] +
list(value.args))
gen_log.warning(format, *args)
else:
app_log.error("Uncaught exception %s\n%r", self._request_summary(),
self.request, exc_info=(typ, value, tb))
def _ui_module(self, name, module):
def render(*args, **kwargs):
if not hasattr(self, "_active_modules"):
self._active_modules = {}
if name not in self._active_modules:
self._active_modules[name] = module(self)
rendered = self._active_modules[name].render(*args, **kwargs)
return rendered
return render
def _ui_method(self, method):
return lambda *args, **kwargs: method(self, *args, **kwargs)
def _clear_headers_for_304(self):
# 304 responses should not contain entity headers (defined in
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
# not explicitly allowed by
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
headers = ["Allow", "Content-Encoding", "Content-Language",
"Content-Length", "Content-MD5", "Content-Range",
"Content-Type", "Last-Modified"]
for h in headers:
self.clear_header(h)
def asynchronous(method):
"""Wrap request handler methods with this if they are asynchronous.
This decorator is for callback-style asynchronous methods; for
coroutines, use the ``@gen.coroutine`` decorator without
``@asynchronous``. (It is legal for legacy reasons to use the two
decorators together provided ``@asynchronous`` is first, but
``@asynchronous`` will be ignored in this case)
This decorator should only be applied to the :ref:`HTTP verb
methods <verbs>`; its behavior is undefined for any other method.
This decorator does not *make* a method asynchronous; it tells
the framework that the method *is* asynchronous. For this decorator
to be useful the method must (at least sometimes) do something
asynchronous.
If this decorator is given, the response is not finished when the
method returns. It is up to the request handler to call
`self.finish() <RequestHandler.finish>` to finish the HTTP
request. Without this decorator, the request is automatically
finished when the ``get()`` or ``post()`` method returns. Example:
.. testcode::
class MyRequestHandler(RequestHandler):
@asynchronous
def get(self):
http = httpclient.AsyncHTTPClient()
http.fetch("http://friendfeed.com/", self._on_download)
def _on_download(self, response):
self.write("Downloaded!")
self.finish()
.. testoutput::
:hide:
.. versionadded:: 3.1
The ability to use ``@gen.coroutine`` without ``@asynchronous``.
"""
# Delay the IOLoop import because it's not available on app engine.
from tornado.ioloop import IOLoop
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
self._auto_finish = False
with stack_context.ExceptionStackContext(
self._stack_context_handle_exception):
result = method(self, *args, **kwargs)
if is_future(result):
# If @asynchronous is used with @gen.coroutine, (but
# not @gen.engine), we can automatically finish the
# request when the future resolves. Additionally,
# the Future will swallow any exceptions so we need
# to throw them back out to the stack context to finish
# the request.
def future_complete(f):
f.result()
if not self._finished:
self.finish()
IOLoop.current().add_future(result, future_complete)
# Once we have done this, hide the Future from our
# caller (i.e. RequestHandler._when_complete), which
# would otherwise set up its own callback and
# exception handler (resulting in exceptions being
# logged twice).
return None
return result
return wrapper
def stream_request_body(cls):
"""Apply to `RequestHandler` subclasses to enable streaming body support.
This decorator implies the following changes:
* `.HTTPServerRequest.body` is undefined, and body arguments will not
be included in `RequestHandler.get_argument`.
* `RequestHandler.prepare` is called when the request headers have been
read instead of after the entire body has been read.
* The subclass must define a method ``data_received(self, data):``, which
will be called zero or more times as data is available. Note that
if the request has an empty body, ``data_received`` may not be called.
* ``prepare`` and ``data_received`` may return Futures (such as via
``@gen.coroutine``), in which case the next method will not be called
until those futures have completed.
* The regular HTTP method (``post``, ``put``, etc) will be called after
the entire body has been read.
There is a subtle interaction between ``data_received`` and asynchronous
``prepare``: The first call to ``data_received`` may occur at any point
after the call to ``prepare`` has returned *or yielded*.
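A minimal sketch of a streaming handler (the byte counting and handler
name are illustrative, not part of the API):
.. testcode::
    @stream_request_body
    class UploadHandler(RequestHandler):
        def prepare(self):
            self.bytes_read = 0
        def data_received(self, chunk):
            self.bytes_read += len(chunk)
        def put(self):
            self.write("received %d bytes" % self.bytes_read)
.. testoutput::
    :hide: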
"""
if not issubclass(cls, RequestHandler):
raise TypeError("expected subclass of RequestHandler, got %r" % cls)
cls._stream_request_body = True
return cls
def _has_stream_request_body(cls):
if not issubclass(cls, RequestHandler):
raise TypeError("expected subclass of RequestHandler, got %r" % cls)
return getattr(cls, '_stream_request_body', False)
def removeslash(method):
"""Use this decorator to remove trailing slashes from the request path.
For example, a request to ``/foo/`` would redirect to ``/foo`` with this
decorator. Your request handler mapping should use a regular expression
like ``r'/foo/*'`` in conjunction with using the decorator.
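A minimal usage sketch (the handler and its output are illustrative):
.. testcode::
    class ArticleHandler(RequestHandler):
        @removeslash
        def get(self):
            self.write("path: %s" % self.request.path)
.. testoutput::
    :hide: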
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path.rstrip("/")
if uri: # don't try to redirect '/' to ''
if self.request.query:
uri += "?" + self.request.query
self.redirect(uri, permanent=True)
return
else:
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
def addslash(method):
"""Use this decorator to add a missing trailing slash to the request path.
For example, a request to ``/foo`` would redirect to ``/foo/`` with this
decorator. Your request handler mapping should use a regular expression
like ``r'/foo/?'`` in conjunction with using the decorator.
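A minimal usage sketch (the handler and its output are illustrative):
.. testcode::
    class DirHandler(RequestHandler):
        @addslash
        def get(self):
            self.write("path: %s" % self.request.path)
.. testoutput::
    :hide: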
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path + "/"
if self.request.query:
uri += "?" + self.request.query
self.redirect(uri, permanent=True)
return
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
class Application(httputil.HTTPServerConnectionDelegate):
"""A collection of request handlers that make up a web application.
Instances of this class are callable and can be passed directly to
HTTPServer to serve the application::
application = web.Application([
(r"/", MainPageHandler),
])
http_server = httpserver.HTTPServer(application)
http_server.listen(8080)
ioloop.IOLoop.current().start()
The constructor for this class takes in a list of `URLSpec` objects
or (regexp, request_class) tuples. When we receive requests, we
iterate over the list in order and instantiate an instance of the
first request class whose regexp matches the request path.
The request class can be specified as either a class object or a
(fully-qualified) name.
Each tuple can contain additional elements, which correspond to the
arguments to the `URLSpec` constructor. (Prior to Tornado 3.2, only
tuples of two or three elements were allowed.)
A dictionary may be passed as the third element of the tuple,
which will be used as keyword arguments to the handler's
constructor and `~RequestHandler.initialize` method. This pattern
is used for the `StaticFileHandler` in this example (note that a
`StaticFileHandler` can be installed automatically with the
static_path setting described below)::
application = web.Application([
(r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
We support virtual hosts with the `add_handlers` method, which takes in
a host regular expression as the first argument::
application.add_handlers(r"www\.myhost\.com", [
(r"/article/([0-9]+)", ArticleHandler),
])
You can serve static files by sending the ``static_path`` setting
as a keyword argument. We will serve those files from the
``/static/`` URI (this is configurable with the
``static_url_prefix`` setting), and we will serve ``/favicon.ico``
and ``/robots.txt`` from the same directory. A custom subclass of
`StaticFileHandler` can be specified with the
``static_handler_class`` setting.
"""
def __init__(self, handlers=None, default_host="", transforms=None,
**settings):
if transforms is None:
self.transforms = []
if settings.get("compress_response") or settings.get("gzip"):
self.transforms.append(GZipContentEncoding)
else:
self.transforms = transforms
self.handlers = []
self.named_handlers = {}
self.default_host = default_host
self.settings = settings
self.ui_modules = {'linkify': _linkify,
'xsrf_form_html': _xsrf_form_html,
'Template': TemplateModule,
}
self.ui_methods = {}
self._load_ui_modules(settings.get("ui_modules", {}))
self._load_ui_methods(settings.get("ui_methods", {}))
if self.settings.get("static_path"):
path = self.settings["static_path"]
handlers = list(handlers or [])
static_url_prefix = settings.get("static_url_prefix",
"/static/")
static_handler_class = settings.get("static_handler_class",
StaticFileHandler)
static_handler_args = settings.get("static_handler_args", {})
static_handler_args['path'] = path
for pattern in [re.escape(static_url_prefix) + r"(.*)",
r"/(favicon\.ico)", r"/(robots\.txt)"]:
handlers.insert(0, (pattern, static_handler_class,
static_handler_args))
if handlers:
self.add_handlers(".*$", handlers)
if self.settings.get('debug'):
self.settings.setdefault('autoreload', True)
self.settings.setdefault('compiled_template_cache', False)
self.settings.setdefault('static_hash_cache', False)
self.settings.setdefault('serve_traceback', True)
# Automatically reload modified modules
if self.settings.get('autoreload'):
from tornado import autoreload
autoreload.start()
def listen(self, port, address="", **kwargs):
"""Starts an HTTP server for this application on the given port.
This is a convenience alias for creating an `.HTTPServer`
object and calling its listen method. Keyword arguments not
supported by `HTTPServer.listen <.TCPServer.listen>` are passed to the
`.HTTPServer` constructor. For advanced uses
(e.g. multi-process mode), do not use this method; create an
`.HTTPServer` and call its
`.TCPServer.bind`/`.TCPServer.start` methods directly.
Note that after calling this method you still need to call
``IOLoop.current().start()`` to start the server.
"""
# import is here rather than top level because HTTPServer
# is not importable on appengine
from tornado.httpserver import HTTPServer
server = HTTPServer(self, **kwargs)
server.listen(port, address)
def add_handlers(self, host_pattern, host_handlers):
"""Appends the given handlers to our handler list.
Host patterns are processed sequentially in the order they were
added. All matching patterns will be considered.
"""
if not host_pattern.endswith("$"):
host_pattern += "$"
handlers = []
# The handlers with the wildcard host_pattern are a special
# case - they're added in the constructor but should have lower
# precedence than the more-precise handlers added later.
# If a wildcard handler group exists, it should always be last
# in the list, so insert new groups just before it.
if self.handlers and self.handlers[-1][0].pattern == '.*$':
self.handlers.insert(-1, (re.compile(host_pattern), handlers))
else:
self.handlers.append((re.compile(host_pattern), handlers))
for spec in host_handlers:
if isinstance(spec, (tuple, list)):
assert len(spec) in (2, 3, 4)
spec = URLSpec(*spec)
handlers.append(spec)
if spec.name:
if spec.name in self.named_handlers:
app_log.warning(
"Multiple handlers named %s; replacing previous value",
spec.name)
self.named_handlers[spec.name] = spec
def add_transform(self, transform_class):
self.transforms.append(transform_class)
def _get_host_handlers(self, request):
host = split_host_and_port(request.host.lower())[0]
matches = []
for pattern, handlers in self.handlers:
if pattern.match(host):
matches.extend(handlers)
# Look for default host if not behind load balancer (for debugging)
if not matches and "X-Real-Ip" not in request.headers:
for pattern, handlers in self.handlers:
if pattern.match(self.default_host):
matches.extend(handlers)
return matches or None
def _load_ui_methods(self, methods):
if isinstance(methods, types.ModuleType):
self._load_ui_methods(dict((n, getattr(methods, n))
for n in dir(methods)))
elif isinstance(methods, list):
for m in methods:
self._load_ui_methods(m)
else:
for name, fn in methods.items():
if not name.startswith("_") and hasattr(fn, "__call__") \
and name[0].lower() == name[0]:
self.ui_methods[name] = fn
def _load_ui_modules(self, modules):
if isinstance(modules, types.ModuleType):
self._load_ui_modules(dict((n, getattr(modules, n))
for n in dir(modules)))
elif isinstance(modules, list):
for m in modules:
self._load_ui_modules(m)
else:
assert isinstance(modules, dict)
for name, cls in modules.items():
try:
if issubclass(cls, UIModule):
self.ui_modules[name] = cls
except TypeError:
pass
def start_request(self, server_conn, request_conn):
# Modern HTTPServer interface
return _RequestDispatcher(self, request_conn)
def __call__(self, request):
# Legacy HTTPServer interface
dispatcher = _RequestDispatcher(self, None)
dispatcher.set_request(request)
return dispatcher.execute()
def reverse_url(self, name, *args):
"""Returns a URL path for handler named ``name``
The handler must be added to the application as a named `URLSpec`.
Args will be substituted for capturing groups in the `URLSpec` regex.
They will be converted to strings if necessary, encoded as utf8,
and url-escaped.
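A minimal sketch (the handler, route, and name are illustrative):
.. testcode::
    class UserHandler(RequestHandler):
        def get(self, uid):
            self.write("user %s" % uid)
    app = Application([
        url(r"/user/([0-9]+)", UserHandler, name="user"),
    ])
    assert app.reverse_url("user", 42) == "/user/42"
.. testoutput::
    :hide: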
"""
if name in self.named_handlers:
return self.named_handlers[name].reverse(*args)
raise KeyError("%s not found in named urls" % name)
def log_request(self, handler):
"""Writes a completed HTTP request to the logs.
By default writes to the python root logger. To change
this behavior either subclass Application and override this method,
or pass a function in the application settings dictionary as
``log_function``.
"""
if "log_function" in self.settings:
self.settings["log_function"](handler)
return
if handler.get_status() < 400:
log_method = access_log.info
elif handler.get_status() < 500:
log_method = access_log.warning
else:
log_method = access_log.error
request_time = 1000.0 * handler.request.request_time()
log_method("%d %s %.2fms", handler.get_status(),
handler._request_summary(), request_time)
class _RequestDispatcher(httputil.HTTPMessageDelegate):
def __init__(self, application, connection):
self.application = application
self.connection = connection
self.request = None
self.chunks = []
self.handler_class = None
self.handler_kwargs = None
self.path_args = []
self.path_kwargs = {}
def headers_received(self, start_line, headers):
self.set_request(httputil.HTTPServerRequest(
connection=self.connection, start_line=start_line,
headers=headers))
if self.stream_request_body:
self.request.body = Future()
return self.execute()
def set_request(self, request):
self.request = request
self._find_handler()
self.stream_request_body = _has_stream_request_body(self.handler_class)
def _find_handler(self):
# Identify the handler to use as soon as we have the request.
# Save url path arguments for later.
app = self.application
handlers = app._get_host_handlers(self.request)
if not handlers:
self.handler_class = RedirectHandler
self.handler_kwargs = dict(url="%s://%s/"
% (self.request.protocol,
app.default_host))
return
for spec in handlers:
match = spec.regex.match(self.request.path)
if match:
self.handler_class = spec.handler_class
self.handler_kwargs = spec.kwargs
if spec.regex.groups:
# Pass matched groups to the handler. Since
# match.groups() includes both named and
# unnamed groups, we want to use either groups
# or groupdict but not both.
if spec.regex.groupindex:
self.path_kwargs = dict(
(str(k), _unquote_or_none(v))
for (k, v) in match.groupdict().items())
else:
self.path_args = [_unquote_or_none(s)
for s in match.groups()]
return
if app.settings.get('default_handler_class'):
self.handler_class = app.settings['default_handler_class']
self.handler_kwargs = app.settings.get(
'default_handler_args', {})
else:
self.handler_class = ErrorHandler
self.handler_kwargs = dict(status_code=404)
def data_received(self, data):
if self.stream_request_body:
return self.handler.data_received(data)
else:
self.chunks.append(data)
def finish(self):
if self.stream_request_body:
self.request.body.set_result(None)
else:
self.request.body = b''.join(self.chunks)
self.request._parse_body()
self.execute()
def on_connection_close(self):
if self.stream_request_body:
self.handler.on_connection_close()
else:
self.chunks = None
def execute(self):
# If template cache is disabled (usually in the debug mode),
# re-compile templates and reload static files on every
# request so you don't need to restart to see changes
if not self.application.settings.get("compiled_template_cache", True):
with RequestHandler._template_loader_lock:
for loader in RequestHandler._template_loaders.values():
loader.reset()
if not self.application.settings.get('static_hash_cache', True):
StaticFileHandler.reset()
self.handler = self.handler_class(self.application, self.request,
**self.handler_kwargs)
transforms = [t(self.request) for t in self.application.transforms]
if self.stream_request_body:
self.handler._prepared_future = Future()
# Note that if an exception escapes handler._execute it will be
# trapped in the Future it returns (which we are ignoring here,
# leaving it to be logged when the Future is GC'd).
# However, that shouldn't happen because _execute has a blanket
# except handler, and we cannot easily access the IOLoop here to
# call add_future (because of the requirement to remain compatible
# with WSGI)
f = self.handler._execute(transforms, *self.path_args,
**self.path_kwargs)
# If we are streaming the request body, then execute() is finished
# when the handler has prepared to receive the body. If not,
# it doesn't matter when execute() finishes (so we return None)
return self.handler._prepared_future
class HTTPError(Exception):
"""An exception that will turn into an HTTP error response.
Raising an `HTTPError` is a convenient alternative to calling
`RequestHandler.send_error` since it automatically ends the
current function.
To customize the response sent with an `HTTPError`, override
`RequestHandler.write_error`.
:arg int status_code: HTTP status code. Must be listed in
`httplib.responses <http.client.responses>` unless the ``reason``
keyword argument is given.
:arg string log_message: Message to be written to the log for this error
(will not be shown to the user unless the `Application` is in debug
mode). May contain ``%s``-style placeholders, which will be filled
in with remaining positional parameters.
:arg string reason: Keyword-only argument. The HTTP "reason" phrase
to pass in the status line along with ``status_code``. Normally
determined automatically from ``status_code``, but can be set
explicitly when using a non-standard numeric code.
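A minimal sketch (the status code and message are illustrative):
.. testcode::
    class AdminHandler(RequestHandler):
        def get(self):
            if not self.current_user:
                raise HTTPError(403, "unauthorized access to %s",
                                self.request.path)
.. testoutput::
    :hide: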
"""
def __init__(self, status_code, log_message=None, *args, **kwargs):
self.status_code = status_code
self.log_message = log_message
self.args = args
self.reason = kwargs.get('reason', None)
if log_message and not args:
self.log_message = log_message.replace('%', '%%')
def __str__(self):
message = "HTTP %d: %s" % (
self.status_code,
self.reason or httputil.responses.get(self.status_code, 'Unknown'))
if self.log_message:
return message + " (" + (self.log_message % self.args) + ")"
else:
return message
class Finish(Exception):
"""An exception that ends the request without producing an error response.
When `Finish` is raised in a `RequestHandler`, the request will end
(calling `RequestHandler.finish` if it hasn't already been called),
but the outgoing response will not be modified and the error-handling
methods (including `RequestHandler.write_error`) will not be called.
This can be a more convenient way to implement custom error pages
than overriding ``write_error`` (especially in library code)::
if self.current_user is None:
self.set_status(401)
self.set_header('WWW-Authenticate', 'Basic realm="something"')
raise Finish()
"""
pass
class MissingArgumentError(HTTPError):
"""Exception raised by `RequestHandler.get_argument`.
This is a subclass of `HTTPError`, so if it is uncaught a 400 response
code will be used instead of 500 (and a stack trace will not be logged).
.. versionadded:: 3.1
"""
def __init__(self, arg_name):
super(MissingArgumentError, self).__init__(
400, 'Missing argument %s' % arg_name)
self.arg_name = arg_name
class ErrorHandler(RequestHandler):
"""Generates an error response with ``status_code`` for all requests."""
def initialize(self, status_code):
self.set_status(status_code)
def prepare(self):
raise HTTPError(self._status_code)
def check_xsrf_cookie(self):
# POSTs to an ErrorHandler don't actually have side effects,
# so we don't need to check the xsrf token. This allows POSTs
# to the wrong url to return a 404 instead of 403.
pass
class RedirectHandler(RequestHandler):
"""Redirects the client to the given URL for all GET requests.
You should provide the keyword argument ``url`` to the handler, e.g.::
application = web.Application([
(r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
])
"""
def initialize(self, url, permanent=True):
self._url = url
self._permanent = permanent
def get(self):
self.redirect(self._url, permanent=self._permanent)
class StaticFileHandler(RequestHandler):
"""A simple handler that can serve static content from a directory.
A `StaticFileHandler` is configured automatically if you pass the
``static_path`` keyword argument to `Application`. This handler
can be customized with the ``static_url_prefix``, ``static_handler_class``,
and ``static_handler_args`` settings.
To map an additional path to this handler for a static data directory
you would add a line to your application like::
application = web.Application([
(r"/content/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
The handler constructor requires a ``path`` argument, which specifies the
local root directory of the content to be served.
Note that a capture group in the regex is required to parse the value for
the ``path`` argument to the get() method (different than the constructor
argument above); see `URLSpec` for details.
To maximize the effectiveness of browser caching, this class supports
versioned urls (by default using the argument ``?v=``). If a version
is given, we instruct the browser to cache this file indefinitely.
`make_static_url` (also available as `RequestHandler.static_url`) can
be used to construct a versioned url.
This handler is intended primarily for use in development and light-duty
file serving; for heavy traffic it will be more efficient to use
a dedicated static file server (such as nginx or Apache). We support
the HTTP ``Accept-Ranges`` mechanism to return partial content (because
some browsers require this functionality to be present to seek in
HTML5 audio or video), but this handler should not be used with
files that are too large to fit comfortably in memory.
**Subclassing notes**
This class is designed to be extensible by subclassing, but because
of the way static urls are generated with class methods rather than
instance methods, the inheritance patterns are somewhat unusual.
Be sure to use the ``@classmethod`` decorator when overriding a
class method. Instance methods may use the attributes ``self.path``,
``self.absolute_path``, and ``self.modified``.
Subclasses should only override methods discussed in this section;
overriding other methods is error-prone. Overriding
``StaticFileHandler.get`` is particularly problematic due to the
tight coupling with ``compute_etag`` and other methods.
To change the way static urls are generated (e.g. to match the behavior
of another server or CDN), override `make_static_url`, `parse_url_path`,
`get_cache_time`, and/or `get_version`.
To replace all interaction with the filesystem (e.g. to serve
static content from a database), override `get_content`,
`get_content_size`, `get_modified_time`, `get_absolute_path`, and
`validate_absolute_path`.
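A minimal sketch of a subclass that serves content from a dict instead
of the filesystem (``_DB`` and its contents are illustrative):
.. testcode::
    class InMemoryFileHandler(StaticFileHandler):
        _DB = {"hello.txt": b"hello world"}
        @classmethod
        def get_absolute_path(cls, root, path):
            return path
        def validate_absolute_path(self, root, absolute_path):
            if absolute_path not in self._DB:
                raise HTTPError(404)
            return absolute_path
        @classmethod
        def get_content(cls, abspath, start=None, end=None):
            return cls._DB[abspath][start:end]
        def get_content_size(self):
            return len(self._DB[self.absolute_path])
        def get_modified_time(self):
            return None
.. testoutput::
    :hide: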
.. versionchanged:: 3.1
Many of the methods for subclasses were added in Tornado 3.1.
"""
CACHE_MAX_AGE = 86400 * 365 * 10 # 10 years
_static_hashes = {}
_lock = threading.Lock() # protects _static_hashes
def initialize(self, path, default_filename=None):
self.root = path
self.default_filename = default_filename
@classmethod
def reset(cls):
with cls._lock:
cls._static_hashes = {}
def head(self, path):
return self.get(path, include_body=False)
@gen.coroutine
def get(self, path, include_body=True):
# Set up our path instance variables.
self.path = self.parse_url_path(path)
del path # make sure we don't refer to path instead of self.path again
absolute_path = self.get_absolute_path(self.root, self.path)
self.absolute_path = self.validate_absolute_path(
self.root, absolute_path)
if self.absolute_path is None:
return
self.modified = self.get_modified_time()
self.set_headers()
if self.should_return_304():
self.set_status(304)
return
request_range = None
range_header = self.request.headers.get("Range")
if range_header:
# As per RFC 2616 14.16, if an invalid Range header is specified,
# the request will be treated as if the header didn't exist.
request_range = httputil._parse_request_range(range_header)
size = self.get_content_size()
if request_range:
start, end = request_range
if (start is not None and start >= size) or end == 0:
# As per RFC 2616 14.35.1, a range is not satisfiable only if
# the first requested byte is equal to or greater than the
# content, or when a suffix with length 0 is specified
self.set_status(416) # Range Not Satisfiable
self.set_header("Content-Type", "text/plain")
self.set_header("Content-Range", "bytes */%s" % (size, ))
return
if start is not None and start < 0:
start += size
if end is not None and end > size:
# Clients sometimes blindly use a large range to limit their
# download size; cap the endpoint at the actual file size.
end = size
# Note: only return HTTP 206 if less than the entire range has been
# requested. Not only is this semantically correct, but Chrome
# refuses to play audio if it gets an HTTP 206 in response to
# ``Range: bytes=0-``.
if size != (end or size) - (start or 0):
self.set_status(206) # Partial Content
self.set_header("Content-Range",
httputil._get_content_range(start, end, size))
else:
start = end = None
if start is not None and end is not None:
content_length = end - start
elif end is not None:
content_length = end
elif start is not None:
content_length = size - start
else:
content_length = size
self.set_header("Content-Length", content_length)
if include_body:
content = self.get_content(self.absolute_path, start, end)
if isinstance(content, bytes):
content = [content]
for chunk in content:
try:
self.write(chunk)
yield self.flush()
except iostream.StreamClosedError:
return
else:
assert self.request.method == "HEAD"
def compute_etag(self):
"""Sets the ``Etag`` header based on static url version.
This allows efficient ``If-None-Match`` checks against cached
versions, and sends the correct ``Etag`` for a partial response
(i.e. the same ``Etag`` as the full file).
.. versionadded:: 3.1
"""
version_hash = self._get_cached_version(self.absolute_path)
if not version_hash:
return None
return '"%s"' % (version_hash, )
def set_headers(self):
"""Sets the content and caching headers on the response.
.. versionadded:: 3.1
"""
self.set_header("Accept-Ranges", "bytes")
self.set_etag_header()
if self.modified is not None:
self.set_header("Last-Modified", self.modified)
content_type = self.get_content_type()
if content_type:
self.set_header("Content-Type", content_type)
cache_time = self.get_cache_time(self.path, self.modified,
content_type)
if cache_time > 0:
self.set_header("Expires", datetime.datetime.utcnow() +
datetime.timedelta(seconds=cache_time))
self.set_header("Cache-Control", "max-age=" + str(cache_time))
self.set_extra_headers(self.path)
def should_return_304(self):
"""Returns True if the headers indicate that we should return 304.
.. versionadded:: 3.1
"""
if self.check_etag_header():
return True
# Check the If-Modified-Since, and don't send the result if the
# content has not been modified
ims_value = self.request.headers.get("If-Modified-Since")
if ims_value is not None:
date_tuple = email.utils.parsedate(ims_value)
if date_tuple is not None:
if_since = datetime.datetime(*date_tuple[:6])
if if_since >= self.modified:
return True
return False
@classmethod
def get_absolute_path(cls, root, path):
"""Returns the absolute location of ``path`` relative to ``root``.
``root`` is the path configured for this `StaticFileHandler`
(in most cases the ``static_path`` `Application` setting).
This class method may be overridden in subclasses. By default
it returns a filesystem path, but other strings may be used
as long as they are unique and understood by the subclass's
overridden `get_content`.
.. versionadded:: 3.1
"""
abspath = os.path.abspath(os.path.join(root, path))
return abspath
def validate_absolute_path(self, root, absolute_path):
"""Validate and return the absolute path.
``root`` is the configured path for the `StaticFileHandler`,
and ``path`` is the result of `get_absolute_path`
This is an instance method called during request processing,
so it may raise `HTTPError` or use methods like
`RequestHandler.redirect` (return None after redirecting to
halt further processing). This is where 404 errors for missing files
are generated.
This method may modify the path before returning it, but note that
any such modifications will not be understood by `make_static_url`.
In instance methods, this method's result is available as
``self.absolute_path``.
.. versionadded:: 3.1
"""
root = os.path.abspath(root)
# os.path.abspath strips a trailing /
# it needs to be temporarily added back for requests to root/
if not (absolute_path + os.path.sep).startswith(root):
raise HTTPError(403, "%s is not in root static directory",
self.path)
if (os.path.isdir(absolute_path) and
self.default_filename is not None):
# need to look at the request.path here for when path is empty
# but there is some prefix to the path that was already
# trimmed by the routing
if not self.request.path.endswith("/"):
self.redirect(self.request.path + "/", permanent=True)
return
absolute_path = os.path.join(absolute_path, self.default_filename)
if not os.path.exists(absolute_path):
raise HTTPError(404)
if not os.path.isfile(absolute_path):
raise HTTPError(403, "%s is not a file", self.path)
return absolute_path
@classmethod
def get_content(cls, abspath, start=None, end=None):
"""Retrieve the content of the requested resource which is located
at the given absolute path.
This class method may be overridden by subclasses. Note that its
signature is different from other overridable class methods
(no ``settings`` argument); this is deliberate to ensure that
``abspath`` is able to stand on its own as a cache key.
This method should either return a byte string or an iterator
of byte strings. The latter is preferred for large files
as it helps reduce memory fragmentation.
.. versionadded:: 3.1
"""
with open(abspath, "rb") as file:
if start is not None:
file.seek(start)
if end is not None:
remaining = end - (start or 0)
else:
remaining = None
while True:
chunk_size = 64 * 1024
if remaining is not None and remaining < chunk_size:
chunk_size = remaining
chunk = file.read(chunk_size)
if chunk:
if remaining is not None:
remaining -= len(chunk)
yield chunk
else:
if remaining is not None:
assert remaining == 0
return
@classmethod
def get_content_version(cls, abspath):
"""Returns a version string for the resource at the given path.
This class method may be overridden by subclasses. The
default implementation is a hash of the file's contents.
.. versionadded:: 3.1
"""
data = cls.get_content(abspath)
hasher = hashlib.md5()
if isinstance(data, bytes):
hasher.update(data)
else:
for chunk in data:
hasher.update(chunk)
return hasher.hexdigest()
def _stat(self):
if not hasattr(self, '_stat_result'):
self._stat_result = os.stat(self.absolute_path)
return self._stat_result
def get_content_size(self):
"""Retrieve the total size of the resource at the given path.
This method may be overridden by subclasses.
.. versionadded:: 3.1
.. versionchanged:: 4.0
This method is now always called, instead of only when
partial results are requested.
"""
stat_result = self._stat()
return stat_result[stat.ST_SIZE]
def get_modified_time(self):
"""Returns the time that ``self.absolute_path`` was last modified.
May be overridden in subclasses. Should return a `~datetime.datetime`
object or None.
.. versionadded:: 3.1
"""
stat_result = self._stat()
modified = datetime.datetime.utcfromtimestamp(
stat_result[stat.ST_MTIME])
return modified
def get_content_type(self):
"""Returns the ``Content-Type`` header to be used for this request.
.. versionadded:: 3.1
"""
mime_type, encoding = mimetypes.guess_type(self.absolute_path)
return mime_type
def set_extra_headers(self, path):
"""For subclass to add extra headers to the response"""
pass
def get_cache_time(self, path, modified, mime_type):
"""Override to customize cache control behavior.
Return a positive number of seconds to make the result
cacheable for that amount of time, or 0 to mark the resource as
cacheable for an unspecified amount of time (subject to
browser heuristics).
By default returns cache expiry of 10 years for resources requested
with ``v`` argument.
"""
return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
@classmethod
def make_static_url(cls, settings, path, include_version=True):
"""Constructs a versioned url for the given path.
This method may be overridden in subclasses (but note that it
is a class method rather than an instance method). Subclasses
are only required to implement the signature
``make_static_url(cls, settings, path)``; other keyword
arguments may be passed through `~RequestHandler.static_url`
but are not standard.
``settings`` is the `Application.settings` dictionary. ``path``
is the static path being requested. The url returned should be
relative to the current host.
``include_version`` determines whether the generated URL should
include the query string containing the version hash of the
file corresponding to the given ``path``.
"""
url = settings.get('static_url_prefix', '/static/') + path
if not include_version:
return url
version_hash = cls.get_version(settings, path)
if not version_hash:
return url
return '%s?v=%s' % (url, version_hash)
def parse_url_path(self, url_path):
"""Converts a static URL path into a filesystem path.
``url_path`` is the path component of the URL with
``static_url_prefix`` removed. The return value should be a
filesystem path relative to ``static_path``.
This is the inverse of `make_static_url`.
"""
if os.path.sep != "/":
url_path = url_path.replace("/", os.path.sep)
return url_path
@classmethod
def get_version(cls, settings, path):
"""Generate the version string to be used in static URLs.
``settings`` is the `Application.settings` dictionary and ``path``
is the relative location of the requested asset on the filesystem.
The returned value should be a string, or ``None`` if no version
could be determined.
.. versionchanged:: 3.1
This method was previously recommended for subclasses to override;
`get_content_version` is now preferred as it allows the base
class to handle caching of the result.
"""
abs_path = cls.get_absolute_path(settings['static_path'], path)
return cls._get_cached_version(abs_path)
@classmethod
def _get_cached_version(cls, abs_path):
with cls._lock:
hashes = cls._static_hashes
if abs_path not in hashes:
try:
hashes[abs_path] = cls.get_content_version(abs_path)
except Exception:
gen_log.error("Could not open static file %r", abs_path)
hashes[abs_path] = None
hsh = hashes.get(abs_path)
if hsh:
return hsh
return None
class FallbackHandler(RequestHandler):
"""A `RequestHandler` that wraps another HTTP server callback.
The fallback is a callable object that accepts an
`~.httputil.HTTPServerRequest`, such as an `Application` or
`tornado.wsgi.WSGIContainer`. This is most useful to use both
Tornado ``RequestHandlers`` and WSGI in the same server. Typical
usage::
wsgi_app = tornado.wsgi.WSGIContainer(
django.core.handlers.wsgi.WSGIHandler())
application = tornado.web.Application([
(r"/foo", FooHandler),
(r".*", FallbackHandler, dict(fallback=wsgi_app),
])
"""
def initialize(self, fallback):
self.fallback = fallback
def prepare(self):
self.fallback(self.request)
self._finished = True
class OutputTransform(object):
"""A transform modifies the result of an HTTP request (e.g., GZip encoding)
Applications are not expected to create their own OutputTransforms
or interact with them directly; the framework chooses which transforms
(if any) to apply.
"""
def __init__(self, request):
pass
def transform_first_chunk(self, status_code, headers, chunk, finishing):
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
return chunk
class GZipContentEncoding(OutputTransform):
"""Applies the gzip content encoding to the response.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
.. versionchanged:: 4.0
Now compresses all mime types beginning with ``text/``, instead
of just a whitelist. (the whitelist is still used for certain
non-text mime types).
"""
# Whitelist of compressible mime types (in addition to any types
# beginning with "text/").
CONTENT_TYPES = set(["application/javascript", "application/x-javascript",
"application/xml", "application/atom+xml",
"application/json", "application/xhtml+xml"])
MIN_LENGTH = 5
def __init__(self, request):
self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "")
def _compressible_type(self, ctype):
return ctype.startswith('text/') or ctype in self.CONTENT_TYPES
def transform_first_chunk(self, status_code, headers, chunk, finishing):
if 'Vary' in headers:
headers['Vary'] += b', Accept-Encoding'
else:
headers['Vary'] = b'Accept-Encoding'
if self._gzipping:
ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
self._gzipping = self._compressible_type(ctype) and \
(not finishing or len(chunk) >= self.MIN_LENGTH) and \
("Content-Encoding" not in headers)
if self._gzipping:
headers["Content-Encoding"] = "gzip"
self._gzip_value = BytesIO()
self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value)
chunk = self.transform_chunk(chunk, finishing)
if "Content-Length" in headers:
# The original content length is no longer correct.
# If this is the last (and only) chunk, we can set the new
# content-length; otherwise we remove it and fall back to
# chunked encoding.
if finishing:
headers["Content-Length"] = str(len(chunk))
else:
del headers["Content-Length"]
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
if self._gzipping:
self._gzip_file.write(chunk)
if finishing:
self._gzip_file.close()
else:
self._gzip_file.flush()
chunk = self._gzip_value.getvalue()
self._gzip_value.truncate(0)
self._gzip_value.seek(0)
return chunk
def authenticated(method):
"""Decorate methods with this to require that the user be logged in.
If the user is not logged in, they will be redirected to the configured
`login url <RequestHandler.get_login_url>`.
If you configure a login url with a query parameter, Tornado will
assume you know what you're doing and use it as-is. If not, it
will add a `next` parameter so the login page knows where to send
you once you're logged in.
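A minimal usage sketch (the handler is illustrative):
.. testcode::
    class ProfileHandler(RequestHandler):
        @authenticated
        def get(self):
            self.write("hello, %s" % self.current_user)
.. testoutput::
    :hide: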
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.current_user:
if self.request.method in ("GET", "HEAD"):
url = self.get_login_url()
if "?" not in url:
if urlparse.urlsplit(url).scheme:
# if login url is absolute, make next absolute too
next_url = self.request.full_url()
else:
next_url = self.request.uri
url += "?" + urlencode(dict(next=next_url))
self.redirect(url)
return
raise HTTPError(403)
return method(self, *args, **kwargs)
return wrapper
class UIModule(object):
"""A re-usable, modular UI unit on a page.
UI modules often execute additional queries, and they can include
additional CSS and JavaScript that will be included in the output
page, which is automatically inserted on page render.
Subclasses of UIModule must override the `render` method.
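A minimal sketch (the module name, CSS, and template name are
illustrative):
.. testcode::
    class Entry(UIModule):
        def embedded_css(self):
            return ".entry { margin-bottom: 1em; }"
        def render(self, entry):
            return self.render_string("module-entry.html", entry=entry)
.. testoutput::
    :hide: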
"""
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.ui = handler.ui
self.locale = handler.locale
@property
def current_user(self):
return self.handler.current_user
def render(self, *args, **kwargs):
"""Override in subclasses to return this module's output."""
raise NotImplementedError()
def embedded_javascript(self):
"""Override to return a JavaScript string
to be embedded in the page."""
return None
def javascript_files(self):
"""Override to return a list of JavaScript files needed by this module.
If the return values are relative paths, they will be passed to
`RequestHandler.static_url`; otherwise they will be used as-is.
"""
return None
def embedded_css(self):
"""Override to return a CSS string
that will be embedded in the page."""
return None
def css_files(self):
"""Override to returns a list of CSS files required by this module.
If the return values are relative paths, they will be passed to
`RequestHandler.static_url`; otherwise they will be used as-is.
"""
return None
def html_head(self):
"""Override to return an HTML string that will be put in the <head/>
element.
"""
return None
def html_body(self):
"""Override to return an HTML string that will be put at the end of
the <body/> element.
"""
return None
def render_string(self, path, **kwargs):
"""Renders a template and returns it as a string."""
return self.handler.render_string(path, **kwargs)
class _linkify(UIModule):
def render(self, text, **kwargs):
return escape.linkify(text, **kwargs)
class _xsrf_form_html(UIModule):
def render(self):
return self.handler.xsrf_form_html()
class TemplateModule(UIModule):
"""UIModule that simply renders the given template.
{% module Template("foo.html") %} is similar to {% include "foo.html" %},
but the module version gets its own namespace (with kwargs passed to
Template()) instead of inheriting the outer template's namespace.
Templates rendered through this module also get access to UIModule's
automatic javascript/css features. Simply call set_resources
inside the template and give it keyword arguments corresponding to
the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
Note that these resources are output once per template file, not once
per instantiation of the template, so they must not depend on
any arguments to the template.
"""
def __init__(self, handler):
super(TemplateModule, self).__init__(handler)
# keep resources in both a list and a dict to preserve order
self._resource_list = []
self._resource_dict = {}
def render(self, path, **kwargs):
def set_resources(**kwargs):
if path not in self._resource_dict:
self._resource_list.append(kwargs)
self._resource_dict[path] = kwargs
else:
if self._resource_dict[path] != kwargs:
raise ValueError("set_resources called with different "
"resources for the same template")
return ""
return self.render_string(path, set_resources=set_resources,
**kwargs)
def _get_resources(self, key):
return (r[key] for r in self._resource_list if key in r)
def embedded_javascript(self):
return "\n".join(self._get_resources("embedded_javascript"))
def javascript_files(self):
result = []
for f in self._get_resources("javascript_files"):
if isinstance(f, (unicode_type, bytes)):
result.append(f)
else:
result.extend(f)
return result
def embedded_css(self):
return "\n".join(self._get_resources("embedded_css"))
def css_files(self):
result = []
for f in self._get_resources("css_files"):
if isinstance(f, (unicode_type, bytes)):
result.append(f)
else:
result.extend(f)
return result
def html_head(self):
return "".join(self._get_resources("html_head"))
def html_body(self):
return "".join(self._get_resources("html_body"))
class _UIModuleNamespace(object):
"""Lazy namespace which creates UIModule proxies bound to a handler."""
def __init__(self, handler, ui_modules):
self.handler = handler
self.ui_modules = ui_modules
def __getitem__(self, key):
return self.handler._ui_module(key, self.ui_modules[key])
def __getattr__(self, key):
try:
return self[key]
except KeyError as e:
raise AttributeError(str(e))
class URLSpec(object):
"""Specifies mappings between URLs and handlers."""
def __init__(self, pattern, handler, kwargs=None, name=None):
"""Parameters:
* ``pattern``: Regular expression to be matched. Any groups
in the regex will be passed in to the handler's get/post/etc
methods as arguments.
* ``handler``: `RequestHandler` subclass to be invoked.
* ``kwargs`` (optional): A dictionary of additional arguments
to be passed to the handler's constructor.
* ``name`` (optional): A name for this handler. Used by
`Application.reverse_url`.
"""
if not pattern.endswith('$'):
pattern += '$'
self.regex = re.compile(pattern)
assert len(self.regex.groupindex) in (0, self.regex.groups), \
("groups in url regexes must either be all named or all "
"positional: %r" % self.regex.pattern)
if isinstance(handler, str):
# import the Module and instantiate the class
# Must be a fully qualified name (module.ClassName)
handler = import_object(handler)
self.handler_class = handler
self.kwargs = kwargs or {}
self.name = name
self._path, self._group_count = self._find_groups()
def __repr__(self):
return '%s(%r, %s, kwargs=%r, name=%r)' % \
(self.__class__.__name__, self.regex.pattern,
self.handler_class, self.kwargs, self.name)
def _find_groups(self):
"""Returns a tuple (reverse string, group count) for a url.
For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method
would return ('/%s/%s/', 2).
"""
pattern = self.regex.pattern
if pattern.startswith('^'):
pattern = pattern[1:]
if pattern.endswith('$'):
pattern = pattern[:-1]
if self.regex.groups != pattern.count('('):
# The pattern is too complicated for our simplistic matching,
# so we can't support reversing it.
return (None, None)
pieces = []
for fragment in pattern.split('('):
if ')' in fragment:
paren_loc = fragment.index(')')
if paren_loc >= 0:
pieces.append('%s' + fragment[paren_loc + 1:])
else:
pieces.append(fragment)
return (''.join(pieces), self.regex.groups)
def reverse(self, *args):
assert self._path is not None, \
"Cannot reverse url regex " + self.regex.pattern
assert len(args) == self._group_count, "required number of arguments "\
"not found"
if not len(args):
return self._path
converted_args = []
for a in args:
if not isinstance(a, (unicode_type, bytes)):
a = str(a)
converted_args.append(escape.url_escape(utf8(a), plus=False))
return self._path % tuple(converted_args)
url = URLSpec
if hasattr(hmac, 'compare_digest'): # python 3.3
_time_independent_equals = hmac.compare_digest
else:
def _time_independent_equals(a, b):
if len(a) != len(b):
return False
result = 0
if isinstance(a[0], int): # python3 byte strings
for x, y in zip(a, b):
result |= x ^ y
else: # python2
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
def create_signed_value(secret, name, value, version=None, clock=None):
if version is None:
version = DEFAULT_SIGNED_VALUE_VERSION
if clock is None:
clock = time.time
timestamp = utf8(str(int(clock())))
value = base64.b64encode(utf8(value))
if version == 1:
signature = _create_signature_v1(secret, name, value, timestamp)
value = b"|".join([value, timestamp, signature])
return value
elif version == 2:
# The v2 format consists of a version number and a series of
# length-prefixed fields "%d:%s", the last of which is a
# signature, all separated by pipes. All numbers are in
# decimal format with no leading zeros. The signature is an
# HMAC-SHA256 of the whole string up to that point, including
# the final pipe.
#
# The fields are:
# - format version (i.e. 2; no length prefix)
# - key version (currently 0; reserved for future
# key rotation features)
# - timestamp (integer seconds since epoch)
# - name (not encoded; assumed to be ~alphanumeric)
# - value (base64-encoded)
# - signature (hex-encoded; no length prefix)
def format_field(s):
return utf8("%d:" % len(s)) + utf8(s)
to_sign = b"|".join([
b"2|1:0",
format_field(timestamp),
format_field(name),
format_field(value),
b''])
signature = _create_signature_v2(secret, to_sign)
return to_sign + signature
else:
raise ValueError("Unsupported version %d" % version)
_signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$")
def decode_signed_value(secret, name, value, max_age_days=31,
clock=None, min_version=None):
if clock is None:
clock = time.time
if min_version is None:
min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION
if min_version > 2:
raise ValueError("Unsupported min_version %d" % min_version)
if not value:
return None
# Figure out what version this is. Version 1 did not include an
# explicit version field and started with arbitrary base64 data,
# which makes this tricky.
value = utf8(value)
m = _signed_value_version_re.match(value)
if m is None:
version = 1
else:
try:
version = int(m.group(1))
if version > 999:
# Certain payloads from the version-less v1 format may
# be parsed as valid integers. Due to base64 padding
# restrictions, this can only happen for numbers whose
# length is a multiple of 4, so we can treat all
# numbers up to 999 as versions, and for the rest we
# fall back to v1 format.
version = 1
except ValueError:
version = 1
if version < min_version:
return None
if version == 1:
return _decode_signed_value_v1(secret, name, value,
max_age_days, clock)
elif version == 2:
return _decode_signed_value_v2(secret, name, value,
max_age_days, clock)
else:
return None
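# A minimal round-trip sketch of the two helpers above (the secret and
# cookie name are illustrative). Signing with version=2 yields a value
# that decode_signed_value accepts until max_age_days has elapsed.
def _signed_value_example():
    secret = "it-was-a-dark-and-stormy-night"
    signed = create_signed_value(secret, "session", "42", version=2)
    assert decode_signed_value(secret, "session", signed) == b"42"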
def _decode_signed_value_v1(secret, name, value, max_age_days, clock):
parts = utf8(value).split(b"|")
if len(parts) != 3:
return None
signature = _create_signature_v1(secret, name, parts[0], parts[1])
if not _time_independent_equals(parts[2], signature):
gen_log.warning("Invalid cookie signature %r", value)
return None
timestamp = int(parts[1])
if timestamp < clock() - max_age_days * 86400:
gen_log.warning("Expired cookie %r", value)
return None
if timestamp > clock() + 31 * 86400:
# _cookie_signature does not hash a delimiter between the
# parts of the cookie, so an attacker could transfer trailing
# digits from the payload to the timestamp without altering the
# signature. For backwards compatibility, sanity-check timestamp
# here instead of modifying _cookie_signature.
gen_log.warning("Cookie timestamp in future; possible tampering %r",
value)
return None
if parts[1].startswith(b"0"):
gen_log.warning("Tampered cookie %r", value)
return None
try:
return base64.b64decode(parts[0])
except Exception:
return None
def _decode_signed_value_v2(secret, name, value, max_age_days, clock):
def _consume_field(s):
length, _, rest = s.partition(b':')
n = int(length)
field_value = rest[:n]
# In python 3, indexing bytes returns small integers; we must
# use a slice to get a byte string as in python 2.
if rest[n:n + 1] != b'|':
raise ValueError("malformed v2 signed value field")
rest = rest[n + 1:]
return field_value, rest
rest = value[2:] # remove version number
try:
key_version, rest = _consume_field(rest)
timestamp, rest = _consume_field(rest)
name_field, rest = _consume_field(rest)
value_field, rest = _consume_field(rest)
except ValueError:
return None
passed_sig = rest
signed_string = value[:-len(passed_sig)]
expected_sig = _create_signature_v2(secret, signed_string)
if not _time_independent_equals(passed_sig, expected_sig):
return None
if name_field != utf8(name):
return None
timestamp = int(timestamp)
if timestamp < clock() - max_age_days * 86400:
# The signature has expired.
return None
try:
return base64.b64decode(value_field)
except Exception:
return None
def _create_signature_v1(secret, *parts):
hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)
for part in parts:
hash.update(utf8(part))
return utf8(hash.hexdigest())
def _create_signature_v2(secret, s):
hash = hmac.new(utf8(secret), digestmod=hashlib.sha256)
hash.update(utf8(s))
return utf8(hash.hexdigest())
def _unquote_or_none(s):
"""None-safe wrapper around url_unescape to handle unamteched optional
groups correctly.
Note that args are passed as bytes so the handler can decide what
encoding to use.
"""
if s is None:
return s
return escape.url_unescape(s, encoding=None, plus=False)
|
"""VBSP Server."""
from tornado.tcpserver import TCPServer
from empower.vbsp import PRT_UE_JOIN
from empower.vbsp import PRT_UE_LEAVE
from empower.core.pnfpserver import BaseTenantPNFDevHandler
from empower.core.pnfpserver import BasePNFDevHandler
from empower.core.pnfpserver import PNFPServer
from empower.core.vbs import VBS
from empower.core.module import ModuleEventWorker
from empower.core.module import ModuleWorker
from empower.restserver.restserver import RESTServer
from empower.persistence.persistence import TblVBS
from empower.vbsp import PRT_TYPES
from empower.vbsp import PRT_TYPES_HANDLERS
from empower.vbsp.vbspconnection import VBSPConnection
from empower.vbsp.uehandler import UEHandler
from empower.vbsp.tenantuehandler import TenantUEHandler
from empower.main import RUNTIME
DEFAULT_PORT = 2210
class TenantVBSHandler(BaseTenantPNFDevHandler):
"""TenantVBS Handler."""
HANDLERS = [r"/api/v1/tenants/([a-zA-Z0-9-]*)/vbses/?",
r"/api/v1/tenants/([a-zA-Z0-9-]*)/vbses/([a-zA-Z0-9:]*)/?"]
class VBSHandler(BasePNFDevHandler):
"""VBS Handler."""
HANDLERS = [(r"/api/v1/vbses/?"),
(r"/api/v1/vbses/([a-zA-Z0-9:]*)/?")]
class ModuleVBSPEventWorker(ModuleEventWorker):
"""Module worker (VBSP Server version).
Keeps track of the currently defined modules for each tenant (events only)
Attributes:
module_id: Next module id
modules: dictionary of modules currently active in this tenant
"""
def __init__(self, module, pt_type, pt_packet=None):
ModuleEventWorker.__init__(self, VBSPServer.__module__, module,
pt_type, pt_packet)
class ModuleVBSPWorker(ModuleWorker):
"""Module worker (VBSP Server version).
Keeps track of the currently defined modules for each tenant
Attributes:
module_id: Next module id
modules: dictionary of modules currently active in this tenant
"""
def __init__(self, module, pt_type, pt_packet=None):
ModuleWorker.__init__(self, VBSPServer.__module__, module, pt_type,
pt_packet)
def handle_packet(self, response):
"""Handle response message."""
if response.head.t_id not in self.modules:
return
module = self.modules[response.head.t_id]
self.log.info("Received %s response (id=%u)", self.module.MODULE_NAME,
response.head.t_id)
module.handle_response(response)
class VBSPServer(PNFPServer, TCPServer):
"""Exposes the VBS API."""
PNFDEV = VBS
TBL_PNFDEV = TblVBS
def __init__(self, port, prt_types, prt_types_handlers):
PNFPServer.__init__(self, prt_types, prt_types_handlers)
TCPServer.__init__(self)
self.port = int(port)
self.connection = None
self.listen(self.port)
def handle_stream(self, stream, address):
self.log.info('Incoming connection from %r', address)
self.connection = VBSPConnection(stream, address, server=self)
def send_ue_leave_message_to_self(self, ue):
"""Send an UE_LEAVE message to self."""
self.log.info("UE LEAVE %s (%u)", ue.addr, ue.plmn_id)
for handler in self.pt_types_handlers[PRT_UE_LEAVE]:
handler(ue)
def send_ue_join_message_to_self(self, ue):
"""Send an UE_JOIN message to self."""
self.log.info("UE JOIN %s (%u)", ue.addr, ue.plmn_id)
for handler in self.pt_types_handlers[PRT_UE_JOIN]:
handler(ue)
def launch(port=DEFAULT_PORT):
"""Start VBSP Server Module."""
server = VBSPServer(port, PRT_TYPES, PRT_TYPES_HANDLERS)
rest_server = RUNTIME.components[RESTServer.__module__]
rest_server.add_handler_class(TenantVBSHandler, server)
rest_server.add_handler_class(VBSHandler, server)
rest_server.add_handler_class(UEHandler, server)
rest_server.add_handler_class(TenantUEHandler, server)
server.log.info("VBSP Server available at %u", server.port)
return server
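# A minimal launch sketch (assumes the EmPOWER runtime is initialized and
# the RESTServer component is already registered in RUNTIME.components):
def _launch_example():
    return launch(port=DEFAULT_PORT)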
|
user_list = [('Stephen', 'Stuart', 'sstuart@google.com'),
('Chris', 'Ritzo', 'critzo@measurementlab.net'),
('Josh', 'Bailey', 'joshb@google.com'),
('Nathan', 'Kinkade', 'kinkade@measurementlab.net'),
('Matt', 'Mathis', 'mattmathis@google.com'),
('Peter', 'Boothe', 'pboothe@google.com'),
('Greg', 'Russell', 'gfr@google.com'),
('Josh', 'King', 'jking@chambana.net'),
('Ross', 'Schulman', 'ross@opentechinstitute.org'),
('Roberto', 'DAuria', 'roberto@measurementlab.net'),
('M-Lab', 'Automation', 'support@measurementlab.net')]
|
"""
lunaport.domain.host
~~~~~~~~~~~~~~~~~~~~
Business logic layer for host resource.
"""
import pprint
import copy
import socket
pp = pprint.PrettyPrinter(indent=4).pprint
from .. helpers import validate_net_addr
from base import BaseAdaptor, BaseEntrie
class Host(BaseEntrie):
""" Host in lunaport terminology means physical/virtual server
which can be used as load generator or testing target.
"""
attr_required = [
'fqdn',
'ip_addr',
]
attr_optional = [
'id',
'added_at',
'descr',
'line_id',
'line_name',
'is_spec_tank',
'is_tank',
]
attr_date = ['added_at']
def __repr__(self):
return '<domain.host #{} {}>'.format(getattr(self, 'id'), self.fqdn)
class HostBuilder(object):
"""Host instance static builder.
"""
req_attr_allowed = [
'fqdn',
'ip_addr',
'is_spec_tank',
'is_tank',
]
req_attr_allowed_set = set(req_attr_allowed)
@classmethod
def from_Flask_req(cls, r):
"""Creates class instance from Flask request object.
Args:
r: Flask request object.
Returns:
Host class instance.
"""
if r.mimetype == 'multipart/form-data':
msg_rv = r.form
elif r.mimetype == 'application/json':
msg_rv = r.json
else:
raise ValueError('Unsupported mime type')
if not msg_rv:
raise ValueError('Can\'t deserialize request body')
# ImmutableMultiDict to dict cast
msg_rv = dict((k, v) for k, v in msg_rv.items())
msg_set = set(msg_rv.keys())
if not msg_set.issubset(cls.req_attr_allowed_set):
err_msg = [
'Body contains unexpected params:',
str(list(msg_set - cls.req_attr_allowed_set))
]
raise ValueError(' '.join(err_msg))
return cls.from_addr(**msg_rv)
@classmethod
def from_addr(cls, fqdn=None, ip_addr=None, unknown_addr=None,
is_spec_tank=False, is_tank=False):
"""
Creates class Host instance from FQDN and/or ip_addr.
Args:
fqdn: str, Fully Qualified Domain Name.
ip_addr: str, Internet Protocol address.
unknown_addr: str, IPv4 or IPv6 address or FQDN. Yandex-tank allows
the user to pass any of these address types as a Phantom section
param; it is the server side's task to determine which address
type was provided.
Returns:
Host class instance.
Throws:
ValueError
"""
def resolve(fqdn):
try:
return socket.getaddrinfo(fqdn, None).pop()[-1][0]
except socket.gaierror:
raise ValueError(
'Unresolvable hostname: *{}*; try fqdn or ip_addr instead'.format(fqdn))
if unknown_addr:
if validate_net_addr(unknown_addr):
try:
params = {
'fqdn': socket.gethostbyaddr(unknown_addr)[0],
'ip_addr': unknown_addr,
}
except socket.error:
# reverse DNS lookup failed; fall back to a placeholder fqdn
params = {
'fqdn': 'EXAMPLE.COM',
'ip_addr': unknown_addr,
}
else:
params = {
'fqdn': unknown_addr,
'ip_addr': resolve(unknown_addr),
}
elif fqdn and ip_addr:
params = {
'fqdn': fqdn,
'ip_addr': ip_addr,
}
elif fqdn:
try:
params = {
'fqdn': fqdn,
'ip_addr': socket.gethostbyname_ex(fqdn)[-1].pop()
}
except socket.gaierror:
raise ValueError('Can\'t resolve provided *fqdn*')
elif ip_addr:
try:
params = {
'fqdn': socket.gethostbyaddr(ip_addr)[0],
'ip_addr': ip_addr,
}
except socket.error:
# reverse DNS lookup failed; fall back to a placeholder fqdn
params = {
'fqdn': 'EXAMPLE.COM',
'ip_addr': ip_addr,
}
else:
raise ValueError(
'at least one of params: fqdn, ip_addr should be specified')
params.update({
'is_tank': is_tank,
'is_spec_tank': is_spec_tank,
})
return Host(**params)
@classmethod
def from_row(cls, **row):
"""Creates class instance from RDBMS returned row.
Args:
row: dict with table columns as keys.
Returns:
Host class instance.
"""
return Host(**row)
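# A minimal usage sketch (the address is illustrative; from_addr resolves
# against live DNS, so the result depends on the environment):
def _host_builder_example():
    host = HostBuilder.from_addr(fqdn='localhost')
    pp(HostAdaptor.to_dict(host))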
class HostAdaptor(BaseAdaptor):
@classmethod
def to_dict(cls, host_entrie, date_iso=False):
rv = copy.deepcopy(host_entrie.__dict__)
if 'line_id' in rv.keys():
rv['line'] = {'id': rv['line_id']}
del rv['line_id']
if 'line_name' in rv.keys():
rv['line']['name'] = rv['line_name']
del rv['line_name']
if date_iso: # datetime obj JSON serializable in ISO 8601 format.
for attr in host_entrie.attr_date:
if rv.get(attr):
rv[attr] = rv[attr].isoformat()
return rv
|
<html>
Something
</html>
|
"""Provides a class for managing BIG-IP L7 Policy resources."""
import logging
from operator import itemgetter
from f5_cccl.resource import Resource
from f5_cccl.resource.ltm.policy.action import Action
from f5_cccl.resource.ltm.policy.condition import Condition
from f5_cccl.resource.ltm.policy.rule import Rule
LOGGER = logging.getLogger(__name__)
class Policy(Resource):
"""L7 Policy class."""
# The property names class attribute defines the names of the
# properties that we wish to compare.
properties = dict(
name=None,
partition=None,
strategy="/Common/first-match",
rules=list()
)
def __init__(self, name, partition, **data):
"""Create the policy and nested class objects"""
super(Policy, self).__init__(name, partition)
# Get the rules.
rules = data.get('rules', list())
self._data['rules'] = self._create_rules(rules)
self._data['strategy'] = data.get(
'strategy',
self.properties.get('strategy')
)
# Fix these values.
self._data['legacy'] = True
self._data['controls'] = ["forwarding"]
self._data['requires'] = ["http"]
def __eq__(self, other):
"""Check the equality of the two objects.
Only compare the properties as defined in the
        properties class dictionary.
"""
if not isinstance(other, Policy):
return False
for key in self.properties:
if key == 'rules':
if len(self._data['rules']) != len(other.data['rules']):
return False
for index, rule in enumerate(self._data['rules']):
if rule != other.data['rules'][index]:
return False
continue
if self._data[key] != other.data.get(key, None):
return False
return True
def __str__(self):
return str(self._data)
def _create_rules(self, rules):
"""Create a list of the policy Rules from rules data.
The order of the rules in the list is taken as the order
        in which they should be evaluated.
"""
new_rules = list()
for index, rule in enumerate(rules):
rule['ordinal'] = index
new_rules.append(Rule(**rule).data)
return new_rules
def _uri_path(self, bigip):
return bigip.tm.ltm.policys.policy
class IcrPolicy(Policy):
"""Policy object created from the iControl REST object"""
def __init__(self, name, partition, **data):
policy_data = self._flatten_policy(data)
super(IcrPolicy, self).__init__(name, partition, **policy_data)
def _flatten_policy(self, data):
"""Flatten the unnecessary levels of nesting for the policy object.
The structure of a policy are returned by the SDK looks like:
'policy': {
...,
'rulesReference': {
...,
'items': [
...,
'actionsReference': [
...,
'items': [
{action0}, {action1},...,{actionN}
]
],
'rulesReference': [
...,
'items': [
{rule0}, {rule1},...,{ruleN}
]
]
]
}
}
Flattens to a simpler representation:
'policy': {
...,
'rules': {
...,
'actions': [
{action0}, {action1},...,{actionN}
]
'rules': [
{rule0}, {rule1},...,{ruleN}
]
}
}
This function does filter some of the Rule, Action, and Condition
properties.
"""
policy = dict()
for key in Policy.properties:
if key == 'rules':
rulesReference = data['rulesReference']
if 'items' in rulesReference:
policy['rules'] = self._flatten_rules(
rulesReference['items'])
elif key == 'name' or key == 'partition':
pass
else:
policy[key] = data.get(key)
return policy
def _flatten_rules(self, rules_list):
rules = list()
for rule in sorted(rules_list, key=itemgetter('ordinal')):
flat_rule = dict()
for key in Rule.properties:
if key == 'actions':
flat_rule['actions'] = self._flatten_actions(rule)
elif key == 'conditions':
flat_rule['conditions'] = self._flatten_condition(rule)
else:
flat_rule[key] = rule.get(key)
rules.append(flat_rule)
return rules
def _flatten_actions(self, rule):
actions = list()
actions_reference = rule.get('actionsReference', dict())
for action in actions_reference.get('items', list()):
flat_action = dict()
for key in Action.properties:
flat_action[key] = action.get(key)
actions.append(flat_action)
return actions
def _flatten_condition(self, rule):
conditions = list()
conditions_reference = rule.get('conditionsReference', dict())
for condition in conditions_reference.get('items', list()):
flat_condition = dict()
for key in Condition.properties:
flat_condition[key] = condition.get(key)
conditions.append(flat_condition)
return conditions
class ApiPolicy(Policy):
"""Policy object created from the API configuration object"""
pass
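# --- Usage sketch (illustrative, not part of the original module) ---
# Two policies are equal when their compared properties (name, partition,
# strategy, and ordered rules) match; the names below are hypothetical.
if __name__ == '__main__':
    p1 = Policy('wrapper_policy', 'Common', strategy='/Common/first-match')
    p2 = ApiPolicy('wrapper_policy', 'Common')
    print(p1 == p2)  # True: same default strategy and no rules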
|
import setuptools
setuptools.setup()
|
"""
Copyright 2010-2012 Asidev s.r.l.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ['classproperty']
class classproperty(property):
""" a property decorator for classmethods """
def __get__(self, obj, type_):
return self.fget.__get__(None, type_)()
def __set__(self, obj, value):
cls = type(obj)
return self.fset.__get__(None, cls)(value)
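# --- Usage sketch (illustrative, not part of the original module) ---
# Stack classproperty on top of classmethod so the wrapped getter is bound
# to the class when accessed; `Defaults` is a hypothetical example class.
class Defaults(object):
    _timeout = 30

    @classproperty
    @classmethod
    def timeout(cls):
        return cls._timeout

assert Defaults.timeout == 30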
|
"""Test the Vilfo Router config flow."""
import vilfo
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.vilfo.const import DOMAIN
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_HOST, CONF_ID, CONF_MAC
from tests.async_mock import Mock, patch
async def test_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
mock_mac = "FF-00-00-00-00-00"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
with patch("vilfo.Client.ping", return_value=None), patch(
"vilfo.Client.get_board_information", return_value=None
), patch("vilfo.Client.resolve_mac_address", return_value=mock_mac), patch(
"homeassistant.components.vilfo.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.vilfo.async_setup_entry"
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: "testadmin.vilfo.com", CONF_ACCESS_TOKEN: "test-token"},
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == "testadmin.vilfo.com"
assert result2["data"] == {
"host": "testadmin.vilfo.com",
"access_token": "test-token",
}
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("vilfo.Client.ping", return_value=None), patch(
"vilfo.Client.resolve_mac_address", return_value=None
), patch(
"vilfo.Client.get_board_information",
side_effect=vilfo.exceptions.AuthenticationException,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "testadmin.vilfo.com", "access_token": "test-token"},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("vilfo.Client.ping", side_effect=vilfo.exceptions.VilfoException), patch(
"vilfo.Client.resolve_mac_address"
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "testadmin.vilfo.com", "access_token": "test-token"},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
with patch("vilfo.Client.ping", side_effect=vilfo.exceptions.VilfoException), patch(
"vilfo.Client.resolve_mac_address"
):
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "testadmin.vilfo.com", "access_token": "test-token"},
)
assert result3["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result3["errors"] == {"base": "cannot_connect"}
async def test_form_wrong_host(hass):
"""Test we handle wrong host errors."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_USER},
data={"host": "this is an invalid hostname", "access_token": "test-token"},
)
assert result["errors"] == {"host": "wrong_host"}
async def test_form_already_configured(hass):
"""Test that we handle already configured exceptions appropriately."""
first_flow_result1 = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("vilfo.Client.ping", return_value=None), patch(
"vilfo.Client.get_board_information",
return_value=None,
), patch("vilfo.Client.resolve_mac_address", return_value=None):
first_flow_result2 = await hass.config_entries.flow.async_configure(
first_flow_result1["flow_id"],
{CONF_HOST: "testadmin.vilfo.com", CONF_ACCESS_TOKEN: "test-token"},
)
second_flow_result1 = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("vilfo.Client.ping", return_value=None), patch(
"vilfo.Client.get_board_information",
return_value=None,
), patch("vilfo.Client.resolve_mac_address", return_value=None):
second_flow_result2 = await hass.config_entries.flow.async_configure(
second_flow_result1["flow_id"],
{CONF_HOST: "testadmin.vilfo.com", CONF_ACCESS_TOKEN: "test-token"},
)
assert first_flow_result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert second_flow_result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert second_flow_result2["reason"] == "already_configured"
async def test_form_unexpected_exception(hass):
"""Test that we handle unexpected exceptions."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.vilfo.config_flow.VilfoClient",
) as mock_client:
mock_client.return_value.ping = Mock(side_effect=Exception)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "testadmin.vilfo.com", "access_token": "test-token"},
)
assert result2["errors"] == {"base": "unknown"}
async def test_validate_input_returns_data(hass):
"""Test we handle the MAC address being resolved or not."""
mock_data = {"host": "testadmin.vilfo.com", "access_token": "test-token"}
mock_data_with_ip = {"host": "192.168.0.1", "access_token": "test-token"}
mock_mac = "FF-00-00-00-00-00"
with patch("vilfo.Client.ping", return_value=None), patch(
"vilfo.Client.get_board_information", return_value=None
), patch("vilfo.Client.resolve_mac_address", return_value=None):
result = await hass.components.vilfo.config_flow.validate_input(
hass, data=mock_data
)
assert result["title"] == mock_data["host"]
assert result[CONF_HOST] == mock_data["host"]
assert result[CONF_MAC] is None
assert result[CONF_ID] == mock_data["host"]
with patch("vilfo.Client.ping", return_value=None), patch(
"vilfo.Client.get_board_information", return_value=None
), patch("vilfo.Client.resolve_mac_address", return_value=mock_mac):
result2 = await hass.components.vilfo.config_flow.validate_input(
hass, data=mock_data
)
result3 = await hass.components.vilfo.config_flow.validate_input(
hass, data=mock_data_with_ip
)
assert result2["title"] == mock_data["host"]
assert result2[CONF_HOST] == mock_data["host"]
assert result2[CONF_MAC] == mock_mac
assert result2[CONF_ID] == mock_mac
assert result3["title"] == mock_data_with_ip["host"]
assert result3[CONF_HOST] == mock_data_with_ip["host"]
assert result3[CONF_MAC] == mock_mac
assert result3[CONF_ID] == mock_mac
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "BoPress.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|