repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
Fiware/ops.Fuel-main-dev | fuelweb_ui_test/browser.py | 4 | 1487 | from selenium import webdriver
from selenium.webdriver import DesiredCapabilities
from fuelweb_ui_test.settings import BROWSER
from fuelweb_ui_test.settings import CHROME_EXECUTABLE_PATH
from fuelweb_ui_test.settings import SELENIUM_IMPLICIT_WAIT
from pyvirtualdisplay import Display
# Module-level WebDriver singleton; populated by start_driver() and
# consumed by quit_driver().
driver = None
def start_driver(browser=None):
    """Create, configure and return the module-level Selenium WebDriver.

    Args:
        browser: Browser key: "chrome", "firefox", "iexplore", "headless"
            or "phantom".  Falls back to the BROWSER setting when None or
            empty.

    Returns:
        The configured WebDriver instance (also stored in the module-level
        ``driver`` global).

    Raises:
        ValueError: if ``browser`` names an unsupported browser.  The
            original code silently left ``driver`` unset in that case and
            crashed below with ``AttributeError: 'NoneType' object ...``.
    """
    browser = browser or BROWSER

    def start_chrome():
        # Shared by the regular and the headless (virtual display) modes.
        return webdriver.Chrome(
            executable_path=CHROME_EXECUTABLE_PATH,
            desired_capabilities=DesiredCapabilities.CHROME)

    def start_headless():
        # Run Chrome inside a virtual X display so no real screen is needed.
        # NOTE(review): the Display instance is not kept anywhere, so it is
        # never stopped explicitly -- TODO confirm this is intentional.
        display = Display(visible=0, size=(1024, 768))
        display.start()
        return start_chrome()

    # Dispatch table instead of the original if/elif chain; makes the set of
    # supported browsers explicit and lets us fail fast on unknown names.
    starters = {
        "chrome": start_chrome,
        "firefox": webdriver.Firefox,
        "iexplore": webdriver.Ie,
        "headless": start_headless,
        "phantom": webdriver.PhantomJS,
    }

    global driver
    try:
        driver = starters[browser]()
    except KeyError:
        raise ValueError("Unsupported browser: %r" % (browser,))
    # driver.set_window_size(1024, 768)
    driver.maximize_window()
    driver.implicitly_wait(SELENIUM_IMPLICIT_WAIT)
    return driver
def quit_driver():
    """Quit the module-level WebDriver created by start_driver().

    NOTE(review): assumes start_driver() was called first; otherwise
    ``driver`` is still None and this raises AttributeError.
    """
    driver.quit()
| apache-2.0 |
"""
TRAP over multiple transports
+++++++++++++++++++++++++++++
The following script sends two SNMP TRAP notification using the
following options:
* with SNMPv1
* with community name 'public'
* over IPv4/UDP and IPv6/UDP
* send TRAP notification
* to a Manager at demo.snmplabs.com:162 and [::1]
* with TRAP ID 'coldStart' specified as an OID
* include managed objects information:
* with default Uptime value
* with default Agent Address with '127.0.0.1'
* overriding Enterprise OID with 1.3.6.1.4.1.20408.4.1.1.2
The following Net-SNMP commands will produce similar SNMP notification:
| $ snmptrap -v1 -c public udp:demo.snmplabs.com 1.3.6.1.4.1.20408.4.1.1.2 127.0.0.1 1 0 12345
| $ snmptrap -v1 -c public udp6:[::1] 1.3.6.1.4.1.20408.4.1.1.2 127.0.0.1 1 0 12345
"""#
from pysnmp.carrier.asyncore.dispatch import AsyncoreDispatcher
from pysnmp.carrier.asyncore.dgram import udp, udp6
from pyasn1.codec.ber import encoder
from pysnmp.proto import api

# Protocol version to use
pMod = api.PROTOCOL_MODULES[api.SNMP_VERSION_1]
# pMod = api.protoModules[api.protoVersion2c]

# Build PDU
trapPDU = pMod.TrapPDU()
pMod.apiTrapPDU.setDefaults(trapPDU)

# Traps have quite different semantics across proto versions
if pMod == api.PROTOCOL_MODULES[api.SNMP_VERSION_1]:
    # NOTE(review): the docstring above advertises enterprise OID
    # 1.3.6.1.4.1.20408.4.1.1.2 but the code sets 1.3.6.1.1.2.3.4.1 --
    # confirm which one is intended.
    pMod.apiTrapPDU.setEnterprise(trapPDU, (1, 3, 6, 1, 1, 2, 3, 4, 1))
    pMod.apiTrapPDU.setGenericTrap(trapPDU, 'coldStart')

# Build message: wrap the PDU in a v1 community-based message envelope.
trapMsg = pMod.Message()
pMod.apiMessage.setDefaults(trapMsg)
pMod.apiMessage.setCommunity(trapMsg, 'public')
pMod.apiMessage.setPDU(trapMsg, trapPDU)

transportDispatcher = AsyncoreDispatcher()

# UDP/IPv4: register the transport, then queue the BER-encoded message.
transportDispatcher.registerTransport(
    udp.DOMAIN_NAME, udp.UdpSocketTransport().openClientMode()
)
transportDispatcher.sendMessage(
    encoder.encode(trapMsg), udp.DOMAIN_NAME, ('demo.snmplabs.com', 162)
)

# UDP/IPv6: same message, second transport, loopback destination.
transportDispatcher.registerTransport(
    udp6.DOMAIN_NAME, udp6.Udp6SocketTransport().openClientMode()
)
transportDispatcher.sendMessage(
    encoder.encode(trapMsg), udp6.DOMAIN_NAME, ('::1', 162)
)

# Dispatcher will finish as all scheduled messages are sent
# (TRAPs are unconfirmed, so there is nothing to wait for).
transportDispatcher.runDispatcher()
transportDispatcher.closeDispatcher()
| bsd-2-clause |
nikhilnrng/german-credit-risk | src/main.py | 1 | 1468 | import model
import preprocessing
from defines import Metadata
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
def main():
    """Train several classifiers on the German credit data and report
    test-set metrics for the best one.

    Pipeline: load + encode the dataset, hold out 20% for testing, fit
    baseline / naive Bayes / kNN / SVM models, pick the classifier with the
    highest cross-validated score, then print accuracy and a classification
    report.  (Python 2 module: uses print statements.)
    """
    metadata = Metadata()
    data, labels = preprocessing.load(metadata)
    data = preprocessing.encode(data, metadata.COLUMNS)

    # divide data into training and test sets
    x_train, x_test, y_train, y_test = train_test_split(
        data, labels, test_size=0.2) #, random_state=33)

    # run the candidate classifiers
    clf_base = model.baseline_classifier(x_train, y_train)
    clf_nb = model.naive_bayes_classifier(x_train, y_train, metadata.COLUMNS)
    clf_knn = model.knn_classifier(x_train, y_train, metadata.COLUMNS)
    clf_svm = model.svm_classifier(x_train, y_train, metadata.COLUMNS)

    # filter best classifier
    # NOTE(review): assumes every model (including the baseline) exposes a
    # fitted-search ``best_score_`` attribute -- confirm against model.py.
    clf = [(clf[1].best_score_, clf) for clf in [('base', clf_base),
                                                 ('knn', clf_knn),
                                                 ('svm', clf_svm),
                                                 ('nb', clf_nb)]]
    # max() yields a (score, (name, clf)) pair; [1] unpacks name/classifier.
    name, clf = max(clf, key=lambda x: x[0])[1]

    # predict test set
    y_pred = clf.predict(x_test)
    print 'Best classifier: %s' % name
    print '\taccuracy: %0.3f\n' % accuracy_score(y_test, y_pred)
    print classification_report(y_test, y_pred)
# Script entry point: run the full train/evaluate pipeline.
if __name__ == '__main__':
    main()
| mit |
luogangyi/bcec-nova | nova/tests/api/openstack/compute/contrib/test_security_group_default_rules.py | 16 | 18963 | # Copyright 2013 Metacloud, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from oslo.config import cfg
import webob
from nova.api.openstack.compute.contrib import security_group_default_rules
from nova.api.openstack import wsgi
from nova import context
import nova.db
from nova import test
from nova.tests.api.openstack import fakes
# Shared oslo.config configuration handle used by these tests.
CONF = cfg.CONF
class AttrDict(dict):
    """Dict whose keys can also be read as attributes (test helper).

    Note: a missing key raises KeyError, not AttributeError, exactly like
    plain item access.
    """

    def __getattr__(self, name):
        return self[name]
def security_group_default_rule_template(**kwargs):
    """Build a default-rule dict, filling in defaults for absent fields.

    Keyword arguments override the defaults (TCP port 22 from/to,
    CIDR 10.10.10.0/24).
    """
    defaults = {
        'ip_protocol': 'TCP',
        'from_port': 22,
        'to_port': 22,
        'cidr': '10.10.10.0/24',
    }
    rule = dict(defaults)
    rule.update(kwargs)
    return rule
def security_group_default_rule_db(security_group_default_rule, id=None):
    """Wrap a rule dict in an AttrDict, optionally stamping an ``id``.

    Mimics the attribute-style access of a DB row without touching the
    input dict.
    """
    attrs = dict(security_group_default_rule)
    if id is not None:
        attrs['id'] = id
    return AttrDict(attrs)
class TestSecurityGroupDefaultRules(test.TestCase):
    """Tests for the os-security-group-default-rules API controller.

    Covers create (valid / missing / blank / invalid fields), list, show,
    delete and the DB-level security_group_ensure_default behavior.  All
    requests use an admin context, as the extension requires.
    """

    def setUp(self):
        super(TestSecurityGroupDefaultRules, self).setUp()
        self.controller = \
            security_group_default_rules.SecurityGroupDefaultRulesController()

    def test_create_security_group_default_rule(self):
        # Happy path: the created rule echoes back every submitted field.
        sgr = security_group_default_rule_template()
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
        sgr_dict = dict(security_group_default_rule=sgr)
        res_dict = self.controller.create(req, sgr_dict)
        security_group_default_rule = res_dict['security_group_default_rule']
        self.assertEqual(security_group_default_rule['ip_protocol'],
                         sgr['ip_protocol'])
        self.assertEqual(security_group_default_rule['from_port'],
                         sgr['from_port'])
        self.assertEqual(security_group_default_rule['to_port'],
                         sgr['to_port'])
        self.assertEqual(security_group_default_rule['ip_range']['cidr'],
                         sgr['cidr'])

    # -- Missing-field cases: ports and protocol are mandatory (400), a
    #    missing CIDR defaults to 0.0.0.0/0. --

    def test_create_security_group_default_rule_with_no_to_port(self):
        sgr = security_group_default_rule_template()
        del sgr['to_port']
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_default_rule': sgr})

    def test_create_security_group_default_rule_with_no_from_port(self):
        sgr = security_group_default_rule_template()
        del sgr['from_port']
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_default_rule': sgr})

    def test_create_security_group_default_rule_with_no_ip_protocol(self):
        sgr = security_group_default_rule_template()
        del sgr['ip_protocol']
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_default_rule': sgr})

    def test_create_security_group_default_rule_with_no_cidr(self):
        sgr = security_group_default_rule_template()
        del sgr['cidr']
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
        res_dict = self.controller.create(req,
                                          {'security_group_default_rule': sgr})
        security_group_default_rule = res_dict['security_group_default_rule']
        self.assertNotEqual(security_group_default_rule['id'], 0)
        self.assertEqual(security_group_default_rule['ip_range']['cidr'],
                         '0.0.0.0/0')

    # -- Blank ('') fields behave like missing ones. --

    def test_create_security_group_default_rule_with_blank_to_port(self):
        sgr = security_group_default_rule_template(to_port='')
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_default_rule': sgr})

    def test_create_security_group_default_rule_with_blank_from_port(self):
        sgr = security_group_default_rule_template(from_port='')
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_default_rule': sgr})

    def test_create_security_group_default_rule_with_blank_ip_protocol(self):
        sgr = security_group_default_rule_template(ip_protocol='')
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_default_rule': sgr})

    def test_create_security_group_default_rule_with_blank_cidr(self):
        sgr = security_group_default_rule_template(cidr='')
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
        res_dict = self.controller.create(req,
                                          {'security_group_default_rule': sgr})
        security_group_default_rule = res_dict['security_group_default_rule']
        self.assertNotEqual(security_group_default_rule['id'], 0)
        self.assertEqual(security_group_default_rule['ip_range']['cidr'],
                         '0.0.0.0/0')

    # -- Malformed values (non-numeric / out-of-range ports, bad protocol
    #    or CIDR) must all be rejected with 400. --

    def test_create_security_group_default_rule_non_numerical_to_port(self):
        sgr = security_group_default_rule_template(to_port='invalid')
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_default_rule': sgr})

    def test_create_security_group_default_rule_non_numerical_from_port(self):
        sgr = security_group_default_rule_template(from_port='invalid')
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_default_rule': sgr})

    def test_create_security_group_default_rule_invalid_ip_protocol(self):
        sgr = security_group_default_rule_template(ip_protocol='invalid')
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_default_rule': sgr})

    def test_create_security_group_default_rule_invalid_cidr(self):
        sgr = security_group_default_rule_template(cidr='10.10.2222.0/24')
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_default_rule': sgr})

    def test_create_security_group_default_rule_invalid_to_port(self):
        sgr = security_group_default_rule_template(to_port='666666')
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_default_rule': sgr})

    def test_create_security_group_default_rule_invalid_from_port(self):
        sgr = security_group_default_rule_template(from_port='666666')
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_default_rule': sgr})

    def test_create_security_group_default_rule_with_no_body(self):
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.create, req, None)

    def test_create_duplicate_security_group_default_rule(self):
        # Creating the exact same rule twice must fail the second time.
        sgr = security_group_default_rule_template()
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
        self.controller.create(req, {'security_group_default_rule': sgr})
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_default_rule': sgr})

    def test_security_group_default_rules_list(self):
        # Reuses the create test to seed the DB, then checks index output.
        self.test_create_security_group_default_rule()
        rules = [dict(id=1,
                      ip_protocol='TCP',
                      from_port=22,
                      to_port=22,
                      ip_range=dict(cidr='10.10.10.0/24'))]
        expected = {'security_group_default_rules': rules}
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
        res_dict = self.controller.index(req)
        self.assertEqual(res_dict, expected)

    def test_default_security_group_default_rule_show(self):
        sgr = security_group_default_rule_template(id=1)
        self.test_create_security_group_default_rule()
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
        res_dict = self.controller.show(req, '1')
        security_group_default_rule = res_dict['security_group_default_rule']
        self.assertEqual(security_group_default_rule['ip_protocol'],
                         sgr['ip_protocol'])
        self.assertEqual(security_group_default_rule['to_port'],
                         sgr['to_port'])
        self.assertEqual(security_group_default_rule['from_port'],
                         sgr['from_port'])
        self.assertEqual(security_group_default_rule['ip_range']['cidr'],
                         sgr['cidr'])

    def test_delete_security_group_default_rule(self):
        sgr = security_group_default_rule_template(id=1)
        self.test_create_security_group_default_rule()
        self.called = False

        # Stub out the DB layer so we only verify the controller calls
        # destroy for the rule it looked up.
        def security_group_default_rule_destroy(context, id):
            self.called = True

        def return_security_group_default_rule(context, id):
            self.assertEqual(sgr['id'], id)
            return security_group_default_rule_db(sgr)

        self.stubs.Set(nova.db, 'security_group_default_rule_destroy',
                       security_group_default_rule_destroy)
        self.stubs.Set(nova.db, 'security_group_default_rule_get',
                       return_security_group_default_rule)
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
        self.controller.delete(req, '1')
        self.assertTrue(self.called)

    def test_security_group_ensure_default(self):
        # A new project's default security group must be seeded with the
        # default rules created above.
        sgr = security_group_default_rule_template(id=1)
        self.test_create_security_group_default_rule()
        ctxt = context.get_admin_context()
        setattr(ctxt, 'project_id', 'new_project_id')
        sg = nova.db.security_group_ensure_default(ctxt)
        rules = nova.db.security_group_rule_get_by_security_group(ctxt, sg.id)
        security_group_rule = rules[0]
        self.assertEqual(sgr['id'], security_group_rule.id)
        self.assertEqual(sgr['ip_protocol'], security_group_rule.protocol)
        self.assertEqual(sgr['from_port'], security_group_rule.from_port)
        self.assertEqual(sgr['to_port'], security_group_rule.to_port)
        self.assertEqual(sgr['cidr'], security_group_rule.cidr)
class TestSecurityGroupDefaultRulesXMLDeserializer(test.TestCase):
    """Tests for the XML request deserializer of the extension.

    Each case feeds an XML document and checks the resulting request body
    dict; note all scalar values come back as strings (XML has no types).
    """

    def setUp(self):
        super(TestSecurityGroupDefaultRulesXMLDeserializer, self).setUp()
        deserializer = security_group_default_rules.\
            SecurityGroupDefaultRulesXMLDeserializer()
        self.deserializer = deserializer

    def test_create_request(self):
        serial_request = """
<security_group_default_rule>
<from_port>22</from_port>
<to_port>22</to_port>
<ip_protocol>TCP</ip_protocol>
<cidr>10.10.10.0/24</cidr>
</security_group_default_rule>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "security_group_default_rule": {
                "from_port": "22",
                "to_port": "22",
                "ip_protocol": "TCP",
                "cidr": "10.10.10.0/24"
            },
        }
        self.assertEqual(request['body'], expected)

    # Omitted elements simply do not appear in the parsed body; the
    # deserializer performs no validation or defaulting.

    def test_create_no_to_port_request(self):
        serial_request = """
<security_group_default_rule>
<from_port>22</from_port>
<ip_protocol>TCP</ip_protocol>
<cidr>10.10.10.0/24</cidr>
</security_group_default_rule>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "security_group_default_rule": {
                "from_port": "22",
                "ip_protocol": "TCP",
                "cidr": "10.10.10.0/24"
            },
        }
        self.assertEqual(request['body'], expected)

    def test_create_no_from_port_request(self):
        serial_request = """
<security_group_default_rule>
<to_port>22</to_port>
<ip_protocol>TCP</ip_protocol>
<cidr>10.10.10.0/24</cidr>
</security_group_default_rule>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "security_group_default_rule": {
                "to_port": "22",
                "ip_protocol": "TCP",
                "cidr": "10.10.10.0/24"
            },
        }
        self.assertEqual(request['body'], expected)

    def test_create_no_ip_protocol_request(self):
        serial_request = """
<security_group_default_rule>
<from_port>22</from_port>
<to_port>22</to_port>
<cidr>10.10.10.0/24</cidr>
</security_group_default_rule>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "security_group_default_rule": {
                "from_port": "22",
                "to_port": "22",
                "cidr": "10.10.10.0/24"
            },
        }
        self.assertEqual(request['body'], expected)

    def test_create_no_cidr_request(self):
        serial_request = """
<security_group_default_rule>
<from_port>22</from_port>
<to_port>22</to_port>
<ip_protocol>TCP</ip_protocol>
</security_group_default_rule>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "security_group_default_rule": {
                "from_port": "22",
                "to_port": "22",
                "ip_protocol": "TCP",
            },
        }
        self.assertEqual(request['body'], expected)
class TestSecurityGroupDefaultRuleXMLSerializer(test.TestCase):
    """Tests for the XML response serializers of the extension.

    Verifies both the single-rule template and the index (list) template
    produce namespaced XML mirroring the rule dicts.
    """

    def setUp(self):
        super(TestSecurityGroupDefaultRuleXMLSerializer, self).setUp()
        self.namespace = wsgi.XMLNS_V11
        self.rule_serializer =\
            security_group_default_rules.SecurityGroupDefaultRuleTemplate()
        self.index_serializer =\
            security_group_default_rules.SecurityGroupDefaultRulesTemplate()

    def _tag(self, elem):
        # Strip (and assert) the '{namespace}' prefix from an element tag,
        # returning the bare tag name.
        tagname = elem.tag
        self.assertEqual(tagname[0], '{')
        tmp = tagname.partition('}')
        namespace = tmp[0][1:]
        self.assertEqual(namespace, self.namespace)
        return tmp[2]

    def _verify_security_group_default_rule(self, raw_rule, tree):
        # Check that the XML subtree contains exactly the expected child
        # elements (including the nested ip_range/cidr) with matching text.
        self.assertEqual(raw_rule['id'], tree.get('id'))
        seen = set()
        expected = set(['ip_protocol', 'from_port', 'to_port', 'ip_range',
                        'ip_range/cidr'])
        for child in tree:
            child_tag = self._tag(child)
            seen.add(child_tag)
            if child_tag == 'ip_range':
                for gr_child in child:
                    gr_child_tag = self._tag(gr_child)
                    self.assertIn(gr_child_tag, raw_rule[child_tag])
                    seen.add('%s/%s' % (child_tag, gr_child_tag))
                    self.assertEqual(gr_child.text,
                                     raw_rule[child_tag][gr_child_tag])
            else:
                self.assertEqual(child.text, raw_rule[child_tag])
        self.assertEqual(seen, expected)

    def test_rule_serializer(self):
        raw_rule = dict(id='123',
                        ip_protocol='TCP',
                        from_port='22',
                        to_port='22',
                        ip_range=dict(cidr='10.10.10.0/24'))
        rule = dict(security_group_default_rule=raw_rule)
        text = self.rule_serializer.serialize(rule)
        tree = etree.fromstring(text)
        self.assertEqual('security_group_default_rule', self._tag(tree))
        self._verify_security_group_default_rule(raw_rule, tree)

    def test_index_serializer(self):
        rules = [dict(id='123',
                      ip_protocol='TCP',
                      from_port='22',
                      to_port='22',
                      ip_range=dict(cidr='10.10.10.0/24')),
                 dict(id='234',
                      ip_protocol='UDP',
                      from_port='23456',
                      to_port='234567',
                      ip_range=dict(cidr='10.12.0.0/18')),
                 dict(id='345',
                      ip_protocol='tcp',
                      from_port='3456',
                      to_port='4567',
                      ip_range=dict(cidr='192.168.1.0/32'))]
        rules_dict = dict(security_group_default_rules=rules)
        text = self.index_serializer.serialize(rules_dict)
        tree = etree.fromstring(text)
        self.assertEqual('security_group_default_rules', self._tag(tree))
        self.assertEqual(len(rules), len(tree))
        # Children must appear in the same order as the input list.
        for idx, child in enumerate(tree):
            self._verify_security_group_default_rule(rules[idx], child)
| apache-2.0 |
jonyroda97/redbot-amigosprovaveis | lib/future/backports/urllib/response.py | 82 | 3180 | """Response classes used by urllib.
The base class, addbase, defines a minimal file-like interface,
including read() and readline(). The typical response object is an
addinfourl instance, which defines an info() method that returns
headers and a geturl() method that returns the url.
"""
from __future__ import absolute_import, division, unicode_literals
from future.builtins import object
class addbase(object):
    """Base class for addinfo and addclosehook.

    Wraps a file-like object and delegates read/readline (plus readlines
    and fileno when available) to it.  After close(), every delegated
    attribute is reset to None so further use fails fast.
    """

    # XXX Add a method to expose the timeout on the underlying socket?

    def __init__(self, fp):
        # TODO(jhylton): Is there a better way to delegate using io?
        self.fp = fp
        self.read = fp.read
        self.readline = fp.readline
        # TODO(jhylton): Make sure an object with readlines() is also iterable
        if hasattr(fp, "readlines"):
            self.readlines = fp.readlines
        self.fileno = fp.fileno if hasattr(fp, "fileno") else (lambda: None)

    def __iter__(self):
        # Assigning `__iter__` to the instance doesn't work as intended
        # because the iter builtin does something like `cls.__iter__(obj)`
        # and thus fails to find the _bound_ method `obj.__iter__`.
        # Returning just `self.fp` works for built-in file objects but
        # might not work for general file-like objects.
        return iter(self.fp)

    def __repr__(self):
        return '<%s at %r whose fp = %r>' % (
            self.__class__.__name__, id(self), self.fp)

    def close(self):
        # Close the wrapped file (if still open), then null out every
        # delegated callable so stale references cannot be used.
        if self.fp:
            self.fp.close()
        for attr in ('fp', 'read', 'readline', 'readlines', 'fileno',
                     '__iter__', '__next__'):
            setattr(self, attr, None)

    def __enter__(self):
        if self.fp is None:
            raise ValueError("I/O operation on closed file")
        return self

    def __exit__(self, type, value, traceback):
        self.close()
class addclosehook(addbase):
    """File wrapper that invokes a callback exactly once on close()."""

    def __init__(self, fp, closehook, *hookargs):
        addbase.__init__(self, fp)
        self.closehook = closehook
        self.hookargs = hookargs

    def close(self):
        # Fire the hook first (once), then fall through to the base close,
        # which closes fp and resets the delegated attributes.
        if self.closehook:
            self.closehook(*self.hookargs)
            self.closehook = None
            self.hookargs = None
        addbase.close(self)
class addinfo(addbase):
    """File wrapper exposing the response headers via info()."""

    def __init__(self, fp, headers):
        addbase.__init__(self, fp)
        self.headers = headers

    def info(self):
        """Return the stored message headers."""
        return self.headers
class addinfourl(addbase):
    """File wrapper exposing info(), geturl() and getcode() for a URL open."""

    def __init__(self, fp, headers, url, code=None):
        addbase.__init__(self, fp)
        self.headers = headers
        self.url = url
        self.code = code

    def info(self):
        """Return the response headers."""
        return self.headers

    def getcode(self):
        """Return the HTTP status code, or None when not known."""
        return self.code

    def geturl(self):
        """Return the URL of the resource actually retrieved."""
        return self.url
# Tidy the module namespace: these names were only needed at import time.
del absolute_import, division, unicode_literals, object
| gpl-3.0 |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/external/decorators/_decorators.py | 3 | 8607 | """
Decorators for labeling and modifying behavior of test objects.
Decorators that merely return a modified version of the original
function object are straightforward. Decorators that return a new
function object need to use
::
nose.tools.make_decorator(original_function)(decorator)
in returning the decorator, in order to preserve meta-data such as
function name, setup and teardown functions and so on - see
``nose.tools`` for more information.
"""
import warnings
# IPython changes: make this work if numpy not available
# Original code:
#from numpy.testing.utils import \
# WarningManager, WarningMessage
# Our version:
from _numpy_testing_utils import WarningManager
try:
from _numpy_testing_noseclasses import KnownFailureTest
except:
pass
# End IPython changes
def slow(t):
    """
    Label a test as 'slow' by setting ``t.slow = True``.

    What counts as slow is subjective and hardware-dependent, but any
    individual test needing more than a second or two should carry this
    label (the full suite runs thousands of tests, so a second matters).

    Parameters
    ----------
    t : callable
        The test to label as slow.

    Returns
    -------
    t : callable
        The same callable, now carrying the ``slow`` attribute.

    Examples
    --------
    The `numpy.testing` module includes ``import decorators as dec``.
    A test can be decorated as slow like this::

        from numpy.testing import *

        @dec.slow
        def test_big(self):
            print 'Big, slow test'
    """
    setattr(t, "slow", True)
    return t
def setastest(tf=True):
    """
    Signals to nose that this function is or is not a test.

    Parameters
    ----------
    tf : bool
        If True, specifies that the decorated callable is a test.
        If False, specifies that the decorated callable is not a test.
        Default is True.

    Notes
    -----
    This decorator can't use the nose namespace, because it can be
    called from a non-test module. See also ``istest`` and ``nottest`` in
    ``nose.tools``.

    Examples
    --------
    `setastest` can be used in the following way::

        from numpy.testing.decorators import setastest

        @setastest(False)
        def func_with_test_in_name(arg1, arg2):
            pass
    """
    def mark(func):
        # nose inspects __test__ to decide whether to collect a callable.
        func.__test__ = tf
        return func
    return mark
def skipif(skip_condition, msg=None):
    """
    Make function raise SkipTest exception if a given condition is true.
    If the condition is a callable, it is used at runtime to dynamically
    make the decision. This is useful for tests that may require costly
    imports, to delay the cost until the test suite is actually executed.
    Parameters
    ----------
    skip_condition : bool or callable
        Flag to determine whether to skip the decorated test.
    msg : str, optional
        Message to give on raising a SkipTest exception. Default is None.
    Returns
    -------
    decorator : function
        Decorator which, when applied to a function, causes SkipTest
        to be raised when `skip_condition` is True, and the function
        to be called normally otherwise.
    Notes
    -----
    The decorator itself is decorated with the ``nose.tools.make_decorator``
    function in order to transmit function name, and various other metadata.
    """

    def skip_decorator(f):
        # Local import to avoid a hard nose dependency and only incur the
        # import time overhead at actual test-time.
        import nose

        # Allow for both boolean or callable skip conditions.
        # Wrapping in a lambda defers evaluation of callable conditions to
        # call time, so expensive checks run only when the test runs.
        if callable(skip_condition):
            skip_val = lambda : skip_condition()
        else:
            skip_val = lambda : skip_condition

        def get_msg(func,msg=None):
            """Skip message with information about function being skipped."""
            # NOTE: the inner ``msg`` parameter shadows the outer one on
            # purpose; callers below pass the outer value in explicitly.
            if msg is None:
                out = 'Test skipped due to test condition'
            else:
                out = '\n'+msg
            return "Skipping test: %s%s" % (func.__name__,out)

        # We need to define *two* skippers because Python doesn't allow both
        # return with value and yield inside the same function.
        def skipper_func(*args, **kwargs):
            """Skipper for normal test functions."""
            if skip_val():
                raise nose.SkipTest(get_msg(f,msg))
            else:
                return f(*args, **kwargs)

        def skipper_gen(*args, **kwargs):
            """Skipper for test generators."""
            if skip_val():
                raise nose.SkipTest(get_msg(f,msg))
            else:
                for x in f(*args, **kwargs):
                    yield x

        # Choose the right skipper to use when building the actual decorator.
        if nose.util.isgenerator(f):
            skipper = skipper_gen
        else:
            skipper = skipper_func

        return nose.tools.make_decorator(f)(skipper)

    return skip_decorator
def knownfailureif(fail_condition, msg=None):
    """
    Make function raise KnownFailureTest exception if given condition is true.
    If the condition is a callable, it is used at runtime to dynamically
    make the decision. This is useful for tests that may require costly
    imports, to delay the cost until the test suite is actually executed.
    Parameters
    ----------
    fail_condition : bool or callable
        Flag to determine whether to mark the decorated test as a known
        failure (if True) or not (if False).
    msg : str, optional
        Message to give on raising a KnownFailureTest exception.
        Default is None.
    Returns
    -------
    decorator : function
        Decorator, which, when applied to a function, causes SkipTest
        to be raised when `skip_condition` is True, and the function
        to be called normally otherwise.
    Notes
    -----
    The decorator itself is decorated with the ``nose.tools.make_decorator``
    function in order to transmit function name, and various other metadata.
    """
    if msg is None:
        msg = 'Test skipped due to known failure'

    # Allow for both boolean or callable known failure conditions.
    if callable(fail_condition):
        fail_val = lambda : fail_condition()
    else:
        fail_val = lambda : fail_condition

    def knownfail_decorator(f):
        # Local import to avoid a hard nose dependency and only incur the
        # import time overhead at actual test-time.
        import nose

        def knownfailer(*args, **kwargs):
            if fail_val():
                # NOTE(review): Python-2-only raise syntax; also
                # KnownFailureTest may be undefined when the guarded import
                # at the top of the module failed -- confirm.
                raise KnownFailureTest, msg
            else:
                return f(*args, **kwargs)

        return nose.tools.make_decorator(f)(knownfailer)

    return knownfail_decorator
def deprecated(conditional=True):
    """
    Filter deprecation warnings while running the test suite.
    This decorator can be used to filter DeprecationWarning's, to avoid
    printing them during the test suite run, while checking that the test
    actually raises a DeprecationWarning.
    Parameters
    ----------
    conditional : bool or callable, optional
        Flag to determine whether to mark test as deprecated or not. If the
        condition is a callable, it is used at runtime to dynamically make the
        decision. Default is True.
    Returns
    -------
    decorator : function
        The `deprecated` decorator itself.
    Notes
    -----
    .. versionadded:: 1.4.0
    """
    def deprecate_decorator(f):
        # Local import to avoid a hard nose dependency and only incur the
        # import time overhead at actual test-time.
        import nose

        def _deprecated_imp(*args, **kwargs):
            # Poor man's replacement for the with statement
            ctx = WarningManager(record=True)
            l = ctx.__enter__()
            warnings.simplefilter('always')
            try:
                f(*args, **kwargs)
                # The test must have emitted at least one warning, and the
                # first one must be a DeprecationWarning.
                if not len(l) > 0:
                    raise AssertionError("No warning raised when calling %s"
                                         % f.__name__)
                if not l[0].category is DeprecationWarning:
                    raise AssertionError("First warning for %s is not a " \
                            "DeprecationWarning( is %s)" % (f.__name__, l[0]))
            finally:
                # NOTE(review): called without the usual (type, value,
                # traceback) arguments -- confirm WarningManager.__exit__
                # accepts a no-argument call.
                ctx.__exit__()

        # Decide at decoration time whether to wrap at all.
        if callable(conditional):
            cond = conditional()
        else:
            cond = conditional
        if cond:
            return nose.tools.make_decorator(f)(_deprecated_imp)
        else:
            return f

    return deprecate_decorator
| lgpl-3.0 |
imakin/PersonalAssistant | GameBot/src_py/makinreusable/makinbutton.py | 1 | 2302 | """
@author Izzulmakin, January - July 2015
"""
from PySide import QtCore, QtGui
class MakinButton(QtGui.QPushButton):
    """Custom QPushButton that emits signals when hovered and left.

    Original docstrings (Indonesian/Javanese) translated to English.
    How to make slots:
    http://www.pythoncentral.io/pysidepyqt-tutorial-creating-your-own-signals-and-slots/
    """
    # Emitted when the mouse enters / leaves the button.
    dihover = QtCore.Signal()
    dileave = QtCore.Signal()

    def __init__(self,text="",parent=None):
        super(MakinButton,self).__init__(text,parent)

    def getStylesheetByName(self, name):
        """Return the ``property: value;`` declarations of this widget's
        stylesheet.

        Bug fix: the original referenced the undefined module ``re`` and a
        nonexistent ``self.stylesheet`` attribute, and discarded the match
        list.  It now reads the Qt stylesheet via ``self.styleSheet()`` and
        returns the declarations.  (``name`` is accepted for backward
        compatibility but is currently unused, as in the original.)
        """
        import re
        return re.findall(r"([\w-]+:.+;)", self.styleSheet())

    def enterEvent(self, event):
        # Announce the hover before delegating to the default handling.
        self.dihover.emit()
        return QtGui.QPushButton.enterEvent(self, event)

    def leaveEvent(self, event):
        self.dileave.emit()
        return QtGui.QPushButton.leaveEvent(self, event)
class MakinButtonAnimated(MakinButton):
    """Button whose background animates a radial gradient on hover.

    Two QTimers grow (``Ti``, on enter) or shrink (``To``, on leave) the
    gradient stop ``AStop`` in 0.1 steps every ``ASpeed`` ms, producing a
    fade between ``col1`` (inner) and ``col2`` (outer).
    """
    def __init__(self,p,col1="rgba(58, 107, 115, 155)",col2="rgba(255, 255, 255, 0)"):
        super(MakinButtonAnimated,self).__init__(p)
        self.col1 = col1
        self.col2 = col2
        self.setStyleSheet("background-color:transparent;")
        # Current gradient stop position, 0.0 (idle) .. ~0.94 (fully shown).
        self.AStop = 0.0
        # Extra stylesheet rules appended after the animated gradient.
        self.StyleStatic = "border:0px;"
        # Timer interval in milliseconds.
        self.ASpeed = 20
        self.updateAnim()
        self.Ti = QtCore.QTimer(self)
        self.To = QtCore.QTimer(self)
        self.Ti.timeout.connect(self.animIn)
        self.To.timeout.connect(self.animOut)

    def setStyleStatic(self,style):
        """Replace the static stylesheet suffix and re-render."""
        self.StyleStatic = style
        self.updateAnim()

    def enterEvent(self,event):
        # Stop any fade-out in progress, then start the fade-in.
        self.To.stop()
        self.Ti.start(self.ASpeed)
        return QtGui.QPushButton.enterEvent(self, event)

    def leaveEvent(self, event):
        self.Ti.stop()
        self.To.start(self.ASpeed)
        return QtGui.QPushButton.leaveEvent(self, event)

    def animIn(self):
        # Grow the gradient; clamp at 0.94 and stop the timer when done.
        self.AStop += 0.1
        if (self.AStop>=0.90):
            self.AStop = 0.94
            self.updateAnim()
            self.Ti.stop()
        else:
            self.updateAnim()

    def animOut(self):
        # Shrink the gradient back to 0.0 and stop the timer when done.
        self.AStop -= 0.1
        if (self.AStop<=0.1):
            self.AStop = 0.0
            self.updateAnim()
            self.To.stop()
        else:
            self.updateAnim()

    def updateAnim(self):
        # Rebuild the stylesheet with the gradient stops at the current
        # animation position.  NOTE(review): the selector uses
        # "#" + objectName(); an empty object name yields a bare "#" --
        # confirm callers always assign an object name.
        self.setStyleSheet("#"+str(self.objectName())+"""{background-color: qradialgradient(spread:pad,
        cx:0.5,
        cy:0.5,
        radius:0.5,
        fx:0.5,
        fy:0.5,
        stop:"""+str(self.AStop)+" "+self.col1+""",
        stop:"""+str(self.AStop+0.05)+" "+self.col2+""");}
        """+str(self.StyleStatic))
| mit |
mxOBS/deb-pkg_trusty_chromium-browser | third_party/chromite/lib/retry_stats_unittest.py | 1 | 7457 | #!/usr/bin/python
# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for the retry_stats.py module."""
from __future__ import print_function
import os
import StringIO
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from chromite.lib import cros_test_lib
from chromite.lib import retry_stats
# We access internal members to help with testing.
# pylint: disable=W0212
class TestRetryException(Exception):
    """Exception raised deliberately by the failure-path test helpers."""
class TestRetryStats(cros_test_lib.TestCase):
    """This contains test cases for the retry_stats module."""

    CAT = 'Test Service A'
    CAT_B = 'Test Service B'

    SUCCESS_RESULT = 'success result'

    def setUp(self):
        # Reset module-level state so every test starts without a collection.
        retry_stats._STATS_COLLECTION = None

    def handlerNoRetry(self, _e):
        """Retry handler that never retries."""
        return False

    def handlerRetry(self, _e):
        """Retry handler that always retries."""
        return True

    def callSuccess(self):
        """Stand-in for a call that always succeeds."""
        return self.SUCCESS_RESULT

    def callFailure(self):
        """Stand-in for a call that always fails."""
        raise TestRetryException()

    def _verifyStats(self, category, success=0, failure=0, retry=0):
        """Verify that the given category has the specified values collected."""
        stats = [e for e in retry_stats._STATS_COLLECTION if e.category == category]

        # Idiom: count/sum with generator expressions instead of temp lists.
        stats_success = sum(1 for e in stats if retry_stats._SuccessFilter(e))
        stats_failure = len(stats) - stats_success
        stats_retry = sum(retry_stats._RetryCount(e) for e in stats)

        self.assertEqual(stats_success, success)
        self.assertEqual(stats_failure, failure)
        self.assertEqual(stats_retry, retry)

    def testSetupStats(self):
        """Verify that we do something when we setup a new stats category."""
        # Show that setup does something.
        # Idiom fix: assertIsNone/assertIsNotNone instead of
        # assertEqual/assertNotEqual against None.
        self.assertIsNone(retry_stats._STATS_COLLECTION)
        retry_stats.SetupStats()
        self.assertIsNotNone(retry_stats._STATS_COLLECTION)

    def testReportCategoryStatsEmpty(self):
        """An empty category still renders a (zeroed) report block."""
        retry_stats.SetupStats()

        out = StringIO.StringIO()
        retry_stats.ReportCategoryStats(out, self.CAT)

        expected = """************************************************************
** Performance Statistics for Test Service A
**
** Success: 0
** Failure: 0
** Retries: 0
** Total: 0
************************************************************
"""

        self.assertEqual(out.getvalue(), expected)

    def testReportStatsEmpty(self):
        """With no data collected, ReportStats produces no output at all."""
        retry_stats.SetupStats()

        out = StringIO.StringIO()
        retry_stats.ReportStats(out)

        # No data collected means no categories are known, nothing to report.
        self.assertEqual(out.getvalue(), '')

    def testReportStats(self):
        """Both used categories appear in the combined report."""
        retry_stats.SetupStats()

        # Insert some stats to report.
        retry_stats.RetryWithStats(
            self.CAT, self.handlerNoRetry, 3, self.callSuccess)
        retry_stats.RetryWithStats(
            self.CAT_B, self.handlerNoRetry, 3, self.callSuccess)
        self.assertRaises(TestRetryException,
                          retry_stats.RetryWithStats,
                          self.CAT, self.handlerRetry, 3, self.callFailure)

        out = StringIO.StringIO()
        retry_stats.ReportStats(out)

        # Expecting reports for both CAT and CAT_B used above.
        expected = """************************************************************
** Performance Statistics for Test Service A
**
** Success: 1
** Failure: 1
** Retries: 3
** Total: 2
************************************************************
************************************************************
** Performance Statistics for Test Service B
**
** Success: 1
** Failure: 0
** Retries: 0
** Total: 1
************************************************************
"""

        self.assertEqual(out.getvalue(), expected)

    def testSuccessNoSetup(self):
        """Verify that we can handle a successful call if we're never setup."""
        self.assertIsNone(retry_stats._STATS_COLLECTION)

        result = retry_stats.RetryWithStats(
            self.CAT, self.handlerNoRetry, 3, self.callSuccess)
        self.assertEqual(result, self.SUCCESS_RESULT)

        result = retry_stats.RetryWithStats(
            self.CAT, self.handlerNoRetry, 3, self.callSuccess)
        self.assertEqual(result, self.SUCCESS_RESULT)

        self.assertIsNone(retry_stats._STATS_COLLECTION)

    def testFailureNoRetryNoSetup(self):
        """Verify that we can handle a failure call if we're never setup."""
        self.assertIsNone(retry_stats._STATS_COLLECTION)

        self.assertRaises(TestRetryException,
                          retry_stats.RetryWithStats,
                          self.CAT, self.handlerNoRetry, 3, self.callFailure)

        self.assertRaises(TestRetryException,
                          retry_stats.RetryWithStats,
                          self.CAT, self.handlerNoRetry, 3, self.callFailure)

        self.assertIsNone(retry_stats._STATS_COLLECTION)

    def testSuccess(self):
        """Verify that we can handle a successful call."""
        retry_stats.SetupStats()
        self._verifyStats(self.CAT)

        # Succeed once.
        result = retry_stats.RetryWithStats(
            self.CAT, self.handlerNoRetry, 3, self.callSuccess)
        self.assertEqual(result, self.SUCCESS_RESULT)
        self._verifyStats(self.CAT, success=1)

        # Succeed twice.
        result = retry_stats.RetryWithStats(
            self.CAT, self.handlerNoRetry, 3, self.callSuccess)
        self.assertEqual(result, self.SUCCESS_RESULT)
        self._verifyStats(self.CAT, success=2)

    def testSuccessRetry(self):
        """Verify that we can handle a successful call after tries."""
        retry_stats.SetupStats()
        self._verifyStats(self.CAT)

        # Use this scoped list as a persistent counter.
        call_counter = ['fail 1', 'fail 2']

        def callRetrySuccess():
            # Fails while entries remain, then succeeds.
            if call_counter:
                raise TestRetryException(call_counter.pop())
            else:
                return self.SUCCESS_RESULT

        # Retry twice, then succeed.
        result = retry_stats.RetryWithStats(
            self.CAT, self.handlerRetry, 3, callRetrySuccess)
        self.assertEqual(result, self.SUCCESS_RESULT)
        self._verifyStats(self.CAT, success=1, retry=2)

    def testFailureNoRetry(self):
        """Verify that we can handle a failure if the handler doesn't retry."""
        retry_stats.SetupStats()
        self._verifyStats(self.CAT)

        # Fail once without retries.
        self.assertRaises(TestRetryException,
                          retry_stats.RetryWithStats,
                          self.CAT, self.handlerNoRetry, 3, self.callFailure)
        self._verifyStats(self.CAT, failure=1)

        # Fail twice without retries.
        self.assertRaises(TestRetryException,
                          retry_stats.RetryWithStats,
                          self.CAT, self.handlerNoRetry, 3, self.callFailure)
        self._verifyStats(self.CAT, failure=2)

    def testFailureRetry(self):
        """Verify that we can handle a failure if we use all retries."""
        retry_stats.SetupStats()
        self._verifyStats(self.CAT)

        # Fail once with exhausted retries.
        self.assertRaises(TestRetryException,
                          retry_stats.RetryWithStats,
                          self.CAT, self.handlerRetry, 3, self.callFailure)
        self._verifyStats(self.CAT, failure=1, retry=3)  # 3 retries = 4 attempts.

        # Fail twice with exhausted retries.
        self.assertRaises(TestRetryException,
                          retry_stats.RetryWithStats,
                          self.CAT, self.handlerRetry, 3, self.callFailure)
        self._verifyStats(self.CAT, failure=2, retry=6)
if __name__ == '__main__':
    # Delegate to the chromite test runner.
    cros_test_lib.main()
| bsd-3-clause |
sunny-wyb/xen-4.1.2 | tools/python/logging/logging-0.4.9.2/test/logrecv.py | 42 | 15073 | #! /usr/bin/env python
#
# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""
Simple socket-based logging event receiver for use with "logging.py" logging
module.
Should work under Python versions >= 1.5.2, except that source line information
is not available unless 'inspect' is.
Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved.
"""
from select import select
import sys, string, struct, types, cPickle, socket
import logging, logging.handlers, logging.config
TIMEOUT = 10

# Errno delivered in socket.error when the peer resets the connection;
# used by the stream handler to tell "client went away" apart from real
# socket failures. 10054 is WSAECONNRESET on Windows.
if sys.platform == "win32":
    RESET_ERROR = 10054
else:
    RESET_ERROR = 0 #FIXME get correct value for Unix...

# Re-raise exceptions occurring inside logging handlers rather than
# swallowing them, so receiver-side bugs are visible during testing.
logging.raiseExceptions = 1
#
# TCP receiver
#
from SocketServer import ThreadingTCPServer, StreamRequestHandler
class LogRecordStreamHandler(StreamRequestHandler):
    """
    Handler for a streaming logging request. It basically logs the record
    using whatever logging policy is configured locally.
    """

    def handle(self):
        """
        Handle multiple requests - each expected to be a 4-byte length,
        followed by the LogRecord in pickle format. Logs the record
        according to whatever policy is configured locally.
        """
        while 1:
            try:
                # Wire format: 4-byte big-endian length prefix, then the
                # pickled LogRecord attribute dict of exactly that length.
                chunk = self.connection.recv(4)
                if len(chunk) < 4:
                    break
                slen = struct.unpack(">L", chunk)[0]
                chunk = self.connection.recv(slen)
                while len(chunk) < slen:
                    # Keep reading until the full pickle has arrived.
                    chunk = chunk + self.connection.recv(slen - len(chunk))
                obj = self.unPickle(chunk)
                record = logging.makeLogRecord(obj)
                self.handleLogRecord(record)
            except socket.error, e:
                if type(e.args) != types.TupleType:
                    raise
                else:
                    # A peer reset just means the client disconnected;
                    # anything else is a genuine error and is re-raised.
                    errcode = e.args[0]
                    if errcode != RESET_ERROR:
                        raise
                    break

    def unPickle(self, data):
        # NOTE(review): unpickling network data is unsafe for untrusted
        # peers; acceptable here only because this is a test receiver.
        return cPickle.loads(data)

    def handleLogRecord(self, record):
        #if a name is specified, we use the named logger rather than the one
        #implied by the record. This is so test harnesses don't get into
        #endless loops (particularly log_test.py, which has this code and the
        #client code in the same Python instance)
        if self.server.logname is not None:
            name = self.server.logname
        else:
            name = record.name
        logger = logging.getLogger(name)
        logger.handle(record)
class LogRecordSocketReceiver(ThreadingTCPServer):
    """
    A simple-minded TCP socket-based logging receiver suitable for test
    purposes.
    """

    allow_reuse_address = 1

    def __init__(self, host='localhost', port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
                 handler=LogRecordStreamHandler):
        ThreadingTCPServer.__init__(self, (host, port), handler)
        self.abort = 0      # set truthy (externally) to stop the serve loop
        self.timeout = 1    # select() timeout in seconds between abort checks
        self.logname = None # when set, overrides the logger name of received records

    def serve_until_stopped(self):
        """Handle requests until self.abort becomes true."""
        import select
        abort = 0
        while not abort:
            # Poll with a timeout so self.abort is re-checked periodically.
            rd, wr, ex = select.select([self.socket.fileno()],
                                       [], [],
                                       self.timeout)
            if rd:
                self.handle_request()
            abort = self.abort
#
# UDP receiver
#
from SocketServer import ThreadingUDPServer, DatagramRequestHandler
class LogRecordDatagramHandler(DatagramRequestHandler):
    """
    Handler for a datagram logging request. It basically logs the record using
    whatever logging policy is configured locally.
    """

    def handle(self):
        # Packet format: 4-byte big-endian length prefix followed by the
        # pickled LogRecord attribute dict (must fit in one datagram).
        chunk = self.packet
        slen = struct.unpack(">L", chunk[:4])[0]
        chunk = chunk[4:]
        assert len(chunk) == slen
        obj = self.unPickle(chunk)
        record = logging.LogRecord(None, None, "", 0, "", (), None)
        record.__dict__.update(obj)
        self.handleLogRecord(record)

    def unPickle(self, data):
        # NOTE(review): unpickling network data is unsafe for untrusted
        # peers; acceptable here only because this is a test receiver.
        return cPickle.loads(data)

    def handleLogRecord(self, record):
        #if a name is specified, we use the named logger rather than the one
        #implied by the record. This is so test harnesses don't get into
        #endless loops (particularly log_test.py, which has this code and the
        #client code in the same Python instance)
        if self.server.logname is not None:
            name = self.server.logname
        else:
            name = record.name
        logger = logging.getLogger(name)
        logger.handle(record)

    def finish(self):
        # Override the default: a log receiver sends no reply datagram.
        pass
class LogRecordDatagramReceiver(ThreadingUDPServer):
    """
    A simple-minded UDP datagram-based logging receiver suitable for test
    purposes.
    """

    allow_reuse_address = 1

    def __init__(self, host='localhost', port=logging.handlers.DEFAULT_UDP_LOGGING_PORT,
                 handler=LogRecordDatagramHandler):
        ThreadingUDPServer.__init__(self, (host, port), handler)
        self.abort = 0      # set truthy (externally) to stop the serve loop
        self.timeout = 1    # select() timeout in seconds between abort checks
        self.logname = None # when set, overrides the logger name of received records

    def serve_until_stopped(self):
        """Handle requests until self.abort becomes true."""
        import select
        abort = 0
        while not abort:
            # Poll with a timeout so self.abort is re-checked periodically.
            rd, wr, ex = select.select([self.socket.fileno()],
                                       [], [],
                                       self.timeout)
            if rd:
                self.handle_request()
            abort = self.abort
#
# HTTP receiver
#
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import cgi
class LogRecordHTTPHandler(BaseHTTPRequestHandler):
    """Handler that accepts logging events submitted as HTTP GET/POST form
    data and logs them with the locally configured policy."""

    def makeDict(self, fs):
        # Copy the submitted form fields into a plain dict, then eval() the
        # fields known to carry non-string LogRecord attributes back into
        # Python values.
        # NOTE(review): eval() on request data is unsafe for untrusted
        # clients; acceptable here only because this is a test receiver.
        dict = {}
        for mfs in fs.list:
            dict[mfs.name] = mfs.value
        for key in ["args", "exc_info", "exc_text", "lineno", "msecs", "created",
                    "thread", "levelno", "relativeCreated"]:
            if dict.has_key(key):
                dict[key] = eval(dict[key])
        return dict

    def do_GET(self):
        """Serve a GET request."""
        sts = "OK"
        env = { 'REQUEST_METHOD' : 'GET'}
        try:
            # Record attributes arrive in the query string.
            i = string.find(self.path, '?')
            if i >= 0:
                env['QUERY_STRING'] = self.path[i + 1:]
            fs = cgi.FieldStorage(environ=env)
            dict = self.makeDict(fs)
            record = logging.LogRecord(None, None, "", 0, "", (), None)
            record.__dict__.update(dict)
            self.handleLogRecord(record)
        except Exception, e:
            # sts is set for completeness but the re-raise means the
            # response below is never sent on error.
            sts = "ERROR"
            raise
        self.send_head()
        self.wfile.write("GET %s" % sts)

    def handleLogRecord(self, record):
        #if a name is specified, we use the named logger rather than the one
        #implied by the record. This is so test harnesses don't get into
        #endless loops (particularly log_test.py, which has this code and the
        #client code in the same Python instance)
        if self.server.logname is not None:
            name = self.server.logname
        else:
            name = record.name
        logger = logging.getLogger(name)
        logger.handle(record)

    def do_HEAD(self):
        """Serve a HEAD request."""
        self.send_head()

    def do_POST(self):
        """Serve a POST request."""
        sts = "OK"
        env = { 'REQUEST_METHOD' : 'POST'}
        try:
            # Record attributes arrive in the request body (and possibly
            # also the query string).
            length = self.headers.getheader('content-length')
            if length:
                env['CONTENT_LENGTH'] = length
            #print self.headers
            i = string.find(self.path, '?')
            if i >= 0:
                env['QUERY_STRING'] = self.path[i + 1:]
            fs = cgi.FieldStorage(fp=self.rfile, environ=env)
            dict = self.makeDict(fs)
            record = logging.LogRecord(None, None, "", 0, "", (), None)
            record.__dict__.update(dict)
            self.handleLogRecord(record)
        except Exception, e:
            print e
            sys.stdout.flush()
            sts = "ERROR"
            raise
        self.send_head()
        self.wfile.write("POST %s" % sts)

    def send_head(self):
        """Common code for GET and HEAD commands.

        Sends a 200 response code and plain-text MIME headers; returns
        nothing (the original docstring described a file-serving variant).
        """
        self.send_response(200)
        self.send_header("Content-type", "text/plain")
        self.end_headers()

    def log_message(self, *args):
        # Per-request logging is suppressed to keep test output quiet.
        #comment out the following line if you don't want to show requests
        #apply(BaseHTTPRequestHandler.log_message, (self,) + args)
        pass
class LogRecordHTTPReceiver(HTTPServer):
    """A simple-minded HTTP-based logging receiver suitable for test
    purposes."""

    def __init__(self, host='localhost', port=logging.handlers.DEFAULT_HTTP_LOGGING_PORT,
                 handler=LogRecordHTTPHandler):
        HTTPServer.__init__(self, (host, port), handler)
        self.abort = 0      # set truthy (externally) to stop the serve loop
        self.timeout = 1    # select() timeout in seconds between abort checks
        self.logname = None # when set, overrides the logger name of received records

    def serve_until_stopped(self):
        """Handle requests until self.abort becomes true."""
        import select
        abort = 0
        while not abort:
            # Poll with a timeout so self.abort is re-checked periodically.
            rd, wr, ex = select.select([self.socket.fileno()],
                                       [], [],
                                       self.timeout)
            if rd:
                self.handle_request()
            abort = self.abort
#
# SOAP receiver
#
# SOAP support is optional: if ZSI is missing, SOAPServer is set to None
# and runSOAP() reports the missing dependency instead.
try:
    from ZSI import dispatch

    # Module-level override for the logger name; set by
    # SOAPServer.serve_until_stopped before each dispatched request.
    logname = None

    def log(args, created, exc_info, exc_text, filename, levelname, levelno, lineno, module, msecs, msg, name, pathname, process, relativeCreated, thread):
        # SOAP-dispatched entry point: rebuild a LogRecord from the
        # individually transmitted attributes and hand it to local logging.
        # NOTE(review): eval() on transmitted fields is unsafe for untrusted
        # peers; acceptable here only because this is a test receiver.
        record = logging.LogRecord(None, None, "", 0, "", (), None)
        record.args = eval(args)
        record.exc_info = eval(exc_info)
        record.exc_text = eval(exc_text)
        record.created = created
        record.filename = filename
        record.module = module
        record.levelname = levelname
        record.lineno = lineno
        record.levelno = levelno
        record.msecs = msecs
        record.msg = msg
        record.name = name
        record.pathname = pathname
        record.process = process
        record.relativeCreated = relativeCreated
        record.thread = thread
        #if a name is specified, we use the named logger rather than the one
        #implied by the record. This is so test harnesses don't get into
        #endless loops (particularly log_test.py, which has this code and the
        #client code in the same Python instance)
        if logname is not None:
            lname = logname
        else:
            lname = name
        logger = logging.getLogger(lname)
        logger.handle(record)

    class MySOAPRequestHandler(dispatch.SOAPRequestHandler):
        def log_message(self, *args):
            # Per-request logging is suppressed to keep test output quiet.
            #comment out the following line if you don't want to show requests
            #apply(BaseHTTPRequestHandler.log_message, (self,) + args)
            pass

    class SOAPServer(HTTPServer):
        def __init__(self, port=logging.handlers.DEFAULT_SOAP_LOGGING_PORT):
            address = ('', port)
            HTTPServer.__init__(self, address, MySOAPRequestHandler)
            self.abort = 0      # set truthy (externally) to stop the serve loop
            self.timeout = 1    # select() timeout between abort checks
            self.logname = None # when set, overrides the logger name
            # Attributes below are read by the ZSI dispatch machinery.
            self.docstyle = 0
            self.nsdict = {}
            self.typesmodule = None
            self.rpc = 1
            self.modules = (sys.modules["__main__"],)

        def serve_until_stopped(self):
            """Handle requests until self.abort becomes true."""
            import select
            abort = 0
            while not abort:
                rd, wr, ex = select.select([self.socket.fileno()],
                                           [], [],
                                           self.timeout)
                if rd:
                    # Publish the name override for the module-level log().
                    global logname
                    logname = self.logname
                    self.handle_request()
                abort = self.abort
except ImportError:
    # No-op string statement kept from the original; the meaningful effect
    # is marking SOAP support unavailable.
    "Import failed"
    SOAPServer = None
def runTCP(tcpserver=None):
if not tcpserver:
tcpserver = LogRecordSocketReceiver()
print "About to start TCP server..."
tcpserver.serve_until_stopped()
def runUDP(udpserver=None):
if not udpserver:
udpserver = LogRecordDatagramReceiver()
print "About to start UDP server..."
udpserver.serve_until_stopped()
def runHTTP(httpserver=None):
if not httpserver:
httpserver = LogRecordHTTPReceiver()
print "About to start HTTP server..."
httpserver.serve_until_stopped()
def runSOAP(soapserver=None):
    """Run a SOAP log receiver, or report that ZSI support is unavailable."""
    # SOAPServer is None when the optional ZSI import failed above.
    if not SOAPServer:
        print "Sorry, ZSI is not available. Install PyXML-0.6.6 and ZSI first."
        print "See README.txt and python_logging.html for more information."
    else:
        if not soapserver:
            soapserver = SOAPServer()
        print "About to start SOAP server..."
        soapserver.serve_until_stopped()
# Format used by the (commented-out) file-handler configuration below.
FORMAT_STR = "%(asctime)s %(name)-19s %(levelname)-5s - %(message)s"

if __name__ == "__main__":
    # Usage: logrecv.py [UDP|TCP|HTTP|SOAP] (case-insensitive).
    if (len(sys.argv) < 2) or not (string.lower(sys.argv[1]) in \
            ["udp", "tcp", "http", "soap"]):
        print "usage: logrecv.py [UDP|TCP|HTTP|SOAP]"
    else:
        # Logging policy for received records comes from logrecv.ini.
        #logging.basicConfig()
        logging.config.fileConfig("logrecv.ini")
        # Historical alternative wiring, kept for reference:
        # both = string.lower(sys.argv[1]) == "both"
        # hdlr = logging.FileHandler("test.log")
        # hdlr.setFormatter(logging.Formatter(FORMAT_STR))
        # logging.getLogger("").addHandler(hdlr)
        # if both:
        #     import threading
        #     tcpthread = threading.Thread(target=runTCP)
        #     udpthread = threading.Thread(target=runUDP)
        #     tcpthread.start()
        #     udpthread.start()
        #     tcpthread.join()
        #     udpthread.join()
        # else:
        #     tcp = string.lower(sys.argv[1]) == "tcp"
        #     if tcp:
        #         runTCP()
        #     else:
        #         runUDP()
        arg = string.lower(sys.argv[1])
        if arg == "tcp":
            runTCP()
        elif arg == "udp":
            runUDP()
        elif arg == "http":
            runHTTP()
        elif arg == "soap":
            runSOAP()
| gpl-2.0 |
geekboxzone/lollipop_external_chromium_org_third_party_skia | platform_tools/android/gyp_gen/gypd_parser.py | 144 | 5764 | #!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Functions for parsing the gypd output from gyp.
"""
import os
def parse_dictionary(var_dict, d, current_target_name, dest_dir):
    """Helper function to get the meaningful entries in a dictionary.

    Parse dictionary d, and store unique relevant entries in var_dict.
    Recursively parses internal dictionaries and files that are referenced.

    When parsing the 'libraries' list from gyp, entries in the form
    '-l<name>' get assigned to var_dict.LOCAL_SHARED_LIBRARIES as 'lib<name>',
    and entries in the form '[lib]<name>.a' get assigned to
    var_dict.LOCAL_STATIC_LIBRARIES as 'lib<name>'.

    Args:
      var_dict: VarsDict object for storing the results of the parsing.
      d: Dictionary object to parse.
      current_target_name: The current target being parsed. If this dictionary
        is a target, this will be its entry 'target_name'. Otherwise, this will
        be the name of the target which contains this dictionary.
      dest_dir: Destination for the eventual Android.mk that will be created
        from this parse, relative to Skia trunk. Used to determine path for
        source files.
    """
    for source in d.get('sources', []):
        # Compare against a lowercase version, in case files are named .H
        # or .GYPI
        lowercase_source = source.lower()
        if lowercase_source.endswith('.h'):
            # Android.mk does not need the header files.
            continue
        if lowercase_source.endswith('gypi'):
            # The gypi files are included in sources, but the sources they
            # included are also included. No need to parse them again.
            continue
        # The path is relative to the gyp folder, but Android wants the path
        # relative to dest_dir.
        rel_source = os.path.relpath(source, os.pardir)
        rel_source = os.path.relpath(rel_source, dest_dir)
        var_dict.LOCAL_SRC_FILES.add(rel_source)

    for lib in d.get('libraries', []):
        if lib.endswith('.a'):
            # Remove the '.a'
            lib = lib[:-2]
            # Add 'lib', if necessary
            if not lib.startswith('lib'):
                lib = 'lib' + lib
            var_dict.LOCAL_STATIC_LIBRARIES.add(lib)
        else:
            # lib will be in the form of '-l<name>'. Change it to 'lib<name>'
            lib = lib.replace('-l', 'lib', 1)
            var_dict.LOCAL_SHARED_LIBRARIES.add(lib)

    for dependency in d.get('dependencies', []):
        # Each dependency is listed as
        #   <path_to_file>:<target>#target
        li = dependency.split(':')
        # Idiom: chained comparison instead of two length checks.
        assert 1 <= len(li) <= 2
        sub_targets = []
        if len(li) == 2 and li[1] != '*':
            sub_targets.append(li[1].split('#')[0])
        sub_path = li[0]
        assert sub_path.endswith('.gyp')
        # Although the original reference is to a .gyp, parse the
        # corresponding gypd file, which was constructed by gyp.
        sub_path = sub_path + 'd'
        parse_gypd(var_dict, sub_path, dest_dir, sub_targets)

    if 'default_configuration' in d:
        config_name = d['default_configuration']
        # default_configuration is meaningless without configurations
        assert 'configurations' in d
        config = d['configurations'][config_name]
        parse_dictionary(var_dict, config, current_target_name, dest_dir)

    for flag in d.get('cflags', []):
        var_dict.LOCAL_CFLAGS.add(flag)
    for flag in d.get('cflags_cc', []):
        var_dict.LOCAL_CPPFLAGS.add(flag)

    for include in d.get('include_dirs', []):
        if include.startswith('external'):
            # This path is relative to the Android root. Leave it alone.
            rel_include = include
        else:
            # As with source, the input path will be relative to gyp/, but
            # Android wants relative to dest_dir.
            rel_include = os.path.relpath(include, os.pardir)
            rel_include = os.path.relpath(rel_include, dest_dir)
            # No need to include the base directory.
            # Bug fix: compare by value. The original used 'is', which only
            # worked because CPython's relpath happens to return the curdir
            # object itself; identity comparison of strings is fragile.
            if rel_include == os.curdir:
                continue
            rel_include = os.path.join('$(LOCAL_PATH)', rel_include)

        # Remove a trailing slash, if present.
        if rel_include.endswith('/'):
            rel_include = rel_include[:-1]
        var_dict.LOCAL_C_INCLUDES.add(rel_include)
        # For the top level, libskia, include directories should be exported.
        # FIXME (scroggo): Do not hard code this.
        if current_target_name == 'libskia':
            var_dict.LOCAL_EXPORT_C_INCLUDE_DIRS.add(rel_include)

    for define in d.get('defines', []):
        var_dict.DEFINES.add(define)
def parse_gypd(var_dict, path, dest_dir, desired_targets=None):
    """Parse a gypd file.

    Open a file that consists of python dictionaries representing build
    targets. Parse those dictionaries using parse_dictionary. Recursively
    parses referenced files.

    Args:
      var_dict: VarsDict object for storing the result of the parse.
      path: Path to gypd file.
      dest_dir: Destination for the eventual Android.mk that will be created
        from this parse, relative to Skia trunk. Used to determine path for
        source files and include directories.
      desired_targets: List of targets to be parsed from this file. If empty,
        parse all targets.
    """
    d = {}
    with open(path, 'r') as f:
        # Read the entire file as a dictionary.
        # NOTE(review): eval() executes arbitrary code from the file; this is
        # only safe because gypd files are gyp-generated build output, never
        # untrusted input.
        d = eval(f.read())

    # The gypd file is structured such that the top level dictionary has an
    # entry named 'targets'
    for target in d['targets']:
        target_name = target['target_name']
        if target_name in var_dict.KNOWN_TARGETS:
            # Avoid circular dependencies
            continue
        if desired_targets and target_name not in desired_targets:
            # Our caller does not depend on this one
            continue
        # Add it to our known targets so we don't parse it again
        var_dict.KNOWN_TARGETS.add(target_name)

        parse_dictionary(var_dict, target, target_name, dest_dir)
| bsd-3-clause |
JimCircadian/ansible | lib/ansible/modules/network/aci/aci_static_binding_to_epg.py | 1 | 14163 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Bruno Calogero <brunocalogero@hotmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_static_binding_to_epg
short_description: Bind static paths to EPGs (fv:RsPathAtt)
description:
- Bind static paths to EPGs on Cisco ACI fabrics.
notes:
- The C(tenant), C(ap), C(epg) used must exist before using this module in your playbook.
The M(aci_tenant), M(aci_ap), M(aci_epg) modules can be used for this.
- More information about the internal APIC classes B(fv:RsPathAtt) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Bruno Calogero (@brunocalogero)
version_added: '2.5'
options:
tenant:
description:
- Name of an existing tenant.
aliases: [ tenant_name ]
ap:
description:
- Name of an existing application network profile, that will contain the EPGs.
aliases: [ app_profile, app_profile_name ]
epg:
description:
- The name of the end point group.
aliases: [ epg_name ]
description:
description:
- Description for the static path to EPG binding.
aliases: [ descr ]
version_added: '2.7'
encap_id:
description:
- The encapsulation ID associating the C(epg) with the interface path.
- This acts as the secondary C(encap_id) when using micro-segmentation.
- Accepted values are any valid encap ID for specified encap, currently ranges between C(1) and C(4096).
aliases: [ vlan, vlan_id ]
primary_encap_id:
description:
- Determines the primary encapsulation ID associating the C(epg)
with the interface path when using micro-segmentation.
- Accepted values are any valid encap ID for specified encap, currently ranges between C(1) and C(4096).
aliases: [ primary_vlan, primary_vlan_id ]
deploy_immediacy:
description:
- The Deployment Immediacy of Static EPG on PC, VPC or Interface.
- The APIC defaults to C(lazy) when unset during creation.
choices: [ immediate, lazy ]
interface_mode:
description:
- Determines how layer 2 tags will be read from and added to frames.
- Values C(802.1p) and C(native) are identical.
- Values C(access) and C(untagged) are identical.
- Values C(regular), C(tagged) and C(trunk) are identical.
- The APIC defaults to C(trunk) when unset during creation.
choices: [ 802.1p, access, native, regular, tagged, trunk, untagged ]
aliases: [ interface_mode_name, mode ]
interface_type:
description:
- The type of interface for the static EPG deployement.
choices: [ fex, port_channel, switch_port, vpc ]
default: switch_port
pod_id:
description:
- The pod number part of the tDn.
- C(pod_id) is usually an integer below 10.
aliases: [ pod, pod_number ]
leafs:
description:
- The switch ID(s) that the C(interface) belongs to.
- When C(interface_type) is C(switch_port), C(port_channel), or C(fex), then C(leafs) is a string of the leaf ID.
- When C(interface_type) is C(vpc), then C(leafs) is a list with both leaf IDs.
- The C(leafs) value is usually something like '101' or '101-102' depending on C(connection_type).
aliases: [ leaves, nodes, paths, switches ]
interface:
description:
- The C(interface) string value part of the tDn.
- Usually a policy group like "test-IntPolGrp" or an interface of the following format "1/7" depending on C(interface_type).
extpaths:
description:
- The C(extpaths) integer value part of the tDn.
- C(extpaths) is only used if C(interface_type) is C(fex).
- Usually something like C(1011).
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Deploy Static Path binding for given EPG
aci_static_binding_to_epg:
host: apic
username: admin
password: SomeSecretPassword
tenant: accessport-code-cert
ap: accessport_code_app
epg: accessport_epg1
encap_id: 222
deploy_immediacy: lazy
interface_mode: untagged
interface_type: switch_port
pod_id: 1
leafs: 101
interface: '1/7'
state: present
- name: Remove Static Path binding for given EPG
aci_static_binding_to_epg:
host: apic
username: admin
password: SomeSecretPassword
tenant: accessport-code-cert
ap: accessport_code_app
epg: accessport_epg1
interface_type: switch_port
pod: 1
leafs: 101
interface: '1/7'
state: absent
- name: Get specific Static Path binding for given EPG
aci_static_binding_to_epg:
host: apic
username: admin
password: SomeSecretPassword
tenant: accessport-code-cert
ap: accessport_code_app
epg: accessport_epg1
interface_type: switch_port
pod: 1
leafs: 101
interface: '1/7'
state: query
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
# TODO: change 'deploy_immediacy' to 'resolution_immediacy' (as seen in aci_epg_to_domain)?
def main():
    """Ansible module entry point: manage static path bindings to an EPG on Cisco ACI.

    Builds the argument spec, validates the VLAN/leaf/interface parameters,
    assembles the interface path DN, and then creates, deletes or queries the
    ``fvRsPathAtt`` managed object through the ACI REST API.
    """
    argument_spec = aci_argument_spec()
    argument_spec.update(
        tenant=dict(type='str', aliases=['tenant_name']),  # Not required for querying all objects
        ap=dict(type='str', aliases=['app_profile', 'app_profile_name']),  # Not required for querying all objects
        epg=dict(type='str', aliases=['epg_name']),  # Not required for querying all objects
        description=dict(type='str', aliases=['descr']),
        encap_id=dict(type='int', aliases=['vlan', 'vlan_id']),
        primary_encap_id=dict(type='int', aliases=['primary_vlan', 'primary_vlan_id']),
        deploy_immediacy=dict(type='str', choices=['immediate', 'lazy']),
        interface_mode=dict(type='str', choices=['802.1p', 'access', 'native', 'regular', 'tagged', 'trunk', 'untagged'],
                            aliases=['interface_mode_name', 'mode']),
        interface_type=dict(type='str', default='switch_port', choices=['fex', 'port_channel', 'switch_port', 'vpc']),
        pod_id=dict(type='int', aliases=['pod', 'pod_number']),  # Not required for querying all objects
        leafs=dict(type='list', aliases=['leaves', 'nodes', 'paths', 'switches']),
        interface=dict(type='str'),
        extpaths=dict(type='int'),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['interface_type', 'fex', ['extpaths']],
            ['state', 'absent', ['ap', 'epg', 'interface', 'leafs', 'pod_id', 'tenant']],
            ['state', 'present', ['ap', 'encap_id', 'epg', 'interface', 'leafs', 'pod_id', 'tenant']],
        ],
    )

    tenant = module.params['tenant']
    ap = module.params['ap']
    epg = module.params['epg']
    description = module.params['description']
    encap_id = module.params['encap_id']
    primary_encap_id = module.params['primary_encap_id']
    deploy_immediacy = module.params['deploy_immediacy']
    interface_mode = module.params['interface_mode']
    interface_type = module.params['interface_type']
    pod_id = module.params['pod_id']
    leafs = module.params['leafs']
    # BUGFIX: only normalize/validate 'leafs' when it was actually supplied.
    # The previous code ran the list comprehension before the None check, which
    # raised a TypeError for 'query' state where 'leafs' is optional.
    if leafs is not None:
        # Users are likely to use integers for leaf IDs, which would raise an
        # exception when using the join method, so normalize them to strings.
        leafs = [str(leaf) for leaf in leafs]
        if len(leafs) == 1:
            if interface_type != 'vpc':
                leafs = leafs[0]
            else:
                module.fail_json(msg='A interface_type of "vpc" requires 2 leafs')
        elif len(leafs) == 2:
            if interface_type == 'vpc':
                leafs = "-".join(leafs)
            else:
                module.fail_json(msg='The interface_types "switch_port", "port_channel", and "fex" \
                    do not support using multiple leafs for a single binding')
        else:
            module.fail_json(msg='The "leafs" parameter must not have more than 2 entries')
    interface = module.params['interface']
    extpaths = module.params['extpaths']
    state = module.params['state']

    # Translate the numeric VLAN IDs into the 'vlan-N' encap notation the APIC expects.
    if encap_id is not None:
        if encap_id in range(1, 4097):
            encap_id = 'vlan-{0}'.format(encap_id)
        else:
            module.fail_json(msg='Valid VLAN assignments are from 1 to 4096')

    if primary_encap_id is not None:
        if primary_encap_id in range(1, 4097):
            primary_encap_id = 'vlan-{0}'.format(primary_encap_id)
        else:
            module.fail_json(msg='Valid VLAN assignments are from 1 to 4096')

    INTERFACE_MODE_MAPPING = {
        '802.1p': 'native',
        'access': 'untagged',
        'native': 'native',
        'regular': 'regular',
        'tagged': 'regular',
        'trunk': 'regular',
        'untagged': 'untagged',
    }

    # Build the topology DN for the chosen interface type.
    INTERFACE_TYPE_MAPPING = dict(
        fex='topology/pod-{0}/paths-{1}/extpaths-{2}/pathep-[eth{3}]'.format(pod_id, leafs, extpaths, interface),
        port_channel='topology/pod-{0}/paths-{1}/pathep-[{2}]'.format(pod_id, leafs, interface),
        switch_port='topology/pod-{0}/paths-{1}/pathep-[eth{2}]'.format(pod_id, leafs, interface),
        vpc='topology/pod-{0}/protpaths-{1}/pathep-[{2}]'.format(pod_id, leafs, interface),
    )

    static_path = INTERFACE_TYPE_MAPPING[interface_type]

    if interface_mode is not None:
        interface_mode = INTERFACE_MODE_MAPPING[interface_mode]

    aci = ACIModule(module)
    aci.construct_url(
        root_class=dict(
            aci_class='fvTenant',
            aci_rn='tn-{0}'.format(tenant),
            filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
            module_object=tenant,
        ),
        subclass_1=dict(
            aci_class='fvAp',
            aci_rn='ap-{0}'.format(ap),
            filter_target='eq(fvAp.name, "{0}")'.format(ap),
            module_object=ap,
        ),
        subclass_2=dict(
            aci_class='fvAEPg',
            aci_rn='epg-{0}'.format(epg),
            filter_target='eq(fvAEPg.name, "{0}")'.format(epg),
            module_object=epg,
        ),
        subclass_3=dict(
            aci_class='fvRsPathAtt',
            aci_rn='rspathAtt-[{0}]'.format(static_path),
            # BUGFIX: the eq() filter expression was missing its closing parenthesis.
            filter_target='eq(fvRsPathAtt.tDn, "{0}")'.format(static_path),
            module_object=static_path,
        ),
    )

    aci.get_existing()

    if state == 'present':
        aci.payload(
            aci_class='fvRsPathAtt',
            class_config=dict(
                descr=description,
                encap=encap_id,
                primaryEncap=primary_encap_id,
                instrImedcy=deploy_immediacy,
                mode=interface_mode,
                tDn=static_path,
            ),
        )

        aci.get_diff(aci_class='fvRsPathAtt')

        aci.post_config()

    elif state == 'absent':
        aci.delete_config()

    aci.exit_json()
# Run the module entry point when Ansible executes this file directly.
if __name__ == "__main__":
    main()
| gpl-3.0 |
svn2github/vbox | src/VBox/ValidationKit/testboxscript/testboxtasks.py | 4 | 33761 | # -*- coding: utf-8 -*-
# $Id$
"""
TestBox Script - Async Tasks.
"""
__copyright__ = \
"""
Copyright (C) 2012-2014 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision$"
# Standard python imports.
from datetime import datetime
import os
import re
import signal;
import sys
import subprocess
import threading
import time
# Validation Kit imports.
from common import constants
from common import utils;
from common import webutils;
import testboxcommons
# Figure where we are.
try: __file__
except: __file__ = sys.argv[0];
g_ksTestScriptDir = os.path.dirname(os.path.abspath(__file__));
class TestBoxBaseTask(object):
    """
    Asynchronous task employing a thread to do the actual work.
    """

    ## Time to wait for a task to terminate.
    kcSecTerminateTimeout = 60

    def __init__(self, oTestBoxScript, cSecTimeout, fnThreadProc):
        # The owning TestBoxScript instance.
        self._oTestBoxScript = oTestBoxScript;
        # Overall task timeout, in seconds.
        self._cSecTimeout = cSecTimeout;
        # Timestamp (in seconds) of when the task was started.
        self._tsSecStarted = utils.timestampSecond();
        self.__oRLock = threading.RLock();
        # Condition variable signalling completion; shares the RLock above.
        self._oCv = threading.Condition(self.__oRLock);
        self._fRunning = True; # Protected by lock.
        self._fShouldTerminate = False; # Protected by lock.
        # Spawn the worker thread.
        self._oThread = threading.Thread(target=fnThreadProc);
        self._oThread.daemon = True;
        self._oThread.start();

    def _lock(self):
        """ Take the CV lock. """
        self._oCv.acquire();

    def _unlock(self):
        """ Release the CV lock. """
        self._oCv.release();

    def _complete(self):
        """
        Indicate that the task is complete, waking up the main thread.

        Usually called at the end of the thread procedure.
        """
        self._lock();
        self._fRunning = False;
        self._oCv.notifyAll();
        self._unlock();

    def isRunning(self):
        """ Check if the task is still running. """
        self._lock();
        fRunning = self._fRunning;
        self._unlock();
        return fRunning;

    def wait(self, cSecTimeout):
        """ Wait for the task to complete. """
        # NOTE(review): the returned value is the running state sampled
        # *before* waiting on the condition variable, so a True return does
        # not guarantee the task is still running afterwards -- confirm
        # whether that is the intended contract for terminate().
        self._lock();
        fRunning = self._fRunning;
        if fRunning is True and cSecTimeout > 0:
            self._oCv.wait(cSecTimeout)
        self._unlock();
        return fRunning;

    def terminate(self, cSecTimeout = kcSecTerminateTimeout):
        """ Terminate the task. """
        self._lock();
        self._fShouldTerminate = True;
        self._unlock();

        return self.wait(cSecTimeout);

    def _shouldTerminate(self):
        """
        Returns True if we should terminate, False if not.
        """
        self._lock();
        fShouldTerminate = self._fShouldTerminate == True;
        self._unlock();
        return fShouldTerminate;
class TestBoxTestDriverTask(TestBoxBaseTask):
    """
    Base class for tasks involving test drivers.

    Handles spawning and monitoring the test driver child process, shipping
    its output to the test manager as a log backlog, and cleaning up after
    failed runs.
    """

    ## When to flush the backlog of log messages.
    kcchMaxBackLog = 32768;

    ## The backlog sync time (seconds).
    kcSecBackLogFlush = 30;

    ## The timeout for the cleanup job (5 mins).
    kcSecCleanupTimeout = 300;
    ## The timeout to wait for the abort command before killing it.
    kcSecAbortTimeout = 300;

    ## The timeout to wait for the final output to be processed.
    kcSecFinalOutputTimeout = 180;
    ## The timeout to wait for the abort command output to be processed.
    kcSecAbortCmdOutputTimeout = 30;
    ## The timeout to wait for the terminate output to be processed.
    kcSecTerminateOutputTimeout = 30;
    ## The timeout to wait for the kill output to be processed.
    kcSecKillOutputTimeout = 30;

    ## The timeout for talking to the test manager.
    ksecTestManagerTimeout = 60;

    def __init__(self, oTestBoxScript, fnThreadProc, cSecTimeout, idResult, sScriptCmdLine):
        """
        Class instance init
        """
        # Init our instance data.
        self._idResult = idResult;
        self._sScriptCmdLine = sScriptCmdLine;
        self._oChild = None;
        # _oBackLogLock protects the backlog list/counters; _oBackLogFlushLock
        # serializes flushes so log order is preserved on the wire.
        self._oBackLogLock = threading.RLock();
        self._oBackLogFlushLock = threading.RLock();
        self._asBackLog = [];
        self._cchBackLog = 0;
        self._secTsBackLogFlush = utils.timestampSecond();

        # Init super.
        TestBoxBaseTask.__init__(self, oTestBoxScript, cSecTimeout, fnThreadProc);

    def terminate(self, cSecTimeout = kcSecCleanupTimeout):
        """ Reimplement with higher default timeout. """
        return TestBoxBaseTask.terminate(self, cSecTimeout);

    def _logFlush(self, oGivenConnection = None):
        """
        Flushes the log to the test manager.

        No exceptions.
        """
        fRc = True;

        self._oBackLogFlushLock.acquire();

        # Grab the current back log.
        self._oBackLogLock.acquire();
        asBackLog = self._asBackLog;
        self._asBackLog = [];
        self._cchBackLog = 0;
        self._secTsBackLogFlush = utils.timestampSecond();
        self._oBackLogLock.release();

        # If there is anything to flush, flush it.
        if len(asBackLog) > 0:
            sBody = '';
            for sLine in asBackLog:
                sBody += sLine + '\n';

            oConnection = None;
            try:
                if oGivenConnection is None:
                    oConnection = self._oTestBoxScript.openTestManagerConnection();
                    oConnection.postRequest(constants.tbreq.LOG_MAIN, {constants.tbreq.LOG_PARAM_BODY: sBody});
                    oConnection.close();
                else:
                    oGivenConnection.postRequest(constants.tbreq.LOG_MAIN, {constants.tbreq.LOG_PARAM_BODY: sBody});
            except Exception, oXcpt:
                testboxcommons.log('_logFlush error: %s' % (oXcpt,));
                # On failure, requeue the lines (unless the body is huge) so
                # they get another chance on the next flush.
                if len(sBody) < self.kcchMaxBackLog * 4:
                    self._oBackLogLock.acquire();
                    asBackLog.extend(self._asBackLog);
                    self._asBackLog = asBackLog;
                    # Don't restore _cchBackLog as there is no point in retrying immediately.
                    self._oBackLogLock.release();
                if oConnection is not None: # Be kind to apache.
                    try:    oConnection.close();
                    except: pass;
                fRc = False;

        self._oBackLogFlushLock.release();
        return fRc;

    def flushLogOnConnection(self, oConnection):
        """
        Attempts to flush the log on the given connection.

        No exceptions.
        """
        return self._logFlush(oConnection);

    def _logInternal(self, sMessage, fPrefix = True, fFlushCheck = False):
        """
        Internal logging.

        Won't flush the backlog, returns a flush indicator so the caller can
        do it instead.
        """
        if fPrefix:
            try:
                oNow = datetime.utcnow();
                sTs = '%02u:%02u:%02u.%06u ' % (oNow.hour, oNow.minute, oNow.second, oNow.microsecond);
            except Exception, oXcpt:
                sTs = 'oXcpt=%s ' % (oXcpt);
            sFullMsg = sTs + sMessage;
        else:
            sFullMsg = sMessage;

        self._oBackLogLock.acquire();
        self._asBackLog.append(sFullMsg);
        cchBackLog = self._cchBackLog + len(sFullMsg) + 1;
        self._cchBackLog = cchBackLog;
        secTsBackLogFlush = self._secTsBackLogFlush;
        self._oBackLogLock.release();

        testboxcommons.log(sFullMsg);
        # Suggest a flush when the backlog is large or stale.
        return fFlushCheck \
           and (   cchBackLog >= self.kcchMaxBackLog \
                or utils.timestampSecond() - secTsBackLogFlush >= self.kcSecBackLogFlush);

    def _log(self, sMessage):
        """
        General logging function, will flush.
        """
        if self._logInternal(sMessage, fFlushCheck = True):
            self._logFlush();
        return True;

    def _reportDone(self, sResult):
        """
        Report EXEC job done to the test manager.

        sResult is a value from constants.result.
        """
        ## @todo optimize this to use one server connection.

        #
        # Log it.
        #
        assert sResult in constants.result.g_kasValidResults;
        self._log('Done %s' % (sResult,));

        #
        # Report it.
        #
        fRc = True;
        secStart = utils.timestampSecond();
        while True:
            self._logFlush(); ## @todo Combine this with EXEC_COMPLETED.
            oConnection = None;
            try:
                oConnection = self._oTestBoxScript.openTestManagerConnection();
                oConnection.postRequest(constants.tbreq.EXEC_COMPLETED, {constants.tbreq.EXEC_COMPLETED_PARAM_RESULT: sResult});
                oConnection.close();
            except Exception, oXcpt:
                # Retry for up to ksecTestManagerTimeout seconds before giving up.
                if utils.timestampSecond() - secStart < self.ksecTestManagerTimeout:
                    self._log('_reportDone exception (%s) - retrying...' % (oXcpt,));
                    time.sleep(2);
                    continue;
                self._log('_reportDone error: %s' % (oXcpt,));
                if oConnection is not None: # Be kind to apache.
                    try:    oConnection.close();
                    except: pass;
                fRc = False;
            break;

        #
        # Mark the task as completed.
        #
        self._complete();
        return fRc;

    def _assembleArguments(self, sAction, fWithInterpreter = True):
        """
        Creates an argument array for subprocess.Popen, splitting the
        sScriptCmdLine like bourne shell would.
        fWithInterpreter is used (False) when checking that the script exists.

        Returns None on bad input.
        """
        #
        # This is a good place to export the test set id to the environment.
        #
        os.environ['TESTBOX_TEST_SET_ID'] = str(self._idResult);
        cTimeoutLeft = utils.timestampSecond() - self._tsSecStarted;
        cTimeoutLeft = 0 if cTimeoutLeft >= self._cSecTimeout else self._cSecTimeout - cTimeoutLeft;
        os.environ['TESTBOX_TIMEOUT'] = str(cTimeoutLeft);
        os.environ['TESTBOX_TIMEOUT_ABS'] = str(self._tsSecStarted + self._cSecTimeout);

        #
        # Do replacements and split the command line into arguments.
        #
        if self._sScriptCmdLine.find('@ACTION@') >= 0:
            sCmdLine = self._sScriptCmdLine.replace('@ACTION@', sAction);
        else:
            sCmdLine = self._sScriptCmdLine + ' ' + sAction;
        for sVar in [ 'TESTBOX_PATH_BUILDS', 'TESTBOX_PATH_RESOURCES', 'TESTBOX_PATH_SCRATCH', 'TESTBOX_PATH_SCRIPTS',
                      'TESTBOX_PATH_UPLOAD', 'TESTBOX_UUID', 'TESTBOX_REPORTER', 'TESTBOX_ID', 'TESTBOX_TEST_SET_ID',
                      'TESTBOX_TIMEOUT', 'TESTBOX_TIMEOUT_ABS' ]:
            if sCmdLine.find('${' + sVar + '}') >= 0:
                sCmdLine = sCmdLine.replace('${' + sVar + '}', os.environ[sVar]);

        asArgs = utils.argsSplit(sCmdLine);

        #
        # Massage argv[0]:
        #   - Convert portable slashes ('/') to the flavor preferred by the
        #     OS we're currently running on.
        #   - Run python script thru the current python interpreter (important
        #     on systems that doesn't sport native hash-bang script execution).
        #
        asArgs[0] = asArgs[0].replace('/', os.path.sep);
        if not os.path.isabs(asArgs[0]):
            asArgs[0] = os.path.join(self._oTestBoxScript.getPathScripts(), asArgs[0]);

        if asArgs[0].endswith('.py') and fWithInterpreter:
            if sys.executable is not None and len(sys.executable) > 0:
                asArgs.insert(0, sys.executable);
            else:
                asArgs.insert(0, 'python');

        return asArgs;

    def _outputThreadProc(self, oChild, oStdOut, sAction):
        """
        Thread procedure for the thread that reads the output of the child
        process.  We use a dedicated thread for this purpose since non-blocking
        I/O may be hard to keep portable according to hints around the web...
        """
        oThread = oChild.oOutputThread;
        while not oThread.fPleaseQuit:
            # Get a line.
            try:
                sLine = oStdOut.readline();
            except Exception, oXcpt:
                self._log('child (%s) pipe I/O error: %s' % (sAction, oXcpt,));
                break;

            # EOF?
            if len(sLine) == 0:
                break;

            # Strip trailing new line (DOS and UNIX).
            if sLine.endswith("\r\n"):
                sLine = sLine[0:-2];
            elif sLine.endswith("\n"):
                sLine = sLine[0:-1];

            # Log it.
            if self._logInternal(sLine, fPrefix = False, fFlushCheck = True):
                self._logFlush();

        # Close the stdout pipe in case we were told to get lost.
        try:
            oStdOut.close();
        except Exception, oXcpt:
            self._log('warning: Exception closing stdout pipe of "%s" child: %s' % (sAction, oXcpt,));

        # This is a bit hacky, but try reap the child so it won't hang as
        # defunkt during abort/timeout.
        if oChild.poll() is None:
            for _ in range(15):
                time.sleep(0.2);
                if oChild.poll() is not None:
                    break;
        oChild = None;
        return None;

    def _spawnChild(self, sAction):
        """
        Spawns the child process, returning success indicator + child object.
        """

        # Argument list.
        asArgs = self._assembleArguments(sAction)
        if asArgs is None:
            self._log('Malformed command line: "%s"' % (self._sScriptCmdLine,));
            return (False, None);

        # Spawn child.
        try:
            oChild = subprocess.Popen(asArgs,
                                      shell = False,
                                      bufsize = -1,
                                      stdout = subprocess.PIPE,
                                      stderr = subprocess.STDOUT,
                                      cwd = self._oTestBoxScript.getPathSpill(),
                                      universal_newlines = True,
                                      close_fds = (False if utils.getHostOs() == 'win' else True),
                                      preexec_fn = (None if utils.getHostOs() in ['win', 'os2']
                                                    else os.setsid)); # pylint: disable=E1101
        except Exception, oXcpt:
            self._log('Error creating child process %s: %s' % (asArgs, oXcpt));
            return (False, None);

        oChild.sTestBoxScriptAction = sAction;

        # Start output thread, extending the child object to keep track of it.
        oChild.oOutputThread = threading.Thread(target=self._outputThreadProc, args=(oChild, oChild.stdout, sAction))
        oChild.oOutputThread.daemon = True;
        oChild.oOutputThread.fPleaseQuit = False; # Our extension.
        oChild.oOutputThread.start();

        return (True, oChild);

    def _monitorChild(self, cSecTimeout, fTryKillCommand = True, oChild = None):
        """
        Monitors the child process.  If the child executes longer that
        cSecTimeout allows, we'll terminate it.

        Returns Success indicator and constants.result value.
        """

        if oChild is None:
            oChild = self._oChild;

        # Process group id used for killpg(); -2 disables group kills on
        # hosts without process groups (win/os2).
        iProcGroup = oChild.pid;
        if utils.getHostOs() in ['win', 'os2'] or iProcGroup <= 0:
            iProcGroup = -2;

        #
        # Do timeout processing and check the health of the child.
        #
        sResult = constants.result.PASSED;
        seStarted = utils.timestampSecond();
        while True:
            # Check status.
            iRc = oChild.poll();
            if iRc is not None:
                self._log('Child doing "%s" completed with exit code %d' % (oChild.sTestBoxScriptAction, iRc));
                oChild.oOutputThread.join(self.kcSecFinalOutputTimeout);

                if oChild is self._oChild:
                    self._oChild = None;

                if iRc == constants.rtexitcode.SKIPPED:
                    return (True, constants.result.SKIPPED);
                if iRc != constants.rtexitcode.SUCCESS:
                    return (False, constants.result.FAILED);
                return (True, constants.result.PASSED);

            # Check for abort first, since that has less of a stigma.
            if self._shouldTerminate() is True:
                sResult = constants.result.ABORTED;
                break;

            # Check timeout.
            cSecElapsed = utils.timestampSecond() - seStarted;
            if cSecElapsed > cSecTimeout:
                self._log('Timeout: %u secs (limit %u secs)' % (cSecElapsed, cSecTimeout));
                sResult = constants.result.TIMED_OUT;
                break;

            # Wait.
            cSecLeft = cSecTimeout - cSecElapsed;
            oChild.oOutputThread.join(15 if cSecLeft > 15 else (cSecLeft + 1));

        #
        # If the child is still alive, try use the abort command to stop it
        # very gently.  This let's the testdriver clean up daemon processes
        # and such that our code below won't catch.
        #
        if fTryKillCommand and oChild.poll() is None:
            self._log('Attempting to abort child...');
            (fRc2, oAbortChild) = self._spawnChild('abort');
            if oAbortChild is not None and fRc2 is True:
                self._monitorChild(self.kcSecAbortTimeout, False, oAbortChild);
                oAbortChild = None;

        #
        # If the child is still alive, try the polite way.
        #
        if oChild.poll() is None:
            self._log('Attempting to terminate child doing "%s"...' % (oChild.sTestBoxScriptAction,));
            if iProcGroup > 0:
                try:
                    os.killpg(iProcGroup, signal.SIGTERM); # pylint: disable=E1101
                except Exception, oXcpt:
                    self._log('killpg() failed: %s' % (oXcpt,));
            try:
                self._oChild.terminate();
                oChild.oOutputThread.join(self.kcSecTerminateOutputTimeout);
            except Exception, oXcpt:
                self._log('terminate() failed: %s' % (oXcpt,));

        #
        # If the child doesn't respond to polite, kill it.  Always do a killpg
        # should there be any processes left in the group.
        #
        if iProcGroup > 0:
            try:
                os.killpg(iProcGroup, signal.SIGKILL); # pylint: disable=E1101
            except Exception, oXcpt:
                self._log('killpg() failed: %s' % (oXcpt,));
        if oChild.poll() is None:
            self._log('Attemting to kill child doing "%s"...' % (oChild.sTestBoxScriptAction,));
            try:
                self._oChild.kill();
                oChild.oOutputThread.join(self.kcSecKillOutputTimeout);
            except Exception, oXcpt:
                self._log('kill() failed: %s' % (oXcpt,));

        #
        # Give the whole mess a couple of more seconds to respond in case the
        # output thread exitted prematurely for some weird reason.
        #
        if oChild.poll() is None:
            time.sleep(2);
            time.sleep(2);
            time.sleep(2);

        iRc = oChild.poll();
        if iRc is not None:
            self._log('Child doing "%s" aborted with exit code %d' % (oChild.sTestBoxScriptAction, iRc));
        else:
            self._log('Child doing "%s" is still running, giving up...' % (oChild.sTestBoxScriptAction,));
            ## @todo in this case we should probably try reboot the testbox...
            oChild.oOutputThread.fPleaseQuit = True;

        if oChild is self._oChild:
            self._oChild = None;
        return (False, sResult);

    def _terminateChild(self):
        """
        Terminates the child forcefully.
        """
        # NOTE(review): currently a no-op placeholder; _monitorChild does the
        # actual terminate/kill work.
        if self._oChild is not None:
            pass;

    def _cleanupAfter(self):
        """
        Cleans up after a test failure. (On success, cleanup is implicit.)
        """
        assert self._oChild is None;

        #
        # Tell the script to clean up.
        #
        if len(self._sScriptCmdLine) > 0: # can be empty if cleanup crashed.
            (fRc, self._oChild) = self._spawnChild('cleanup-after');
            if fRc is True:
                (fRc, _) = self._monitorChild(self.kcSecCleanupTimeout, False);
                self._terminateChild();
        else:
            fRc = False;

        #
        # Wipe the stuff clean.
        #
        fRc2 = self._oTestBoxScript.reinitScratch(fnLog = self._log);

        return fRc and fRc2;
class TestBoxCleanupTask(TestBoxTestDriverTask):
    """
    Special asynchronous task for cleaning up a stale test when starting the
    testbox script.  It's assumed that the reason for the stale test lies in
    it causing a panic, reboot, or similar, so we'll also try collect some
    info about recent system crashes and reboots.
    """

    def __init__(self, oTestBoxScript):
        # Read the old state, throwing a fit if it's invalid.
        sScriptState = oTestBoxScript.getPathState();
        sScriptCmdLine = self._readStateFile(os.path.join(sScriptState, 'script-cmdline.txt'));
        sResultId = self._readStateFile(os.path.join(sScriptState, 'result-id.txt'));
        try:
            idResult = int(sResultId);
            if idResult <= 0 or idResult >= 0x7fffffff:
                raise Exception('');
        except:
            raise Exception('Invalid id value "%s" found in %s' % (sResultId, os.path.join(sScriptState, 'result-id.txt')));

        sTestBoxId = self._readStateFile(os.path.join(sScriptState, 'testbox-id.txt'));
        try:
            # Test box ID from the previous (interrupted) run.
            self.idTestBox = int(sTestBoxId);
            if self.idTestBox <= 0 or self.idTestBox >= 0x7fffffff:
                raise Exception('');
        except:
            raise Exception('Invalid id value "%s" found in %s' % (sTestBoxId, os.path.join(sScriptState, 'testbox-id.txt')));
        # Test box name from the previous (interrupted) run.
        self.sTestBoxName = self._readStateFile(os.path.join(sScriptState, 'testbox-name.txt'));

        # Init super.
        TestBoxTestDriverTask.__init__(self, oTestBoxScript, self._threadProc, self.kcSecCleanupTimeout,
                                       idResult, sScriptCmdLine);

    @staticmethod
    def _readStateFile(sPath):
        """
        Reads a state file, returning a string on success and otherwise raising
        an exception.
        """
        try:
            oFile = open(sPath, "rb");
            sStr = oFile.read();
            oFile.close();
            return sStr.strip();
        except Exception, oXcpt:
            raise Exception('Failed to read "%s": %s' % (sPath, oXcpt));

    def _threadProc(self):
        """
        Perform the actual clean up on script startup.
        """
        #
        # First make sure we won't repeat this exercise should it turn out to
        # trigger another reboot/panic/whatever.
        #
        sScriptCmdLine = os.path.join(self._oTestBoxScript.getPathState(), 'script-cmdline.txt');
        try:
            os.remove(sScriptCmdLine);
            oFile = open(sScriptCmdLine, 'wb');
            oFile.close();
        except Exception, oXcpt:
            self._log('Error truncating "%s": %s' % (sScriptCmdLine, oXcpt));

        #
        # Report the incident.
        #
        self._log('Seems we rebooted!');
        self._log('script-cmdline="%s"' % (self._sScriptCmdLine));
        self._log('result-id=%d' % (self._idResult));
        self._log('testbox-id=%d' % (self.idTestBox));
        self._log('testbox-name=%s' % (self.sTestBoxName));
        self._logFlush();

        # System specific info.
        sOs = utils.getHostOs();
        if sOs == 'darwin':
            self._log('NVRAM Panic Info:\n%s\n' % (self.darwinGetPanicInfo(),));
        self._logFlush();

        ## @todo Add some special command for reporting this situation so we get something
        #        useful in the event log.

        #
        # Do the cleaning up.
        #
        self._cleanupAfter();

        self._reportDone(constants.result.REBOOTED);
        return False;

    def darwinGetPanicInfo(self):
        """
        Returns a string with the aapl,panic-info content.
        """
        # Retriev the info.
        try:
            sRawInfo = utils.processOutputChecked(['nvram', 'aapl,panic-info']);
        except Exception, oXcpt:
            return 'exception running nvram: %s' % (oXcpt,);

        # Decode (%xx) and decompact it (7-bit -> 8-bit).
        ahDigits = \
        {
            '0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7,
            '8': 8, '9': 9, 'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15,
        };
        sInfo = '';
        off = len('aapl,panic-info') + 1;
        iBit = 0;
        bLow = 0;

        while off < len(sRawInfo):
            # isprint is used to determine whether to %xx or %c it, so we have to
            # be a little careful before assuming % sequences are hex bytes.
            if sRawInfo[off] == '%' \
              and off + 3 <= len(sRawInfo) \
              and sRawInfo[off + 1] in ahDigits \
              and sRawInfo[off + 2] in ahDigits:
                bCur = ahDigits[sRawInfo[off + 1]] * 0x10 + ahDigits[sRawInfo[off + 2]];
                off += 3;
            else:
                bCur = ord(sRawInfo[off]);
                off += 1;

            # Reassemble 8-bit bytes from the 7-bit packed stream.
            sInfo += chr(((bCur & (0x7f >> iBit)) << iBit) | bLow);
            bLow = bCur >> (7 - iBit);
            if iBit < 6:
                iBit += 1;
            else:
                # Final bit in sequence.
                sInfo += chr(bLow);
                bLow = 0;
                iBit = 0;

        # Expand shorthand.
        sInfo = sInfo.replace('@', 'com.apple.');
        sInfo = sInfo.replace('>', 'com.apple.driver.');
        sInfo = sInfo.replace('|', 'com.apple.iokit.');
        sInfo = sInfo.replace('$', 'com.apple.security.');
        sInfo = sInfo.replace('!A', 'Apple');
        sInfo = sInfo.replace('!a', 'Action');
        sInfo = sInfo.replace('!B', 'Bluetooth');
        sInfo = sInfo.replace('!C', 'Controller');
        sInfo = sInfo.replace('!F', 'Family');
        sInfo = sInfo.replace('!I', 'Intel');
        sInfo = sInfo.replace('!U', 'AppleUSB');
        sInfo = sInfo.replace('!P', 'Profile');

        # Done.
        return sInfo
class TestBoxExecTask(TestBoxTestDriverTask):
    """
    Implementation of a asynchronous EXEC task.

    This uses a thread for doing the actual work, i.e. starting and monitoring
    the child process, processing its output, and more.
    """

    def __init__(self, oTestBoxScript, idResult, sScriptZips, sScriptCmdLine, cSecTimeout):
        """
        Class instance init
        """
        # Init our instance data.
        # Comma separated list of script ZIP URLs/paths to fetch and unpack.
        self._sScriptZips = sScriptZips;

        # Init super.
        TestBoxTestDriverTask.__init__(self, oTestBoxScript, self._threadProc, cSecTimeout, idResult, sScriptCmdLine);

    @staticmethod
    def _writeStateFile(sPath, sContent):
        """
        Writes a state file, raising an exception on failure.
        """
        try:
            oFile = open(sPath, "wb");
            oFile.write(sContent);
            oFile.flush();
            # Best effort fsync so the state survives a sudden reboot/panic.
            try: os.fsync(oFile.fileno());
            except: pass;
            oFile.close();
        except Exception, oXcpt:
            raise Exception('Failed to write "%s": %s' % (sPath, oXcpt));
        return True;

    def _saveState(self):
        """
        Saves the task state on disk so we can launch a TestBoxCleanupTask job
        if the test should cause system panic or similar.

        Note! May later be extended to support tests that reboots the host.
        """
        sScriptState = self._oTestBoxScript.getPathState();
        try:
            self._writeStateFile(os.path.join(sScriptState, 'script-cmdline.txt'), self._sScriptCmdLine);
            self._writeStateFile(os.path.join(sScriptState, 'result-id.txt'), str(self._idResult));
            self._writeStateFile(os.path.join(sScriptState, 'testbox-id.txt'), str(self._oTestBoxScript.getTestBoxId()));
            self._writeStateFile(os.path.join(sScriptState, 'testbox-name.txt'), self._oTestBoxScript.getTestBoxName());
        except Exception, oXcpt:
            self._log('Failed to write state: %s' % (oXcpt,));
            return False;
        return True;

    def _downloadAndUnpackScriptZips(self):
        """
        Downloads/copies the script ZIPs into TESTBOX_SCRIPT and unzips them to
        the same directory.

        Raises no exceptions, returns log + success indicator instead.
        """
        sPathScript = self._oTestBoxScript.getPathScripts();
        asArchives = self._sScriptZips.split(',');
        for sArchive in asArchives:
            sArchive = sArchive.strip();
            if len(sArchive) == 0:
                continue;

            # Figure the destination name (in scripts).
            sDstFile = webutils.getFilename(sArchive);
            if len(sDstFile) < 1 \
              or re.search('[^a-zA-Z0-9 !#$%&\'()@^_`{}~.-]', sDstFile) is not None: # FAT charset sans 128-255 + '.'.
                self._log('Malformed script zip filename: %s' % (sArchive,));
                return False;
            sDstFile = os.path.join(sPathScript, sDstFile);

            # Do the work.
            if webutils.downloadFile(sArchive, sDstFile, self._oTestBoxScript.getPathBuilds(), self._log, self._log) is not True:
                return False;
            asFiles = utils.unpackFile(sDstFile, sPathScript, self._log, self._log);
            if asFiles is None:
                return False;

            # Since zip files doesn't always include mode masks, set the X bit
            # of all of them so we can execute binaries and hash-bang scripts.
            for sFile in asFiles:
                utils.chmodPlusX(sFile);

        return True;

    def _threadProc(self):
        """
        Do the work of an EXEC command.
        """
        sResult = constants.result.PASSED;

        #
        # Start by preparing the scratch directories.
        #
        # Note! Failures at this stage are not treated as real errors since
        #       they may be caused by the previous test and other circumstances
        #       so we don't want to go fail a build because of this.
        #
        fRc = self._oTestBoxScript.reinitScratch(self._logInternal);
        fNeedCleanUp = fRc;
        if fRc is True:
            fRc = self._downloadAndUnpackScriptZips();
            testboxcommons.log2('_threadProc: _downloadAndUnpackScriptZips -> %s' % (fRc,));
        if fRc is not True:
            sResult = constants.result.BAD_TESTBOX;

        #
        # Make sure the script exists.
        #
        if fRc is True:
            sScript = self._assembleArguments('none', fWithInterpreter = False)[0];
            if not os.path.exists(sScript):
                self._log('The test driver script "%s" cannot be found.' % (sScript,));
                # Log the first existing parent directory to aid debugging.
                sDir = sScript;
                while len(sDir) > 3:
                    sDir = os.path.dirname(sDir);
                    if os.path.exists(sDir):
                        self._log('First existing parent directory is "%s".' % (sDir,));
                        break;
                fRc = False;

        if fRc is True:
            #
            # Start testdriver script.
            #
            fRc = self._saveState();
            if fRc:
                (fRc, self._oChild) = self._spawnChild('all');
                testboxcommons.log2('_threadProc: _spawnChild -> %s, %s' % (fRc, self._oChild));
            if fRc:
                (fRc, sResult) = self._monitorChild(self._cSecTimeout);
                testboxcommons.log2('_threadProc: _monitorChild -> %s' % (fRc,));

            # If the run failed, do explicit cleanup.
            if fRc is not True:
                testboxcommons.log2('_threadProc: explicit cleanups...');
                self._terminateChild();
                self._cleanupAfter();
                fNeedCleanUp = False;
        assert self._oChild is None;

        #
        # Clean up scratch.
        #
        if fNeedCleanUp:
            if self._oTestBoxScript.reinitScratch(self._logInternal) is not True:
                self._log('post run reinitScratch failed.');
                fRc = False;

        #
        # Report status and everything back to the test manager.
        #
        if fRc is False and sResult == constants.result.PASSED:
            sResult = constants.result.FAILED;
        self._reportDone(sResult);
        return fRc;
| gpl-2.0 |
837468220/python-for-android | python-modules/twisted/twisted/lore/indexer.py | 57 | 1348 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
def setIndexFilename(filename='index.xhtml'):
    """Set the module-wide output filename used by generateIndex()."""
    global indexFilename
    indexFilename = filename
def getIndexFilename():
    """Return the currently configured index output filename."""
    global indexFilename
    return indexFilename
def addEntry(filename, anchor, text, reference):
    """Record an index entry.

    Entries that share the same index *text* are grouped under one key so
    generateIndex() can emit all their references on a single line.

    @param filename: the output file the entry appears in.
    @param anchor: the anchor within that file.
    @param text: the index term (may contain '!' sub-term separators).
    @param reference: the link text shown in the generated index.
    """
    global entries
    # dict.has_key() was removed in Python 3; setdefault() gives the same
    # "create the list on first use" behaviour on both Python 2 and 3.
    entries.setdefault(text, []).append((filename, anchor, reference))
def clearEntries():
    """Discard all index entries collected so far."""
    global entries
    entries = {}
def generateIndex():
    """Write the collected index entries to ``indexFilename`` as XHTML.

    Does nothing when no index filename is configured.  Entries are sorted
    case-insensitively (ties broken by the original spelling), '!' sub-term
    separators are rendered as ', ', and two hard-coded cross references for
    the recursion entries are appended.
    """
    global entries
    global indexFilename
    if not indexFilename:
        return
    f = open(indexFilename, 'w')
    try:
        # sorted() with a key replaces the old decorate-sort-undecorate
        # dance and yields exactly the same ordering.
        for text in sorted(entries, key=lambda e: (e.lower(), e)):
            refs = []
            f.write(text.replace('!', ', ') + ': ')
            # 'fname' rather than 'file' to avoid shadowing the builtin.
            for (fname, anchor, reference) in entries[text]:
                refs.append('<a href="%s#%s">%s</a>' % (fname, anchor, reference))
            if text == 'infinite recursion':
                refs.append('<em>See Also:</em> recursion, infinite\n')
            if text == 'recursion!infinite':
                refs.append('<em>See Also:</em> infinite recursion\n')
            f.write('%s<br />\n' % ", ".join(refs))
    finally:
        # Close the file even if a write fails (the old code leaked it).
        f.close()
def reset():
    """Restore the module to its initial state: no entries, default filename."""
    clearEntries()
    setIndexFilename()

# Initialise the module-level state on import.
reset()
| apache-2.0 |
mchristopher/PokemonGo-DesktopMap | app/pylibs/osx64/gevent/util.py | 12 | 1805 | # Copyright (c) 2009 Denis Bilenko. See LICENSE for details.
"""
Low-level utilities.
"""
from __future__ import absolute_import
import functools
__all__ = ['wrap_errors']
class wrap_errors(object):
    """
    Helper to make function return an exception, rather than raise it.

    Because every exception that is unhandled by greenlet will be logged,
    it is desirable to prevent non-error exceptions from leaving a greenlet.
    This can done with a simple ``try/except`` construct::

        def wrapped_func(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except (TypeError, ValueError, AttributeError) as ex:
                return ex

    This class provides a shortcut to write that in one line::

        wrapped_func = wrap_errors((TypeError, ValueError, AttributeError), func)

    It also preserves ``__str__`` and ``__repr__`` of the original function.
    """
    # QQQ could also support using wrap_errors as a decorator

    def __init__(self, errors, func):
        """
        Calling this makes a new function from *func*, such that it catches *errors* (an
        :exc:`BaseException` subclass, or a tuple of :exc:`BaseException` subclasses) and
        return it as a value.
        """
        self.__errors = errors
        self.__func = func
        # Copy __doc__, __wrapped__, __name__ etc. from the wrapped callable;
        # particularly useful on Python 3.
        functools.update_wrapper(self, func)

    def __call__(self, *args, **kwargs):
        try:
            return self.__func(*args, **kwargs)
        except self.__errors as ex:
            # Convert the caught exception into a return value.
            return ex

    def __str__(self):
        return str(self.__func)

    def __repr__(self):
        return repr(self.__func)

    def __getattr__(self, name):
        # Delegate any unknown attribute access to the wrapped callable.
        return getattr(self.__func, name)
odahoda/noisicaa | noisicaa/ui/track_list/view.py | 1 | 7375 | #!/usr/bin/python3
# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <pink@odahoda.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license
import fractions
import logging
import time as time_lib
import typing
from typing import Any, Optional, Dict
from PyQt5.QtCore import Qt
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from noisicaa import music
from noisicaa.ui import ui_base
from noisicaa.ui import slots
from noisicaa.ui import player_state as player_state_lib
from . import editor
from . import time_line
from . import toolbox
if typing.TYPE_CHECKING:
from noisicaa.ui import project_view as project_view_lib
logger = logging.getLogger(__name__)
class Frame(QtWidgets.QFrame):
    """A sunken-panel frame that hosts exactly one stretched child widget."""

    def __init__(self, parent: Optional[QtWidgets.QWidget]) -> None:
        super().__init__(parent)

        self.setFrameStyle(QtWidgets.QFrame.Sunken | QtWidgets.QFrame.Panel)

        # 1px margins so the frame border stays visible around the child.
        self.__layout = QtWidgets.QVBoxLayout()
        self.__layout.setSizeConstraint(QtWidgets.QLayout.SetMinAndMaxSize)
        self.__layout.setContentsMargins(1, 1, 1, 1)
        self.setLayout(self.__layout)

    def setWidget(self, widget: QtWidgets.QWidget) -> None:
        """Add *widget* to the frame; it takes all available space."""
        self.__layout.addWidget(widget, 1)
class TrackListView(ui_base.ProjectMixin, slots.SlotContainer, QtWidgets.QSplitter):
    """Combined track view: toolbox, time line, editor canvas and scroll bars.

    Wires the editor, time line and scroll bars together via Qt signals and
    persists view state (zoom, scroll offsets) in per-project session data.
    """

    playingChanged = QtCore.pyqtSignal(bool)
    loopEnabledChanged = QtCore.pyqtSignal(bool)

    # Currently selected track; may be None when nothing is selected.
    currentTrack, setCurrentTrack, currentTrackChanged = slots.slot(
        music.Track, 'currentTrack', allow_none=True)

    def __init__(
            self, *,
            project_view: 'project_view_lib.ProjectView',
            player_state: player_state_lib.PlayerState,
            **kwargs: Any
    ) -> None:
        super().__init__(**kwargs)

        self.__project_view = project_view
        self.__player_state = player_state

        # Session keys are namespaced per project.
        self.__session_prefix = 'tracklist:%s:' % self.project.id
        # Per-key timestamp of the last session write (used for throttling).
        self.__session_data_last_update = {}  # type: Dict[str, float]

        editor_frame = Frame(self)
        self.__editor = editor.Editor(
            player_state=self.__player_state,
            parent=editor_frame, context=self.context)
        editor_frame.setWidget(self.__editor)

        # Keep track selection in sync in both directions.
        self.__editor.currentTrackChanged.connect(self.setCurrentTrack)
        self.currentTrackChanged.connect(self.__editor.setCurrentTrack)

        # Restore zoom/scroll state from the session.
        self.__editor.setScaleX(self.__get_session_value('scale_x', self.__editor.scaleX()))
        self.__editor.setXOffset(self.__get_session_value('x_offset', 0))
        self.__editor.setYOffset(self.__get_session_value('y_offset', 0))
        self.__editor.scaleXChanged.connect(self.__updateScaleX)

        time_line_frame = Frame(self)
        self.__time_line = time_line.TimeLine(
            parent=time_line_frame,
            player_state=self.__player_state,
            context=self.context)
        time_line_frame.setWidget(self.__time_line)

        # Keep the time line's horizontal view in sync with the editor.
        self.__time_line.setScaleX(self.__editor.scaleX())
        self.__time_line.setXOffset(self.__editor.xOffset())
        self.__editor.scaleXChanged.connect(self.__time_line.setScaleX)

        # The editor's sidebar shifts the time line's origin.
        self.__time_line.setAdditionalXOffset(self.__editor.sidebarWidth())
        self.__editor.sidebarWidthChanged.connect(self.__time_line.setAdditionalXOffset)

        scroll_x = QtWidgets.QScrollBar(orientation=Qt.Horizontal, parent=self)
        scroll_x.setRange(0, self.__editor.maximumXOffset())
        scroll_x.setSingleStep(50)
        scroll_x.setPageStep(self.__editor.pageWidth())
        scroll_x.setValue(self.__editor.xOffset())
        scroll_y = QtWidgets.QScrollBar(orientation=Qt.Vertical, parent=self)
        scroll_y.setRange(0, self.__editor.maximumYOffset())
        scroll_y.setSingleStep(20)
        scroll_y.setPageStep(self.__editor.pageHeight())
        scroll_y.setValue(self.__editor.yOffset())

        # Bidirectional coupling between editor viewport and scroll bars.
        self.__editor.maximumXOffsetChanged.connect(scroll_x.setMaximum)
        self.__editor.pageWidthChanged.connect(scroll_x.setPageStep)
        self.__editor.xOffsetChanged.connect(scroll_x.setValue)
        self.__time_line.xOffsetChanged.connect(scroll_x.setValue)
        scroll_x.valueChanged.connect(self.__editor.setXOffset)
        scroll_x.valueChanged.connect(self.__time_line.setXOffset)
        scroll_x.valueChanged.connect(self.__updateXOffset)

        self.__editor.maximumYOffsetChanged.connect(scroll_y.setMaximum)
        self.__editor.pageHeightChanged.connect(scroll_y.setPageStep)
        self.__editor.yOffsetChanged.connect(scroll_y.setValue)
        scroll_y.valueChanged.connect(self.__editor.setYOffset)
        scroll_y.valueChanged.connect(self.__updateYOffset)

        self.setMinimumHeight(time_line_frame.minimumHeight())

        # Grid: time line on top, editor below, scroll bars right/bottom.
        editor_pane = QtWidgets.QWidget(self)
        layout = QtWidgets.QGridLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSpacing(1)
        layout.addWidget(time_line_frame, 0, 0, 1, 1)
        layout.addWidget(editor_frame, 1, 0, 1, 1)
        layout.addWidget(scroll_x, 2, 0, 1, 1)
        layout.addWidget(scroll_y, 1, 1, 1, 1)
        editor_pane.setLayout(layout)

        self.__toolbox = toolbox.Toolbox(parent=self, context=self.context)
        self.__toolbox.setCurrentToolBox(self.__editor.currentToolBox())
        self.__editor.currentToolBoxChanged.connect(self.__toolbox.setCurrentToolBox)

        # Splitter children: fixed-width toolbox, stretching editor pane.
        self.addWidget(self.__toolbox)
        self.setStretchFactor(0, 0)
        self.addWidget(editor_pane)
        self.setStretchFactor(1, 1)
        self.setCollapsible(1, False)

    def __get_session_value(self, key: str, default: Any) -> Any:
        # Read a project-scoped session value.
        return self.get_session_value(self.__session_prefix + key, default)

    def __set_session_value(self, key: str, value: Any) -> None:
        # Write a project-scoped session value.
        self.set_session_value(self.__session_prefix + key, value)

    def __lazy_set_session_value(self, key: str, value: Any) -> None:
        # Throttled write: persist at most once every 5 seconds per key.
        # TODO: value should be stored to session 5sec after most recent change. I.e. need
        # some timer...
        last_time = self.__session_data_last_update.get(key, 0)
        if time_lib.time() - last_time > 5:
            self.__set_session_value(key, value)
            self.__session_data_last_update[key] = time_lib.time()

    def __updateScaleX(self, scale: fractions.Fraction) -> None:
        # Zoom changes are persisted immediately (they are infrequent).
        self.__set_session_value('scale_x', scale)

    def __updateXOffset(self, offset: int) -> None:
        # Scroll offsets change rapidly; use the throttled writer.
        self.__lazy_set_session_value('x_offset', offset)

    def __updateYOffset(self, offset: int) -> None:
        self.__lazy_set_session_value('y_offset', offset)

    def setPlayerID(self, player_id: str) -> None:
        """Forward the audio player ID to the time line (playback control)."""
        self.__time_line.setPlayerID(player_id)

    def cleanup(self) -> None:
        """Release resources held by the embedded editor."""
        self.__editor.cleanup()
| gpl-2.0 |
thehyve/variant | eggs/werkzeug-0.7.1-py2.7.egg/werkzeug/urls.py | 4 | 15702 | # -*- coding: utf-8 -*-
"""
werkzeug.urls
~~~~~~~~~~~~~
This module implements various URL related functions.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import urlparse
from werkzeug._internal import _decode_unicode
from werkzeug.datastructures import MultiDict, iter_multi_items
#: list of characters that are always safe in URLs.
_always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                'abcdefghijklmnopqrstuvwxyz'
                '0123456789_.-')

# Byte -> quoted-form table: safe chars map to themselves, everything else
# to its '%XX' escape.  Covers all 256 byte values.
_safe_map = dict((c, c) for c in _always_safe)
for i in xrange(0x80):
    c = chr(i)
    if c not in _safe_map:
        _safe_map[c] = '%%%02X' % i
_safe_map.update((chr(i), '%%%02X' % i) for i in xrange(0x80, 0x100))

# Cache of quoting tables keyed by the `safe` string (filled by _quote).
_safemaps = {}

#: lookup table for encoded characters.
# Two-hex-digit string -> decoded byte, for all case combinations.
_hexdig = '0123456789ABCDEFabcdef'
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in _hexdig for b in _hexdig)
def _quote(s, safe='/', _join=''.join):
    """Percent-encode the bytestring `s`, leaving `safe` characters as-is.

    `_join` is bound at definition time as a micro-optimization.
    """
    assert isinstance(s, str), 'quote only works on bytes'
    # Fast path: empty string, or every character is already safe.
    if not s or not s.rstrip(_always_safe + safe):
        return s
    try:
        quoter = _safemaps[safe]
    except KeyError:
        # Build and cache a quoting table for this particular `safe` set.
        safe_map = _safe_map.copy()
        safe_map.update([(c, c) for c in safe])
        _safemaps[safe] = quoter = safe_map.__getitem__
    return _join(map(quoter, s))
def _quote_plus(s, safe=''):
    """Percent-encode `s` like :func:`_quote`, encoding spaces as ``+``."""
    if ' ' not in s:
        return _quote(s, safe)
    # Keep spaces unescaped during quoting, then turn them into '+'.
    return _quote(s, safe + ' ').replace(' ', '+')
def _safe_urlsplit(s):
    """the urlparse.urlsplit cache breaks if it contains unicode and
    we cannot control that.  So we force type cast that thing back
    to what we think it is.
    """
    rv = urlparse.urlsplit(s)
    # we have to check rv[2] here and not rv[1] as rv[1] will be
    # an empty bytestring in case no domain was given.
    if type(rv[2]) is not type(s):
        # A cached result of the wrong string type was returned; flush
        # urlparse's internal cache and split again.
        assert hasattr(urlparse, 'clear_cache')
        urlparse.clear_cache()
        rv = urlparse.urlsplit(s)
        assert type(rv[2]) is type(s)
    return rv
def _unquote(s, unsafe=''):
    """Decode '%XX' escapes in the bytestring `s`.

    Escapes decoding to a character in `unsafe`, and malformed escapes,
    are left untouched.
    """
    assert isinstance(s, str), 'unquote only works on bytes'
    parts = s.split('%')
    if len(parts) == 1:
        # No '%' at all -- nothing to decode.
        return s
    result = parts[0]
    for part in parts[1:]:
        try:
            decoded = _hextochr[part[:2]]
            if decoded in unsafe:
                # Treat unsafe results like an invalid escape below.
                raise KeyError()
            result += decoded + part[2:]
        except KeyError:
            # Invalid or unsafe escape: keep the literal '%' sequence.
            result += '%' + part
    return result
def _unquote_plus(s):
    """Decode '%XX' escapes in `s`, treating ``+`` as an encoded space."""
    s = s.replace('+', ' ')
    return _unquote(s)
def _uri_split(uri):
    """Split an URI or IRI into its components.

    Returns a 7-tuple ``(scheme, auth, hostname, port, path, query,
    fragment)`` where ``auth`` and ``port`` may be None.
    """
    scheme, netloc, path, query, fragment = _safe_urlsplit(uri)

    auth = None
    hostname = netloc
    if '@' in netloc:
        auth, hostname = netloc.split('@', 1)

    port = None
    if hostname and ':' in hostname:
        hostname, port = hostname.split(':', 1)

    return scheme, auth, hostname, port, path, query, fragment
def iri_to_uri(iri, charset='utf-8'):
    r"""Converts any unicode based IRI to an acceptable ASCII URI. Werkzeug
    always uses utf-8 URLs internally because this is what browsers and HTTP
    do as well. In some places where it accepts an URL it also accepts a
    unicode IRI and converts it into a URI.

    Examples for IRI versus URI:

    >>> iri_to_uri(u'http://☃.net/')
    'http://xn--n3h.net/'
    >>> iri_to_uri(u'http://üser:pässword@☃.net/påth')
    'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th'

    .. versionadded:: 0.6

    :param iri: the iri to convert
    :param charset: the charset for the URI
    """
    iri = unicode(iri)
    scheme, auth, hostname, port, path, query, fragment = _uri_split(iri)

    scheme = scheme.encode('ascii')
    # Internationalized domain names become punycode via the idna codec.
    hostname = hostname.encode('idna')
    if auth:
        if ':' in auth:
            auth, password = auth.split(':', 1)
        else:
            password = None
        # Userinfo is percent-encoded in the target charset.
        auth = _quote(auth.encode(charset))
        if password:
            auth += ':' + _quote(password.encode(charset))
        hostname = auth + '@' + hostname
    if port:
        hostname += ':' + port

    # Path and query use different sets of characters that stay unescaped.
    path = _quote(path.encode(charset), safe="/:~+")
    query = _quote(query.encode(charset), safe="=%&[]:;$()+,!?*/")

    # this absolutely always must return a string. Otherwise some parts of
    # the system might perform double quoting (#61)
    return str(urlparse.urlunsplit([scheme, hostname, path, query, fragment]))
def uri_to_iri(uri, charset='utf-8', errors='replace'):
    r"""Converts a URI in a given charset to a IRI.

    Examples for URI versus IRI

    >>> uri_to_iri('http://xn--n3h.net/')
    u'http://\u2603.net/'
    >>> uri_to_iri('http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th')
    u'http://\xfcser:p\xe4ssword@\u2603.net/p\xe5th'

    Query strings are left unchanged:

    >>> uri_to_iri('/?foo=24&x=%26%2f')
    u'/?foo=24&x=%26%2f'

    .. versionadded:: 0.6

    :param uri: the URI to convert
    :param charset: the charset of the URI
    :param errors: the error handling on decode
    """
    # Normalize first so that quoting is in a known state.
    uri = url_fix(str(uri), charset)
    scheme, auth, hostname, port, path, query, fragment = _uri_split(uri)

    scheme = _decode_unicode(scheme, 'ascii', errors)

    try:
        # Punycode hostnames decode back to their unicode form.
        hostname = hostname.decode('idna')
    except UnicodeError:
        # dammit, that codec raised an error.  Because it does not support
        # any error handling we have to fake it.... badly
        if errors not in ('ignore', 'replace'):
            raise
        hostname = hostname.decode('ascii', errors)

    if auth:
        if ':' in auth:
            auth, password = auth.split(':', 1)
        else:
            password = None
        auth = _decode_unicode(_unquote(auth), charset, errors)
        if password:
            auth += u':' + _decode_unicode(_unquote(password),
                                           charset, errors)
        hostname = auth + u'@' + hostname
    if port:
        # port should be numeric, but you never know...
        hostname += u':' + port.decode(charset, errors)

    # Reserved delimiters stay percent-encoded so the structure survives.
    path = _decode_unicode(_unquote(path, '/;?'), charset, errors)
    query = _decode_unicode(_unquote(query, ';/?:@&=+,$'),
                            charset, errors)

    return urlparse.urlunsplit([scheme, hostname, path, query, fragment])
def url_decode(s, charset='utf-8', decode_keys=False, include_empty=True,
               errors='replace', separator='&', cls=None):
    """Parse a querystring and return it as :class:`MultiDict`.  Per default
    only values are decoded into unicode strings.  If `decode_keys` is set to
    `True` the same will happen for keys.

    Per default a missing value for a key will default to an empty key.  If
    you don't want that behavior you can set `include_empty` to `False`.

    Per default encoding errors are ignored.  If you want a different behavior
    you can set `errors` to ``'replace'`` or ``'strict'``.  In strict mode a
    `HTTPUnicodeError` is raised.

    .. versionchanged:: 0.5
       In previous versions ";" and "&" could be used for url decoding.
       This changed in 0.5 where only "&" is supported.  If you want to
       use ";" instead a different `separator` can be provided.

       The `cls` parameter was added.

    :param s: a string with the query string to decode.
    :param charset: the charset of the query string.
    :param decode_keys: set to `True` if you want the keys to be decoded
                        as well.
    :param include_empty: Set to `False` if you don't want empty values to
                          appear in the dict.
    :param errors: the decoding error behavior.
    :param separator: the pair separator to be used, defaults to ``&``
    :param cls: an optional dict class to use.  If this is not specified
                       or `None` the default :class:`MultiDict` is used.
    """
    if cls is None:
        cls = MultiDict
    result = []
    for pair in str(s).split(separator):
        if not pair:
            # Skip empty segments (e.g. "a=1&&b=2").
            continue
        if '=' in pair:
            key, value = pair.split('=', 1)
        else:
            # A bare key ("?foo") gets an empty value.
            key = pair
            value = ''
        key = _unquote_plus(key)
        if decode_keys:
            key = _decode_unicode(key, charset, errors)
        result.append((key, url_unquote_plus(value, charset, errors)))
    return cls(result)
def url_encode(obj, charset='utf-8', encode_keys=False, sort=False, key=None,
               separator='&'):
    """URL encode a dict/`MultiDict`.  If a value is `None` it will not appear
    in the result string.  Per default only values are encoded into the target
    charset strings.  If `encode_keys` is set to ``True`` unicode keys are
    supported too.

    If `sort` is set to `True` the items are sorted by `key` or the default
    sorting algorithm.

    .. versionadded:: 0.5
        `sort`, `key`, and `separator` were added.

    :param obj: the object to encode into a query string.
    :param charset: the charset of the query string.
    :param encode_keys: set to `True` if you have unicode keys.
    :param sort: set to `True` if you want parameters to be sorted by `key`.
    :param separator: the separator to be used for the pairs.
    :param key: an optional function to be used for sorting.  For more details
                check out the :func:`sorted` documentation.
    """
    iterable = iter_multi_items(obj)
    if sort:
        iterable = sorted(iterable, key=key)
    parts = []
    for name, value in iterable:
        # None values are dropped entirely from the output.
        if value is None:
            continue
        if encode_keys and isinstance(name, unicode):
            name = name.encode(charset)
        else:
            name = str(name)
        if isinstance(value, unicode):
            value = value.encode(charset)
        else:
            value = str(value)
        parts.append('%s=%s' % (_quote(name), _quote_plus(value)))
    return separator.join(parts)
def url_quote(s, charset='utf-8', safe='/:'):
    """URL encode a single string with a given encoding.

    :param s: the string to quote.
    :param charset: the charset to be used.
    :param safe: an optional sequence of safe characters.
    """
    # Normalize to a bytestring: encode unicode, stringify anything else.
    if not isinstance(s, str):
        s = s.encode(charset) if isinstance(s, unicode) else str(s)
    return _quote(s, safe=safe)
def url_quote_plus(s, charset='utf-8', safe=''):
    """URL encode a single string with the given encoding and convert
    whitespace to "+".

    :param s: the string to quote.
    :param charset: the charset to be used.
    :param safe: an optional sequence of safe characters.
    """
    # Normalize to a bytestring: encode unicode, stringify anything else.
    if not isinstance(s, str):
        s = s.encode(charset) if isinstance(s, unicode) else str(s)
    return _quote_plus(s, safe=safe)
def url_unquote(s, charset='utf-8', errors='replace'):
    """URL decode a single string with a given decoding.

    Per default encoding errors are ignored.  If you want a different behavior
    you can set `errors` to ``'replace'`` or ``'strict'``.  In strict mode a
    `HTTPUnicodeError` is raised.

    :param s: the string to unquote.
    :param charset: the charset to be used.
    :param errors: the error handling for the charset decoding.
    """
    if isinstance(s, unicode):
        s = s.encode(charset)
    raw = _unquote(s)
    return _decode_unicode(raw, charset, errors)
def url_unquote_plus(s, charset='utf-8', errors='replace'):
    """URL decode a single string with the given decoding and decode
    a "+" to whitespace.

    Per default encoding errors are ignored.  If you want a different behavior
    you can set `errors` to ``'replace'`` or ``'strict'``.  In strict mode a
    `HTTPUnicodeError` is raised.

    :param s: the string to unquote.
    :param charset: the charset to be used.
    :param errors: the error handling for the charset decoding.
    """
    if isinstance(s, unicode):
        s = s.encode(charset)
    raw = _unquote_plus(s)
    return _decode_unicode(raw, charset, errors)
def url_fix(s, charset='utf-8'):
    r"""Sometimes you get an URL by a user that just isn't a real URL because
    it contains unsafe characters like ' ' and so on.  This function can fix
    some of the problems in a similar way browsers handle data entered by the
    user:

    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
    'http://de.wikipedia.org/wiki/Elf%20%28Begriffskl%C3%A4rung%29'

    :param s: the string with the URL to fix.
    :param charset: The target charset for the URL if the url was given as
                    unicode string.
    """
    if isinstance(s, unicode):
        s = s.encode(charset, 'replace')
    scheme, netloc, path, query, fragment = _safe_urlsplit(s)
    # Re-quote only the path and query; netloc and fragment pass through.
    return urlparse.urlunsplit((
        scheme,
        netloc,
        _quote(path, '/%'),
        _quote_plus(query, ':&%='),
        fragment,
    ))
class Href(object):
    """Implements a callable that constructs URLs with the given base. The
    function can be called with any number of positional and keyword
    arguments which than are used to assemble the URL.  Works with URLs
    and posix paths.

    Positional arguments are appended as individual segments to
    the path of the URL:

    >>> href = Href('/foo')
    >>> href('bar', 23)
    '/foo/bar/23'
    >>> href('foo', bar=23)
    '/foo/foo?bar=23'

    If any of the arguments (positional or keyword) evaluates to `None` it
    will be skipped.  If no keyword arguments are given the last argument
    can be a :class:`dict` or :class:`MultiDict` (or any other dict subclass),
    otherwise the keyword arguments are used for the query parameters, cutting
    off the first trailing underscore of the parameter name:

    >>> href(is_=42)
    '/foo?is=42'
    >>> href({'foo': 'bar'})
    '/foo?foo=bar'

    Combining of both methods is not allowed:

    >>> href({'foo': 'bar'}, bar=42)
    Traceback (most recent call last):
      ...
    TypeError: keyword arguments and query-dicts can't be combined

    Accessing attributes on the href object creates a new href object with
    the attribute name as prefix:

    >>> bar_href = href.bar
    >>> bar_href("blub")
    '/foo/bar/blub'

    If `sort` is set to `True` the items are sorted by `key` or the default
    sorting algorithm:

    >>> href = Href("/", sort=True)
    >>> href(a=1, b=2, c=3)
    '/?a=1&b=2&c=3'

    .. versionadded:: 0.5
        `sort` and `key` were added.
    """

    def __init__(self, base='./', charset='utf-8', sort=False, key=None):
        if not base:
            base = './'
        self.base = base
        self.charset = charset
        self.sort = sort
        self.key = key

    def __getattr__(self, name):
        # Dunder lookups (e.g. copy/pickle protocol probes) must not create
        # child Href objects.
        if name[:2] == '__':
            raise AttributeError(name)
        base = self.base
        if base[-1:] != '/':
            base += '/'
        # Child Href inherits charset and sorting configuration.
        return Href(urlparse.urljoin(base, name), self.charset, self.sort,
                    self.key)

    def __call__(self, *path, **query):
        # A trailing dict positional argument is an alternative way to pass
        # the query; it is mutually exclusive with keyword arguments.
        if path and isinstance(path[-1], dict):
            if query:
                raise TypeError('keyword arguments and query-dicts '
                                'can\'t be combined')
            query, path = path[-1], path[:-1]
        elif query:
            # Strip one trailing underscore so reserved words can be used
            # as parameter names (e.g. is_ -> is).
            query = dict([(k.endswith('_') and k[:-1] or k, v)
                          for k, v in query.items()])
        # None segments are dropped; the rest are quoted and joined.
        path = '/'.join([url_quote(x, self.charset) for x in path
                         if x is not None]).lstrip('/')
        rv = self.base
        if path:
            if not rv.endswith('/'):
                rv += '/'
            rv = urlparse.urljoin(rv, './' + path)
        if query:
            rv += '?' + url_encode(query, self.charset, sort=self.sort,
                                   key=self.key)
        return str(rv)
| apache-2.0 |
jgowans/CASPER_tools | shift_schedule_optimiser.py | 1 | 2405 | #!/usr/bin/env python
import logging
import time
class ShiftSheduleOptimiser:
    """Finds the minimal FFT shift schedule that avoids overflow.

    Starting with every stage shifting, bits are cleared one at a time from
    the most significant stage downwards; a cleared bit is kept only if the
    FPGA does not report overflow afterwards.  The result is stored in
    ``self.optimal``.
    """

    def __init__(self, shift_reg, fft_stages, overflow_reg, fpga, re_sync=None, logger=logging.getLogger()):
        """ Create a Shift Schedule Optimiser instance.

        Parameters:
        shift_reg : string
            Name of register which controls shifting
        fft_stages : int
            The number of stages INSIDE the FFT block.
            This is typically two less than the number of FFT points.
        overflow_reg : string
            name of register which has its bit0 checked for overflow
        fpga : corr.katcp_wrapper.FpgaClient
            Interface to the FPGA
        re_sync : function, optional
            function to call for re-sync. If None, a re-sync will not be done
        logger : logger
        """
        # NOTE(review): calling basicConfig() in a library constructor is
        # questionable, but removing it would change logging behavior for
        # existing users -- kept as-is.
        logging.basicConfig()
        self.shift_reg = shift_reg
        self.fft_stages = fft_stages
        self.overflow_reg = overflow_reg
        self.fpga = fpga
        self.re_sync = re_sync
        self.logger = logger

    def find_optimal(self):
        """Search for the minimal shift schedule; result in ``self.optimal``."""
        # set all bits to 1
        self.optimal = 2**(self.fft_stages) - 1
        self.write_shift(self.optimal)
        # iteratively try to clear bits, most significant stage first
        for bit_idx in range(self.fft_stages - 1, -1, -1):  # e.g. [9, 8, ..., 1, 0]
            self.optimal &= ~(1 << bit_idx)  # clear this bit
            self.write_shift(self.optimal)
            if (self.check_overflow()):
                # oh no! That was bad. Set the bit back to 1
                self.optimal |= (1 << bit_idx)

    def write_shift(self, shift_val):
        """ Sets the shift schedule to a value passed in and
        re-syncs if possible
        """
        self.fpga.write_int(self.shift_reg, shift_val)
        # Fixed: was ``loggin.debug(...)`` (NameError at runtime); use the
        # instance logger like the rest of the class.
        self.logger.debug("Shift value set to: {n}".format(n=shift_val))
        if self.re_sync is not None:
            self.logger.debug("Doing a re-sync")
            self.re_sync()
            time.sleep(1)  # delay to allow effect to manifest.

    def check_overflow(self):
        """ Returns true if overflow has happened
        """
        # select lsb from overflow reg
        overflow_val = self.fpga.read_uint(self.overflow_reg) & 0x1
        if overflow_val != 0:
            self.logger.debug("Overflow flag is SET")
            return True
        self.logger.debug("Overflow flag is CLEAR")
        return False
| unlicense |
haoyuchen1992/CourseBuilder | modules/search/search.py | 4 | 20144 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Search module that uses Google App Engine's full text search."""
__author__ = 'Ellis Michael (emichael@google.com)'
import collections
import gettext
import logging
import math
import mimetypes
import os
import time
import traceback
import jinja2
import resources
import webapp2
import appengine_config
from common import safe_dom
from controllers import sites
from controllers import utils
from models import config
from models import counters
from models import courses
from models import custom_modules
from models import jobs
from models import transforms
from google.appengine.api import namespace_manager
from google.appengine.api import search
from google.appengine.ext import db
MODULE_NAME = 'Full Text Search'
CAN_INDEX_ALL_COURSES_IN_CRON = config.ConfigProperty(
'gcb_can_index_automatically', bool, safe_dom.Text(
'Whether the search module can automatically index the course daily '
'using a cron job. If enabled, this job would index the course '
'incrementally so that only new items or items which have not been '
'recently indexed are indexed.'),
default_value=False)
SEARCH_QUERIES_MADE = counters.PerfCounter(
'gcb-search-queries-made',
'The number of student queries made to the search module.')
SEARCH_RESULTS_RETURNED = counters.PerfCounter(
'gcb-search-results-returned',
'The number of search results returned across all student queries.')
SEARCH_FAILURES = counters.PerfCounter(
'gcb-search-failures',
'The number of search failure messages returned across all student '
'queries.')
INDEX_NAME = 'gcb_search_index'
RESULTS_LIMIT = 10
GCB_SEARCH_FOLDER_NAME = os.path.normpath('/modules/search/')
MAX_RETRIES = 5
# I18N: Message displayed on search results page when error occurs.
SEARCH_ERROR_TEXT = gettext.gettext('Search is currently unavailable.')
class ModuleDisabledException(Exception):
    """Raised when an operation requires the search module but it is disabled."""
def get_index(course):
    """Return the search index scoped to *course*'s namespace."""
    namespace = course.app_context.get_namespace_name()
    return search.Index(name=INDEX_NAME, namespace=namespace)
def index_all_docs(course, incremental):
    """Index all of the docs for a given models.Course object.

    Args:
        course: models.courses.Course. the course to index.
        incremental: boolean. whether or not to index only new or out-of-date
            items.
    Returns:
        A dict with three keys.
        'num_indexed_docs' maps to an int, the number of documents added to the
            index.
        'doc_type' maps to a counter with resource types as keys mapping to the
            number of that resource added to the index.
        'indexing_time_secs' maps to a float representing the number of seconds
            the indexing job took.
    Raises:
        ModuleDisabledException: The search module is currently disabled.
    """
    if not custom_module.enabled:
        raise ModuleDisabledException('The search module is disabled.')

    start_time = time.time()
    index = get_index(course)
    # For incremental runs, seed the timestamp/type maps from the existing
    # index so unchanged documents are skipped by the generator.
    timestamps, doc_types = (_get_index_metadata(index) if incremental
                             else ({}, {}))
    for doc in resources.generate_all_documents(course, timestamps):
        retry_count = 0
        while retry_count < MAX_RETRIES:
            try:
                index.put(doc)
                # Only record metadata once the put has succeeded.
                timestamps[doc.doc_id] = doc['date'][0].value
                doc_types[doc.doc_id] = doc['type'][0].value
                break
            except search.Error, e:
                # Transient errors are retried up to MAX_RETRIES; any other
                # error abandons this document and moves on.
                if e.results[0].code == search.OperationResult.TRANSIENT_ERROR:
                    retry_count += 1
                    if retry_count >= MAX_RETRIES:
                        logging.error(
                            'Multiple transient errors indexing doc_id: %s',
                            doc.doc_id)
                else:
                    logging.error('Failed to index doc_id: %s', doc.doc_id)
                    break

    total_time = '%.2f' % (time.time() - start_time)
    indexed_doc_types = collections.Counter()
    for type_name in doc_types.values():
        indexed_doc_types[type_name] += 1
    return {'num_indexed_docs': len(timestamps),
            'doc_types': indexed_doc_types,
            'indexing_time_secs': total_time}
def clear_index(course):
    """Delete all docs in the index for a given models.Course object.

    Returns:
        A dict with key 'deleted_docs' mapping to the number of documents
        deleted across all batches.
    Raises:
        ModuleDisabledException: The search module is currently disabled.
    """
    if not custom_module.enabled:
        raise ModuleDisabledException('The search module is disabled.')

    index = get_index(course)
    total_docs = 0
    doc_ids = [document.doc_id for document in index.get_range(ids_only=True)]
    # get_range returns documents in batches, so loop until none remain.
    # Accumulate the count per batch; the old code only counted the first
    # batch and under-reported multi-batch deletes.
    while doc_ids:
        total_docs += len(doc_ids)
        index.delete(doc_ids)
        doc_ids = [document.doc_id
                   for document in index.get_range(ids_only=True)]
    return {'deleted_docs': total_docs}
def _get_index_metadata(index):
    """Returns dict from doc_id to timestamp and one from doc_id to doc_type."""
    timestamps = {}
    doc_types = {}
    cursor = search.Cursor()
    # Page through the whole index; an exhausted result set yields a falsy
    # cursor, which ends the loop.
    while cursor:
        options = search.QueryOptions(
            limit=1000,
            cursor=cursor,
            returned_fields=['date', 'type'])
        query = search.Query(query_string='', options=options)
        current_docs = index.search(query)
        cursor = current_docs.cursor
        for doc in current_docs:
            timestamps[doc.doc_id] = doc['date'][0].value
            doc_types[doc.doc_id] = doc['type'][0].value
    return timestamps, doc_types
def fetch(course, query_string, offset=0, limit=RESULTS_LIMIT):
    """Return an HTML fragment with the results of a search for query_string.

    Args:
        course: models.courses.Course. the course to search.
        query_string: str. the user's specified query.
        offset: int. the number of results to skip.
        limit: int. the number of results to return.
    Returns:
        A dict with two keys.
        'results' maps to an ordered list of resources.Result objects.
        'total_found' maps to the total number of results in the index which
            match query_string.
    Raises:
        ModuleDisabledException: The search module is currently disabled.
    """
    if not custom_module.enabled:
        raise ModuleDisabledException('The search module is disabled.')

    index = get_index(course)

    try:
        # TODO(emichael): Don't compute these for every query
        returned_fields = resources.get_returned_fields()
        snippeted_fields = resources.get_snippeted_fields()
        options = search.QueryOptions(
            limit=limit,
            offset=offset,
            returned_fields=returned_fields,
            number_found_accuracy=100,
            snippeted_fields=snippeted_fields)
        query = search.Query(query_string=query_string, options=options)
        results = index.search(query)
    except search.Error:
        # Malformed queries and service errors both surface here; report an
        # empty result set rather than propagating.
        logging.info('Failed searching for: %s', query_string)
        return {'results': None, 'total_found': 0}

    processed_results = resources.process_results(results)
    return {'results': processed_results, 'total_found': results.number_found}
class SearchHandler(utils.BaseHandler):
    """Handler for generating the search results page."""

    def get(self):
        """Process GET request."""
        # TODO(emichael): move timing to Javascript

        if not custom_module.enabled:
            self.error(404)
            return

        student = self.personalize_page_and_get_enrolled(
            supports_transient_student=True)
        if not student:
            return

        try:
            start = time.time()
            # TODO(emichael): Don't use get because it can't handle utf-8
            query = self.request.get('query')
            offset = self.request.get('offset')

            self.template_value['navbar'] = {}
            if query:
                # A missing or malformed offset silently falls back to 0.
                try:
                    offset = int(offset)
                except (ValueError, TypeError):
                    offset = 0
                self.template_value['query'] = query
                SEARCH_QUERIES_MADE.inc()
                response = fetch(self.get_course(), query, offset=offset)
                self.template_value['time'] = '%.2f' % (time.time() - start)
                self.template_value['search_results'] = response['results']

                # Pagination links are only emitted when the neighboring page
                # actually exists.
                total_found = response['total_found']
                if offset + RESULTS_LIMIT < total_found:
                    self.template_value['next_link'] = (
                        'search?query=%s&offset=%d' %
                        (query, offset + RESULTS_LIMIT))
                if offset - RESULTS_LIMIT >= 0:
                    self.template_value['previous_link'] = (
                        'search?query=%s&offset=%d' %
                        (query, offset - RESULTS_LIMIT))
                self.template_value['page_number'] = offset / RESULTS_LIMIT + 1
                self.template_value['total_pages'] = int(math.ceil(
                    float(total_found) / RESULTS_LIMIT))

                if response['results']:
                    SEARCH_RESULTS_RETURNED.inc(len(response['results']))

        # TODO(emichael): Remove this check when the unicode issue is fixed in
        # dev_appserver.
        except UnicodeEncodeError as e:
            SEARCH_FAILURES.inc()
            if not appengine_config.PRODUCTION_MODE:
                # This message will only be displayed to the course author in
                # dev, so it does not need to be I18N'd
                self.template_value['search_error'] = (
                    'There is a known issue in App Engine\'s SDK '
                    '(code.google.com/p/googleappengine/issues/detail?id=9335) '
                    'which causes an error when generating search snippets '
                    'which contain non-ASCII characters. This error does not '
                    'occur in the production environment, so you can safely '
                    'run your course with unicode characters on appspot.com.')
                logging.error('[Unicode/Dev server issue] Error rendering the '
                              'search page: %s.', e)
            else:
                self.template_value['search_error'] = SEARCH_ERROR_TEXT
                logging.error('Error rendering the search page: %s. %s',
                              e, traceback.format_exc())
        except Exception as e:  # pylint: disable-msg=broad-except
            # Catch-all boundary: any failure renders a generic error message
            # instead of a 500 page.
            SEARCH_FAILURES.inc()
            self.template_value['search_error'] = SEARCH_ERROR_TEXT
            logging.error('Error rendering the search page: %s. %s',
                          e, traceback.format_exc())
        finally:
            # Always render the page, with or without results/errors.
            path = sites.abspath(self.app_context.get_home_folder(),
                                 GCB_SEARCH_FOLDER_NAME)
            template = self.get_template('search.html', additional_dirs=[path])
            self.template_value['navbar'] = {}
            self.response.out.write(template.render(self.template_value))
class AssetsHandler(webapp2.RequestHandler):
    """Content handler for static assets associated with search.

    Serves files living under an 'assets' directory below BUNDLE_ROOT,
    answering 404 when the module is disabled, the path is not an assets
    path, or the file cannot be read.
    """
    def get(self):
        """Respond to HTTP GET methods."""
        if not custom_module.enabled:
            self.error(404)
            return
        path = self.request.path
        if path.startswith('/'):
            path = path[1:]
        path = os.path.normpath(path)
        if os.path.basename(os.path.dirname(path)) != 'assets':
            # Refuse anything outside an assets directory. Without this
            # return, the handler would go on to stream the file even
            # after setting the 404 status.
            self.error(404)
            return
        resource_file = os.path.join(appengine_config.BUNDLE_ROOT, path)
        mimetype = mimetypes.guess_type(resource_file)[0]
        if mimetype is None:
            mimetype = 'application/octet-stream'
        try:
            sites.set_static_resource_cache_control(self)
            self.response.status = 200
            self.response.headers['Content-Type'] = mimetype
            # Context manager guarantees the handle is closed even if the
            # response write fails (the original leaked the open file).
            with open(resource_file) as stream:
                self.response.write(stream.read())
        except IOError:
            self.error(404)
class SearchDashboardHandler(object):
    """Should only be inherited by DashboardHandler, not instantiated.

    Supplies the dashboard's "Search" view: a status page for the course
    index plus POST handlers that queue indexing / clearing jobs.
    """
    def get_search(self):
        """Renders course indexing view."""
        template_values = {
            'page_title': self.format_title('Search'),
            'page_title_linked': self.format_title('Search', as_link=True),
        }
        mc_template_value = {}
        mc_template_value['module_enabled'] = custom_module.enabled
        indexing_job = IndexCourse(self.app_context).load()
        clearing_job = ClearIndex(self.app_context).load()
        # Report the state of whichever job (index or clear) ran most
        # recently; ties are broken in favor of the indexing job.
        if indexing_job and (not clearing_job or
                             indexing_job.updated_on > clearing_job.updated_on):
            if indexing_job.status_code in [jobs.STATUS_CODE_STARTED,
                                            jobs.STATUS_CODE_QUEUED]:
                mc_template_value['status_message'] = 'Indexing in progress.'
                mc_template_value['job_in_progress'] = True
            elif indexing_job.status_code == jobs.STATUS_CODE_COMPLETED:
                mc_template_value['indexed'] = True
                mc_template_value['last_updated'] = (
                    indexing_job.updated_on.strftime(
                        utils.HUMAN_READABLE_DATETIME_FORMAT))
                mc_template_value['index_info'] = transforms.loads(
                    indexing_job.output)
            elif indexing_job.status_code == jobs.STATUS_CODE_FAILED:
                mc_template_value['status_message'] = (
                    'Indexing job failed with error: %s' % indexing_job.output)
        elif clearing_job:
            if clearing_job.status_code in [jobs.STATUS_CODE_STARTED,
                                            jobs.STATUS_CODE_QUEUED]:
                mc_template_value['status_message'] = 'Clearing in progress.'
                mc_template_value['job_in_progress'] = True
            elif clearing_job.status_code == jobs.STATUS_CODE_COMPLETED:
                mc_template_value['status_message'] = (
                    'The index has been cleared.')
            elif clearing_job.status_code == jobs.STATUS_CODE_FAILED:
                mc_template_value['status_message'] = (
                    'Clearing job failed with error: %s' % clearing_job.output)
        else:
            mc_template_value['status_message'] = (
                'No indexing job has been run yet.')
        # XSRF tokens protect the two POST endpoints below.
        mc_template_value['index_course_xsrf_token'] = self.create_xsrf_token(
            'index_course')
        mc_template_value['clear_index_xsrf_token'] = self.create_xsrf_token(
            'clear_index')
        template_values['main_content'] = jinja2.Markup(self.get_template(
            'search_dashboard.html', [os.path.dirname(__file__)]
        ).render(mc_template_value, autoescape=True))
        self.render_page(template_values)
    def post_index_course(self):
        """Submits a new indexing operation."""
        try:
            incremental = self.request.get('incremental') == 'true'
            check_jobs_and_submit(IndexCourse(self.app_context, incremental),
                                  self.app_context)
        except db.TransactionFailedError:
            # Double submission from multiple browsers, just pass
            pass
        self.redirect('/dashboard?action=search')
    def post_clear_index(self):
        """Submits a new index-clearing operation."""
        try:
            check_jobs_and_submit(ClearIndex(self.app_context),
                                  self.app_context)
        except db.TransactionFailedError:
            # Double submission from multiple browsers, just pass
            pass
        self.redirect('/dashboard?action=search')
class CronHandler(utils.BaseHandler):
    """Iterates through all courses and starts an indexing job for each one.
    All jobs should be submitted through the transactional check_jobs_and_submit
    method to prevent multiple index operations from running at the same time.
    If an index job is currently running when this cron job attempts to start
    one, this operation will be a noop for that course.
    """
    def get(self):
        """Start an index job for each course."""
        log = logging.getLogger('modules.search.cron')
        self.response.headers['Content-Type'] = 'text/plain'
        if not CAN_INDEX_ALL_COURSES_IN_CRON.value:
            log.info('Automatic indexing disabled. Cron job halting.')
        else:
            count = 0
            for count, course_context in enumerate(
                    sites.get_all_courses(), start=1):
                namespace = course_context.get_namespace_name()
                try:
                    check_jobs_and_submit(
                        IndexCourse(course_context), course_context)
                except db.TransactionFailedError as e:
                    log.info(
                        'Failed to submit job #%s in namespace %s: %s',
                        count, namespace, e)
                else:
                    log.info(
                        'Index job #%s submitted for namespace %s.',
                        count, namespace)
            log.info('All %s indexing jobs started; cron job complete.',
                     count)
        self.response.write('OK\n')
@db.transactional(xg=True)
def check_jobs_and_submit(job, app_context):
    """Determines whether an indexing job is running and submits if not."""
    active_codes = (jobs.STATUS_CODE_STARTED, jobs.STATUS_CODE_QUEUED)
    # Refuse submission while either an index or a clear job is active.
    for existing in (IndexCourse(app_context).load(),
                     ClearIndex(app_context).load()):
        if existing and existing.status_code in active_codes:
            raise db.TransactionFailedError('Index job is currently running.')
    job.non_transactional_submit()
class IndexCourse(jobs.DurableJob):
    """A job that indexes the course.

    ``incremental`` is forwarded verbatim to ``index_all_docs``;
    presumably True means only changed documents are re-indexed rather
    than rebuilding from scratch -- confirm against index_all_docs,
    which is defined elsewhere in this module.
    """
    def __init__(self, app_context, incremental=True):
        super(IndexCourse, self).__init__(app_context)
        self.incremental = incremental
    def run(self):
        """Index the course."""
        # The job executes inside the course's own namespace; recover the
        # matching application context and course from it.
        namespace = namespace_manager.get_namespace()
        logging.info('Running indexing job for namespace %s. Incremental: %s',
                     namespace_manager.get_namespace(), self.incremental)
        app_context = sites.get_app_context_for_namespace(namespace)
        course = courses.Course(None, app_context=app_context)
        return index_all_docs(course, self.incremental)
class ClearIndex(jobs.DurableJob):
    """A job that clears the index for a course."""
    def run(self):
        """Clear the index."""
        # Like IndexCourse.run: resolve the course from the namespace the
        # job is executing in, then delegate to clear_index.
        namespace = namespace_manager.get_namespace()
        logging.info('Running clearing job for namespace %s.', namespace)
        app_context = sites.get_app_context_for_namespace(namespace)
        course = courses.Course(None, app_context=app_context)
        return clear_index(course)
# Module registration
custom_module = None
def register_module():
    """Registers this module in the registry."""
    global custom_module
    # Global routes serve static assets and the indexing cron endpoint;
    # the namespaced route serves per-course search queries.
    custom_module = custom_modules.Module(
        MODULE_NAME,
        'Provides search capabilities for courses',
        [('/modules/search/assets/.*', AssetsHandler),
         ('/cron/search/index_courses', CronHandler)],
        [('/search', SearchHandler)])
    return custom_module
| apache-2.0 |
Kast0rTr0y/ansible | lib/ansible/modules/network/junos/junos_rpc.py | 10 | 4539 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'core',
'version': '1.0'
}
# Fix: the xml return key used lowercase 'c(xml)'; Ansible doc markup
# requires uppercase C(...) for inline code formatting.
DOCUMENTATION = """
---
module: junos_rpc
version_added: "2.3"
author: "Peter Sprygada (@privateip)"
short_description: Runs an arbitrary RPC on the remote device over NetConf
description:
  - Sends a request to the remote device running JUNOS to execute the
    specified RPC using the NetConf transport. The reply is then
    returned to the playbook in the C(xml) key. If an alternate output
    format is requested, the reply is transformed to the requested output.
options:
  rpc:
    description:
      - The C(rpc) argument specifies the RPC call to send to the
        remote devices to be executed. The RPC Reply message is parsed
        and the contents are returned to the playbook.
    required: true
  args:
    description:
      - The C(args) argument provides a set of arguments for the RPC
        call and are encoded in the request message. This argument
        accepts a set of key=value arguments.
    required: false
    default: null
  output:
    description:
      - The C(output) argument specifies the desired output of the
        return data. This argument accepts one of C(xml), C(text),
        or C(json). For C(json), the JUNOS device must be running a
        version of software that supports native JSON output.
    required: false
    default: xml
"""
EXAMPLES = """
- name: collect interface information using rpc
junos_rpc:
rpc: get-interface-information
args:
interface: em0
media: True
- name: get system information
junos_rpc:
rpc: get-system-information
"""
# Fix: "rpc rely" -> "rpc reply" in the returned-value documentation.
RETURN = """
xml:
  description: The xml return string from the rpc request
  returned: always
output:
  description: The rpc reply converted to the output format
  returned: always
output_lines:
  description: The text output split into lines for readability
  returned: always
"""
from ncclient.xml_ import new_ele, sub_ele, to_xml, to_ele
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netconf import send_request
from ansible.module_utils.six import iteritems
def main():
    """main entry point for Ansible module

    Builds a NETCONF RPC request element from the module arguments,
    sends it to the remote JUNOS device and returns the reply in the
    requested format (xml, text or json).
    """
    argument_spec = dict(
        rpc=dict(required=True),
        args=dict(type='dict'),
        output=dict(default='xml', choices=['xml', 'json', 'text']),
    )
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=False)
    result = {'changed': False}
    # Junos RPC names are hyphenated on the wire; accept the underscore
    # spelling commonly used in playbooks and normalize it here.
    rpc = str(module.params['rpc']).replace('_', '-')
    if all((module.check_mode, not rpc.startswith('get'))):
        module.fail_json(msg='invalid rpc for running in check_mode')
    args = module.params['args'] or {}
    xattrs = {'format': module.params['output']}
    # Fix: build the element from the normalized name. The original used
    # the raw playbook value, so 'get_interface_information' produced an
    # invalid (underscored) RPC element.
    element = new_ele(rpc, xattrs)
    # Argument keys are normalized the same way; a list value becomes
    # repeated child elements, and a bare True yields an empty element.
    for key, value in iteritems(args):
        key = str(key).replace('_', '-')
        if isinstance(value, list):
            for item in value:
                child = sub_ele(element, key)
                if item is not True:
                    child.text = item
        else:
            child = sub_ele(element, key)
            if value is not True:
                child.text = value
    reply = send_request(module, element)
    result['xml'] = str(to_xml(reply))
    if module.params['output'] == 'text':
        reply = to_ele(reply)
        data = reply.xpath('//output')
        result['output'] = data[0].text.strip()
        result['output_lines'] = result['output'].split('\n')
    elif module.params['output'] == 'json':
        reply = to_ele(reply)
        data = reply.xpath('//rpc-reply')
        result['output'] = module.from_json(data[0].text.strip())
    else:
        result['output'] = str(to_xml(reply)).split('\n')
    module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
sfanous/Pyecobee | pyecobee/objects/utility.py | 1 | 2397 | """
This module is home to the Utility class
"""
from pyecobee.ecobee_object import EcobeeObject
class Utility(EcobeeObject):
    """Wrapper for the ecobee API Utility object.

    See
    https://www.ecobee.com/home/developer/api/documentation/v1/objects/Utility.shtml
    All four attributes map one-to-one onto ecobee property names and are
    exposed through read-only getter properties.
    """
    __slots__ = ['_name', '_phone', '_email', '_web']
    attribute_name_map = {
        'name': 'name',
        'phone': 'phone',
        'email': 'email',
        'web': 'web',
    }
    attribute_type_map = {
        'name': 'six.text_type',
        'phone': 'six.text_type',
        'email': 'six.text_type',
        'web': 'six.text_type',
    }
    def __init__(self, name=None, phone=None, email=None, web=None):
        """Build a Utility, with every attribute optional."""
        self._name = name
        self._phone = phone
        self._email = email
        self._web = web
    @property
    def name(self):
        """Getter for the name attribute (six.text_type)."""
        return self._name
    @property
    def phone(self):
        """Getter for the phone attribute (six.text_type)."""
        return self._phone
    @property
    def email(self):
        """Getter for the email attribute (six.text_type)."""
        return self._email
    @property
    def web(self):
        """Getter for the web attribute (six.text_type)."""
        return self._web
| mit |
ArteliaTelemac/PostTelemac | PostTelemac/meshlayerparsers/libs_telemac_caduc/parsers/parserStrings.py | 2 | 17182 | """@author Sebastien E. Bourban
"""
"""@note ... this work is based on a collaborative effort between
.________. ,--.
| | . ( (
|,-. / HR Wallingford EDF - LNHE / \_ \_/ .--.
/ \ / Howbery Park, 6, quai Watier \ ) /_ )
,. `' Wallingford, Oxfordshire 78401 Cedex `-'_ __ `--
/ \ / OX10 8BA, United Kingdom Chatou, France __/ \ \ `.
/ `-'| www.hrwallingford.com innovation.edf.com | ) ) )
!________! `--' `--
"""
"""@history 15/11/2011 -- Sebastien E. Bourban
"""
"""@history 25/08/2013 -- Sebastien E. Bourban and Juliette C. Parisi
Complete re-work of the definition of points and frames:
- points can include 3D points with vairous vertical references to planes
- frames can include ranges
These are mainly used to parse the keys "extract" and "time" set in the
XML files for the validation or the extraction of data.
"""
"""@history 23/09/2014 -- Sebastien E. Bourban
parseArrayGrid has been split to include both 2D and 3D grids
"""
"""@brief
Various method to parse strings into values, arrays, etc.
"""
# _____ ___________________________________________________
# ____/ Imports /__________________________________________________/
#
# ~~> dependencies towards standard python
import re
import sys
from fractions import gcd
# _____ ___________________________________________
# ____/ General Toolbox /__________________________________________/
#
rng2d = re.compile(r"(?P<n>[\d:+-]+)", re.I)
nod2d = re.compile(r"(?P<n>\d+)", re.I)
pnt2d = re.compile(r"\((?P<n>[^\(\)]+?)\)", re.I)
spl2d = re.compile(r"\{(?P<n>[^\(\)]+?)\}", re.I)
empty = re.compile(r"[\(\[][\)\]]", re.I)
plans = re.compile(r"\[(?P<n>[\d;,]+?)\]", re.I)
numbr = re.compile(r"(?P<n>[\d.+-dDeE]+?)")
simple = re.compile(r"(?P<n>([\d:+-]+|\([\d.+-dDeE]+?\)))")
complx = re.compile(
r"(?P<n>(\d+|\{[\d.+-dDeE]+?[;,][\d.+-dDeE]+?\}|\([\d.+-dDeE]+?[;,][\d.+-dDeE]+?\)|[\[\(][\]\)])((?=[^#@])|[#@][\[\(].+?[\]\)]|[#@][^,;\[\(\)\]]+))",
re.I,
)
squote = re.compile(r"(?P<squot>'.*?')") # ,re.I)
dquote = re.compile(r'(?P<dquot>".*?")') # ,re.I)
# gridxyn = re.compile(r'\((?P<minx>[\d.+-dDeE]+?)[;,](?P<miny>[\d.+-dDeE]+?)\)\((?P<maxx>[\d.+-dDeE]+?)[;,](?P<maxy>[\d.+-dDeE]+?)\)\{(?P<nx>\d+?)[;,](?P<ny>\d+?)\}',re.I)
# _____ ______________________________________
# ____/ General Time Jumping /_____________________________________/
#
def parseArrayFrame(s, size=-1):
    """
    @brief Decoding structure all in order
       The list of frames is delimiting points either by ',' or ';',
       and the ranges by ':'
       The output is an array [..]. Each term is either:
       (a) an integer, representing a frame or a node or a plane for instance
       (b) a 1D-tuple of a real value, representing a time or a depth
       (c) a 3D-tuple of integers, representing an array range [0;-1;1] by default
    @examples of input / output
       '5' => [5]
       '[4]' => [4]
       '[5,6,7,0]' => [5, 6, 7, 0]
       '(5.6)' => [(5.6,)]
       '(76);(4),[(3.3);4:14:2;0:6;8]'
          => [(76.0,), (4.0,), (3.3,), (4, 14, 2), (0, 6, 1), 8]
    """
    frames = []
    # ~~ Special deal of all times ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # '[]' means "all frames": expand to the full range when the array
    # length is known, otherwise to the symbolic range [0,-1,1].
    if s == "[]":
        if size >= 0:
            return [range(size)]
        else:
            return [[0, -1, 1]]
    # ~~ Identify individual frames ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    for node in re.findall(simple, s + ","):
        # ~~> Is it a time (t) or a frame / range
        t = node[1]
        proci = re.match(rng2d, t)
        procr = re.match(pnt2d, t)
        if proci:
            # Integer frame 'a', or range 'a:b' / 'a:b:step'.
            rt = proci.group("n").split(":")
            if len(rt) == 1:
                frameA = [int(rt[0])]
                if size >= 0:
                    # Negative indices count back from the end; clamp
                    # into [0, size-1] when the length is known.
                    if frameA[0] < 0:
                        frameA[0] = max(0, size + frameA[0])
                    else:
                        frameA[0] = min(frameA[0], size - 1)
                    frameA = range(frameA[0], frameA[0] + 1, 1)
            else:
                if len(rt) == 2:
                    frameA = [int(rt[0]), int(rt[1]), 1]
                if len(rt) == 3:
                    frameA = [int(rt[0]), int(rt[1]), int(rt[2])]
                if size >= 0:
                    if frameA[0] < 0:
                        frameA[0] = max(0, size + frameA[0])
                    else:
                        frameA[0] = min(frameA[0], size - 1)
                    if frameA[1] < 0:
                        frameA[1] = max(0, size + frameA[1])
                    else:
                        frameA[1] = min(frameA[1], size - 1)
                    # Upper bound is inclusive, hence the +1.
                    frameA = range(frameA[0], frameA[1] + 1, frameA[2])
        elif procr:
            # Parenthesised value '(t)' -> 1-tuple of a real (a time).
            frameA = (float(procr.group("n")),)
        else:
            print "... could not parse the point <" + node[0] + '> from the string "' + s + '"'
            sys.exit(1)
        # ~~> Final packing
        frames.extend(frameA)
    return frames
# _____ _____________________________________
# ____/ General Space Jumping /____________________________________/
#
def parseArrayPoint(s, size=-1):
    """
    @brief Decoding structure all in order
       The list of frames is delimiting points either by ',' or ';',
       and the ranges by ':'
       The output is an array [..]. Each term is complicated ...
    @examples of input / output
       '5' => [(5, [(0, -1, 1)])]      # either a 2D node value or a vertical 1D profile covering all planes above the 2D node
       '(5)' => [(5, [(0, -1, 1)])]
       '9@2,58#3,18,4#1,4#1,76@0.e-3,8@0.5'
          => [(9, ([2.0, -1],)), (58, [3]), (18, [(0, -1, 1)]), (4, [1]), (4, [1]), (76, ([0.0, -1],)), (8, ([0.5, -1],))]
       '(4,5,6),[]#900'
          => [((4.0, 5.0, 6.0), [(0, -1, 1)]), ([], [900])]
       '(3;4,5)#[]'
          => [(3, [(0, -1, 1)]), (4, [(0, -1, 1)]), (5, [(0, -1, 1)])
       '(4;5,6)#[5:4;6;0:-1:2]'
          => [((4.0, 5.0, 6.0), [(5, 4, 1), 6, (0, -1, 2)])]
       '9@2,58#3,18,(4;7)#1,4#1,(76;4)@1.e-1,[8@(0.5;0.7)'
          => [(9, ([2.0, -1],)), (58, [3]), (18, [(0, -1, 1)]), ((4.0, 7.0), [1]), (4, [1]), ((76.0, 4.0), ([0.1, -1],)), (8, ([0.5, -1],[0.7, -1]))]
       '(4;5,6)#[5;6]'
          => [((4.0, 5.0, 6.0), [5, 6])]
       '(4;5,6)@(-5#3;6)'
          => [((4.0, 5.0, 6.0), ([-5.0, 3], [6.0, -1]))]
    """
    points = []
    # ~~ Special deal of all points ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Empty string means "everywhere": all planes if size is known.
    if s == "":
        if size >= 0:
            return [([], range(size))]
        else:
            return [([], [0])]
    # ~~ Identify individual points ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Each regex match 'node' carries: node[0] the full text, node[1] the
    # point/node part, node[2] an optional '#plane' or '@depth' suffix.
    for node in re.findall(complx, s + ","):
        # ~~> Is it a point (x,y) or a node n
        xy = node[1]
        proci = re.match(nod2d, xy)
        procs = re.match(spl2d, xy)
        procr = re.match(pnt2d, xy)
        proce = re.match(empty, xy)
        if proci:
            # Bare integer: a 2D mesh node number.
            pointA = int(proci.group("n"))
        elif procr:
            # Parenthesised coordinates: only the (x,y) pair is allowed.
            xy = procr.group("n").replace(",", ";").split(";")
            if len(xy) == 2:
                pointA = (float(xy[0]), float(xy[1]))
            # if len(xy) == 3: pointA = ( float(xy[0]),float(xy[1]),float(xy[2]) )
            if len(xy) != 2:
                print "... we are not allowing anything anything but a pair (x,y). You have: <" + node[
                    0
                ] + '>from the string "' + s + '"'
                print "    +> if you need (x,y,z) you should use a depth above plan 0: (x,y)@z#0"
                sys.exit(1)
        elif proce:
            pointA = []
        elif procs:
            # Braced values '{..}': re-sampling steps, kept as-is.
            xy = procs.group("n").replace(",", ";").split(";")
            if len(xy) == 2:
                pointA = (int(xy[0]), int(xy[1]))
            elif len(xy) == 3:
                pointA = (int(xy[0]), int(xy[1]), int(xy[2]))
            else:
                print "... could not parse the number of re-sampling steps. You have: <" + node[
                    0
                ] + '>from the string "' + s + '"'
                sys.exit(1)
            points.append(pointA)
            continue
        else:
            print "... could not parse the point <" + node[0] + '> from the string "' + s + '"'
            sys.exit(1)
        # ~~> Is it a depth d or a plane p or both
        pointB = []
        if node[2] != "":
            tp = node[2][0]
            zp = node[2][1:]
            if tp == "#": # this is a plane or a series of planes
                proci = re.match(rng2d, zp)
                if proci:
                    zp = "[" + zp + "]"
                pointB = parseArrayFrame(zp, size)
            if tp == "@": # this is a depth or a series of depth, referenced by planes
                procr = re.match(numbr, zp)
                if procr:
                    zp = "(" + zp + ")"
                procp = re.match(pnt2d, zp)
                if procp:
                    pointB = []
                    for p in procp.group("n").replace(",", ";").split(";"):
                        if "#" in p:
                            # 'depth#plane': depth measured from that plane.
                            a, b = p.split("#")
                            pointB.append([float(a), int(b)])
                        else:
                            pointB.append([float(p), -1]) # from the surface plane by default
                    pointB = tuple(pointB)
        else:
            # No suffix: default to all planes (or the symbolic range).
            if size >= 0:
                pointB = range(size)
            else:
                pointB = [0, -1, 1]
        # ~~> Final packing
        points.append((pointA, pointB))
    return points
def parseArrayGrid(s, size):
    """
    @brief Decoding structure all in order
       The grid is defined by two points and an array of re-sampling steps
       The input 'size' is either:
        - in 2D a pair of 2D points ( bottom-left, top-right )
        - in 3D a pair of 2D points and a range of planes
       The input 'size' is a pair of complex points (2D or 3D) and
          a set of re-sampling numbers
       The output is an array [..]. Each term is complicated ...
    """
    grids = []
    minz = 0.0
    maxz = 0.0
    minp = 0
    maxp = 0
    # Unpack the bounding box from 'size'; 3 entries include a plane
    # range, 2 entries are either 2D or 3D corner points.
    if len(size) == 3:
        (minx, miny), (maxx, maxy), (minp, maxp) = size
    elif len(size) == 2:
        if len(size[0]) == 2:
            (minx, miny), (maxx, maxy) = size
        else:
            (minx, miny, minz), (maxx, maxy, maxz) = size
    nz = maxp - minp
    # ~~ Special deal of all points ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # '[]' asks for an automatic grid covering the bounding box, with a
    # spacing derived from an arbitrary target of 20 points.
    if s == "[]":
        dist = (maxy - miny + maxx - minx) / 20.0 # arbitrary value of 20 points
        dist = min(dist, maxx - minx)
        dist = min(dist, maxy - miny)
        xo = (maxx + minx) / 2.0
        yo = (maxy + miny) / 2.0
        nx = max(2, int((maxx - minx) / dist))
        ny = max(2, int((maxy - miny) / dist))
        dist = min(dist, (maxx - minx) / (1.0 * nx))
        dist = min(dist, (maxy - miny) / (1.0 * ny))
        if len(size) == 2 and len(size[0]) == 2:
            return [
                [(xo - nx * dist / 2.0, yo - ny * dist / 2.0), (xo + nx * dist / 2.0, yo + ny * dist / 2.0), [nx, ny]]
            ]
        elif len(size) == 2 and len(size[0]) == 3: # TODO: make sure you can suport this option
            zo = (maxz + minz) / 2.0
            nz = 10
            dizt = (maxx - minx) / (1.0 * nz) # arbitrary value of 10 points
            return [
                [
                    (xo - nx * dist / 2.0, yo - ny * dist / 2.0, zo - nz * dizt / 2.0),
                    (xo + nx * dist / 2.0, yo + ny * dist / 2.0, zo + nz * dizt / 2.0),
                    [nx, ny, nz],
                ]
            ]
        else:
            return [
                [
                    (xo - nx * dist / 2.0, yo - ny * dist / 2.0),
                    (xo + nx * dist / 2.0, yo + ny * dist / 2.0),
                    range(minp, maxp),
                    [nx, ny, nz],
                ]
            ]
    # ~~ Decoding of user entrance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Otherwise the string must decode to triplets: two corner points
    # followed by the {..} re-sampling steps.
    s2g = parseArrayPoint(s)
    if gcd(len(s2g), 3) != 3:
        print '... could not parse your grid . "' + s + '". It should be triplets made of 2 points (;)(;) and an array of resampling steps {;}.'
        sys.exit(1)
    for i in range(len(s2g) / 3):
        pta, ptb, np = s2g[3 * i : 3 * (i + 1)]
        if len(np) == 2:
            grids.append([pta[0], ptb[0], np])
        elif len(np) == 3: # TODO: support a range of fixed depths as well as fixed planes
            zp = "[" + str(pta[1][0]) + ":" + str(ptb[1][0]) + "]"
            grids.append([pta[0], ptb[0], parseArrayFrame(zp, nz), np])
    return grids
def parseArrayPaires(s):
    """
    Decode a list of nodes / (x,y) points into a flat list.

    Uses the same grammar as parseArrayPoint but ignores any
    '#plane' / '@depth' suffix (node[2] is never read here).
    """
    paires = []
    # ~~ Special deal of all paires ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    if s == "":
        return []
    # ~~ Identify individual paires ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    for node in re.findall(complx, s + ","):
        # ~~> Is it a point (x,y) or a node n
        xy = node[1]
        proci = re.match(nod2d, xy)
        procr = re.match(pnt2d, xy)
        proce = re.match(empty, xy)
        if proci:
            pointA = int(proci.group("n"))
        elif procr:
            xy = procr.group("n").replace(",", ";").split(";")
            if len(xy) == 2:
                pointA = (float(xy[0]), float(xy[1]))
            # if len(xy) == 3: pointA = ( float(xy[0]),float(xy[1]),float(xy[2]) )
            if len(xy) != 2:
                print "... we are not allowing anything anything but a pair (x,y). You have: <" + node[
                    0
                ] + '>from the string "' + s + '"'
                print "    +> if you need (x,y,z) you should use a depth above plan 0: (x,y)@z#0"
                sys.exit(1)
        elif proce:
            pointA = []
        else:
            print "... could not parse the point <" + node[0] + '> from the string "' + s + '"'
            sys.exit(1)
        # ~~> Final packing
        paires.append(pointA)
    return paires
# _____ ________________________________________________
# ____/ MAIN CALL /_______________________________________________/
#
__author__ = "Sebastien E. Bourban"
__date__ = "$15-Nov-2011 08:51:29$"
# Manual regression harness: runs the parsers over sample strings and
# prints the decoded structures for visual inspection.
if __name__ == "__main__":
    # ~~ space ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    if True: # S.E.Bourban completed testing on August 25, 2013
        print "\n\n"
        strings = [
            "5",
            "9@2,58#3,18,4#1,4#1,76@0.e-3,8@0.5",
            "(4,5,6),[]#900",
            "(3;4,5)#[]",
            "(4;5,6)#[5:4;6;0:-1:2]",
            "(5)",
            "9@2,58#3,18,(4;7)#1,4#1,(76;4)@1.e-1,[8@(0.5;0.7)",
            "(4;5,6)#[5;6]",
            "(4;5,6)@(-5#3;6)",
        ]
        for s in strings:
            print s, " => ", parseArrayPoint(s)
    # ~~ time ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    if True: # S.E.Bourban completed testing on August 25, 2013
        print "\n\n"
        strings = ["5", "[4]", "[5,6,7,0]", "(5.6)", "(76);(4),[(3.3);4:14:2;0:6;8]"]
        for s in strings:
            print s, " => ", parseArrayFrame(s)
    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ Jenkins' success message ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    print "\n\nMy work is done\n\n"
    sys.exit(0)
"""
This will parse pairs of values from a string and
convert it into a numpy array.
- Paires are surounded by square bracketes.
- Paires are joined up with ';'
@examples: [10;1][0;1]
sqr_brack = re.compile(r'[,;]?\s*?\[(?P<brack>[\d;.\s+-dDeEpz]*?)\]',re.I)
# (\+|\-)? to capture the sign if there ... different from the parserFORTRAN version
var_doublep = re.compile(r'(?P<number>(\+|\-)?(|[^a-zA-Z(,])(?:(\d+(|\.)\d*[dDeE](\+|\-)?\d+|\d+\.\d+)(\b|[^a-zA-Z,)])))',re.I)
var_integer = re.compile(r'(?P<number>(\+|\-)?(|[^a-zA-Z(,])(?:(\d+)(\b|[^a-zA-Z,)])))',re.I)
var_dist = re.compile(r'd(?P<number>(\+|\-)?(|[^a-zA-Z(,])(?:(\d+(|\.)\d*[dDeE](\+|\-)?\d+|\d+\.\d+)(\b|[^a-zA-Z,)])))',re.I)
var_cote = re.compile(r'z(?P<number>(\+|\-)?(|[^a-zA-Z(,])(?:(\d+(|\.)\d*[dDeE](\+|\-)?\d+|\d+\.\d+)(\b|[^a-zA-Z,)])))',re.I)
var_plan = re.compile(r'p(?P<number>(\+|\-)?(|[^a-zA-Z(,])(?:(\d+)(\b|[^a-zA-Z,)])))',re.I)
def parseArrayPaires(s):
z = [] # /!\ only pairs of points allowed for now
for brack in re.findall(sqr_brack,s):
p = []
for v in brack.split(';'): # /!\ this also work for one value
proci = re.match(var_integer,v)
procd = re.match(var_doublep,v)
if procd:
p.append(float(procd.group('number')))
elif proci:
p.append(int(proci.group('number')))
else:
if re.match(var_dist,v) or re.match(var_cote,v) or re.match(var_plan,v): p.append(v)
else:
print '... could not parse the array: ' + s
sys.exit(1)
z.append(p)
return z
"""
| gpl-3.0 |
oscarolar/odoo | addons/base_gengo/__openerp__.py | 68 | 2119 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Automated Translations through Gengo API',
'version': '0.1',
'category': 'Tools',
'description': """
Automated Translations through Gengo API
========================================
This module will install passive scheduler job for automated translations
using the Gengo API. To activate it, you must
1) Configure your Gengo authentication parameters under `Settings > Companies > Gengo Parameters`
2) Launch the wizard under `Settings > Application Terms > Gengo: Manual Request of Translation` and follow the wizard.
This wizard will activate the CRON job and the Scheduler and will start the automatic translation via Gengo Services for all the terms where you requested it.
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['base'],
'data': [
'gengo_sync_schedular_data.xml',
'ir_translation.xml',
'res_company_view.xml',
'wizard/base_gengo_translations_view.xml',
],
'demo': [],
'test': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nvoron23/scipy | scipy/_lib/_tmpdirs.py | 126 | 2438 | ''' Contexts for *with* statement providing temporary directories
'''
from __future__ import division, print_function, absolute_import
import os
from contextlib import contextmanager
from shutil import rmtree
from tempfile import mkdtemp
@contextmanager
def tempdir():
    """Create and return a temporary directory, usable as a context manager.

    Behaves like ``mkdtemp`` but, on leaving the ``with`` block, the
    directory and everything contained in it are removed.

    Examples
    --------
    >>> import os
    >>> with tempdir() as tmpdir:
    ...     fname = os.path.join(tmpdir, 'example_file.txt')
    ...     with open(fname, 'wt') as fobj:
    ...         _ = fobj.write('a string\\n')
    >>> os.path.exists(tmpdir)
    False
    """
    created = mkdtemp()
    yield created
    rmtree(created)
@contextmanager
def in_tempdir():
    ''' Create, return, and change directory to a temporary directory

    On leaving the ``with`` block the original working directory is
    restored and the temporary directory deleted.

    Examples
    --------
    >>> import os
    >>> my_cwd = os.getcwd()
    >>> with in_tempdir() as tmpdir:
    ...     _ = open('test.txt', 'wt').write('some text')
    ...     assert os.path.isfile('test.txt')
    ...     assert os.path.isfile(os.path.join(tmpdir, 'test.txt'))
    >>> os.path.exists(tmpdir)
    False
    >>> os.getcwd() == my_cwd
    True
    '''
    original_cwd = os.getcwd()
    scratch = mkdtemp()
    os.chdir(scratch)
    yield scratch
    os.chdir(original_cwd)
    rmtree(scratch)
@contextmanager
def in_dir(dir=None):
    """ Change directory to given directory for duration of ``with`` block

    When *dir* is None nothing is changed and the current working
    directory is yielded -- a debugging-friendly stand-in for
    `in_tempdir` that leaves output files in place for inspection:

    >>> with in_tempdir() as tmpdir:
    ...     # do something complicated which might break
    ...     pass

    becomes, while debugging:

    >>> with in_dir() as tmpdir:  # Use working directory by default
    ...     # do something complicated which might break
    ...     pass

    so the temporary outputs survive for inspection; switch back to
    ``in_tempdir`` once fixed.
    """
    starting_dir = os.getcwd()
    if dir is None:
        yield starting_dir
    else:
        os.chdir(dir)
        yield dir
        os.chdir(starting_dir)
anujbhan/airavata | airavata-api/airavata-client-sdks/airavata-python-sdk/src/main/resources/samples/getProjects.py | 6 | 2076 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys, ConfigParser
sys.path.append('../lib')
from apache.airavata.api import Airavata
from apache.airavata.api.ttypes import *
from apache.airavata.model.workspace.ttypes import *
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
# NOTE(review): this script is Python 2 only (`print` statements and the
# `except Thrift.TException, tx:` syntax below); it will not run on Python 3.
try:
    # Read Airavata Client properties
    airavataConfig = ConfigParser.RawConfigParser()
    airavataConfig.read('../conf/airavata-client.properties')
    # Create a socket to the Airavata Server
    # NOTE(review): ConfigParser.get() returns the port as a str —
    # presumably TSocket accepts that; confirm against the thrift version used.
    transport = TSocket.TSocket(airavataConfig.get('AiravataServer', 'host'), airavataConfig.get('AiravataServer', 'port'))
    # Use Buffered Protocol to speedup over raw sockets
    transport = TTransport.TBufferedTransport(transport)
    # Airavata currently uses Binary Protocol
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    # Create a Airavata client to use the protocol encoder
    airavataClient = Airavata.Client(protocol)
    # Connect to Airavata Server
    transport.open()
    # Fetch all projects for the hard-coded gateway "sdsc" and user "smarru".
    projectLists = airavataClient.getAllUserProjects("sdsc", "smarru");
    print projectLists
    # Close Connection to Airavata Server
    transport.close()
except Thrift.TException, tx:
    print '%s' % (tx.message)
| apache-2.0 |
yfried/ansible | lib/ansible/modules/network/system/net_system.py | 104 | 3068 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: net_system
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage the system attributes on network devices
description:
- This module provides declarative management of node system attributes
on network devices. It provides an option to configure host system
parameters or remove those parameters from the device active
configuration.
options:
hostname:
description:
- Configure the device hostname parameter. This option takes an ASCII string value.
domain_name:
description:
- Configure the IP domain name
on the remote device to the provided value. Value
should be in the dotted name form and will be
appended to the C(hostname) to create a fully-qualified
domain name.
domain_search:
description:
- Provides the list of domain suffixes to
append to the hostname for the purpose of doing name resolution.
This argument accepts a list of names and will be reconciled
with the current active configuration on the running node.
lookup_source:
description:
- Provides one or more source
interfaces to use for performing DNS lookups. The interface
provided in C(lookup_source) must be a valid interface configured
on the device.
name_servers:
description:
- List of DNS name servers by IP address to use to perform name resolution
lookups. This argument accepts either a list of DNS servers See
examples.
state:
description:
- State of the configuration
values in the device's current active configuration. When set
to I(present), the values should be configured in the device active
configuration and when set to I(absent) the values should not be
in the device active configuration
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure hostname and domain name
net_system:
hostname: ios01
domain_name: test.example.com
    domain_search:
- ansible.com
- redhat.com
- cisco.com
- name: remove configuration
net_system:
state: absent
- name: configure DNS lookup sources
net_system:
lookup_source: MgmtEth0/0/CPU0/0
- name: configure name servers
net_system:
name_servers:
- 8.8.8.8
- 8.8.4.4
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- hostname ios01
- ip domain name test.example.com
"""
| gpl-3.0 |
gordon-elliott/glod | src/glod/in_out/organisation.py | 1 | 2604 | __copyright__ = 'Copyright(c) Gordon Elliott 2017'
"""
"""
import logging
from datetime import datetime
from glod.db.person import Person
from glod.db.organisation import Organisation, OrganisationCategory, OrganisationStatus
from glod.model.communication_permission import CommunicationPermission
from glod.db.organisation_address import OrganisationAddress
LOG = logging.getLogger(__file__)
# Date the initial GDPR consent survey was issued; recorded on every
# CommunicationPermission created during the legacy import.
INITIAL_GDPR_SURVEY = datetime(2018, 10, 30)
# Spellings of "true" accepted in the legacy spreadsheet's boolean columns.
TRUE_STRINGS = ("true", "True", "TRUE", "yes", "Yes", "YES", "1")
# Value of the legacy `main_contact` column marking the household's primary contact.
IS_PRIMARY = 'primary'
def _reorganise_parishioner(parishioner, address_map, household_map):
    """Convert one legacy parishioner record into new model entities.

    Creates (at most once per household_ref_no) an Organisation plus its
    OrganisationAddress link, and always a Person plus their
    CommunicationPermission.

    :param parishioner: legacy record exposing status, surname, household_ref_no,
        contact fields and consent flags (project model object).
    :param address_map: mapping of household_ref_no -> Address; the household's
        address must be present — a missing key raises KeyError.
    :param household_map: mutable cache of household_ref_no -> Organisation,
        shared across calls so each household is created only once.
    :returns: list of newly created entities to be added to the session.
    """
    new_entities = []
    parishioner_status = parishioner.status.lower()
    if parishioner_status == 'foreign list':
        # "Foreign list" parishioners are active but live outside the parish.
        organisation_status = OrganisationStatus.Active
        organisation_category = OrganisationCategory.NonLocalHousehold
    else:
        organisation_status = OrganisationStatus.Active if parishioner_status == 'active' else OrganisationStatus.Inactive
        organisation_category = OrganisationCategory.Household
    household_ref_no = parishioner.household_ref_no
    if household_ref_no in household_map:
        # Household already created by an earlier member of the same family.
        household = household_map[household_ref_no]
    else:
        household = Organisation(
            parishioner.surname,
            organisation_category,
            organisation_status,
            household_ref_no,
        )
        # NOTE(review): raises KeyError if the household has no address row —
        # presumably the source data guarantees one; confirm.
        address = address_map[household_ref_no]
        oa_link = OrganisationAddress(household, address)
        household_map[household_ref_no] = household
        new_entities = [household, oa_link]
    person = Person(
        household,
        parishioner.surname,
        parishioner.first_name,
        title=parishioner.title,
        mobile=parishioner.mobile,
        other_phone=parishioner.other,
        email=parishioner.email,
        parishioner_reference_no=parishioner.reference_no,
    )
    # GDPR consent flags: legacy columns hold strings, so each is tested
    # against the accepted TRUE_STRINGS spellings.
    communication_preferences = CommunicationPermission(
        person,
        parishioner.main_contact == IS_PRIMARY,
        INITIAL_GDPR_SURVEY,
        parishioner.by_email in TRUE_STRINGS,
        parishioner.by_phone in TRUE_STRINGS,
        parishioner.by_post in TRUE_STRINGS,
        parishioner.news in TRUE_STRINGS,
        parishioner.finance in TRUE_STRINGS,
    )
    new_entities += [person, communication_preferences]
    return new_entities
def reorganise_parishioners(session, parishioners, address_map):
    """Convert every legacy parishioner record and add the resulting
    entities to the database session.

    A single household cache is threaded through all conversions so each
    household Organisation is created exactly once.
    """
    households_by_ref = {}
    for record in parishioners:
        session.add_all(
            _reorganise_parishioner(record, address_map, households_by_ref))
| mit |
larroy/mxnet | python/mxnet/kvstore/base.py | 3 | 15341 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
""" Key value store interface of MXNet for parameter synchronization."""
from array import array
import ctypes
import warnings
from ..ndarray import NDArray
from ..base import _LIB, c_str_array, c_handle_array, c_array, c_array_buf, c_str
from ..base import check_call, string_types
from ..base import KVStoreHandle
from ..profiler import set_kvstore_handle
__all__ = ['create', 'KVStoreBase']
def _ctype_key_value(keys, vals):
    """Return ctypes arrays for the key-value args, plus whether string keys are used.

    Accepts either a single key (int or str) with one NDArray or a list of
    NDArrays, or parallel lists of keys and values (handled recursively).
    For internal use only.
    """
    if isinstance(keys, (tuple, list)):
        assert(len(keys) == len(vals))
        c_keys = []
        c_vals = []
        use_str_keys = None
        for key, val in zip(keys, vals):
            # Recurse per key; each call yields ctypes arrays which are
            # concatenated into plain lists, then re-wrapped below.
            c_key_i, c_val_i, str_keys_i = _ctype_key_value(key, val)
            c_keys += c_key_i
            c_vals += c_val_i
            # All keys must be of the same kind (all str or all int).
            use_str_keys = str_keys_i if use_str_keys is None else use_str_keys
            assert(use_str_keys == str_keys_i), "inconsistent types of keys detected."
        c_keys_arr = c_array(ctypes.c_char_p, c_keys) if use_str_keys \
                     else c_array(ctypes.c_int, c_keys)
        c_vals_arr = c_array(ctypes.c_void_p, c_vals)
        return (c_keys_arr, c_vals_arr, use_str_keys)
    assert(isinstance(keys, (int,) + string_types)), \
           "unexpected type for keys: " + str(type(keys))
    use_str_keys = isinstance(keys, string_types)
    if isinstance(vals, NDArray):
        # Single key, single value.
        c_keys = c_str_array([keys]) if use_str_keys \
                 else c_array_buf(ctypes.c_int, array('i', [keys]))
        return (c_keys, c_handle_array([vals]), use_str_keys)
    else:
        # Single key shared by a list of values: the key is repeated so the
        # key array and handle array stay the same length.
        for value in vals:
            assert(isinstance(value, NDArray))
        c_keys = c_str_array([keys] * len(vals)) if use_str_keys \
                 else c_array_buf(ctypes.c_int, array('i', [keys] * len(vals)))
        return (c_keys, c_handle_array(vals), use_str_keys)
def _ctype_dict(param_dict):
    """Convert a dict into two parallel ctypes char* arrays: one of the keys
    and one of the values rendered as strings."""
    assert isinstance(param_dict, dict), \
        "unexpected type for param_dict: " + str(type(param_dict))
    encoded_keys = [c_str(key) for key in param_dict.keys()]
    encoded_vals = [c_str(str(value)) for value in param_dict.values()]
    return (c_array(ctypes.c_char_p, encoded_keys),
            c_array(ctypes.c_char_p, encoded_vals))
class KVStoreBase(object):
    """An abstract key-value store interface for data parallel training.

    Concrete backends implement broadcast/pushpull and the worker-topology
    properties; subclasses are made discoverable by ``create`` via the
    ``register`` decorator.
    """

    def broadcast(self, key, value, out, priority=0):
        """ Broadcast the `value` NDArray at rank 0 to all ranks,
        and store the result in `out`

        Parameters
        ----------
        key : str or int
            The key.
        value : NDArray
            The value corresponding to the key to broadcast
        out : NDArray, or list of NDArray
            Values corresponding to the key to store the result
        priority : int, optional
            The priority of the operation.
            Higher priority operations are likely to be executed before other actions.
        """
        raise NotImplementedError()

    def pushpull(self, key, value, out=None, priority=0):
        """ Performs push and pull a single value or a sequence of values from the store.

        This function is coalesced form of push and pull operations.
        `value` is pushed to the kvstore server for summation with the specified keys,
        and the results are pulled from the server to `out`. If `out` is not specified
        the pulled values are written to `value`.

        Note that for allreduce based approaches such as horovod, there is no notion of
        server or store. This function performs allreduce.

        Parameters
        ----------
        key : str or int
            The key.
        value : NDArray, or list of NDArray
            Values corresponding to the keys.
        out: NDArray, or list of NDArray
            Values corresponding to the key.
        priority : int, optional
            The priority of the operation.
            Higher priority operations are likely to be executed before other actions.
        """
        raise NotImplementedError()

    def set_optimizer(self, optimizer):
        """ Registers an optimizer with the kvstore.

        When using a single machine, this function updates the local optimizer.
        If using multiple machines and this operation is invoked from a worker node,
        it will serialized the optimizer with pickle and send it to all servers.
        The function returns after all servers have been updated.

        Parameters
        ----------
        optimizer : KVStoreBase
            The new optimizer for the store
        """
        raise NotImplementedError()

    # Well-known capability name that callers pass to is_capable().
    OPTIMIZER = 'optimizer'

    # NOTE(review): declared as an instance method here, but TestStore
    # overrides it as a @staticmethod — confirm which form the interface
    # is meant to mandate.
    def is_capable(self, capability):
        """Queries if the KVStore type supports certain capability, such as optimizer algorithm,
        gradient compression, sparsity, etc.

        Parameters
        ----------
        capability: str
            The capability to query

        Returns
        -------
        result : bool
            Whether the capability is supported or not.
        """
        raise NotImplementedError()

    def save_optimizer_states(self, fname, dump_optimizer=False):
        """Saves the optimizer (updater) state to a file. This is often used when checkpointing
        the model during training.

        Parameters
        ----------
        fname : str
            Path to the output states file.
        dump_optimizer : bool, default False
            Whether to also save the optimizer itself. This would also save optimizer
            information such as learning rate and weight decay schedules.
        """
        raise NotImplementedError()

    def load_optimizer_states(self, fname):
        """Loads the optimizer (updater) state from the file.

        Parameters
        ----------
        fname : str
            Path to input states file.
        """
        raise NotImplementedError()

    @property
    def type(self):
        """ Returns the type of this kvstore backend.

        Returns
        -------
        type : str
            the string type
        """
        raise NotImplementedError()

    @property
    def rank(self):
        """ Returns the rank of this worker node.

        Returns
        -------
        rank : int
            The rank of this node, which is in range [0, num_workers())
        """
        raise NotImplementedError()

    @property
    def num_workers(self):
        """Returns the number of worker nodes.

        Returns
        -------
        size :int
            The number of worker nodes.
        """
        raise NotImplementedError()

    # Maps lowercase class name -> registered KVStore subclass; consulted by
    # create() before falling back to the native C++ implementations.
    kv_registry = {}

    @staticmethod
    def register(klass):
        """Registers a new KVStore.

        Once a kvstore is registered, we can create an instance of this
        kvstore with `create` later.

        Examples
        --------
        >>> @mx.kvstore.KVStoreBase.register
        ... class MyKVStore(mx.kvstore.KVStoreBase):
        ...     pass
        >>> kv = mx.kv.create('MyKVStore')
        >>> print(type(kv))
        <class '__main__.MyKVStore'>
        """
        assert(isinstance(klass, type))
        name = klass.__name__.lower()
        # Re-registering under an existing name is allowed but warned about.
        if name in KVStoreBase.kv_registry:
            warnings.warn('WARNING: New kvstore %s.%s is overriding '
                          'existing kvstore %s.%s' %
                          (klass.__module__, klass.__name__,
                           KVStoreBase.kv_registry[name].__module__,
                           KVStoreBase.kv_registry[name].__name__))
        KVStoreBase.kv_registry[name] = klass
        return klass
@KVStoreBase.register
class TestStore(KVStoreBase):
    """An in-process, single-worker key-value store used for testing.

    There is no server: `broadcast` copies locally and `pushpull` reduces
    locally. Optimizer-related APIs are unsupported (`is_capable` reports
    False for the optimizer capability).
    """

    def broadcast(self, key, value, out, priority=0):
        """Copy `value` into every destination array in `out`.

        Parameters
        ----------
        key : str or int
            The key (unused by the test store).
        value : NDArray
            The value corresponding to the key to broadcast.
        out : NDArray, or list of NDArray
            Destination array(s); each is overwritten with `value`.
        priority : int, optional
            Ignored; kept for interface compatibility.
        """
        out = out if isinstance(out, list) else [out]
        for o in out:
            o[:] = value

    def pushpull(self, key, value, out=None, priority=0):
        """Reduce `value` by summation and write the result to `out`.

        If `value` is a single NDArray there is nothing to reduce: it is
        copied into `out` when given, otherwise left unchanged. If `value`
        is a list, the element-wise sum (computed on the first array's
        context) is written back into every element of `value` (when `out`
        is None) or into every array of `out`.

        Parameters
        ----------
        key : str or int
            The key (unused by the test store).
        value : NDArray, or list of NDArray
            Values corresponding to the key.
        out : NDArray, or list of NDArray, optional
            Destination array(s).
        priority : int, optional
            Ignored; kept for interface compatibility.
        """
        if isinstance(value, NDArray):
            # Single value: no reduction needed, just copy if requested.
            if out is not None:
                out = out if isinstance(out, list) else [out]
                for o in out:
                    o[:] = value
        else:
            # Fix: only look up the reduction context in the list branch.
            # Previously `ctx = value[0].context` ran unconditionally, which
            # sliced a single-NDArray argument just to read a context that
            # the single-value branch never uses.
            ctx = value[0].context
            reduced_value = sum(val.as_in_context(ctx) for val in value)
            if out is None:
                for v in value:
                    v[:] = reduced_value
            else:
                out = out if isinstance(out, list) else [out]
                for o in out:
                    o[:] = reduced_value

    @staticmethod
    def is_capable(capability):
        """Queries if the KVStore type supports certain capability, such as
        the optimizer algorithm.

        Parameters
        ----------
        capability: str
            The capability to query (see KVStoreBase.OPTIMIZER).

        Returns
        -------
        result : bool
            Whether the capability is supported or not.

        Raises
        ------
        ValueError
            If `capability` is not a recognized capability name.
        """
        if capability.lower() == KVStoreBase.OPTIMIZER:
            return False
        else:
            raise ValueError('Unknown capability: {}'.format(capability))

    @property
    def type(self):
        """str: the backend type name; always 'teststore'."""
        return 'teststore'

    @property
    def rank(self):
        """int: rank of this worker; always 0 (single worker)."""
        return 0

    @property
    def num_workers(self):
        """int: number of worker nodes; always 1."""
        return 1

    def set_optimizer(self, optimizer):
        """Unsupported by the test store.

        Raises
        ------
        NotImplementedError
            Always; the test store has no server-side optimizer.
        """
        raise NotImplementedError()

    def save_optimizer_states(self, fname, dump_optimizer=False):
        """Unsupported by the test store.

        Raises
        ------
        NotImplementedError
            Always; the test store holds no optimizer state.
        """
        raise NotImplementedError()

    def load_optimizer_states(self, fname):
        """Unsupported by the test store.

        Raises
        ------
        NotImplementedError
            Always; the test store holds no optimizer state.
        """
        raise NotImplementedError()
def create(name='local'):
    """Creates a new KVStore of the requested type.

    Single-machine types: ``local`` (aggregate on CPU) and ``device``
    (aggregate on GPU, using peer-to-peer communication where possible).
    Distributed types: ``dist_sync`` / ``dist_device_sync`` (synchronous,
    per-machine batch size) and ``dist_async`` (asynchronous updates with
    no ordering guarantee). Any class registered through
    ``KVStoreBase.register`` is matched first, by lowercase class name;
    otherwise the name is handed to the native C++ kvstore factory.

    Parameters
    ----------
    name : {'local', 'device', 'nccl', 'dist_sync', 'dist_device_sync', 'dist_async', 'horovod'}
        The type of KVStore.

    Returns
    -------
    kv : KVStoreBase
        The created KVStore.

    Raises
    ------
    TypeError
        If `name` is not a string.
    """
    if not isinstance(name, string_types):
        raise TypeError('name must be a string')
    name = name.lower()
    # Registered Python implementations take precedence over native ones.
    registered = KVStoreBase.kv_registry.get(name)
    if registered is not None:
        return registered()
    # Fall back to the native (C++) kvstore implementation.
    handle = KVStoreHandle()
    check_call(_LIB.MXKVStoreCreate(c_str(name), ctypes.byref(handle)))
    from .kvstore import KVStore
    kv = KVStore(handle)
    set_kvstore_handle(kv.handle)
    return kv
| apache-2.0 |
hanlind/nova | nova/network/security_group/security_group_base.py | 2 | 9323 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import encodeutils
from six.moves import urllib
from nova import exception
from nova.i18n import _
from nova.objects import security_group as security_group_obj
from nova import utils
class SecurityGroupBase(object):
    """Abstract base for security-group drivers.

    Implements shared validation and rule-building logic; CRUD operations
    raise NotImplementedError and must be provided by concrete drivers.
    The ``raise_*`` static hooks let drivers map failures onto their own
    exception types.
    """

    def parse_cidr(self, cidr):
        """URL-unquote, decode and validate a CIDR string.

        Returns the normalized CIDR, or '0.0.0.0/0' (match everything)
        when `cidr` is empty/None. Invalid input is delegated to
        raise_invalid_cidr, which raises.
        """
        if cidr:
            try:
                cidr = encodeutils.safe_decode(urllib.parse.unquote(cidr))
            except Exception as e:
                self.raise_invalid_cidr(cidr, e)
            if not utils.is_valid_cidr(cidr):
                self.raise_invalid_cidr(cidr)
            return cidr
        else:
            return '0.0.0.0/0'

    @staticmethod
    def new_group_ingress_rule(grantee_group_id, protocol, from_port,
                               to_port):
        # Ingress rule granting access to members of another security group.
        return SecurityGroupBase._new_ingress_rule(
            protocol, from_port, to_port, group_id=grantee_group_id)

    @staticmethod
    def new_cidr_ingress_rule(grantee_cidr, protocol, from_port, to_port):
        # Ingress rule granting access to a CIDR range.
        return SecurityGroupBase._new_ingress_rule(
            protocol, from_port, to_port, cidr=grantee_cidr)

    @staticmethod
    def _new_ingress_rule(ip_protocol, from_port, to_port,
                          group_id=None, cidr=None):
        """Build and validate the values dict for one ingress rule.

        Exactly one of `group_id` / `cidr` is expected. Returns the values
        dict, or None when a CIDR rule omits the mandatory protocol/ports.
        Raises nova exceptions on invalid protocol or port ranges.
        """
        values = {}

        if group_id:
            values['group_id'] = group_id
            # Open everything if an explicit port range or type/code are not
            # specified, but only if a source group was specified.
            ip_proto_upper = ip_protocol.upper() if ip_protocol else ''
            if (ip_proto_upper == 'ICMP' and
                    from_port is None and to_port is None):
                from_port = -1
                to_port = -1
            elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None
                  and to_port is None):
                from_port = 1
                to_port = 65535
        elif cidr:
            values['cidr'] = cidr

        if ip_protocol and from_port is not None and to_port is not None:

            ip_protocol = str(ip_protocol)
            try:
                # Verify integer conversions
                from_port = int(from_port)
                to_port = int(to_port)
            except ValueError:
                if ip_protocol.upper() == 'ICMP':
                    raise exception.InvalidInput(reason=_("Type and"
                         " Code must be integers for ICMP protocol type"))
                else:
                    raise exception.InvalidInput(reason=_("To and From ports "
                          "must be integers"))

            if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
                raise exception.InvalidIpProtocol(protocol=ip_protocol)

            # Verify that from_port must always be less than
            # or equal to to_port
            if (ip_protocol.upper() in ['TCP', 'UDP'] and
                    (from_port > to_port)):
                raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="Former value cannot"
                                           " be greater than the later")

            # Verify valid TCP, UDP port ranges
            if (ip_protocol.upper() in ['TCP', 'UDP'] and
                    (from_port < 1 or to_port > 65535)):
                raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="Valid %s ports should"
                                           " be between 1-65535"
                                           % ip_protocol.upper())

            # Verify ICMP type and code
            if (ip_protocol.upper() == "ICMP" and
                (from_port < -1 or from_port > 255 or
                 to_port < -1 or to_port > 255)):
                raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="For ICMP, the"
                                           " type:code must be valid")

            values['protocol'] = ip_protocol
            values['from_port'] = from_port
            values['to_port'] = to_port

        else:
            # If cidr based filtering, protocol and ports are mandatory
            if cidr:
                return None

        return values

    def create_security_group_rule(self, context, security_group, new_rule):
        # Reject exact duplicates before delegating to the driver's add_rules.
        if self.rule_exists(security_group, new_rule):
            msg = (_('This rule already exists in group %s') %
                   new_rule['parent_group_id'])
            self.raise_group_already_exists(msg)
        return self.add_rules(context, new_rule['parent_group_id'],
                              security_group['name'],
                              [new_rule])[0]

    def rule_exists(self, security_group, new_rule):
        """Indicates whether the specified rule is already
        defined in the given security group.

        Returns the existing rule's id (or True if it has none) when a rule
        with identical group/cidr/ports/protocol exists, else False.
        """
        for rule in security_group['rules']:
            keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol')
            for key in keys:
                if rule.get(key) != new_rule.get(key):
                    break
            else:
                # All five fields matched.
                return rule.get('id') or True
        return False

    def validate_property(self, value, property, allowed):
        # No-op hook; drivers may validate group names/descriptions here.
        pass

    def ensure_default(self, context):
        # No-op hook; drivers may create the project's default group here.
        pass

    def trigger_rules_refresh(self, context, id):
        """Called when a rule is added to or removed from a security_group."""
        pass

    def trigger_members_refresh(self, context, group_ids):
        """Called when a security group gains a new or loses a member.

        Sends an update request to each compute node for each instance for
        which this is relevant.
        """
        pass

    def populate_security_groups(self, security_groups):
        """Build and return a SecurityGroupList.

        :param security_groups: list of requested security group names or uuids
        :type security_groups: list
        :returns: nova.objects.security_group.SecurityGroupList
        """
        if not security_groups:
            # Make sure it's an empty SecurityGroupList and not None
            return security_group_obj.SecurityGroupList()
        return security_group_obj.make_secgroup_list(security_groups)

    def create_security_group(self, context, name, description):
        raise NotImplementedError()

    def update_security_group(self, context, security_group,
                              name, description):
        raise NotImplementedError()

    def get(self, context, name=None, id=None, map_exception=False):
        raise NotImplementedError()

    def list(self, context, names=None, ids=None, project=None,
             search_opts=None):
        raise NotImplementedError()

    def destroy(self, context, security_group):
        raise NotImplementedError()

    def add_rules(self, context, id, name, vals):
        raise NotImplementedError()

    def remove_rules(self, context, security_group, rule_ids):
        raise NotImplementedError()

    def get_rule(self, context, id):
        raise NotImplementedError()

    def get_instance_security_groups(self, context, instance, detailed=False):
        raise NotImplementedError()

    def add_to_instance(self, context, instance, security_group_name):
        """Add security group to the instance.

        :param context: The request context.
        :param instance: nova.objects.instance.Instance object.
        :param security_group_name: security group name to add
        """
        raise NotImplementedError()

    def remove_from_instance(self, context, instance, security_group_name):
        """Remove the security group associated with the instance.

        :param context: The request context.
        :param instance: nova.objects.instance.Instance object.
        :param security_group_name: security group name to remove
        """
        raise NotImplementedError()

    # The raise_* hooks below centralize error signalling so API layers can
    # override them to raise their own exception types.

    @staticmethod
    def raise_invalid_property(msg):
        raise exception.Invalid(msg)

    @staticmethod
    def raise_group_already_exists(msg):
        raise exception.Invalid(msg)

    @staticmethod
    def raise_invalid_group(msg):
        raise exception.Invalid(msg)

    @staticmethod
    def raise_invalid_cidr(cidr, decoding_exception=None):
        raise exception.InvalidCidr(cidr=cidr)

    @staticmethod
    def raise_over_quota(msg):
        raise exception.SecurityGroupLimitExceeded(msg)

    @staticmethod
    def raise_not_found(msg):
        raise exception.SecurityGroupNotFound(msg)
| apache-2.0 |
zhouzhenghui/python-for-android | python-build/python-libs/gdata/build/lib/gdata/oauth/rsa.py | 225 | 4528 | #!/usr/bin/python
"""
requires tlslite - http://trevp.net/tlslite/
"""
import binascii
from gdata.tlslite.utils import keyfactory
from gdata.tlslite.utils import cryptomath
# XXX andy: ugly local import due to module name, oauth.oauth
import gdata.oauth as oauth
class OAuthSignatureMethod_RSA_SHA1(oauth.OAuthSignatureMethod):
  """OAuth RSA-SHA1 signature method.

  Subclasses must implement _fetch_public_cert() / _fetch_private_cert()
  to supply certificate material (see TestOAuthSignatureMethod_RSA_SHA1
  for a hard-coded example).
  """

  def get_name(self):
    # Value of the oauth_signature_method protocol parameter.
    return "RSA-SHA1"

  def _fetch_public_cert(self, oauth_request):
    # not implemented yet, ideas are:
    # (1) do a lookup in a table of trusted certs keyed off of consumer
    # (2) fetch via http using a url provided by the requester
    # (3) some sort of specific discovery code based on request
    #
    # either way should return a string representation of the certificate
    raise NotImplementedError

  def _fetch_private_cert(self, oauth_request):
    # not implemented yet, ideas are:
    # (1) do a lookup in a table of trusted certs keyed off of consumer
    #
    # either way should return a string representation of the certificate
    raise NotImplementedError

  def build_signature_base_string(self, oauth_request, consumer, token):
    """Return (key, raw): RSA-SHA1 uses no shared secret, so key is ''."""
    sig = (
      oauth.escape(oauth_request.get_normalized_http_method()),
      oauth.escape(oauth_request.get_normalized_http_url()),
      oauth.escape(oauth_request.get_normalized_parameters()),
    )
    key = ''
    raw = '&'.join(sig)
    return key, raw

  def build_signature(self, oauth_request, consumer, token):
    """Sign the signature base string with the consumer's private key."""
    key, base_string = self.build_signature_base_string(oauth_request,
                                                        consumer,
                                                        token)
    # Fetch the private key cert based on the request
    cert = self._fetch_private_cert(oauth_request)
    # Pull the private key from the certificate
    privatekey = keyfactory.parsePrivateKey(cert)
    # Sign using the key
    signed = privatekey.hashAndSign(base_string)
    # b2a_base64 appends a trailing newline; strip it off.
    return binascii.b2a_base64(signed)[:-1]

  def check_signature(self, oauth_request, consumer, token, signature):
    """Verify a base64-encoded signature against the request."""
    # Bug fix: this module imports binascii, not base64, so the original
    # base64.b64decode(...) call raised NameError at runtime.
    # binascii.a2b_base64 performs the equivalent decoding.
    decoded_sig = binascii.a2b_base64(signature)
    key, base_string = self.build_signature_base_string(oauth_request,
                                                        consumer,
                                                        token)
    # Fetch the public key cert based on the request
    cert = self._fetch_public_cert(oauth_request)
    # Pull the public key from the certificate
    publickey = keyfactory.parsePEMKey(cert, public=True)
    # Check the signature
    ok = publickey.hashAndVerify(decoded_sig, base_string)
    return ok
class TestOAuthSignatureMethod_RSA_SHA1(OAuthSignatureMethod_RSA_SHA1):
  """RSA-SHA1 signature method with hard-coded test key material.

  For testing only — the key pair below is public knowledge and must
  never be used to sign real requests.
  """

  def _fetch_public_cert(self, oauth_request):
    # Self-signed X.509 certificate for "Test Principal" containing the
    # public half of the test key pair.
    cert = """
-----BEGIN CERTIFICATE-----
MIIBpjCCAQ+gAwIBAgIBATANBgkqhkiG9w0BAQUFADAZMRcwFQYDVQQDDA5UZXN0
IFByaW5jaXBhbDAeFw03MDAxMDEwODAwMDBaFw0zODEyMzEwODAwMDBaMBkxFzAV
BgNVBAMMDlRlc3QgUHJpbmNpcGFsMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
gQC0YjCwIfYoprq/FQO6lb3asXrxLlJFuCvtinTF5p0GxvQGu5O3gYytUvtC2JlY
zypSRjVxwxrsuRcP3e641SdASwfrmzyvIgP08N4S0IFzEURkV1wp/IpH7kH41Etb
mUmrXSwfNZsnQRE5SYSOhh+LcK2wyQkdgcMv11l4KoBkcwIDAQABMA0GCSqGSIb3
DQEBBQUAA4GBAGZLPEuJ5SiJ2ryq+CmEGOXfvlTtEL2nuGtr9PewxkgnOjZpUy+d
4TvuXJbNQc8f4AMWL/tO9w0Fk80rWKp9ea8/df4qMq5qlFWlx6yOLQxumNOmECKb
WpkUQDIDJEoFUzKMVuJf4KO/FJ345+BNLGgbJ6WujreoM1X/gYfdnJ/J
-----END CERTIFICATE-----
"""
    return cert

  def _fetch_private_cert(self, oauth_request):
    # NOTE(review): despite the variable name, this is a PKCS#8 private key
    # block (not a certificate) — the private half of the test key pair.
    cert = """
-----BEGIN PRIVATE KEY-----
MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALRiMLAh9iimur8V
A7qVvdqxevEuUkW4K+2KdMXmnQbG9Aa7k7eBjK1S+0LYmVjPKlJGNXHDGuy5Fw/d
7rjVJ0BLB+ubPK8iA/Tw3hLQgXMRRGRXXCn8ikfuQfjUS1uZSatdLB81mydBETlJ
hI6GH4twrbDJCR2Bwy/XWXgqgGRzAgMBAAECgYBYWVtleUzavkbrPjy0T5FMou8H
X9u2AC2ry8vD/l7cqedtwMPp9k7TubgNFo+NGvKsl2ynyprOZR1xjQ7WgrgVB+mm
uScOM/5HVceFuGRDhYTCObE+y1kxRloNYXnx3ei1zbeYLPCHdhxRYW7T0qcynNmw
rn05/KO2RLjgQNalsQJBANeA3Q4Nugqy4QBUCEC09SqylT2K9FrrItqL2QKc9v0Z
zO2uwllCbg0dwpVuYPYXYvikNHHg+aCWF+VXsb9rpPsCQQDWR9TT4ORdzoj+Nccn
qkMsDmzt0EfNaAOwHOmVJ2RVBspPcxt5iN4HI7HNeG6U5YsFBb+/GZbgfBT3kpNG
WPTpAkBI+gFhjfJvRw38n3g/+UeAkwMI2TJQS4n8+hid0uus3/zOjDySH3XHCUno
cn1xOJAyZODBo47E+67R4jV1/gzbAkEAklJaspRPXP877NssM5nAZMU0/O/NGCZ+
3jPgDUno6WbJn5cqm8MqWhW1xGkImgRk+fkDBquiq4gPiT898jusgQJAd5Zrr6Q8
AO/0isr/3aa6O6NLQxISLKcPDk2NOccAfS/xOtfOz4sJYM3+Bs4Io9+dZGSDCA54
Lw03eHTNQghS0A==
-----END PRIVATE KEY-----
"""
    return cert
| apache-2.0 |
h3biomed/ansible | lib/ansible/modules/storage/zfs/zfs.py | 55 | 9554 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Johan Wiren <johan.wiren.se@gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zfs
short_description: Manage zfs
description:
- Manages ZFS file systems, volumes, clones and snapshots
version_added: "1.1"
options:
name:
description:
- File system, snapshot or volume name e.g. C(rpool/myfs).
required: true
state:
description:
- Whether to create (C(present)), or remove (C(absent)) a
file system, snapshot or volume. All parents/children
will be created/destroyed as needed to reach the desired state.
choices: [ absent, present ]
required: true
origin:
description:
- Snapshot from which to create a clone.
key_value:
description:
- (**DEPRECATED**) This will be removed in Ansible-2.9. Set these values in the
- C(extra_zfs_properties) option instead.
- The C(zfs) module takes key=value pairs for zfs properties to be set.
- See the zfs(8) man page for more information.
extra_zfs_properties:
description:
- A dictionary of zfs properties to be set.
- See the zfs(8) man page for more information.
version_added: "2.5"
author:
- Johan Wiren (@johanwiren)
'''
EXAMPLES = '''
- name: Create a new file system called myfs in pool rpool with the setuid property turned off
zfs:
name: rpool/myfs
state: present
extra_zfs_properties:
setuid: off
- name: Create a new volume called myvol in pool rpool.
zfs:
name: rpool/myvol
state: present
extra_zfs_properties:
volsize: 10M
- name: Create a snapshot of rpool/myfs file system.
zfs:
name: rpool/myfs@mysnapshot
state: present
- name: Create a new file system called myfs2 with snapdir enabled
zfs:
name: rpool/myfs2
state: present
extra_zfs_properties:
snapdir: enabled
- name: Create a new file system by cloning a snapshot
zfs:
name: rpool/cloned_fs
state: present
origin: rpool/myfs@mysnapshot
- name: Destroy a filesystem
zfs:
name: rpool/myfs
state: absent
'''
import os
from ansible.module_utils.basic import AnsibleModule
class Zfs(object):
    """Wrapper around the ``zfs``/``zpool`` command line tools for one dataset.

    Holds the dataset name, the desired properties and the AnsibleModule
    reference so each helper can run commands and report success/failure.
    """

    def __init__(self, module, name, properties):
        self.module = module
        self.name = name
        self.properties = properties
        self.changed = False
        # required=True makes get_bin_path fail the module if the binary
        # cannot be found.
        self.zfs_cmd = module.get_bin_path('zfs', True)
        self.zpool_cmd = module.get_bin_path('zpool', True)
        # The pool is the first component of the dataset path.
        self.pool = name.split('/')[0]
        self.is_solaris = os.uname()[0] == 'SunOS'
        self.is_openzfs = self.check_openzfs()
        self.enhanced_sharing = self.check_enhanced_sharing()

    def check_openzfs(self):
        """Return True when the pool looks like OpenZFS.

        ``zpool get version`` prints '-' for a feature-flag pool; 5000 is
        the fixed version number OpenZFS reports.
        """
        cmd = [self.zpool_cmd]
        cmd.extend(['get', 'version'])
        cmd.append(self.pool)
        (rc, out, err) = self.module.run_command(cmd, check_rc=True)
        version = out.splitlines()[-1].split()[2]
        if version == '-':
            return True
        if int(version) == 5000:
            return True
        return False

    def check_enhanced_sharing(self):
        """Return True when Solaris ``share.nfs``/``share.smb`` properties
        are available (native Solaris ZFS, pool version >= 34)."""
        if self.is_solaris and not self.is_openzfs:
            cmd = [self.zpool_cmd]
            cmd.extend(['get', 'version'])
            cmd.append(self.pool)
            (rc, out, err) = self.module.run_command(cmd, check_rc=True)
            version = out.splitlines()[-1].split()[2]
            if int(version) >= 34:
                return True
        return False

    def exists(self):
        """Return True if the file system, volume or snapshot exists."""
        cmd = [self.zfs_cmd, 'list', '-t', 'all', self.name]
        (rc, out, err) = self.module.run_command(' '.join(cmd))
        if rc == 0:
            return True
        else:
            return False

    def create(self):
        """Create the dataset; the zfs sub-command (create/clone/snapshot)
        is derived from the name and the presence of an origin."""
        if self.module.check_mode:
            self.changed = True
            return
        properties = self.properties
        origin = self.module.params.get('origin', None)
        cmd = [self.zfs_cmd]

        # '@' in the name means a snapshot; an origin snapshot means a clone.
        if "@" in self.name:
            action = 'snapshot'
        elif origin:
            action = 'clone'
        else:
            action = 'create'

        cmd.append(action)

        if action in ['create', 'clone']:
            # -p creates any missing parent datasets.
            cmd += ['-p']

        if properties:
            for prop, value in properties.items():
                # volsize/volblocksize are creation-time flags, not -o pairs.
                if prop == 'volsize':
                    cmd += ['-V', value]
                elif prop == 'volblocksize':
                    cmd += ['-b', value]
                else:
                    cmd += ['-o', '%s="%s"' % (prop, value)]
        if origin and action == 'clone':
            cmd.append(origin)
        cmd.append(self.name)
        (rc, out, err) = self.module.run_command(' '.join(cmd))
        if rc == 0:
            self.changed = True
        else:
            self.module.fail_json(msg=err)

    def destroy(self):
        """Destroy the dataset and, via -R, all of its dependents."""
        if self.module.check_mode:
            self.changed = True
            return
        cmd = [self.zfs_cmd, 'destroy', '-R', self.name]
        (rc, out, err) = self.module.run_command(' '.join(cmd))
        if rc == 0:
            self.changed = True
        else:
            self.module.fail_json(msg=err)

    def set_property(self, prop, value):
        """Set a single zfs property on the dataset."""
        if self.module.check_mode:
            self.changed = True
            return
        cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), self.name]
        (rc, out, err) = self.module.run_command(cmd)
        if rc == 0:
            self.changed = True
        else:
            self.module.fail_json(msg=err)

    def set_properties_if_changed(self):
        """Apply every requested property whose current value differs."""
        current_properties = self.get_current_properties()
        for prop, value in self.properties.items():
            if current_properties.get(prop, None) != value:
                self.set_property(prop, value)

    def get_current_properties(self):
        """Return the dataset's locally-set properties as a dict."""
        cmd = [self.zfs_cmd, 'get', '-H']
        if self.enhanced_sharing:
            # -e includes the enhanced share.* properties on Solaris.
            cmd += ['-e']
        cmd += ['all', self.name]
        rc, out, err = self.module.run_command(" ".join(cmd))
        properties = dict()
        for prop, value, source in [l.split('\t')[1:4] for l in out.splitlines()]:
            # Only report properties set directly on this dataset, not
            # inherited or default values.
            if source == 'local':
                properties[prop] = value
        # Add alias for enhanced sharing properties
        if self.enhanced_sharing:
            properties['sharenfs'] = properties.get('share.nfs', None)
            properties['sharesmb'] = properties.get('share.smb', None)
        return properties
def main():
    """Entry point: parse module parameters and converge the dataset state."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            state=dict(type='str', required=True, choices=['absent', 'present']),
            origin=dict(type='str', default=None),
            # createparent is meaningless after 2.3, but this shouldn't
            # be removed until check_invalid_arguments is.
            createparent=dict(type='bool', default=None),
            extra_zfs_properties=dict(type='dict', default={}),
        ),
        supports_check_mode=True,
        # Remove this in Ansible 2.9
        check_invalid_arguments=False,
    )

    state = module.params.get('state')
    name = module.params.get('name')

    # An origin only makes sense when cloning; a name containing '@' is a
    # snapshot, which cannot be a clone target.
    if module.params.get('origin') and '@' in name:
        module.fail_json(msg='cannot specify origin when operating on a snapshot')

    # The following is deprecated. Remove in Ansible 2.9
    # Get all valid zfs-properties
    properties = dict()
    for prop, value in module.params.items():
        # All freestyle params are zfs properties
        if prop not in module.argument_spec:
            # Booleans map to the 'on'/'off' strings that zfs expects.
            if isinstance(value, bool):
                if value is True:
                    properties[prop] = 'on'
                else:
                    properties[prop] = 'off'
            else:
                properties[prop] = value

    if properties:
        module.deprecate('Passing zfs properties as arbitrary parameters to the zfs module is'
                         ' deprecated. Send them as a dictionary in the extra_zfs_properties'
                         ' parameter instead.', version='2.9')
    # Merge, giving the module_params precedence
    for prop, value in module.params['extra_zfs_properties'].items():
        properties[prop] = value
    module.params['extra_zfs_properties'] = properties
    # End deprecated section

    # Reverse the boolification of zfs properties
    for prop, value in module.params['extra_zfs_properties'].items():
        if isinstance(value, bool):
            if value is True:
                module.params['extra_zfs_properties'][prop] = 'on'
            else:
                module.params['extra_zfs_properties'][prop] = 'off'
        else:
            module.params['extra_zfs_properties'][prop] = value

    result = dict(
        name=name,
        state=state,
    )

    zfs = Zfs(module, name, module.params['extra_zfs_properties'])

    if state == 'present':
        if zfs.exists():
            zfs.set_properties_if_changed()
        else:
            zfs.create()
    elif state == 'absent':
        if zfs.exists():
            zfs.destroy()

    result.update(zfs.properties)
    result['changed'] = zfs.changed
    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
ikottman/alexa-skills | office_hours/dependencies/requests/packages/chardet/hebrewprober.py | 2929 | 13359 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contain special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letters scores maintained and Both
# model probers scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
    """Decides between logical (windows-1255) and visual (ISO-8859-8) Hebrew.

    This prober never identifies a charset by itself: it accumulates
    final-letter evidence from the stream and combines it with the scores
    of the two windows-1255 model probers (one reading forwards, one
    backwards) registered through set_model_probers().
    """

    # Membership sets hoisted to class level so the per-character tests in
    # feed() do not rebuild a Python list on every call.
    _FINAL_CHARS = frozenset([FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
                              FINAL_TSADI])
    _NON_FINAL_CHARS = frozenset([NORMAL_KAF, NORMAL_MEM, NORMAL_NUN,
                                  NORMAL_PE])

    def __init__(self):
        CharSetProber.__init__(self)
        self._mLogicalProber = None
        self._mVisualProber = None
        self.reset()

    def reset(self):
        self._mFinalCharLogicalScore = 0
        self._mFinalCharVisualScore = 0
        # The two last characters seen in the previous buffer;
        # mPrev and mBeforePrev are initialized to space in order to
        # simulate a word delimiter at the beginning of the data.
        self._mPrev = ' '
        self._mBeforePrev = ' '

    def set_model_probers(self, logicalProber, visualProber):
        # These probers are owned by the group prober.
        self._mLogicalProber = logicalProber
        self._mVisualProber = visualProber

    def is_final(self, c):
        """Return True for the final form of Kaf, Mem, Nun, Pe or Tsadi."""
        return wrap_ord(c) in self._FINAL_CHARS

    def is_non_final(self, c):
        """Return True for a normal letter that also has a final form.

        The normal Tsadi is deliberately excluded: words like 'lechotet'
        (to chat) contain an apostrophe after the tsadi which is converted
        to a space in FilterWithoutEnglishLetters, so a Non-Final tsadi
        legitimately appears at the end of a word. Pe and Kaf rarely show
        the same behavior ('Pop', 'Winamp', 'Mubarak' legally end with a
        Non-Final Pe or Kaf), but such words are rare enough that keeping
        them as Non-Final letters is a net win.
        """
        return wrap_ord(c) in self._NON_FINAL_CHARS

    def feed(self, aBuf):
        """Accumulate final-letter evidence for the logical/visual decision.

        Cases scored (one-letter words are ignored, as are final letters
        in the middle of a word — neither indicates a layout):
          1) word ends with a final letter      -> +1 logical
          2) word ends with a Non-Final letter  -> +1 visual
          3) word starts with a final letter    -> +1 visual
        The scores are compared against each other in get_charset_name().
        """
        if self.get_state() == eNotMe:
            # Both model probers say it's not them. No reason to continue.
            return eNotMe

        # All 7-bit characters were replaced with spaces upstream so that
        # word boundary detection works properly.
        aBuf = self.filter_high_bit_only(aBuf)

        for cur in aBuf:
            if cur == ' ':
                # We stand on a space - a word just ended
                if self._mBeforePrev != ' ':
                    # next-to-last char was not a space, so self._mPrev is
                    # not a one-letter word
                    if self.is_final(self._mPrev):
                        # case (1) [-2:not space][-1:final letter][cur:space]
                        self._mFinalCharLogicalScore += 1
                    elif self.is_non_final(self._mPrev):
                        # case (2) [-2:not space][-1:Non-Final][cur:space]
                        self._mFinalCharVisualScore += 1
            else:
                # Not standing on a space
                if ((self._mBeforePrev == ' ') and
                        (self.is_final(self._mPrev)) and (cur != ' ')):
                    # case (3) [-2:space][-1:final letter][cur:not space]
                    self._mFinalCharVisualScore += 1
            self._mBeforePrev = self._mPrev
            self._mPrev = cur

        # Forever detecting, till the end or until both model probers
        # return eNotMe (handled above).
        return eDetecting

    def get_charset_name(self):
        """Make the decision: is it Logical or Visual Hebrew?"""
        # If the final letter score distance is dominant enough, rely on it.
        finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
        if finalsub >= MIN_FINAL_CHAR_DISTANCE:
            return LOGICAL_HEBREW_NAME
        if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
            return VISUAL_HEBREW_NAME

        # It's not dominant enough, try to rely on the model scores instead.
        modelsub = (self._mLogicalProber.get_confidence()
                    - self._mVisualProber.get_confidence())
        if modelsub > MIN_MODEL_DISTANCE:
            return LOGICAL_HEBREW_NAME
        if modelsub < -MIN_MODEL_DISTANCE:
            return VISUAL_HEBREW_NAME

        # Still no good, back to final letter distance, maybe it'll save
        # the day.
        if finalsub < 0.0:
            return VISUAL_HEBREW_NAME

        # (finalsub > 0 - Logical) or (don't know what to do) default to
        # Logical.
        return LOGICAL_HEBREW_NAME

    def get_state(self):
        # Remain active as long as any of the model probers are active.
        if (self._mLogicalProber.get_state() == eNotMe) and \
           (self._mVisualProber.get_state() == eNotMe):
            return eNotMe
        return eDetecting
| unlicense |
tomjelinek/pcs | pcs_test/tier0/lib/cib/rule/test_cib_to_dto.py | 3 | 27358 | from unittest import TestCase
from lxml import etree
from pcs.common.pacemaker.rule import (
CibRuleDateCommonDto,
CibRuleExpressionDto,
)
from pcs.common.types import (
CibRuleInEffectStatus,
CibRuleExpressionType,
)
from pcs.lib.cib.rule import (
RuleInEffectEval,
rule_element_to_dto,
)
class RuleInEffectEvalMock(RuleInEffectEval):
    """Test double for RuleInEffectEval returning canned per-rule statuses."""

    def __init__(self, mock_data=None):
        self._mock_data = mock_data if mock_data else dict()

    def get_rule_status(self, rule_id):
        # Rules not present in the canned data report UNKNOWN.
        return self._mock_data.get(rule_id, CibRuleInEffectStatus.UNKNOWN)
def get_in_effect_eval(mock_data=None):
    """Build a stub rule-status evaluator preloaded with *mock_data*."""
    evaluator = RuleInEffectEvalMock(mock_data)
    return evaluator
class ExpressionToDto(TestCase):
    """Conversion of plain <expression> rule elements to DTOs."""

    def test_defined(self):
        xml = etree.fromstring(
            """
            <rule id="my-id">
                <expression id="my-id-expr"
                    attribute="pingd" operation="defined"
                />
            </rule>
            """
        )
        self.assertEqual(
            rule_element_to_dto(get_in_effect_eval(), xml),
            CibRuleExpressionDto(
                "my-id",
                CibRuleExpressionType.RULE,
                CibRuleInEffectStatus.UNKNOWN,
                {},
                None,
                None,
                [
                    CibRuleExpressionDto(
                        "my-id-expr",
                        CibRuleExpressionType.EXPRESSION,
                        CibRuleInEffectStatus.UNKNOWN,
                        {"attribute": "pingd", "operation": "defined"},
                        None,
                        None,
                        [],
                        "defined pingd",
                    ),
                ],
                "defined pingd",
            ),
        )

    def test_value_comparison(self):
        xml = etree.fromstring(
            """
            <rule id="my-id">
                <expression id="my-id-expr"
                    attribute="my-attr" operation="eq" value="my value"
                />
            </rule>
            """
        )
        self.assertEqual(
            rule_element_to_dto(get_in_effect_eval(), xml),
            CibRuleExpressionDto(
                "my-id",
                CibRuleExpressionType.RULE,
                CibRuleInEffectStatus.UNKNOWN,
                {},
                None,
                None,
                [
                    CibRuleExpressionDto(
                        "my-id-expr",
                        CibRuleExpressionType.EXPRESSION,
                        CibRuleInEffectStatus.UNKNOWN,
                        {
                            "attribute": "my-attr",
                            "operation": "eq",
                            "value": "my value",
                        },
                        None,
                        None,
                        [],
                        'my-attr eq "my value"',
                    ),
                ],
                'my-attr eq "my value"',
            ),
        )

    def test_value_comparison_with_type(self):
        xml = etree.fromstring(
            """
            <rule id="my-id">
                <expression id="my-id-expr"
                    attribute="foo" operation="gt" type="version" value="1.2.3"
                />
            </rule>
            """
        )
        self.assertEqual(
            rule_element_to_dto(get_in_effect_eval(), xml),
            CibRuleExpressionDto(
                "my-id",
                CibRuleExpressionType.RULE,
                CibRuleInEffectStatus.UNKNOWN,
                {},
                None,
                None,
                [
                    CibRuleExpressionDto(
                        "my-id-expr",
                        CibRuleExpressionType.EXPRESSION,
                        CibRuleInEffectStatus.UNKNOWN,
                        {
                            "attribute": "foo",
                            "operation": "gt",
                            "type": "version",
                            "value": "1.2.3",
                        },
                        None,
                        None,
                        [],
                        "foo gt version 1.2.3",
                    ),
                ],
                "foo gt version 1.2.3",
            ),
        )
class DateExpressionToDto(TestCase):
    """Conversion of <date_expression> rule elements to DTOs."""

    def test_gt(self):
        xml = etree.fromstring(
            """
            <rule id="rule">
                <date_expression id="rule-expr"
                    operation="gt" start="2014-06-26"
                />
            </rule>
            """
        )
        self.assertEqual(
            rule_element_to_dto(get_in_effect_eval(), xml),
            CibRuleExpressionDto(
                "rule",
                CibRuleExpressionType.RULE,
                CibRuleInEffectStatus.UNKNOWN,
                {},
                None,
                None,
                [
                    CibRuleExpressionDto(
                        "rule-expr",
                        CibRuleExpressionType.DATE_EXPRESSION,
                        CibRuleInEffectStatus.UNKNOWN,
                        {"operation": "gt", "start": "2014-06-26"},
                        None,
                        None,
                        [],
                        "date gt 2014-06-26",
                    ),
                ],
                "date gt 2014-06-26",
            ),
        )

    def test_lt(self):
        xml = etree.fromstring(
            """
            <rule id="rule">
                <date_expression id="rule-expr"
                    operation="lt" end="2014-06-26"
                />
            </rule>
            """
        )
        self.assertEqual(
            rule_element_to_dto(get_in_effect_eval(), xml),
            CibRuleExpressionDto(
                "rule",
                CibRuleExpressionType.RULE,
                CibRuleInEffectStatus.UNKNOWN,
                {},
                None,
                None,
                [
                    CibRuleExpressionDto(
                        "rule-expr",
                        CibRuleExpressionType.DATE_EXPRESSION,
                        CibRuleInEffectStatus.UNKNOWN,
                        {"operation": "lt", "end": "2014-06-26"},
                        None,
                        None,
                        [],
                        "date lt 2014-06-26",
                    ),
                ],
                "date lt 2014-06-26",
            ),
        )

    def test_datespec(self):
        # A date_spec child is carried in the DTO's date_spec slot.
        xml = etree.fromstring(
            """
            <rule id="rule">
                <date_expression id="rule-expr" operation="date_spec">
                    <date_spec id="rule-expr-datespec"
                        hours="1-14" monthdays="20-30" months="1"
                    />
                </date_expression>
            </rule>
            """
        )
        self.assertEqual(
            rule_element_to_dto(get_in_effect_eval(), xml),
            CibRuleExpressionDto(
                "rule",
                CibRuleExpressionType.RULE,
                CibRuleInEffectStatus.UNKNOWN,
                {},
                None,
                None,
                [
                    CibRuleExpressionDto(
                        "rule-expr",
                        CibRuleExpressionType.DATE_EXPRESSION,
                        CibRuleInEffectStatus.UNKNOWN,
                        {"operation": "date_spec"},
                        CibRuleDateCommonDto(
                            "rule-expr-datespec",
                            {
                                "hours": "1-14",
                                "monthdays": "20-30",
                                "months": "1",
                            },
                        ),
                        None,
                        [],
                        "date-spec hours=1-14 monthdays=20-30 months=1",
                    ),
                ],
                "date-spec hours=1-14 monthdays=20-30 months=1",
            ),
        )

    def test_inrange_start_end(self):
        xml = etree.fromstring(
            """
            <rule id="rule">
                <date_expression id="rule-expr"
                    operation="in_range" start="2014-06-26" end="2014-07-26"
                />
            </rule>
            """
        )
        self.assertEqual(
            rule_element_to_dto(get_in_effect_eval(), xml),
            CibRuleExpressionDto(
                "rule",
                CibRuleExpressionType.RULE,
                CibRuleInEffectStatus.UNKNOWN,
                {},
                None,
                None,
                [
                    CibRuleExpressionDto(
                        "rule-expr",
                        CibRuleExpressionType.DATE_EXPRESSION,
                        CibRuleInEffectStatus.UNKNOWN,
                        {
                            "operation": "in_range",
                            "start": "2014-06-26",
                            "end": "2014-07-26",
                        },
                        None,
                        None,
                        [],
                        "date in_range 2014-06-26 to 2014-07-26",
                    ),
                ],
                "date in_range 2014-06-26 to 2014-07-26",
            ),
        )

    def test_inrange_end(self):
        xml = etree.fromstring(
            """
            <rule id="rule">
                <date_expression id="rule-expr"
                    operation="in_range" end="2014-07-26"
                />
            </rule>
            """
        )
        self.assertEqual(
            rule_element_to_dto(get_in_effect_eval(), xml),
            CibRuleExpressionDto(
                "rule",
                CibRuleExpressionType.RULE,
                CibRuleInEffectStatus.UNKNOWN,
                {},
                None,
                None,
                [
                    CibRuleExpressionDto(
                        "rule-expr",
                        CibRuleExpressionType.DATE_EXPRESSION,
                        CibRuleInEffectStatus.UNKNOWN,
                        {"operation": "in_range", "end": "2014-07-26"},
                        None,
                        None,
                        [],
                        "date in_range to 2014-07-26",
                    ),
                ],
                "date in_range to 2014-07-26",
            ),
        )

    def test_inrange_start_duration(self):
        # A duration child is carried in the DTO's duration slot.
        xml = etree.fromstring(
            """
            <rule id="rule">
                <date_expression id="rule-expr"
                    operation="in_range" start="2014-06-26"
                >
                    <duration id="rule-expr-duration" years="1"/>
                </date_expression>
            </rule>
            """
        )
        self.assertEqual(
            rule_element_to_dto(get_in_effect_eval(), xml),
            CibRuleExpressionDto(
                "rule",
                CibRuleExpressionType.RULE,
                CibRuleInEffectStatus.UNKNOWN,
                {},
                None,
                None,
                [
                    CibRuleExpressionDto(
                        "rule-expr",
                        CibRuleExpressionType.DATE_EXPRESSION,
                        CibRuleInEffectStatus.UNKNOWN,
                        {
                            "operation": "in_range",
                            "start": "2014-06-26",
                        },
                        None,
                        CibRuleDateCommonDto(
                            "rule-expr-duration",
                            {"years": "1"},
                        ),
                        [],
                        "date in_range 2014-06-26 to duration years=1",
                    ),
                ],
                "date in_range 2014-06-26 to duration years=1",
            ),
        )
class OpExpressionToDto(TestCase):
    """Conversion of <op_expression> rule elements to DTOs."""

    def test_minimal(self):
        xml = etree.fromstring(
            """
            <rule id="my-id">
                <op_expression id="my-id-op" name="start" />
            </rule>
            """
        )
        self.assertEqual(
            rule_element_to_dto(get_in_effect_eval(), xml),
            CibRuleExpressionDto(
                "my-id",
                CibRuleExpressionType.RULE,
                CibRuleInEffectStatus.UNKNOWN,
                {},
                None,
                None,
                [
                    CibRuleExpressionDto(
                        "my-id-op",
                        CibRuleExpressionType.OP_EXPRESSION,
                        CibRuleInEffectStatus.UNKNOWN,
                        {"name": "start"},
                        None,
                        None,
                        [],
                        "op start",
                    ),
                ],
                "op start",
            ),
        )

    def test_interval(self):
        xml = etree.fromstring(
            """
            <rule id="my-id">
                <op_expression id="my-id-op" name="start" interval="2min" />
            </rule>
            """
        )
        self.assertEqual(
            rule_element_to_dto(get_in_effect_eval(), xml),
            CibRuleExpressionDto(
                "my-id",
                CibRuleExpressionType.RULE,
                CibRuleInEffectStatus.UNKNOWN,
                {},
                None,
                None,
                [
                    CibRuleExpressionDto(
                        "my-id-op",
                        CibRuleExpressionType.OP_EXPRESSION,
                        CibRuleInEffectStatus.UNKNOWN,
                        {"name": "start", "interval": "2min"},
                        None,
                        None,
                        [],
                        "op start interval=2min",
                    ),
                ],
                "op start interval=2min",
            ),
        )
class ResourceExpressionToDto(TestCase):
    """Conversion of <rsc_expression> elements for every combination of
    class / provider / type attributes being present or absent."""

    def test_success(self):
        test_data = [
            # ((class, provider, type), output)
            ((None, None, None), "::"),
            (("ocf", None, None), "ocf::"),
            ((None, "pacemaker", None), ":pacemaker:"),
            ((None, None, "Dummy"), "::Dummy"),
            (("ocf", "pacemaker", None), "ocf:pacemaker:"),
            (("ocf", None, "Dummy"), "ocf::Dummy"),
            ((None, "pacemaker", "Dummy"), ":pacemaker:Dummy"),
            (("ocf", "pacemaker", "Dummy"), "ocf:pacemaker:Dummy"),
        ]
        for in_data, out_data in test_data:
            with self.subTest(in_data=in_data):
                # Build the element attributes from the non-None parts only.
                attrs = {}
                if in_data[0] is not None:
                    attrs["class"] = in_data[0]
                if in_data[1] is not None:
                    attrs["provider"] = in_data[1]
                if in_data[2] is not None:
                    attrs["type"] = in_data[2]
                attrs_str = " ".join(
                    [f"{name}='{value}'" for name, value in attrs.items()]
                )
                xml = etree.fromstring(
                    f"""
                    <rule id="my-id">
                        <rsc_expression id="my-id-expr" {attrs_str}/>
                    </rule>
                    """
                )
                self.assertEqual(
                    rule_element_to_dto(get_in_effect_eval(), xml),
                    CibRuleExpressionDto(
                        "my-id",
                        CibRuleExpressionType.RULE,
                        CibRuleInEffectStatus.UNKNOWN,
                        {},
                        None,
                        None,
                        [
                            CibRuleExpressionDto(
                                "my-id-expr",
                                CibRuleExpressionType.RSC_EXPRESSION,
                                CibRuleInEffectStatus.UNKNOWN,
                                attrs,
                                None,
                                None,
                                [],
                                f"resource {out_data}",
                            ),
                        ],
                        f"resource {out_data}",
                    ),
                )
class RuleToDto(TestCase):
    """Conversion of a nested rule tree mixing all expression types."""

    def test_complex_rule(self):
        xml = etree.fromstring(
            """
            <rule id="complex" boolean-op="or" score="INFINITY">
                <rule id="complex-rule-1" boolean-op="and" score="0">
                    <date_expression id="complex-rule-1-expr"
                        operation="date_spec"
                    >
                        <date_spec id="complex-rule-1-expr-datespec"
                            weekdays="1-5" hours="12-23"
                        />
                    </date_expression>
                    <date_expression id="complex-rule-1-expr-1"
                        operation="in_range" start="2014-07-26"
                    >
                        <duration id="complex-rule-1-expr-1-durat" months="1"/>
                    </date_expression>
                </rule>
                <rule id="complex-rule" boolean-op="and" score="0">
                    <expression id="complex-rule-expr-1"
                        attribute="foo" operation="gt" type="version" value="1.2"
                    />
                    <expression id="complex-rule-expr"
                        attribute="#uname" operation="eq" value="node3 4"
                    />
                    <expression id="complex-rule-expr-2"
                        attribute="#uname" operation="eq" value="nodeA"
                    />
                </rule>
            </rule>
            """
        )
        self.assertEqual(
            rule_element_to_dto(get_in_effect_eval(), xml),
            CibRuleExpressionDto(
                "complex",
                CibRuleExpressionType.RULE,
                CibRuleInEffectStatus.UNKNOWN,
                {"boolean-op": "or", "score": "INFINITY"},
                None,
                None,
                [
                    CibRuleExpressionDto(
                        "complex-rule-1",
                        CibRuleExpressionType.RULE,
                        CibRuleInEffectStatus.UNKNOWN,
                        {"boolean-op": "and", "score": "0"},
                        None,
                        None,
                        [
                            CibRuleExpressionDto(
                                "complex-rule-1-expr",
                                CibRuleExpressionType.DATE_EXPRESSION,
                                CibRuleInEffectStatus.UNKNOWN,
                                {"operation": "date_spec"},
                                CibRuleDateCommonDto(
                                    "complex-rule-1-expr-datespec",
                                    {"hours": "12-23", "weekdays": "1-5"},
                                ),
                                None,
                                [],
                                "date-spec hours=12-23 weekdays=1-5",
                            ),
                            CibRuleExpressionDto(
                                "complex-rule-1-expr-1",
                                CibRuleExpressionType.DATE_EXPRESSION,
                                CibRuleInEffectStatus.UNKNOWN,
                                {
                                    "operation": "in_range",
                                    "start": "2014-07-26",
                                },
                                None,
                                CibRuleDateCommonDto(
                                    "complex-rule-1-expr-1-durat",
                                    {"months": "1"},
                                ),
                                [],
                                "date in_range 2014-07-26 to duration months=1",
                            ),
                        ],
                        "date-spec hours=12-23 weekdays=1-5 and date in_range "
                        "2014-07-26 to duration months=1",
                    ),
                    CibRuleExpressionDto(
                        "complex-rule",
                        CibRuleExpressionType.RULE,
                        CibRuleInEffectStatus.UNKNOWN,
                        {"boolean-op": "and", "score": "0"},
                        None,
                        None,
                        [
                            CibRuleExpressionDto(
                                "complex-rule-expr-1",
                                CibRuleExpressionType.EXPRESSION,
                                CibRuleInEffectStatus.UNKNOWN,
                                {
                                    "attribute": "foo",
                                    "operation": "gt",
                                    "type": "version",
                                    "value": "1.2",
                                },
                                None,
                                None,
                                [],
                                "foo gt version 1.2",
                            ),
                            CibRuleExpressionDto(
                                "complex-rule-expr",
                                CibRuleExpressionType.EXPRESSION,
                                CibRuleInEffectStatus.UNKNOWN,
                                {
                                    "attribute": "#uname",
                                    "operation": "eq",
                                    "value": "node3 4",
                                },
                                None,
                                None,
                                [],
                                '#uname eq "node3 4"',
                            ),
                            CibRuleExpressionDto(
                                "complex-rule-expr-2",
                                CibRuleExpressionType.EXPRESSION,
                                CibRuleInEffectStatus.UNKNOWN,
                                {
                                    "attribute": "#uname",
                                    "operation": "eq",
                                    "value": "nodeA",
                                },
                                None,
                                None,
                                [],
                                "#uname eq nodeA",
                            ),
                        ],
                        'foo gt version 1.2 and #uname eq "node3 4" and #uname '
                        "eq nodeA",
                    ),
                ],
                "(date-spec hours=12-23 weekdays=1-5 and date in_range "
                "2014-07-26 to duration months=1) or (foo gt version 1.2 and "
                '#uname eq "node3 4" and #uname eq nodeA)',
            ),
        )
class InEffectFlagInDto(TestCase):
    """Rule in-effect statuses supplied by the evaluator end up on the
    matching rule DTOs; leaf expressions stay UNKNOWN."""

    def test_success(self):
        xml = etree.fromstring(
            """
            <rule id="rule1" boolean-op="or" score="INFINITY">
                <rule id="rule2" boolean-op="and" score="0">
                    <rule id="rule3" boolean-op="or" score="0">
                        <op_expression id="id1" name="start" />
                    </rule>
                    <rsc_expression id="id2" type="Dummy" />
                </rule>
                <rule id="rule4" boolean-op="and" score="0">
                    <rsc_expression id="id3" type="Stateful" />
                </rule>
            </rule>
            """
        )
        rules_status = {
            "rule1": CibRuleInEffectStatus.UNKNOWN,
            "rule2": CibRuleInEffectStatus.EXPIRED,
            "rule3": CibRuleInEffectStatus.IN_EFFECT,
            "rule4": CibRuleInEffectStatus.NOT_YET_IN_EFFECT,
        }
        self.assertEqual(
            rule_element_to_dto(get_in_effect_eval(rules_status), xml),
            CibRuleExpressionDto(
                "rule1",
                CibRuleExpressionType.RULE,
                CibRuleInEffectStatus.UNKNOWN,
                {"boolean-op": "or", "score": "INFINITY"},
                None,
                None,
                [
                    CibRuleExpressionDto(
                        "rule2",
                        CibRuleExpressionType.RULE,
                        CibRuleInEffectStatus.EXPIRED,
                        {"boolean-op": "and", "score": "0"},
                        None,
                        None,
                        [
                            CibRuleExpressionDto(
                                "rule3",
                                CibRuleExpressionType.RULE,
                                CibRuleInEffectStatus.IN_EFFECT,
                                {"boolean-op": "or", "score": "0"},
                                None,
                                None,
                                [
                                    CibRuleExpressionDto(
                                        "id1",
                                        CibRuleExpressionType.OP_EXPRESSION,
                                        CibRuleInEffectStatus.UNKNOWN,
                                        {"name": "start"},
                                        None,
                                        None,
                                        [],
                                        "op start",
                                    ),
                                ],
                                "op start",
                            ),
                            CibRuleExpressionDto(
                                "id2",
                                CibRuleExpressionType.RSC_EXPRESSION,
                                CibRuleInEffectStatus.UNKNOWN,
                                {"type": "Dummy"},
                                None,
                                None,
                                [],
                                "resource ::Dummy",
                            ),
                        ],
                        "(op start) and resource ::Dummy",
                    ),
                    CibRuleExpressionDto(
                        "rule4",
                        CibRuleExpressionType.RULE,
                        CibRuleInEffectStatus.NOT_YET_IN_EFFECT,
                        {"boolean-op": "and", "score": "0"},
                        None,
                        None,
                        [
                            CibRuleExpressionDto(
                                "id3",
                                CibRuleExpressionType.RSC_EXPRESSION,
                                CibRuleInEffectStatus.UNKNOWN,
                                {"type": "Stateful"},
                                None,
                                None,
                                [],
                                "resource ::Stateful",
                            ),
                        ],
                        "resource ::Stateful",
                    ),
                ],
                "((op start) and resource ::Dummy) or (resource ::Stateful)",
            ),
        )
| gpl-2.0 |
DANCEcollaborative/forum-xblock | XBlock Integration Files/xdjangobb/xblock/lib/python2.7/site-packages/django/contrib/localflavor/de_CH/formats.py | 84 | 1633 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# Date/time format definitions for Swiss German (de_CH).
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i:s'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y',  # '25.10.2006', '25.10.06'
    '%Y-%m-%d', '%y-%m-%d',  # '2006-10-25', '06-10-25'
    # '%d. %B %Y', '%d. %b. %Y',  # '25. October 2006', '25. Oct. 2006'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S',  # '14:30:59'
    '%H:%M',  # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S',  # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M',  # '25.10.2006 14:30'
    '%d.%m.%Y',  # '25.10.2006'
    '%Y-%m-%d %H:%M:%S',  # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M',  # '2006-10-25 14:30'
    '%Y-%m-%d',  # '2006-10-25'
)
# These are the separators for non-monetary numbers. For monetary numbers,
# the DECIMAL_SEPARATOR is a . (decimal point) and the THOUSAND_SEPARATOR is a
# ' (single quote).
# For details, please refer to the (German-language) documentation at
# http://www.bk.admin.ch/dokumentation/sprachen/04915/05016/index.html?lang=de
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = u'\xa0'  # non-breaking space
NUMBER_GROUPING = 3
| mit |
rwaldron/three.js | utils/exporters/cinema4d/export_to_three.js.py | 1 | 8326 | '''
author : "George Profenza"
url : ("disturb", "disturbmedia.com/blog","My blog, http://tomaterial.blogspot.com")
Export meshes to the three.js 3D engine by mr.doob et al.
More details on the engine here:
https://github.com/mrdoob/three.js
Currently supports UVs. If the model doesn't display correctly
you might need to reverse some normals/do some cleanup.
Also, if you use Selection Tags and basic ColorMaterials,
the colours will be picked up as face colors. Call autoColor() on the
model you use for this.
The mesh transformations(position, rotation, scale) are saved
and you can get them using: getPosition(), getRotation() and getScale()
each returning a THREE.Vector3
In short
var myGeom = new myC4DGeom();
var myModel = new THREE.Mesh( myGeom, new THREE.MeshFaceMaterial());
//set transforms
model.position = myGeom.getPosition()
model.rotation = myGeom.getRotation()
model.scale = myGeom.getScale()
//set selection tags colours
myGeom.autoColor()
More details on this exporter and more js examples here:
https://github.com/orgicus/three.js
Have fun!
This script requires Cinema 4D R11.5 minimum and the Py4D Plugin:
http://www.py4d.com/get-py4d/
'''
import c4d
from c4d import documents,UVWTag,storage
from c4d.utils import *
from c4d import symbols as sy, plugins, utils, bitmaps, gui
import math
import re
# utils
def clean(var_str):
    """Sanitise *var_str* into a valid JS identifier: every non-word
    character becomes an underscore, and a leading digit gets an
    underscore inserted in front of it."""
    return re.sub('\W|^(?=\d)', '_', var_str)
# from Active State's Python recipes: http://code.activestate.com/recipes/266466-html-colors-tofrom-rgb-tuples/
def RGBToHTMLColor(rgb_tuple):
    """Convert an ``(r, g, b)`` tuple (0-255 per channel) to a
    ``0xRRGGBB`` hex string.

    Each channel is rounded to the nearest integer first: the callers in
    this script pass float components (``color.x * 255`` etc.) and the
    ``%x`` conversion only accepts integers, so formatting the raw
    floats would raise TypeError.
    """
    r, g, b = [int(round(channel)) for channel in rgb_tuple]
    return '0x%02x%02x%02x' % (r, g, b)
def Export():
    """Export the selected editable mesh (C4D type 5100) as a three.js class.

    Writes three files next to the current document: ``<Class>.js`` (the
    generated geometry), ``<Class>.html`` (viewer page built from the
    bundled template) and a copy of ``Three.js`` -- where ``<Class>`` is
    the cleaned-up object name.  Does nothing if no object is selected.
    """
    if not op: return
    if op.GetType() != 5100:
        print 'Selected Object is not an editable mesh'
        return
    unit = 0.001#for scale
    # NOTE(review): unit, scr and rd are computed but never used below.
    fps = doc.GetFps()
    bd = doc.GetRenderBaseDraw()
    scr = bd.GetFrameScreen()
    rd = doc.GetActiveRenderData()
    name = op.GetName()
    classname = clean(name)
    # The JS engine copy and the HTML template live in C4D's library
    # scripts folder; '%s' in the template is replaced by the class name.
    c4dPath = c4d.storage.GeGetC4DPath(sy.C4D_PATH_LIBRARY)
    jsFile = open(c4dPath+'/scripts/Three.js','r')
    js = jsFile.read()
    htmlFile = open(c4dPath+'/scripts/template.html','r')
    html = htmlFile.read()
    html = html.replace('%s',classname)
    code = 'var %s = function () {\n\n\tvar scope = this;\n\n\tTHREE.Geometry.call(this);\n\n' % classname
    def GetMesh(code):
        """Append vertices, faces, UVs, selection colours and transform
        accessors to *code* and return the completed JS source."""
        # goto 0
        doc.SetTime(c4d.BaseTime(0, fps))
        c4d.DrawViews( c4d.DA_ONLY_ACTIVE_VIEW|c4d.DA_NO_THREAD|c4d.DA_NO_REDUCTION|c4d.DA_STATICBREAK )
        c4d.GeSyncMessage(c4d.EVMSG_TIMECHANGED)
        doc.SetTime(doc.GetTime())
        c4d.EventAdd(c4d.EVENT_ANIMATE)
        # Normals are reversed here and restored at the end of GetMesh --
        # presumably to flip the face winding for three.js; confirm.
        SendModelingCommand(command = MCOMMAND_REVERSENORMALS, list = [op], mode = MODIFY_ALL, bc = c4d.BaseContainer(), doc = doc)
        verts = op.GetPointAll()
        for v in verts:
            # Note the negated Y component.
            code += '\tv( %.6f, %.6f, %.6f );\n' % (v.x, -v.y, v.z)
        code += '\n'
        ncount = 0
        uvcount = 0
        faces = op.GetAllPolygons()
        normals = op.CreatePhongNormals()
        ndirection = 1
        hasUV = False
        for tag in op.GetTags():
            if tag.GetName() == "UVW":
                uvw = tag
                hasUV = True
        # Triangles are encoded by C4D as quads with d == c.
        for f in faces:
            if(f.d == f.c):
                if(normals):
                    code += '\tf3( %d, %d, %d, %.6f, %.6f, %.6f );\n' % (f.a, f.b, f.c, normals[ncount].x*ndirection, normals[ncount].y*ndirection, normals[ncount].z*ndirection)
                else:
                    code += '\tf3( %d, %d, %d );\n' % (f.a, f.b, f.c)
            else:
                if(normals):
                    code += '\tf4( %d, %d, %d, %d, %.6f, %.6f, %.6f );\n' % (f.a, f.b, f.c, f.d, normals[ncount].x*ndirection, normals[ncount].y*ndirection, normals[ncount].z*ndirection)
                else:
                    code += '\tf4( %d, %d, %d, %d );\n' % (f.a, f.b, f.c, f.d)
            if hasUV:
                uv = uvw.Get(uvcount);
                # uvs += '[Vector('+str(uv[0].x)+','+str(1.0-uv[0].y)+'),Vector('+str(uv[1].x)+','+str(1.0-uv[1].y)+'),Vector('+str(uv[2].x)+','+str(1.0-uv[2].y)+')],'
                if len(uv) == 4:
                    code += '\tuv( %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f);\n' % (uv[0].x, uv[0].y, uv[1].x, uv[1].y, uv[2].x, uv[2].y, uv[3].x, uv[3].y)
                else:
                    code += '\tuv( %.6f, %.6f, %.6f, %.6f, %.6f, %.6f);\n' % (uv[0].x, uv[0].y, uv[1].x, uv[1].y, uv[2].x, uv[2].y)
            ncount += 1
            uvcount += 1
        code +='\n\tthis.computeCentroids();\n\tthis.computeNormals(true);\n'
        #selection color
        code +='\n\tscope.colors = {};\n'
        code +='\tscope.selections = {};\n'
        selName = ''
        for tag in op.GetTags():
            if(tag.GetType() == 5616): #texture tag
                material = tag.GetMaterial()
                color = material[sy.MATERIAL_COLOR_COLOR]
                tag.SetBit(c4d.BIT_ACTIVE)
                selName = clean(tag[sy.TEXTURETAG_RESTRICTION])
                if len(selName) == 0: print "*** WARNING! *** Missing selection name for material: " + material.GetName()
                code += '\tscope.colors["'+selName+'"] = '+str(RGBToHTMLColor((color.x*255,color.y*255,color.z*255)))+';\n'
            if tag.GetType() == 5673: #selection tag
                # print 'selection: ' + tag.GetName()
                sel = tag.GetSelection()
                selName = clean(tag.GetName())
                ids = sel.GetAll(op.GetPointCount())
                indices = [i for i, e in enumerate(ids) if e != 0]
                code += '\tscope.selections["'+selName+'"] = '+str(indices)+';\n'
        code += '\n\tscope.autoColor = function(){\n'
        code += '\t\tfor(var s in this.selections){\n'
        code += '\t\t\tfor(var i = 0 ; i < this.selections[s].length; i++) this.faces[this.selections[s][i]].material = [new THREE.MeshBasicMaterial({color:this.colors[s]})];\n'
        code += '\t\t}\n\t}\n'
        # model position, rotation, scale rotation x,y,z = H,P,B => three.js x,y,z is P,H,B => y,x,z
        p = op.GetPos()
        r = op.GetRot()
        s = op.GetScale()
        code += '\n\tscope.getPosition = function(){\treturn new THREE.Vector3'+str((p.x,p.y,p.z))+';\t}\n'
        code += '\n\tscope.getRotation = function(){\treturn new THREE.Vector3'+str((r.y,r.x,r.z))+';\t}\n'
        code += '\n\tscope.getScale = function(){\treturn new THREE.Vector3'+str((s.x,s.y,s.z))+';\t}\n'
        code += '\n'
        code += '\tfunction v( x, y, z ) {\n\n'
        code += '\t\tscope.vertices.push( new THREE.Vertex( new THREE.Vector3( x, y, z ) ) );\n\n'
        code += '\t}\n\n'
        code += '\tfunction f3( a, b, c, nx, ny, nz ) {\n\n'
        code += '\t\tscope.faces.push( new THREE.Face3( a, b, c, nx && ny && nz ? new THREE.Vector3( nx, ny, nz ) : null ) );\n\n'
        code += '\t}\n\n'
        code += '\tfunction f4( a, b, c, d, nx, ny, nz ) {\n\n'
        code += '\t\tscope.faces.push( new THREE.Face4( a, b, c, d, nx && ny && nz ? new THREE.Vector3( nx, ny, nz ) : null ) );\n\n'
        code += '\t}\n\n'
        code += '\tfunction uv( u1, v1, u2, v2, u3, v3, u4, v4 ) {\n\n'
        code += '\t\tvar uv = [];\n'
        code += '\t\tuv.push( new THREE.UV( u1, v1 ) );\n'
        code += '\t\tuv.push( new THREE.UV( u2, v2 ) );\n'
        code += '\t\tuv.push( new THREE.UV( u3, v3 ) );\n'
        code += '\t\tif ( u4 && v4 ) uv.push( new THREE.UV( u4, v4 ) );\n'
        code += '\t\tscope.uvs.push( uv );\n'
        code += '\t}\n\n'
        code += '}\n\n'
        code += '%s.prototype = new THREE.Geometry();\n' % classname
        code += '%s.prototype.constructor = %s;' % (classname, classname)
        # Restore the normals reversed at the top of GetMesh.
        SendModelingCommand(command = MCOMMAND_REVERSENORMALS, list = [op], mode = MODIFY_ALL, bc = c4d.BaseContainer(), doc = doc)
        return code
    code = GetMesh(code)
    docPath = doc.GetDocumentPath()
    jspath = docPath+'/'+classname+'.js'
    htmlpath = docPath+'/'+classname+'.html'
    file = open(jspath,'w')
    file.write(code)
    file.close()
    file = open(htmlpath,'w')
    file.write(html)
    file.close()
    file = open(docPath+'/Three.js','w')
    file.write(js)
    file.close()
    print 'Export Complete!'
Export() | mit |
mariopro/youtube-dl | test/test_write_annotations.py | 78 | 2550 | #!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import get_params, try_rm
import io
import xml.etree.ElementTree
import youtube_dl.YoutubeDL
import youtube_dl.extractor
class YoutubeDL(youtube_dl.YoutubeDL):
    """Test double: route ``to_stderr`` through ``to_screen`` so error
    messages follow the same output path as normal messages."""
    def __init__(self, *args, **kwargs):
        super(YoutubeDL, self).__init__(*args, **kwargs)
        self.to_stderr = self.to_screen
# Download parameters: fetch only the annotations, skip the media itself.
params = get_params({
    'writeannotations': True,
    'skip_download': True,
    'writeinfojson': False,
    'format': 'flv',
})
TEST_ID = 'gr51aVj-mLg' # video id under test
ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml' # file the downloader is expected to produce
EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']
class TestAnnotations(unittest.TestCase):
    """Download the annotations XML for TEST_ID and verify that every
    expected annotation text appears in it."""
    def setUp(self):
        # Clear old files
        self.tearDown()
    def test_info_json(self):
        expected = list(EXPECTED_ANNOTATIONS) # Two annotations could have the same text.
        ie = youtube_dl.extractor.YoutubeIE()
        ydl = YoutubeDL(params)
        ydl.add_info_extractor(ie)
        ydl.download([TEST_ID])
        self.assertTrue(os.path.exists(ANNOTATIONS_FILE))
        annoxml = None
        with io.open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof:
            annoxml = xml.etree.ElementTree.parse(annof)
        self.assertTrue(annoxml is not None, 'Failed to parse annotations XML')
        root = annoxml.getroot()
        self.assertEqual(root.tag, 'document')
        annotationsTag = root.find('annotations')
        self.assertEqual(annotationsTag.tag, 'annotations')
        annotations = annotationsTag.findall('annotation')
        # Not all the annotations have TEXT children and the annotations are returned unsorted.
        for a in annotations:
            self.assertEqual(a.tag, 'annotation')
            if a.get('type') == 'text':
                textTag = a.find('TEXT')
                text = textTag.text
                self.assertTrue(text in expected) # assertIn only added in python 2.7
                # remove the first occurrence; there could be more than one annotation with the same text
                expected.remove(text)
        # We should have seen (and removed) all the expected annotation texts.
        self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')
    def tearDown(self):
        # Best-effort removal of the annotations file left by a previous run.
        try_rm(ANNOTATIONS_FILE)
if __name__ == '__main__':
    unittest.main()
| unlicense |
AnimeshSinha1309/WebsiteEdunet | WebsiteEdunet/env/Lib/site-packages/django/db/models/sql/subqueries.py | 59 | 8022 | """
Query subclasses which provide extra functionality beyond simple data retrieval.
"""
from django.core.exceptions import FieldError
from django.db import connections
from django.db.models.query_utils import Q
from django.db.models.sql.constants import (
CURSOR, GET_ITERATOR_CHUNK_SIZE, NO_RESULTS,
)
from django.db.models.sql.query import Query
from django.utils import six
__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'AggregateQuery']
class DeleteQuery(Query):
    """
    Delete queries are done through this class, since they are more constrained
    than general queries.
    """
    compiler = 'SQLDeleteCompiler'
    def do_query(self, table, where, using):
        # Execute one DELETE against `table` and return the number of rows
        # reported deleted (0 when the backend returns no cursor).
        self.tables = [table]
        self.where = where
        cursor = self.get_compiler(using).execute_sql(CURSOR)
        return cursor.rowcount if cursor else 0
    def delete_batch(self, pk_list, using, field=None):
        """
        Set up and execute delete queries for all the objects in pk_list.
        More than one physical query may be executed if there are a
        lot of values in pk_list.
        """
        # number of objects deleted
        num_deleted = 0
        if not field:
            field = self.get_meta().pk
        for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            # Fresh WHERE clause per chunk: each query filters only on its
            # own slice of primary keys.
            self.where = self.where_class()
            self.add_q(Q(
                **{field.attname + '__in': pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]}))
            num_deleted += self.do_query(self.get_meta().db_table, self.where, using=using)
        return num_deleted
    def delete_qs(self, query, using):
        """
        Delete the queryset in one SQL query (if possible). For simple queries
        this is done by copying the query.query.where to self.query, for
        complex queries by using subquery.
        """
        innerq = query.query
        # Make sure the inner query has at least one table in use.
        innerq.get_initial_alias()
        # The same for our new query.
        self.get_initial_alias()
        innerq_used_tables = [t for t in innerq.tables
                              if innerq.alias_refcount[t]]
        if not innerq_used_tables or innerq_used_tables == self.tables:
            # There is only the base table in use in the query.
            self.where = innerq.where
        else:
            pk = query.model._meta.pk
            if not connections[using].features.update_can_self_select:
                # We can't do the delete using subquery.
                # Fall back to fetching the pks and deleting in batches.
                values = list(query.values_list('pk', flat=True))
                if not values:
                    return 0
                return self.delete_batch(values, using)
            else:
                # Reduce the inner query to a pk-only select usable as a
                # "pk IN (subquery)" filter.
                innerq.clear_select_clause()
                innerq.select = [
                    pk.get_col(self.get_initial_alias())
                ]
                values = innerq
            self.where = self.where_class()
            self.add_q(Q(pk__in=values))
        cursor = self.get_compiler(using).execute_sql(CURSOR)
        return cursor.rowcount if cursor else 0
class UpdateQuery(Query):
    """
    Represents an "update" SQL query.
    """
    compiler = 'SQLUpdateCompiler'
    def __init__(self, *args, **kwargs):
        super(UpdateQuery, self).__init__(*args, **kwargs)
        self._setup_query()
    def _setup_query(self):
        """
        Runs on initialization and after cloning. Any attributes that would
        normally be set in __init__ should go in here, instead, so that they
        are also set up after a clone() call.
        """
        self.values = []
        self.related_ids = None
        if not hasattr(self, 'related_updates'):
            self.related_updates = {}
    def clone(self, klass=None, **kwargs):
        # Copy pending related updates so the clone can diverge from the
        # original without sharing state.
        return super(UpdateQuery, self).clone(klass,
                related_updates=self.related_updates.copy(), **kwargs)
    def update_batch(self, pk_list, values, using):
        # Apply `values` to the rows in pk_list, chunking the pk filter the
        # same way DeleteQuery.delete_batch does.
        self.add_update_values(values)
        for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            self.where = self.where_class()
            self.add_q(Q(pk__in=pk_list[offset: offset + GET_ITERATOR_CHUNK_SIZE]))
            self.get_compiler(using).execute_sql(NO_RESULTS)
    def add_update_values(self, values):
        """
        Convert a dictionary of field name to value mappings into an update
        query. This is the entry point for the public update() method on
        querysets.
        """
        values_seq = []
        for name, val in six.iteritems(values):
            field = self.get_meta().get_field(name)
            direct = not (field.auto_created and not field.concrete) or not field.concrete
            model = field.model._meta.concrete_model
            if not direct or (field.is_relation and field.many_to_many):
                raise FieldError(
                    'Cannot update model field %r (only non-relations and '
                    'foreign keys permitted).' % field
                )
            if model is not self.get_meta().model:
                # Field belongs to an ancestor model: queue it for a
                # separate per-ancestor UPDATE.
                self.add_related_update(model, field, val)
                continue
            values_seq.append((field, model, val))
        return self.add_update_fields(values_seq)
    def add_update_fields(self, values_seq):
        """
        Append a sequence of (field, model, value) triples to the internal list
        that will be used to generate the UPDATE query. Might be more usefully
        called add_update_targets() to hint at the extra information here.
        """
        self.values.extend(values_seq)
    def add_related_update(self, model, field, value):
        """
        Adds (name, value) to an update query for an ancestor model.
        Updates are coalesced so that we only run one update query per ancestor.
        """
        self.related_updates.setdefault(model, []).append((field, None, value))
    def get_related_updates(self):
        """
        Returns a list of query objects: one for each update required to an
        ancestor model. Each query will have the same filtering conditions as
        the current query but will only update a single table.
        """
        if not self.related_updates:
            return []
        result = []
        for model, values in six.iteritems(self.related_updates):
            query = UpdateQuery(model)
            query.values = values
            if self.related_ids is not None:
                query.add_filter(('pk__in', self.related_ids))
            result.append(query)
        return result
class InsertQuery(Query):
    """Represents an "insert" SQL query."""
    compiler = 'SQLInsertCompiler'
    def __init__(self, *args, **kwargs):
        super(InsertQuery, self).__init__(*args, **kwargs)
        self.fields = []
        self.objs = []
    def clone(self, klass=None, **kwargs):
        # NOTE(review): this reads self.raw, which is only set by
        # insert_values(); cloning before insert_values() has run would
        # raise AttributeError -- confirm callers always populate first.
        extras = {
            'fields': self.fields[:],
            'objs': self.objs[:],
            'raw': self.raw,
        }
        extras.update(kwargs)
        return super(InsertQuery, self).clone(klass, **extras)
    def insert_values(self, fields, objs, raw=False):
        """
        Set up the insert query from the 'insert_values' dictionary. The
        dictionary gives the model field names and their target values.
        If 'raw_values' is True, the values in the 'insert_values' dictionary
        are inserted directly into the query, rather than passed as SQL
        parameters. This provides a way to insert NULL and DEFAULT keywords
        into the query, for example.
        """
        self.fields = fields
        self.objs = objs
        self.raw = raw
class AggregateQuery(Query):
    """
    An AggregateQuery takes another query as a parameter to the FROM
    clause and only selects the elements in the provided list.
    """
    compiler = 'SQLAggregateCompiler'
    def add_subquery(self, query, using):
        # Compile the inner query up front; the resulting SQL string and
        # parameters are stored for the compiler to interpolate later.
        self.subquery, self.sub_params = query.get_compiler(using).as_sql(
            with_col_aliases=True,
            subquery=True,
        )
| mit |
wfxiang08/django190 | tests/migrations/test_base.py | 292 | 4620 | import os
import shutil
import tempfile
from contextlib import contextmanager
from importlib import import_module
from django.apps import apps
from django.db import connection
from django.db.migrations.recorder import MigrationRecorder
from django.test import TransactionTestCase
from django.test.utils import extend_sys_path
from django.utils.module_loading import module_dir
class MigrationTestBase(TransactionTestCase):
    """
    Contains an extended set of asserts for testing migrations and schema operations.
    """
    available_apps = ["migrations"]
    def tearDown(self):
        # Reset applied-migrations state.
        recorder = MigrationRecorder(connection)
        recorder.migration_qs.filter(app='migrations').delete()
    def get_table_description(self, table):
        # Introspect the live database schema for `table`.
        with connection.cursor() as cursor:
            return connection.introspection.get_table_description(cursor, table)
    def assertTableExists(self, table):
        with connection.cursor() as cursor:
            self.assertIn(table, connection.introspection.table_names(cursor))
    def assertTableNotExists(self, table):
        with connection.cursor() as cursor:
            self.assertNotIn(table, connection.introspection.table_names(cursor))
    def assertColumnExists(self, table, column):
        self.assertIn(column, [c.name for c in self.get_table_description(table)])
    def assertColumnNotExists(self, table, column):
        self.assertNotIn(column, [c.name for c in self.get_table_description(table)])
    def assertColumnNull(self, table, column):
        self.assertEqual([c.null_ok for c in self.get_table_description(table) if c.name == column][0], True)
    def assertColumnNotNull(self, table, column):
        self.assertEqual([c.null_ok for c in self.get_table_description(table) if c.name == column][0], False)
    def assertIndexExists(self, table, columns, value=True):
        # `value` flips the assertion so assertIndexNotExists can reuse it.
        with connection.cursor() as cursor:
            self.assertEqual(
                value,
                any(
                    c["index"]
                    for c in connection.introspection.get_constraints(cursor, table).values()
                    if c['columns'] == list(columns)
                ),
            )
    def assertIndexNotExists(self, table, columns):
        return self.assertIndexExists(table, columns, False)
    def assertFKExists(self, table, columns, to, value=True):
        with connection.cursor() as cursor:
            self.assertEqual(
                value,
                any(
                    c["foreign_key"] == to
                    for c in connection.introspection.get_constraints(cursor, table).values()
                    if c['columns'] == list(columns)
                ),
            )
    def assertFKNotExists(self, table, columns, to, value=True):
        # NOTE(review): the `value` parameter is accepted but always
        # overridden with False here.
        return self.assertFKExists(table, columns, to, False)
    @contextmanager
    def temporary_migration_module(self, app_label='migrations', module=None):
        """
        Allows testing management commands in a temporary migrations module.
        Wrap all invocations to makemigrations and squashmigrations with this
        context manager in order to avoid creating migration files in your
        source tree inadvertently.
        Takes the application label that will be passed to makemigrations or
        squashmigrations and the Python path to a migrations module.
        The migrations module is used as a template for creating the temporary
        migrations module. If it isn't provided, the application's migrations
        module is used, if it exists.
        Returns the filesystem path to the temporary migrations module.
        """
        temp_dir = tempfile.mkdtemp()
        try:
            target_dir = tempfile.mkdtemp(dir=temp_dir)
            with open(os.path.join(target_dir, '__init__.py'), 'w'):
                pass
            target_migrations_dir = os.path.join(target_dir, 'migrations')
            if module is None:
                module = apps.get_app_config(app_label).name + '.migrations'
            try:
                source_migrations_dir = module_dir(import_module(module))
            except (ImportError, ValueError):
                # No template module: start with an empty migrations dir.
                pass
            else:
                shutil.copytree(source_migrations_dir, target_migrations_dir)
            with extend_sys_path(temp_dir):
                new_module = os.path.basename(target_dir) + '.migrations'
                with self.settings(MIGRATION_MODULES={app_label: new_module}):
                    yield target_migrations_dir
        finally:
            shutil.rmtree(temp_dir)
| bsd-3-clause |
barachka/odoo | addons/hr_evaluation/__openerp__.py | 62 | 3292 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo addon manifest for the Employee Appraisals module.
{
    'name': 'Employee Appraisals',
    'version': '0.1',
    'author': 'OpenERP SA',
    'category': 'Human Resources',
    'sequence': 31,
    'website': 'http://www.openerp.com',
    'summary': 'Periodical Evaluations, Appraisals, Surveys',
    'images': ['images/hr_evaluation_analysis.jpeg',
               'images/hr_evaluation.jpeg',
               'images/hr_interview_requests.jpeg'],
    'depends': ['hr', 'calendar', 'survey'],
    'description': """
Periodical Employees evaluation and appraisals
==============================================
By using this application you can maintain the motivational process by doing periodical evaluations of your employees' performance. The regular assessment of human resources can benefit your people as well your organization.
An evaluation plan can be assigned to each employee. These plans define the frequency and the way you manage your periodic personal evaluations. You will be able to define steps and attach interview forms to each step.
Manages several types of evaluations: bottom-up, top-down, self-evaluations and the final evaluation by the manager.
Key Features
------------
* Ability to create employees evaluations.
* An evaluation can be created by an employee for subordinates, juniors as well as his manager.
* The evaluation is done according to a plan in which various surveys can be created. Each survey can be answered by a particular level in the employees hierarchy. The final review and evaluation is done by the manager.
* Every evaluation filled by employees can be viewed in a PDF form.
* Interview Requests are generated automatically by OpenERP according to employees evaluation plans. Each user receives automatic emails and requests to perform a periodical evaluation of their colleagues.
""",
    "data": [
        'security/ir.model.access.csv',
        'security/hr_evaluation_security.xml',
        'hr_evaluation_view.xml',
        'report/hr_evaluation_report_view.xml',
        'survey_data_appraisal.xml',
        'hr_evaluation_data.xml',
        'hr_evaluation_installer.xml',
    ],
    "demo": ["hr_evaluation_demo.xml"],
    # 'test': [
    #     'test/test_hr_evaluation.yml',
    #     'test/hr_evalution_demo.yml',
    # ],
    'auto_install': False,
    'installable': True,
    'application': True,
}
| agpl-3.0 |
pombredanne/invenio | modules/bibclassify/lib/bibclassify_acronym_analyzer.py | 1 | 9253 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Bibclassify acronym analyser.
"""
import re
# Matches a bracketed uppercase acronym of two or more letters, optionally
# dotted and/or pluralised, e.g. "(WHO)", "[I.B.M.]", "(CPUs)"; group 1
# captures the acronym itself.
ACRONYM_BRACKETS_REGEX = re.compile("[([] ?(([A-Z]\.?){2,})s? ?[)\]]")
# Used to strip the dots out of dotted acronyms ("I.B.M." -> "IBM").
DOTS_REGEX = re.compile("\.")
MAXIMUM_LEVEL = 2 # NOTE(review): not referenced in this module's visible code -- confirm before removing.
# Filler words ignored when matching expansion words to acronym letters.
STOPLIST = ("and", "of", "for", "the", "to", "do", "de", "theory",
            "model", "radiation", "scheme", "representation")
# INTERFACE
def get_acronyms(fulltext):
    """Find acronym/expansion candidates in *fulltext*.

    Returns a dictionary mapping each bracketed acronym found (dots
    stripped, e.g. "IBM") to a list of ``(expansion, level)`` tuples,
    where *level* records which heuristic matched (0 = expansion quoted
    next to the acronym, 5 = fuzziest initial matching).
    """
    acronyms = {}
    for m in ACRONYM_BRACKETS_REGEX.finditer(fulltext):
        acronym = DOTS_REGEX.sub("", m.group(1))
        # The expansion is looked for in the 80 characters that precede
        # the bracketed acronym.
        potential_expansion = fulltext[m.start() - 80:m.start()].replace("\n",
            " ")
        # Strip
        # The replacement strings below must be raw: "\1" in a plain
        # literal is the control character \x01, not a backreference.
        potential_expansion = re.sub("(\W).(\W)", r"\1\2", potential_expansion)
        potential_expansion = re.sub("(\w)\(s\)\W", r"\1", potential_expansion)
        potential_expansion = re.sub("""[^\w'"]+$""", "", potential_expansion)
        potential_expansion = re.sub("[[(].+[\])]", "", potential_expansion)
        potential_expansion = re.sub(" {2,}", " ", potential_expansion)
        # LEVEL 0: expansion between quotes
        # Double quotes
        match = re.search(""""([^"]+)["]$""", potential_expansion)
        if match is None:
            # Single quotes
            match = re.search("""'([^"]+)[']$""", potential_expansion)
        if match is not None:
            if acronym in match.group(1):
                continue
            pattern = ""
            for char in acronym[:-1]:
                pattern += "%s\w+\W*" % char
            pattern += "%s\w+" % acronym[-1]
            if re.search(pattern, match.group(1), re.I) is not None:
                _add_expansion_to_acronym_dict(acronym, match.group(1), 0,
                    acronyms)
            continue
        # Pattern: one word per acronym letter, anchored at the end of
        # the candidate text.
        pattern = "\W("
        for char in acronym[:-1]:
            pattern += "%s\w+\W+" % char
        pattern += "%s\w+)$" % acronym[-1]
        # LEVEL 1: expansion with uppercase initials
        match = re.search(pattern, potential_expansion)
        if match is not None:
            _add_expansion_to_acronym_dict(acronym, match.group(1), 1,
                acronyms)
            continue
        # LEVEL 2: expansion with initials
        match = re.search(pattern, potential_expansion, re.I)
        if match is not None:
            _add_expansion_to_acronym_dict(acronym, match.group(1), 2,
                acronyms)
            continue
        # LEVEL 3: expansion with initials and STOPLIST
        potential_expansion_stripped = " ".join([word for word in
            _words(potential_expansion) if word not in STOPLIST])
        match = re.search(pattern, potential_expansion_stripped, re.I)
        if match is not None:
            first_expansion_word = re.search("\w+", match.group(1)).group()
            start = potential_expansion.lower().rfind(first_expansion_word)
            _add_expansion_to_acronym_dict(acronym,
                potential_expansion[start:], 3, acronyms)
            continue
        # LEVEL 4: expansion with fuzzy initials and stoplist
        # Walk the acronym letters and the candidate words backwards in
        # lockstep, tolerating doubled letters inside a single word.
        reversed_words = _words(potential_expansion_stripped)
        reversed_words.reverse()
        reversed_acronym = list(acronym.lower())
        reversed_acronym.reverse()
        index0 = 0
        index1 = 0
        word = ""
        try:
            while index0 < len(reversed_acronym) and index1 < len(reversed_words):
                word = reversed_words[index1]
                if index0 + 1 < len(reversed_words):
                    # NOTE(review): indexes reversed_words with index0 here
                    # (index1 everywhere else) -- looks suspicious; confirm.
                    next_word = reversed_words[index0 + 1]
                else:
                    next_word = "_"
                char = reversed_acronym[index0]
                if index0 + 1 < len(reversed_acronym):
                    next_char = reversed_acronym[index0 + 1]
                else:
                    next_char = "_"
                if char == next_char and \
                   word.startswith(char) and \
                   word.count(char) > 1 and \
                   not next_word.startswith(char):
                    index0 += 2
                    index1 += 1
                if word.startswith(char):
                    index0 += 1
                    index1 += 1
                elif char in word and \
                     not word.endswith(char) and \
                     word.startswith(next_char):
                    index0 += 2
                    index1 += 1
                else:
                    word = ""
                    break
        except IndexError:
            word = ""
        if not word.startswith(char):
            word = ""
        if word:
            start = potential_expansion.lower().rfind(word)
            _add_expansion_to_acronym_dict(acronym,
                potential_expansion[start:], 4, acronyms)
            continue
        # LEVEL 5: expansion with fuzzy initials
        reversed_words = _words(potential_expansion.lower())
        reversed_words.reverse()
        reversed_acronym = list(acronym.lower())
        reversed_acronym.reverse()
        index0 = 0
        index1 = 0
        word = ""
        try:
            while index0 < len(reversed_acronym) and index1 < len(reversed_words):
                word = reversed_words[index1]
                if index0 + 1 < len(reversed_words):
                    next_word = reversed_words[index0 + 1]
                else:
                    next_word = ""
                char = reversed_acronym[index0]
                if index0 + 1 < len(reversed_acronym):
                    next_char = reversed_acronym[index0 + 1]
                else:
                    next_char = ""
                if char == next_char and \
                   word.startswith(char) and \
                   word.count(char) > 1 and \
                   not next_word.startswith(char):
                    index0 += 2
                    index1 += 1
                if word.startswith(char):
                    index0 += 1
                    index1 += 1
                elif char in word and \
                     not word.endswith(char) and \
                     word.startswith(next_char):
                    index0 += 2
                    index1 += 1
                else:
                    word = ""
                    break
        except IndexError:
            word = ""
        if not word.startswith(char):
            word = ""
        if word:
            start = potential_expansion.lower().rfind(word)
            _add_expansion_to_acronym_dict(acronym,
                potential_expansion[start:], 5, acronyms)
            continue
    return acronyms
# PRIVATE METHODS
def _words(expression):
"""Returns a list of words of the expression."""
return re.findall("\w+", expression.lower())
def _add_expansion_to_acronym_dict(acronym, expansion, level, dictionary):
    """Adds an acronym to the dictionary. Takes care of avoiding
    duplicates and keeping the expansion marked with the best score."""
    # Reject expansions that cannot be right: not longer than the acronym
    # itself, or containing the acronym verbatim.  (Returns None here but
    # True/False below -- callers ignore the return value.)
    if len(acronym) >= len(expansion) or acronym in expansion:
        return
    for punctuation in re.findall("\W", expansion):
        # The expansion contains non-basic punctuation. It is probable
        # that it is invalid. Discard it.
        if punctuation not in (",", " ", "-"):
            return False
    if acronym in dictionary:
        add = True
        for stored_expansion, stored_level in dictionary[acronym]:
            if _equivalent_expansions(stored_expansion, expansion):
                if level < stored_level:
                    # Better (lower) level wins: drop the stored entry and
                    # append the new one below.  Removing while iterating
                    # is safe only because we break immediately.
                    dictionary[acronym].remove((stored_expansion, stored_level))
                    break
                else:
                    # NOTE(review): no break here -- a later equivalent
                    # entry with a worse level can still be removed even
                    # though add is already False; confirm intended.
                    add = False
        if add:
            dictionary[acronym].append((expansion, level))
            return True
    else:
        dictionary.setdefault(acronym, []).append((expansion, level))
        return True
    return False
def _equivalent_expansions(expansion1, expansion2):
    """Tell whether two expansion strings denote the same phrase.

    They are equivalent when their lowercase word lists are identical, or
    when the words still match after truncating each one to its first
    five letters (so plural/inflected variants compare equal).
    """
    first = _words(expansion1)
    second = _words(expansion2)
    if first == second:
        return True

    def _signature(words):
        # 5-letter prefixes: small suffix differences are ignored.
        return "".join(word[:5] for word in words)

    return _signature(first) == _signature(second)
| gpl-2.0 |
malexandre/python-xhtml2pdf-demo | reportlab/tests/test_pdfgen_links.py | 14 | 6928 | #Copyright ReportLab Europe Ltd. 2000-2012
#this test and associates functionality kinds donated by Ian Sparks.
#see license.txt for license details
"""
Tests for internal links and destinations
"""
__version__='''$Id$'''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
#
# Fit tests
#
# Modification History
# ====================
#
# 11-Mar-2003 Ian Sparks
# * Initial version.
#
#
from reportlab.pdfgen import canvas
from reportlab.lib.units import inch
from reportlab.lib.pagesizes import letter
from reportlab.lib import colors
import unittest
def markPage(c, height=letter[1], width=letter[0]):
    """Draw a reference grid on canvas *c*: one "x=.. y=.." label per
    whole inch, plus a vertical and a horizontal rule through each
    labelled point spanning the full page height/width."""
    height_in = height / inch
    width_in = width / inch
    for row in range(int(height_in)):
        for col in range(int(width_in)):
            x = col * inch
            y = row * inch
            c.drawString(x, y, "x=%d y=%d" % (col, row))
            c.line(x, 0, x, height_in * inch)
            c.line(0, y, width_in * inch, y)
fn = outputfile("test_pdfgen_links.pdf")
class LinkTestCase(unittest.TestCase):
    """Exercise canvas bookmarks, outline entries and link annotations.

    Builds a three page PDF: each page records bookmark destinations with
    every supported ``fit`` mode (XYZ/Fit/FitH/FitV/FitR), and pages link
    to each other both forward and backward via the document outline and
    ``linkAbsolute`` rectangles.
    """
    def test1(self):
        c = canvas.Canvas(fn,pagesize=letter)
        #Page 1
        c.setFont("Courier", 10)
        markPage(c)
        c.highlightAnnotation('annotation 0',[inch,inch,2*inch,2*inch])
        c.highlightAnnotation('annotation 1',[2*inch,3*inch,3*inch,3.5*inch])
        c.bookmarkPage("P1")
        c.addOutlineEntry("Page 1","P1")
        #Note : XYZ Left is ignored because at this zoom the whole page fits the screen
        c.bookmarkPage("P1_XYZ",fit="XYZ",top=7*inch,left=3*inch,zoom=0.5)
        c.addOutlineEntry("Page 1 XYZ #1 (top=7,left=3,zoom=0.5)","P1_XYZ",level=1)
        c.bookmarkPage("P1_XYZ2",fit="XYZ",top=7*inch,left=3*inch,zoom=5)
        c.addOutlineEntry("Page 1 XYZ #2 (top=7,left=3,zoom=5)","P1_XYZ2",level=1)
        c.bookmarkPage("P1_FIT",fit="Fit")
        c.addOutlineEntry("Page 1 Fit","P1_FIT",level=1)
        c.bookmarkPage("P1_FITH",fit="FitH",top=2*inch)
        c.addOutlineEntry("Page 1 FitH (top = 2 inch)","P1_FITH",level=1)
        c.bookmarkPage("P1_FITV",fit="FitV",left=3*inch)
        c.addOutlineEntry("Page 1 FitV (left = 3 inch)","P1_FITV",level=1)
        c.bookmarkPage("P1_FITR",fit="FitR",left=1*inch,bottom=2*inch,right=5*inch,top=6*inch)
        c.addOutlineEntry("Page 1 FitR (left=1,bottom=2,right=5,top=6)","P1_FITR",level=1)
        c.bookmarkPage("P1_FORWARD")
        c.addOutlineEntry("Forward References","P1_FORWARD",level=2)
        c.addOutlineEntry("Page 3 XYZ (top=7,left=3,zoom=0)","P3_XYZ",level=3)
        #Create link to FitR on page 3
        c.saveState()
        c.setFont("Courier", 14)
        c.setFillColor(colors.blue)
        c.drawString(inch+20,inch+20,"Click to jump to the meaning of life")
        c.linkAbsolute("","MOL",(inch+10,inch+10,6*inch,2*inch))
        c.restoreState()
        #Create linkAbsolute to page 2
        c.saveState()
        c.setFont("Courier", 14)
        c.setFillColor(colors.green)
        c.drawString(4*inch,4*inch,"Jump to 2.5 inch position on page 2")
        c.linkAbsolute("","HYPER_1",(3.75*inch,3.75*inch,8.25*inch,4.25*inch))
        c.restoreState()
        c.showPage()
        #Page 2
        c.setFont("Helvetica", 10)
        markPage(c)
        c.bookmarkPage("P2")
        c.addOutlineEntry("Page 2","P2")
        #Note : This time left will be at 3*inch because the zoom makes the page to big to fit
        c.bookmarkPage("P2_XYZ",fit="XYZ",top=7*inch,left=3*inch,zoom=2)
        c.addOutlineEntry("Page 2 XYZ (top=7,left=3,zoom=2.0)","P2_XYZ",level=1)
        c.bookmarkPage("P2_FIT",fit="Fit")
        c.addOutlineEntry("Page 2 Fit","P2_FIT",level=1)
        c.bookmarkPage("P2_FITH",fit="FitH",top=2*inch)
        c.addOutlineEntry("Page 2 FitH (top = 2 inch)","P2_FITH",level=1)
        c.bookmarkPage("P2_FITV",fit="FitV",left=10*inch)
        c.addOutlineEntry("Page 2 FitV (left = 10 inch)","P2_FITV",level=1)
        c.bookmarkPage("P2_FITR",fit="FitR",left=1*inch,bottom=2*inch,right=5*inch,top=6*inch)
        c.addOutlineEntry("Page 2 FitR (left=1,bottom=2,right=5,top=6)","P2_FITR",level=1)
        c.bookmarkPage("P2_FORWARD")
        c.addOutlineEntry("Forward References","P2_FORWARD",level=2)
        c.addOutlineEntry("Page 3 XYZ (top=7,left=3,zoom=0)","P3_XYZ",level=3)
        c.bookmarkPage("P2_BACKWARD")
        c.addOutlineEntry("Backward References","P2_BACKWARD",level=2)
        c.addOutlineEntry("Page 1 Fit","P1_FIT",level=3)
        c.addOutlineEntry("Page 1 FitR (left=1,bottom=2,right=5,top=6)","P1_FITR",level=3)
        #Horizontal absolute test from page 1. Note that because of the page size used on page 3 all this will do
        #is put the view centered on the bookmark. If you want to see it "up close and personal" change page3 to be
        #the same page size as the other pages.
        c.saveState()
        c.setFont("Courier", 14)
        c.setFillColor(colors.green)
        c.drawString(2.5*inch,2.5*inch,"This line is hyperlinked from page 1")
        # c.bookmarkHorizontalAbsolute("HYPER_1",3*inch) #slightly higher than the text otherwise text is of screen above.
        c.bookmarkPage("HYPER_1",fit="XYZ",top=2.5*inch,bottom=2*inch)
        c.restoreState()
        #
        c.showPage()
        #Page 3
        c.setFont("Times-Roman", 10)
        #Turn the page on its size and make it 2* the normal "width" in order to have something to test FitV against.
        c.setPageSize((2*letter[1],letter[0]))
        markPage(c,height=letter[0],width=2*letter[1])
        c.bookmarkPage("P3")
        c.addOutlineEntry("Page 3 (Double-wide landscape page)","P3")
        #Note : XYZ with no zoom (set it to something first
        c.bookmarkPage("P3_XYZ",fit="XYZ",top=7*inch,left=3*inch,zoom=0)
        c.addOutlineEntry("Page 3 XYZ (top=7,left=3,zoom=0)","P3_XYZ",level=1)
        #FitV works here because the page is so wide it can"t all fit on the page
        c.bookmarkPage("P3_FITV",fit="FitV",left=10*inch)
        c.addOutlineEntry("Page 3 FitV (left = 10 inch)","P3_FITV",level=1)
        c.bookmarkPage("P3_BACKWARD")
        c.addOutlineEntry("Backward References","P3_BACKWARD",level=2)
        c.addOutlineEntry("Page 1 XYZ #1 (top=7,left=3,zoom=0.5)","P1_XYZ",level=3)
        c.addOutlineEntry("Page 1 XYZ #2 (top=7,left=3,zoom=5)","P1_XYZ2",level=3)
        c.addOutlineEntry("Page 2 FitV (left = 10 inch)","P2_FITV",level=3)
        #Add link from page 1
        c.saveState()
        c.setFont("Courier", 40)
        c.setFillColor(colors.green)
        c.drawString(5*inch,6*inch,"42")
        c.bookmarkPage("MOL",fit="FitR",left=4*inch,top=7*inch,bottom=4*inch,right=6*inch)
        # Open the outline panel by default, then write the file.
        c.showOutline()
        c.save()
def makeSuite():
    # Standard reportlab test hook: wrap the test case for the suite runner.
    return makeSuiteForClasses(LinkTestCase)
#noruntests
# Allow running this test module directly; reports where the PDF was written.
if __name__ == "__main__":
    unittest.TextTestRunner().run(makeSuite())
    print("wrote", fn)
    printLocation()
| mit |
Dhivyap/ansible | test/units/modules/storage/netapp/test_na_ontap_snapshot.py | 38 | 8659 | # (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit tests ONTAP Ansible module: na_ontap_nvme_snapshot'''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_snapshot \
import NetAppOntapSnapshot as my_module
# Skip the entire module when the NetApp SDK (netapp_lib) is not installed.
if not netapp_utils.has_netapp_lib():
    pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
    """Stage *args* as the module input AnsibleModule reads at construction.

    The arguments are wrapped in the ``ANSIBLE_MODULE_ARGS`` envelope,
    serialized to JSON and stashed on ``basic._ANSIBLE_ARGS``.
    """
    payload = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    basic._ANSIBLE_ARGS = to_bytes(payload)  # pylint: disable=protected-access
class AnsibleExitJson(Exception):
    """Raised in place of ``module.exit_json`` so tests can catch the result."""
class AnsibleFailJson(Exception):
    """Raised in place of ``module.fail_json`` so tests can catch the failure."""
def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Stand-in for ``module.exit_json``; packages the result in an exception."""
    kwargs.setdefault('changed', False)
    raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Stand-in for ``module.fail_json``; packages the failure in an exception."""
    kwargs = dict(kwargs, failed=True)
    raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
    ''' mock server connection to ONTAP host '''
    def __init__(self, kind=None):
        ''' save arguments '''
        # kind selects the canned behavior: None (echo), 'snapshot'
        # (return snapshot info) or 'snapshot_fail' (raise NaApiError).
        self.type = kind
        self.xml_in = None   # last request received, for inspection
        self.xml_out = None  # last response returned, for inspection
    def invoke_successfully(self, xml, enable_tunneling):  # pylint: disable=unused-argument
        ''' mock invoke_successfully returning xml data '''
        self.xml_in = xml
        if self.type == 'snapshot':
            xml = self.build_snapshot_info()
        elif self.type == 'snapshot_fail':
            raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
        self.xml_out = xml
        return xml
    @staticmethod
    def build_snapshot_info():
        ''' build xml data for snapshot-info '''
        # Values here must match set_default_args() in TestMyModule so the
        # "existing snapshot" tests see no difference (idempotent path).
        xml = netapp_utils.zapi.NaElement('xml')
        data = {'num-records': 1,
                'attributes-list': {'snapshot-info': {'comment': 'new comment',
                                                      'name': 'ansible',
                                                      'snapmirror-label': 'label12'}}}
        xml.translate_struct(data)
        return xml
class TestMyModule(unittest.TestCase):
    ''' a group of related Unit Tests '''
    def setUp(self):
        # Patch exit_json/fail_json on AnsibleModule so module termination
        # raises catchable exceptions instead of calling sys.exit().
        self.mock_module_helper = patch.multiple(basic.AnsibleModule,
                                                 exit_json=exit_json,
                                                 fail_json=fail_json)
        self.mock_module_helper.start()
        self.addCleanup(self.mock_module_helper.stop)
        self.server = MockONTAPConnection()
        # onbox=True switches to real-appliance credentials for manual runs.
        self.onbox = False
    def set_default_args(self):
        # Returns module arguments; the mocked branch values must line up
        # with MockONTAPConnection.build_snapshot_info() for idempotency tests.
        if self.onbox:
            hostname = '10.193.75.3'
            username = 'admin'
            password = 'netapp1!'
            vserver = 'ansible'
            volume = 'ansible'
            snapshot = 'ansible'
            comment = 'new comment'
            snapmirror_label = 'label12'
        else:
            hostname = 'hostname'
            username = 'username'
            password = 'password'
            vserver = 'vserver'
            volume = 'ansible'
            snapshot = 'ansible'
            comment = 'new comment'
            snapmirror_label = 'label12'
        return dict({
            'hostname': hostname,
            'username': username,
            'password': password,
            'vserver': vserver,
            'volume': volume,
            'snapshot': snapshot,
            'comment': comment,
            'snapmirror_label': snapmirror_label
        })
    def test_module_fail_when_required_args_missing(self):
        ''' required arguments are reported as errors '''
        with pytest.raises(AnsibleFailJson) as exc:
            set_module_args({})
            my_module()
        print('Info: %s' % exc.value.args[0]['msg'])
    def test_ensure_get_called(self):
        ''' test get_snapshot() for non-existent snapshot'''
        set_module_args(self.set_default_args())
        my_obj = my_module()
        my_obj.server = self.server
        assert my_obj.get_snapshot() is None
    def test_ensure_get_called_existing(self):
        ''' test get_snapshot() for existing snapshot'''
        set_module_args(self.set_default_args())
        my_obj = my_module()
        my_obj.server = MockONTAPConnection(kind='snapshot')
        assert my_obj.get_snapshot()
    @patch('ansible.modules.storage.netapp.na_ontap_snapshot.NetAppOntapSnapshot.create_snapshot')
    def test_successful_create(self, create_snapshot):
        ''' creating snapshot and testing idempotency '''
        set_module_args(self.set_default_args())
        my_obj = my_module()
        if not self.onbox:
            my_obj.server = self.server
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        assert exc.value.args[0]['changed']
        create_snapshot.assert_called_with()
        # to reset na_helper from remembering the previous 'changed' value
        my_obj = my_module()
        if not self.onbox:
            my_obj.server = MockONTAPConnection('snapshot')
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        assert not exc.value.args[0]['changed']
    @patch('ansible.modules.storage.netapp.na_ontap_snapshot.NetAppOntapSnapshot.modify_snapshot')
    def test_successful_modify(self, modify_snapshot):
        ''' modifying snapshot and testing idempotency '''
        data = self.set_default_args()
        data['comment'] = 'adding comment'
        data['snapmirror_label'] = 'label22'
        set_module_args(data)
        my_obj = my_module()
        if not self.onbox:
            my_obj.server = MockONTAPConnection('snapshot')
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        assert exc.value.args[0]['changed']
        modify_snapshot.assert_called_with()
        # to reset na_helper from remembering the previous 'changed' value
        data['comment'] = 'new comment'
        data['snapmirror_label'] = 'label12'
        set_module_args(data)
        my_obj = my_module()
        if not self.onbox:
            my_obj.server = MockONTAPConnection('snapshot')
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        assert not exc.value.args[0]['changed']
    @patch('ansible.modules.storage.netapp.na_ontap_snapshot.NetAppOntapSnapshot.delete_snapshot')
    def test_successful_delete(self, delete_snapshot):
        ''' deleting snapshot and testing idempotency '''
        data = self.set_default_args()
        data['state'] = 'absent'
        set_module_args(data)
        my_obj = my_module()
        if not self.onbox:
            my_obj.server = MockONTAPConnection('snapshot')
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        assert exc.value.args[0]['changed']
        delete_snapshot.assert_called_with()
        # to reset na_helper from remembering the previous 'changed' value
        my_obj = my_module()
        if not self.onbox:
            my_obj.server = self.server
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        assert not exc.value.args[0]['changed']
    def test_if_all_methods_catch_exception(self):
        # 'snapshot_fail' makes every ZAPI call raise; each CRUD method must
        # convert the NaApiError into a fail_json with a descriptive message.
        module_args = {}
        module_args.update(self.set_default_args())
        set_module_args(module_args)
        my_obj = my_module()
        if not self.onbox:
            my_obj.server = MockONTAPConnection('snapshot_fail')
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.create_snapshot()
        assert 'Error creating snapshot ansible:' in exc.value.args[0]['msg']
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.delete_snapshot()
        assert 'Error deleting snapshot ansible:' in exc.value.args[0]['msg']
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.modify_snapshot()
        assert 'Error modifying snapshot ansible:' in exc.value.args[0]['msg']
| gpl-3.0 |
synconics/odoo | addons/crm/wizard/crm_merge_opportunities.py | 333 | 4562 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class crm_merge_opportunity(osv.osv_memory):
    """
    Merge opportunities together.
    If we're talking about opportunities, it's just because it makes more sense
    to merge opps than leads, because the leads are more ephemeral objects.
    But since opportunities are leads, it's also possible to merge leads
    together (resulting in a new lead), or leads and opps together (resulting
    in a new opp).
    """
    _name = 'crm.merge.opportunity'
    _description = 'Merge opportunities'
    _columns = {
        'opportunity_ids': fields.many2many('crm.lead', rel='merge_opportunity_rel', id1='merge_id', id2='opportunity_id', string='Leads/Opportunities'),
        'user_id': fields.many2one('res.users', 'Salesperson', select=True),
        'section_id': fields.many2one('crm.case.section', 'Sales Team', select=True),
    }
    def action_merge(self, cr, uid, ids, context=None):
        """Merge the selected leads/opps and open the resulting record's form view."""
        context = dict(context or {})
        lead_obj = self.pool.get('crm.lead')
        wizard = self.browse(cr, uid, ids[0], context=context)
        opportunity2merge_ids = wizard.opportunity_ids
        #TODO: why is this passed through the context ?
        context['lead_ids'] = [opportunity2merge_ids[0].id]
        merge_id = lead_obj.merge_opportunity(cr, uid, [x.id for x in opportunity2merge_ids], wizard.user_id.id, wizard.section_id.id, context=context)
        # The newly created lead might be a lead or an opp: redirect toward the right view
        merge_result = lead_obj.browse(cr, uid, merge_id, context=context)
        if merge_result.type == 'opportunity':
            return lead_obj.redirect_opportunity_view(cr, uid, merge_id, context=context)
        else:
            return lead_obj.redirect_lead_view(cr, uid, merge_id, context=context)
    def default_get(self, cr, uid, fields, context=None):
        """
        Use active_ids from the context to fetch the leads/opps to merge.
        In order to get merged, these leads/opps can't be in 'Dead' or 'Closed'
        """
        if context is None:
            context = {}
        record_ids = context.get('active_ids', False)
        res = super(crm_merge_opportunity, self).default_get(cr, uid, fields, context=context)
        if record_ids:
            opp_ids = []
            opps = self.pool.get('crm.lead').browse(cr, uid, record_ids, context=context)
            for opp in opps:
                # probability == 100 means won/closed; those are excluded.
                if opp.probability < 100:
                    opp_ids.append(opp.id)
            if 'opportunity_ids' in fields:
                res.update({'opportunity_ids': opp_ids})
        return res
    def on_change_user(self, cr, uid, ids, user_id, section_id, context=None):
        """ When changing the user, also set a section_id or restrict section id
            to the ones user_id is member of. """
        if user_id:
            if section_id:
                # count=True: we only need to know membership, not the records.
                user_in_section = self.pool.get('crm.case.section').search(cr, uid, [('id', '=', section_id), '|', ('user_id', '=', user_id), ('member_ids', '=', user_id)], context=context, count=True)
            else:
                user_in_section = False
            if not user_in_section:
                # Fall back to the first team the user belongs to (if any).
                section_id = False
                section_ids = self.pool.get('crm.case.section').search(cr, uid, ['|', ('user_id', '=', user_id), ('member_ids', '=', user_id)], context=context)
                if section_ids:
                    section_id = section_ids[0]
        return {'value': {'section_id': section_id}}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dmsuehir/spark-tk | regression-tests/sparktkregtests/testcases/scoretests/gmm_test.py | 12 | 2125 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tests GMM scoring engine """
import unittest
import os
from sparktkregtests.lib import sparktk_test
from sparktkregtests.lib import scoring_utils
class GMM(sparktk_test.SparkTKTestCase):
    def setUp(self):
        """Build test frame"""
        super(GMM, self).setUp()
        data_file = self.get_file("gmm_data.csv")
        self.frame = self.context.frame.import_csv(
            data_file, schema=[("x1", float), ("x2", float)])
    def test_model_scoring(self):
        """Test publishing a gmm model"""
        # Fixed seed keeps the trained clusters deterministic so the scoring
        # engine's answers can be compared against the in-process predictions.
        model = self.context.models.clustering.gmm.train(
            self.frame, ["x1", "x2"],
            column_scalings=[1.0, 1.0],
            k=5,
            max_iterations=500,
            seed=20,
            convergence_tol=0.0001)
        predict = model.predict(self.frame)
        test_rows = predict.to_pandas(predict.count())
        file_name = self.get_name("gmm")
        model_path = model.export_to_mar(self.get_export_file(file_name))
        # Score every row through the deployed scoring engine and require it
        # to agree with the cluster assigned by model.predict above.
        with scoring_utils.scorer(
                model_path, self.id()) as scorer:
            for i, row in test_rows.iterrows():
                res = scorer.score(
                    [dict(zip(["x1", "x2"], list(row[0:2])))])
                self.assertEqual(
                    row["predicted_cluster"], res.json()["data"][0]['Score'])
# Allow running this regression test module directly.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
ddub/lint-review | lintreview/tools/phpcs.py | 2 | 1647 | import logging
import os
import functools
from lintreview.tools import Tool
from lintreview.tools import run_command
from lintreview.utils import in_path
# Module-level logger shared by this tool.
log = logging.getLogger(__name__)
class Phpcs(Tool):
    """Run PHP_CodeSniffer (phpcs) over PHP files and collect its
    checkstyle-format report."""

    name = 'phpcs'

    def check_dependencies(self):
        """
        See if phpcs is on the system path.
        """
        return in_path('phpcs')

    def match_file(self, filename):
        """Only files with a .php extension are reviewed."""
        base = os.path.basename(filename)
        ext = os.path.splitext(base)[1]
        return ext == '.php'

    def process_files(self, files):
        """
        Run code checks with phpcs.

        Only a single process is made for all files
        to save resources.
        """
        log.debug('Processing %s files with %s', files, self.name)
        command = self.create_command(files)
        output = run_command(
            command,
            ignore_error=True)
        # phpcs reports absolute paths; convert them back to the relative
        # names the review expects.
        filename_converter = functools.partial(
            self._relativize_filename,
            files)
        self._process_checkstyle(output, filename_converter)

    def create_command(self, files):
        """Build the phpcs command line from the configured options.

        Supported options: ``standard`` (default PEAR), ``extensions``
        (default php) and ``tab_width``.
        """
        command = ['phpcs', '--report=checkstyle']
        standard = 'PEAR'
        if self.options.get('standard'):
            standard = self.apply_base(self.options['standard'])
        extension = 'php'
        if self.options.get('extensions'):
            extension = self.options['extensions']
        command += ['--standard=' + standard]
        command += ['--extensions=' + extension]
        if self.options.get('tab_width'):
            # Config values are often parsed as integers (e.g. tab_width: 4
            # in YAML); coerce to str so the concatenation cannot raise
            # TypeError.
            command += ['--tab-width=' + str(self.options['tab_width'])]
        command += files
        return command
| mit |
inovtec-solutions/OpenERP | openerp/addons/mrp/wizard/mrp_price.py | 56 | 2144 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class mrp_price(osv.osv_memory):
    _name = 'mrp.product_price'
    _description = 'Product Price'
    _columns = {
        'number': fields.integer('Quantity', required=True, help="Specify quantity of products to produce or buy. Report of Cost structure will be displayed base on this quantity."),
    }
    _defaults = {
        'number': 1,
    }
    def print_report(self, cr, uid, ids, context=None):
        """ To print the report of Product cost structure
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param context: A standard dictionary
        @return : Report
        """
        if context is None:
            context = {}
        # active_ids carries the products selected when the wizard was opened.
        datas = {'ids' : context.get('active_ids',[])}
        res = self.read(cr, uid, ids, ['number'])
        res = res and res[0] or {}
        datas['form'] = res
        return {
            'type' : 'ir.actions.report.xml',
            'report_name':'product.price',
            'datas' : datas,
        }
# Legacy osv registration: instantiating registers the model in the pool.
mrp_price()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
HarborYuan/cashier | env/Lib/site-packages/pip/_vendor/lockfile/linklockfile.py | 536 | 2652 | from __future__ import absolute_import
import time
import os
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class LinkLockFile(LockBase):
    """Lock access to a file using atomic property of link(2).
    >>> lock = LinkLockFile('somefile')
    >>> lock = LinkLockFile('somefile', threaded=False)
    """
    def acquire(self, timeout=None):
        # Create this process's unique marker file first; the lock itself is
        # taken by hard-linking the lock name to it (link(2) is atomic and
        # fails if the target already exists).
        try:
            open(self.unique_name, "wb").close()
        except IOError:
            raise LockFailed("failed to create %s" % self.unique_name)
        timeout = timeout if timeout is not None else self.timeout
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout
        while True:
            # Try and create a hard link to it.
            try:
                os.link(self.unique_name, self.lock_file)
            except OSError:
                # Link creation failed. Maybe we've double-locked?
                nlinks = os.stat(self.unique_name).st_nlink
                if nlinks == 2:
                    # The original link plus the one I created == 2. We're
                    # good to go.
                    return
                else:
                    # Otherwise the lock creation failed.
                    if timeout is not None and time.time() > end_time:
                        os.unlink(self.unique_name)
                        if timeout > 0:
                            raise LockTimeout("Timeout waiting to acquire"
                                              " lock for %s" %
                                              self.path)
                        else:
                            raise AlreadyLocked("%s is already locked" %
                                                self.path)
                    # Poll at timeout/10 when a positive timeout was given,
                    # else every 0.1s.  NOTE(review): a negative timeout
                    # could reach this with a negative sleep value, which
                    # time.sleep rejects — presumably timeout is None or
                    # >= 0 in practice; confirm with callers.
                    time.sleep(timeout is not None and timeout / 10 or 0.1)
            else:
                # Link creation succeeded. We're good to go.
                return
    def release(self):
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        elif not os.path.exists(self.unique_name):
            raise NotMyLock("%s is locked, but not by me" % self.path)
        # Remove both names; once lock_file is gone another process can link.
        os.unlink(self.unique_name)
        os.unlink(self.lock_file)
    def is_locked(self):
        return os.path.exists(self.lock_file)
    def i_am_locking(self):
        # We hold the lock iff our unique file exists and the lock file is a
        # hard link to it (link count 2).
        return (self.is_locked() and
                os.path.exists(self.unique_name) and
                os.stat(self.unique_name).st_nlink == 2)
    def break_lock(self):
        if os.path.exists(self.lock_file):
            os.unlink(self.lock_file)
| mit |
cseed/hail | datasets/extract/extract.Human_Cell_Atlas_cord_blood_immunocytes.py | 3 | 2026 | #!/usr/bin/env python3
import requests, h5py
import numpy as np
from subprocess import call
# Download the raw Human Cell Atlas cord-blood 10x matrix (HDF5, CSC layout),
# split it into counts/barcodes/genes TSVs, bgzip them, and upload to GCS.
print('Fetching "ica_cord_blood_h5.h5"...')
response = requests.get('https://s3.amazonaws.com/preview-ica-expression-data/ica_cord_blood_h5.h5')
with open('/tmp/ica_cord_blood_h5.h5', 'wb') as f:
    f.write(response.content)
h5 = h5py.File('/tmp/ica_cord_blood_h5.h5', 'r')['GRCh38']
print('Extracting data...')
# Sparse matrix triplets: 'indices' are the row (gene) index of each nonzero,
# 'data' the count value.
np.savetxt('/tmp/Human_Cell_Atlas_cord_blood_immunocytes.counts.tsv',
           np.column_stack((h5['indices'], h5['data'])),
           delimiter='\t',
           header='gene_idx\tcount',
           comments='',
           fmt='%s')
# 'indptr' marks where each barcode's (cell's) entries start in the arrays above.
np.savetxt('/tmp/Human_Cell_Atlas_cord_blood_immunocytes.barcodes.tsv',
           np.column_stack((h5['indptr'][:-1], h5['barcodes'])),
           delimiter='\t',
           header='barcode_idx\tbarcode',
           comments='',
           fmt='%s')
np.savetxt('/tmp/Human_Cell_Atlas_cord_blood_immunocytes.genes.tsv',
           np.column_stack((h5['genes'], h5['gene_names'])),
           delimiter='\t',
           header='gene_id\tgene_name',
           comments='',
           fmt='%s')
print('Block compressing...')
call(['bgzip', '/tmp/Human_Cell_Atlas_cord_blood_immunocytes.counts.tsv'])
call(['bgzip', '/tmp/Human_Cell_Atlas_cord_blood_immunocytes.barcodes.tsv'])
call(['bgzip', '/tmp/Human_Cell_Atlas_cord_blood_immunocytes.genes.tsv'])
print('Copying block compressed files...')
call(['gsutil', 'cp', '/tmp/Human_Cell_Atlas_cord_blood_immunocytes.counts.tsv.gz',
      'gs://hail-datasets-raw-data/Human_Cell_Atlas/Human_Cell_Atlas_cord_blood_immunocytes/counts.tsv.bgz'])
call(['gsutil', 'cp', '/tmp/Human_Cell_Atlas_cord_blood_immunocytes.barcodes.tsv.gz',
      'gs://hail-datasets-raw-data/Human_Cell_Atlas/Human_Cell_Atlas_cord_blood_immunocytes/barcodes.tsv.bgz'])
call(['gsutil', 'cp', '/tmp/Human_Cell_Atlas_cord_blood_immunocytes.genes.tsv.gz',
      'gs://hail-datasets-raw-data/Human_Cell_Atlas/Human_Cell_Atlas_cord_blood_immunocytes/genes.tsv.bgz'])
github-account-because-they-want-it/django-allauth | allauth/socialaccount/providers/angellist/provider.py | 75 | 1034 | from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class AngelListAccount(ProviderAccount):
    """Wraps an AngelList social account's extra_data for display purposes."""
    def get_profile_url(self):
        return self.account.extra_data.get('angellist_url')
    def get_avatar_url(self):
        return self.account.extra_data.get('image')
    def to_str(self):
        # Prefer the AngelList display name; fall back to the default label.
        dflt = super(AngelListAccount, self).to_str()
        return self.account.extra_data.get('name', dflt)
class AngelListProvider(OAuth2Provider):
    """OAuth2 provider wiring for AngelList."""
    id = 'angellist'
    name = 'AngelList'
    package = 'allauth.socialaccount.providers.angellist'
    account_class = AngelListAccount

    def extract_uid(self, data):
        """Return the provider's stable numeric account id as a string."""
        return str(data['id'])

    def extract_common_fields(self, data):
        """Map AngelList profile data onto allauth's common user fields.

        The username is the trailing segment of the public profile URL.
        Guard against a missing ``angellist_url`` so sparse profiles do
        not raise AttributeError on ``None.split``.
        """
        profile_url = data.get('angellist_url') or ''
        return dict(email=data.get('email'),
                    username=profile_url.split('/')[-1],
                    name=data.get('name'))


providers.registry.register(AngelListProvider)
| mit |
yufengg/tensorflow | tensorflow/python/saved_model/signature_def_utils.py | 89 | 1583 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SignatureDef utility functions.
Utility functions for constructing SignatureDef protos.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.saved_model.signature_def_utils_impl import build_signature_def
from tensorflow.python.saved_model.signature_def_utils_impl import classification_signature_def
from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def
from tensorflow.python.saved_model.signature_def_utils_impl import regression_signature_def
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
# Curated public API of this module; remove_undocumented strips everything else.
_allowed_symbols = [
    "build_signature_def",
    "classification_signature_def",
    "predict_signature_def",
    "regression_signature_def",
]
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
guru-digital/CouchPotatoServer | libs/suds/umx/__init__.py | 203 | 1811 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides modules containing classes to support
unmarshalling (XML).
"""
from suds.sudsobject import Object
# NOTE: this module is Python 2 only (see the `raise AttributeError, ...`
# statement syntax below).
class Content(Object):
    """
    @ivar node: The content source node.
    @type node: L{sax.element.Element}
    @ivar data: The (optional) content data.
    @type data: L{Object}
    @ivar text: The (optional) content (xml) text.
    @type text: basestring
    """
    # Names registered here resolve to None on first access instead of
    # raising AttributeError (see __getattr__).  NOTE(review): this is a
    # mutable class-level list shared by all instances/subclasses that do
    # not override it.
    extensions = []
    def __init__(self, node, **kwargs):
        Object.__init__(self)
        self.node = node
        self.data = None
        self.text = None
        # Any extra keyword arguments become attributes verbatim.
        for k,v in kwargs.items():
            setattr(self, k, v)
    def __getattr__(self, name):
        # Called only when normal lookup fails: lazily materialize names
        # listed in `extensions` as None, otherwise raise.
        if name not in self.__dict__:
            if name in self.extensions:
                v = None
                setattr(self, name, v)
            else:
                raise AttributeError, \
                    'Content has no attribute %s' % name
        else:
            v = self.__dict__[name]
        return v
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/version.py | 1151 | 11556 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
from ._structures import Infinity
# Public API of this module.
__all__ = [
    "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
]
# Parsed PEP 440 components of a Version; also the raw material for its
# sort key (see _cmpkey).
_Version = collections.namedtuple(
    "_Version",
    ["epoch", "release", "dev", "pre", "post", "local"],
)
def parse(version):
    """
    Parse the given version string and return either a :class:`Version` object
    or a :class:`LegacyVersion` object depending on if the given version is
    a valid PEP 440 version or a legacy version.
    """
    # EAFP: attempt strict PEP 440 parsing first; anything it rejects is
    # wrapped as a LegacyVersion (which always sorts before PEP 440 versions).
    try:
        return Version(version)
    except InvalidVersion:
        return LegacyVersion(version)
class InvalidVersion(ValueError):
    """
    An invalid version was found, users should refer to PEP 440.
    """
class _BaseVersion(object):
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class LegacyVersion(_BaseVersion):
    """A non-PEP 440 version string, ordered by setuptools' legacy scheme.

    Legacy versions have no epoch/local/pre/post structure, so the related
    properties return fixed values.
    """
    def __init__(self, version):
        self._version = str(version)
        # _legacy_cmpkey uses a hardcoded epoch of -1, so every legacy
        # version sorts before every PEP 440 version.
        self._key = _legacy_cmpkey(self._version)
    def __str__(self):
        return self._version
    def __repr__(self):
        return "<LegacyVersion({0})>".format(repr(str(self)))
    @property
    def public(self):
        # The whole string is "public": legacy versions have no local segment.
        return self._version
    @property
    def base_version(self):
        return self._version
    @property
    def local(self):
        return None
    @property
    def is_prerelease(self):
        return False
    @property
    def is_postrelease(self):
        return False
# Splits a legacy version into digit runs, letter runs, and '.'/'-' separators.
_legacy_version_component_re = re.compile(
    r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)
# Normalizes common spellings so e.g. 'pre'/'preview'/'rc' compare equally.
_legacy_version_replacement_map = {
    "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
def _parse_version_parts(s):
    """Yield normalized, sortable string parts of a legacy version.

    Numeric parts are zero-padded to 8 digits so lexicographic order matches
    numeric order; alphabetic parts are prefixed with '*' so they sort before
    digits.
    """
    for part in _legacy_version_component_re.split(s):
        part = _legacy_version_replacement_map.get(part, part)
        if not part or part == ".":
            continue
        if part[:1] in "0123456789":
            # pad for numeric comparison
            yield part.zfill(8)
        else:
            yield "*" + part
    # ensure that alpha/beta/candidate are before final
    yield "*final"
def _legacy_cmpkey(version):
    """Build the (epoch, parts) sort key for a non-PEP 440 version string."""
    # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch
    # greater than or equal to 0. This will effectively put the LegacyVersion,
    # which uses the defacto standard originally implemented by setuptools,
    # as before all PEP 440 versions.
    epoch = -1
    # This scheme is taken from pkg_resources.parse_version setuptools prior to
    # it's adoption of the packaging library.
    parts = []
    for part in _parse_version_parts(version.lower()):
        if part.startswith("*"):
            # remove "-" before a prerelease tag
            if part < "*final":
                while parts and parts[-1] == "*final-":
                    parts.pop()
            # remove trailing zeros from each series of numeric parts
            while parts and parts[-1] == "00000000":
                parts.pop()
        parts.append(part)
    parts = tuple(parts)
    return epoch, parts
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
    """A PEP 440 compliant version, parsed into comparable components."""

    # Anchored, case-insensitive form of the shared VERSION_PATTERN.
    _regex = re.compile(
        r"^\s*" + VERSION_PATTERN + r"\s*$",
        re.VERBOSE | re.IGNORECASE,
    )

    def __init__(self, version):
        """Parse *version*; raise InvalidVersion if it is not PEP 440."""
        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion("Invalid version: '{0}'".format(version))

        # Store the parsed out pieces of the version
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(
                match.group("pre_l"),
                match.group("pre_n"),
            ),
            post=_parse_letter_version(
                match.group("post_l"),
                # Two alternates in the pattern: "-N" shorthand or ".postN".
                match.group("post_n1") or match.group("post_n2"),
            ),
            dev=_parse_letter_version(
                match.group("dev_l"),
                match.group("dev_n"),
            ),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self):
        return "<Version({0})>".format(repr(str(self)))

    def __str__(self):
        """Render the canonical, normalized version string."""
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        # Pre-release
        if self._version.pre is not None:
            parts.append("".join(str(x) for x in self._version.pre))

        # Post-release
        if self._version.post is not None:
            parts.append(".post{0}".format(self._version.post[1]))

        # Development release
        if self._version.dev is not None:
            parts.append(".dev{0}".format(self._version.dev[1]))

        # Local version segment
        if self._version.local is not None:
            parts.append(
                "+{0}".format(".".join(str(x) for x in self._version.local))
            )

        return "".join(parts)

    @property
    def public(self):
        # Everything before the "+" local-version separator.
        return str(self).split("+", 1)[0]

    @property
    def base_version(self):
        # Epoch and release only; pre/post/dev/local segments are dropped.
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        return "".join(parts)

    @property
    def local(self):
        # Text after "+", or None (implicitly) when there is no local segment.
        version_string = str(self)
        if "+" in version_string:
            return version_string.split("+", 1)[1]

    @property
    def is_prerelease(self):
        # Development releases count as pre-releases here.
        return bool(self._version.dev or self._version.pre)

    @property
    def is_postrelease(self):
        return bool(self._version.post)
def _parse_letter_version(letter, number):
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
_local_version_seperators = re.compile(r"[\._-]")
def _parse_local_version(local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_seperators.split(local)
)
def _cmpkey(epoch, release, pre, post, dev, local):
    """Build the tuple used to sort parsed PEP 440 versions."""
    # Compare release tuples with trailing zero components stripped, so
    # that e.g. "1.0" and "1.0.0" produce the same key.
    trimmed_release = list(release)
    while trimmed_release and trimmed_release[-1] == 0:
        trimmed_release.pop()
    release = tuple(trimmed_release)

    # "1.0.dev0" must sort before "1.0a0": when only a dev segment exists
    # we borrow the pre slot to pull such versions to the front. Otherwise
    # a missing pre-release sorts after any present one, where the normal
    # rules handle things correctly.
    if pre is None and post is None and dev is not None:
        pre = -Infinity
    elif pre is None:
        pre = Infinity

    # Versions without a post segment sort before those with one.
    if post is None:
        post = -Infinity

    # Versions without a development segment sort after those with one.
    if dev is None:
        dev = Infinity

    if local is None:
        # Versions without a local segment sort before those with one.
        local = -Infinity
    else:
        # PEP 440 local-version ordering:
        # - numeric segments compare numerically and beat alphanumeric ones
        # - alphanumeric segments compare lexicographically
        # - shorter versions sort before longer ones on a shared prefix
        local = tuple(
            (piece, "") if isinstance(piece, int) else (-Infinity, piece)
            for piece in local
        )

    return epoch, release, pre, post, dev, local
| mit |
cuckoobox/cuckoo | cuckoo/common/irc.py | 1 | 4996 | # Copyright (C) 2012 JoseMi Holguin (@j0sm1)
# Copyright (C) 2013 Claudio Guarnieri.
# Copyright (C) 2014-2017 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import cStringIO
import re
import logging
from cuckoo.common.utils import convert_to_printable
log = logging.getLogger("Processing.Pcap.irc.protocol")
class ircMessage(object):
    """IRC Protocol Request.

    Parses reassembled TCP stream payloads into per-line IRC messages,
    split into server messages (prefix/command/params) and client
    messages (command/params).
    """

    # Client commands
    # Whitelist of valid client-to-server commands; any non-prefixed line
    # whose first word is not listed here is discarded by _unpack().
    __methods_client = dict.fromkeys((
        "PASS", "JOIN", "USER", "OPER", "MODE", "SERVICE", "QUIT", "SQUIT",
        "PART", "TOPIC", "NAMES", "LIST", "INVITE", "KICK", "PRIVMSG",
        "NOTICE", "MOTD", "LUSERS", "VERSION", "STATS", "LINKS", "TIME",
        "CONNECT", "TRACE", "ADMIN", "INFO", "SERVLIST", "SQUERY", "WHO",
        "WHOIS", "WHOWAS", "KILL", "PING", "PONG", "ERROR", "AWAY", "REHASH",
        "DIE", "RESTART", "SUMMON", "USERS", "WALLOPS", "USERHOST", "NICK",
        "ISON"
    ))

    def __init__(self):
        # Parsed messages accumulated across _unpack() calls.
        self._messages = []
        # Server commands : prefix - command - params
        self._sc = {}
        # Client commands : command - params
        self._cc = {}

    def _unpack(self, buf):
        """Extract into a list irc messages of a tcp streams.
        @buf: tcp stream data
        """
        try:
            f = cStringIO.StringIO(buf)
            lines = f.readlines()
        except Exception:
            log.error("Failed reading tcp stream buffer")
            return False

        for element in lines:
            # Lines beginning with ":" carry a server prefix (RFC 2812).
            if not re.match("^:", element) is None:
                command = "([a-zA-Z]+|[0-9]{3})"
                params = "(\x20.+)"
                irc_server_msg = re.findall(
                    "(^:[\w+.{}!@|()]+\x20)" + command + params, element
                )
                if irc_server_msg:
                    self._sc["prefix"] = convert_to_printable(irc_server_msg[0][0].strip())
                    self._sc["command"] = convert_to_printable(irc_server_msg[0][1].strip())
                    self._sc["params"] = convert_to_printable(irc_server_msg[0][2].strip())
                    self._sc["type"] = "server"
                    self._messages.append(dict(self._sc))
            else:
                # NOTE(review): "\0x0d" below looks like a typo for "\x0d"
                # (carriage return) -- verify intended pattern.
                irc_client_msg = re.findall(
                    "([a-zA-Z]+\x20)(.+[\x0a\0x0d])", element
                )
                if irc_client_msg and irc_client_msg[0][0].strip() in self.__methods_client:
                    self._cc["command"] = convert_to_printable(irc_client_msg[0][0].strip())
                    self._cc["params"] = convert_to_printable(irc_client_msg[0][1].strip())
                    self._cc["type"] = "client"
                    self._messages.append(dict(self._cc))

    def getClientMessages(self, buf):
        """Get irc client commands of tcp streams.
        @buf: list of messages
        @return: dictionary of the client messages
        """
        try:
            self._unpack(buf)
        except Exception:
            return None

        entry_cc = []
        for msg in self._messages:
            if msg["type"] == "client":
                entry_cc.append(msg)

        return entry_cc

    def getClientMessagesFilter(self, buf, filters):
        """Get irc client commands of tcp streams.
        @buf: list of messages
        @filters: commands to exclude from the result
        @return: dictionary of the client messages filtered
        """
        try:
            self._unpack(buf)
        except Exception:
            return None

        entry_cc = []
        for msg in self._messages:
            if msg["type"] == "client" and msg["command"] not in filters:
                entry_cc.append(msg)

        return entry_cc

    def getServerMessages(self, buf):
        """Get irc server commands of tcp streams.
        @buf: list of messages
        @return: dictionary of server messages
        """
        try:
            self._unpack(buf)
        except Exception:
            return None

        entry_sc = []
        for msg in self._messages:
            if msg["type"] == "server":
                entry_sc.append(msg)

        return entry_sc

    def getServerMessagesFilter(self, buf, filters):
        """Get irc server commands of tcp streams.
        @buf: list of messages
        @filters: commands to exclude from the result
        @return: dictionary of server messages filtered
        """
        try:
            self._unpack(buf)
        except Exception:
            return None

        entry_sc = []
        for msg in self._messages:
            if msg["type"] == "server" and msg["command"] not in filters:
                entry_sc.append(msg)

        return entry_sc

    def isthereIRC(self, buf):
        """Check if there is irc messages in a stream TCP.
        @buf: stream data
        @return: boolean result
        """
        try:
            self._unpack(buf)
            if self._messages:
                return True
            else:
                return False
        except Exception:
            return False
| mit |
gsehub/edx-platform | openedx/core/djangoapps/credentials/tasks/v1/tasks.py | 13 | 2157 | """
This file contains celery tasks for credentials-related functionality.
"""
from celery import task
from celery.utils.log import get_task_logger
from django.conf import settings
from django.contrib.auth.models import User
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.credentials.utils import get_credentials_api_client
logger = get_task_logger(__name__)
# Under cms the following setting is not defined, leading to errors during tests.
# These tasks aren't strictly credentials generation, but are similar in the sense
# that they generate records on the credentials side. And have a similar SLA.
ROUTING_KEY = getattr(settings, 'CREDENTIALS_GENERATION_ROUTING_KEY', None)
# Maximum number of retries before giving up.
# For reference, 11 retries with exponential backoff yields a maximum waiting
# time of 2047 seconds (about 30 minutes). Setting this to None could yield
# unwanted behavior: infinite retries.
MAX_RETRIES = 11
@task(bind=True, ignore_result=True, routing_key=ROUTING_KEY)
def send_grade_to_credentials(self, username, course_run_key, verified, letter_grade, percent_grade):
    """ Celery task to notify the Credentials IDA of a grade change via POST.

    Retries with exponential backoff (1s, 2s, 4s, ... capped by
    MAX_RETRIES) when anything in the POST fails.
    """
    logger.info('Running task send_grade_to_credentials for username %s and course %s', username, course_run_key)

    # Exponential backoff delay for the *next* retry attempt, if needed.
    countdown = 2 ** self.request.retries
    course_key = CourseKey.from_string(course_run_key)

    try:
        # Authenticate as the credentials service user, scoped to the
        # course's org.
        credentials_client = get_credentials_api_client(
            User.objects.get(username=settings.CREDENTIALS_SERVICE_USERNAME),
            org=course_key.org,
        )

        credentials_client.grades.post({
            'username': username,
            'course_run': str(course_key),
            'letter_grade': letter_grade,
            'percent_grade': percent_grade,
            'verified': verified,
        })

        logger.info('Sent grade for course %s to user %s', course_run_key, username)

    except Exception as exc:
        logger.exception('Failed to send grade for course %s to user %s', course_run_key, username)
        raise self.retry(exc=exc, countdown=countdown, max_retries=MAX_RETRIES)
| agpl-3.0 |
Shekharrajak/django-db-mailer | dbmail/south_migrations/0003_auto__add_field_mailtemplate_context_note.py | 2 | 7987 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Adding field 'MailTemplate.context_note'
        # Nullable TextField, so existing rows need no backfilled default.
        db.add_column('dbmail_mailtemplate', 'context_note',
                      self.gf('django.db.models.fields.TextField')(null=True, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        # Deleting field 'MailTemplate.context_note'
        # Inverse of forwards(): drops the added column.
        db.delete_column('dbmail_mailtemplate', 'context_note')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'dbmail.mailcategory': {
'Meta': {'object_name': 'MailCategory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'dbmail.mailgroup': {
'Meta': {'object_name': 'MailGroup'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'dbmail.mailgroupemail': {
'Meta': {'object_name': 'MailGroupEmail'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dbmail.MailGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'dbmail.maillog': {
'Meta': {'object_name': 'MailLog'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'error_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_sent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'num_of_retries': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dbmail.MailTemplate']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'dbmail.maillogemail': {
'Meta': {'object_name': 'MailLogEmail'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dbmail.MailLog']"}),
'mail_type': ('django.db.models.fields.CharField', [], {'max_length': '3'})
},
'dbmail.mailtemplate': {
'Meta': {'object_name': 'MailTemplate'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dbmail.MailCategory']", 'null': 'True', 'blank': 'True'}),
'context_note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_html': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'message': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'num_of_retries': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'priority': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['dbmail'] | gpl-2.0 |
nhicher/ansible | lib/ansible/module_utils/exoscale.py | 96 | 5159 | # -*- coding: utf-8 -*-
# Copyright (c) 2016, René Moser <mail@renemoser.net>
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
import os
from ansible.module_utils.six.moves import configparser
from ansible.module_utils.six import integer_types, string_types
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.urls import fetch_url
EXO_DNS_BASEURL = "https://api.exoscale.ch/dns/v1"
def exo_dns_argument_spec():
    """Return the common Ansible argument spec for Exoscale DNS modules."""
    spec = dict()
    # API credentials fall back to the CloudStack environment variables.
    spec['api_key'] = dict(default=os.environ.get('CLOUDSTACK_KEY'), no_log=True)
    spec['api_secret'] = dict(default=os.environ.get('CLOUDSTACK_SECRET'), no_log=True)
    spec['api_timeout'] = dict(type='int', default=os.environ.get('CLOUDSTACK_TIMEOUT') or 10)
    spec['api_region'] = dict(default=os.environ.get('CLOUDSTACK_REGION') or 'cloudstack')
    spec['validate_certs'] = dict(default=True, type='bool')
    return spec
def exo_dns_required_together():
    """Return the parameter pairs that must be supplied together."""
    required = [
        ['api_key', 'api_secret'],
    ]
    return required
class ExoDns(object):
    """Thin client for the Exoscale DNS API used by the exo_dns modules."""

    def __init__(self, module):
        self.module = module

        self.api_key = self.module.params.get('api_key')
        self.api_secret = self.module.params.get('api_secret')
        # Fall back to the CloudStack ini/environment config when the
        # credentials were not passed as module parameters.
        if not (self.api_key and self.api_secret):
            try:
                region = self.module.params.get('api_region')
                config = self.read_config(ini_group=region)
                self.api_key = config['key']
                self.api_secret = config['secret']
            except Exception as e:
                self.module.fail_json(msg="Error while processing config: %s" % to_native(e))

        self.headers = {
            'X-DNS-Token': "%s:%s" % (self.api_key, self.api_secret),
            'Content-Type': 'application/json',
            'Accept': 'application/json',
        }

        # Standard Ansible result skeleton, including diff support.
        self.result = {
            'changed': False,
            'diff': {
                'before': {},
                'after': {},
            }
        }

    def read_config(self, ini_group=None):
        """Read API credentials from the environment or a cloudstack.ini."""
        if not ini_group:
            ini_group = os.environ.get('CLOUDSTACK_REGION', 'cloudstack')

        keys = ['key', 'secret']
        env_conf = {}
        for key in keys:
            if 'CLOUDSTACK_%s' % key.upper() not in os.environ:
                break
            else:
                env_conf[key] = os.environ['CLOUDSTACK_%s' % key.upper()]
        else:
            # Only returned when *all* keys were found in the environment.
            return env_conf

        # Config file: $PWD/cloudstack.ini or $HOME/.cloudstack.ini
        # Last read wins in configparser
        paths = (
            os.path.join(os.path.expanduser('~'), '.cloudstack.ini'),
            os.path.join(os.getcwd(), 'cloudstack.ini'),
        )
        # Look at CLOUDSTACK_CONFIG first if present
        if 'CLOUDSTACK_CONFIG' in os.environ:
            paths += (os.path.expanduser(os.environ['CLOUDSTACK_CONFIG']),)
        if not any([os.path.exists(c) for c in paths]):
            self.module.fail_json(msg="Config file not found. Tried : %s" % ", ".join(paths))

        conf = configparser.ConfigParser()
        conf.read(paths)
        return dict(conf.items(ini_group))

    def api_query(self, resource="/domains", method="GET", data=None):
        """Issue a request against the Exoscale DNS API and return JSON."""
        url = EXO_DNS_BASEURL + resource
        if data:
            data = self.module.jsonify(data)

        response, info = fetch_url(
            module=self.module,
            url=url,
            data=data,
            method=method,
            headers=self.headers,
            timeout=self.module.params.get('api_timeout'),
        )

        # 200 OK / 201 Created / 204 No Content are the expected outcomes.
        if info['status'] not in (200, 201, 204):
            self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg']))

        try:
            return self.module.from_json(to_text(response.read()))
        except Exception as e:
            self.module.fail_json(msg="Could not process response into json: %s" % to_native(e))

    def has_changed(self, want_dict, current_dict, only_keys=None):
        """Compare desired vs. current state, recording a diff as it goes."""
        changed = False
        for key, value in want_dict.items():

            # Optionally limit by a list of keys
            if only_keys and key not in only_keys:
                continue

            # Skip None values
            if value is None:
                continue

            if key in current_dict:
                if isinstance(current_dict[key], integer_types):
                    if value != current_dict[key]:
                        self.result['diff']['before'][key] = current_dict[key]
                        self.result['diff']['after'][key] = value
                        changed = True
                elif isinstance(current_dict[key], string_types):
                    # String comparison is case-insensitive.
                    if value.lower() != current_dict[key].lower():
                        self.result['diff']['before'][key] = current_dict[key]
                        self.result['diff']['after'][key] = value
                        changed = True
                else:
                    self.module.fail_json(msg="Unable to determine comparison for key %s" % key)
            else:
                # Key absent on the remote side counts as a change.
                self.result['diff']['after'][key] = value
                changed = True
        return changed
| gpl-3.0 |
mhugent/Quantum-GIS | python/plugins/processing/algs/saga/ext/supervisedclassification.py | 2 | 1496 | # -*- coding: utf-8 -*-
"""
***************************************************************************
supervisedclassification.py
---------------------
Date : July 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'July 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.core.ProcessingConfig import ProcessingConfig
from processing.algs.saga.SagaUtils import SagaUtils
from processing.tests.TestData import table
def editCommands(commands):
    """Inject the statistics table argument into the SAGA command list.

    The '-STATS' argument is only added for non-2.0.8 SAGA versions; the
    legacy 2.0.8 interface does not accept it.
    """
    if not ProcessingConfig.getSetting(SagaUtils.SAGA_208):
        commands[-3] += ' -STATS ' + table()
    return commands
| gpl-2.0 |
cybercercher/crits | crits/samples/api.py | 9 | 5540 | from django.core.urlresolvers import reverse
from tastypie import authorization
from tastypie.authentication import MultiAuthentication
from crits.samples.sample import Sample
from crits.samples.handlers import handle_uploaded_file
from crits.core.api import CRITsApiKeyAuthentication, CRITsSessionAuthentication
from crits.core.api import CRITsSerializer, CRITsAPIResource
class SampleResource(CRITsAPIResource):
    """
    Class to handle everything related to the Sample API.

    Currently supports GET and POST.
    """

    class Meta:
        object_class = Sample
        allowed_methods = ('get', 'post', 'patch')
        resource_name = "samples"
        authentication = MultiAuthentication(CRITsApiKeyAuthentication(),
                                             CRITsSessionAuthentication())
        authorization = authorization.Authorization()
        serializer = CRITsSerializer()

    def get_object_list(self, request):
        """
        Use the CRITsAPIResource to get our objects but provide the class to get
        the objects from.

        :param request: The incoming request.
        :type request: :class:`django.http.HttpRequest`
        :returns: Resulting objects in the specified format (JSON by default).
        """
        return super(SampleResource, self).get_object_list(request, Sample)

    def obj_create(self, bundle, **kwargs):
        """
        Handles creating Samples through the API.

        Supports two upload types: 'metadata' (filename/md5 only) and
        'file' (actual binary content, optionally password protected).
        Responds via self.crits_response() in every code path.

        :param bundle: Bundle containing the information to create the Sample.
        :type bundle: Tastypie Bundle object.
        :returns: HttpResponse.
        """
        analyst = bundle.request.user.username
        type_ = bundle.data.get('upload_type', None)

        # return_code 1 == failure until proven otherwise.
        content = {'return_code': 1,
                   'type': 'Sample'}
        if not type_:
            content['message'] = 'Must provide an upload type.'
            self.crits_response(content)
        if type_ not in ('metadata', 'file'):
            content['message'] = 'Not a valid upload type.'
            self.crits_response(content)
        if type_ == 'metadata':
            filename = bundle.data.get('filename', None)
            md5 = bundle.data.get('md5', None)
            password = None
            filedata = None
        elif type_ == 'file':
            md5 = None
            password = bundle.data.get('password', None)
            file_ = bundle.data.get('filedata', None)
            if not file_:
                content['message'] = "Upload type of 'file' but no file uploaded."
                self.crits_response(content)
            filedata = file_
            filename = None

        campaign = bundle.data.get('campaign', None)
        confidence = bundle.data.get('confidence', None)
        source = bundle.data.get('source', None)
        method = bundle.data.get('method', "")
        reference = bundle.data.get('reference', None)
        file_format = bundle.data.get('file_format', None)
        related_md5 = bundle.data.get('related_md5', None)
        related_id = bundle.data.get('related_id', None)
        related_type = bundle.data.get('related_type', None)
        backdoor_name = bundle.data.get('backdoor_name', None)
        backdoor_version = bundle.data.get('backdoor_version', None)
        bucket_list = bundle.data.get('bucket_list', None)
        ticket = bundle.data.get('ticket', None)

        # Relationship parameters must be supplied as a pair.
        if ((related_id and not related_type) or
            (related_type and not related_id)):
            content['message'] = "Must specify related_type and related_id"
            self.crits_response(content)

        sample_md5 = handle_uploaded_file(filedata,
                                          source,
                                          method,
                                          reference,
                                          file_format,
                                          password,
                                          user=analyst,
                                          campaign=campaign,
                                          confidence=confidence,
                                          related_md5 = related_md5,
                                          related_id = related_id,
                                          related_type = related_type,
                                          filename=filename,
                                          md5=md5,
                                          bucket_list=bucket_list,
                                          ticket=ticket,
                                          is_return_only_md5=False,
                                          backdoor_name=backdoor_name,
                                          backdoor_version=backdoor_version)

        result = {'success': False}
        if len(sample_md5) > 0:
            result = sample_md5[0]
            if result.get('message'):
                content['message'] = result.get('message')
            if result.get('object'):
                content['id'] = str(result.get('object').id)
            if content.get('id'):
                # Provide the API detail URL for the newly created Sample.
                url = reverse('api_dispatch_detail',
                              kwargs={'resource_name': 'samples',
                                      'api_name': 'v1',
                                      'pk': content.get('id')})
                content['url'] = url
        else:
            content['message'] = "Could not create Sample for unknown reason."
        if result['success']:
            content['return_code'] = 0
        self.crits_response(content)
| mit |
pelson/cartopy | lib/cartopy/tests/mpl/test_mpl_integration.py | 2 | 22762 | # (C) British Crown Copyright 2011 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import math
import re
import warnings
import numpy as np
import matplotlib.pyplot as plt
import pytest
import six
import cartopy.crs as ccrs
from cartopy.tests.mpl import MPL_VERSION, ImageTesting
# Robinson boundary tolerance is much looser for old Proj versions.
_ROB_TOL = 0.5 if ccrs.PROJ4_VERSION < (4, 9) else 0.111

# Reference image names / mpl styles vary with the Matplotlib version.
# NOTE(review): string comparison of MPL_VERSION works for these specific
# values but is not a general version compare -- verify for future majors.
_CONTOUR_STYLE = _STREAMPLOT_STYLE = 'classic'
if MPL_VERSION >= '3.0.0':
    _CONTOUR_IMAGE = 'global_contour_wrap'
    _CONTOUR_STYLE = 'mpl20'
    _STREAMPLOT_IMAGE = 'streamplot_mpl_3.0.0'
    # Should have been the case for anything but _1.4.3, but we don't want to
    # regenerate those images again.
    _STREAMPLOT_STYLE = 'mpl20'
else:
    _CONTOUR_IMAGE = 'global_contour_wrap_mpl_pre_3.0.0'
    if MPL_VERSION >= '2.1.0':
        _STREAMPLOT_IMAGE = 'streamplot_mpl_2.1.0'
    elif MPL_VERSION >= '2':
        _STREAMPLOT_IMAGE = 'streamplot_mpl_2.0.0'
    else:
        _STREAMPLOT_IMAGE = 'streamplot_mpl_1.4.3'
@pytest.mark.natural_earth
@ImageTesting([_CONTOUR_IMAGE], style=_CONTOUR_STYLE)
def test_global_contour_wrap_new_transform():
    """Contour of global data with the source transform given explicitly."""
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.coastlines()
    x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
    data = np.sin(np.sqrt(x ** 2 + y ** 2))
    plt.contour(x, y, data, transform=ccrs.PlateCarree())


@pytest.mark.natural_earth
@ImageTesting([_CONTOUR_IMAGE], style=_CONTOUR_STYLE)
def test_global_contour_wrap_no_transform():
    """Contour of global data relying on the implicit default transform."""
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.coastlines()
    x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
    data = np.sin(np.sqrt(x ** 2 + y ** 2))
    plt.contour(x, y, data)


@pytest.mark.natural_earth
@ImageTesting(['global_contourf_wrap'])
def test_global_contourf_wrap_new_transform():
    """Filled contour of global data with an explicit source transform."""
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.coastlines()
    x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
    data = np.sin(np.sqrt(x ** 2 + y ** 2))
    plt.contourf(x, y, data, transform=ccrs.PlateCarree())


@pytest.mark.natural_earth
@ImageTesting(['global_contourf_wrap'])
def test_global_contourf_wrap_no_transform():
    """Filled contour of global data with the implicit default transform."""
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.coastlines()
    x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
    data = np.sin(np.sqrt(x ** 2 + y ** 2))
    plt.contourf(x, y, data)


@pytest.mark.natural_earth
@ImageTesting(['global_pcolor_wrap'])
def test_global_pcolor_wrap_new_transform():
    """pcolor of global data with an explicit source transform."""
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.coastlines()
    x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
    data = np.sin(np.sqrt(x ** 2 + y ** 2))
    plt.pcolor(x, y, data, transform=ccrs.PlateCarree())


@pytest.mark.natural_earth
@ImageTesting(['global_pcolor_wrap'])
def test_global_pcolor_wrap_no_transform():
    """pcolor of global data with the implicit default transform."""
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.coastlines()
    x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
    data = np.sin(np.sqrt(x ** 2 + y ** 2))
    plt.pcolor(x, y, data)


@pytest.mark.natural_earth
@ImageTesting(['global_scatter_wrap'])
def test_global_scatter_wrap_new_transform():
    """Scatter of global data with an explicit source transform."""
    ax = plt.axes(projection=ccrs.PlateCarree())
    # By default the coastline feature will be drawn after patches.
    # By setting zorder we can ensure our scatter points are drawn
    # after the coastlines.
    ax.coastlines(zorder=0)
    x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
    data = np.sin(np.sqrt(x ** 2 + y ** 2))
    plt.scatter(x, y, c=data, transform=ccrs.PlateCarree())


@pytest.mark.natural_earth
@ImageTesting(['global_scatter_wrap'])
def test_global_scatter_wrap_no_transform():
    """Scatter of global data with the implicit default transform."""
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.coastlines(zorder=0)
    x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
    data = np.sin(np.sqrt(x ** 2 + y ** 2))
    plt.scatter(x, y, c=data)
@ImageTesting(['global_map'],
              tolerance=16 if ccrs.PROJ4_VERSION < (4, 9) else 0.1)
def test_global_map():
    """PlateCarree straight line vs. geodetic great circle on Robinson."""
    plt.axes(projection=ccrs.Robinson())
    # ax.coastlines()
    # ax.gridlines(5)

    plt.plot(-0.08, 51.53, 'o', transform=ccrs.PlateCarree())
    plt.plot([-0.08, 132], [51.53, 43.17], color='red',
             transform=ccrs.PlateCarree())
    plt.plot([-0.08, 132], [51.53, 43.17], color='blue',
             transform=ccrs.Geodetic())


@pytest.mark.natural_earth
@ImageTesting(['simple_global'])
def test_simple_global():
    """Axes with no explicit extents should default to a global map."""
    plt.axes(projection=ccrs.PlateCarree())
    plt.gca().coastlines()
    # produces a global map, despite not having needed to set the limits
@pytest.mark.natural_earth
@ImageTesting(['multiple_projections4' if ccrs.PROJ4_VERSION < (5, 0, 0)
               else 'multiple_projections5'])
def test_multiple_projections():
    """Draw the same point/line plot on a grid of many supported projections."""
    projections = [ccrs.PlateCarree(),
                   ccrs.Robinson(),
                   ccrs.RotatedPole(pole_latitude=45, pole_longitude=180),
                   ccrs.OSGB(),
                   ccrs.TransverseMercator(),
                   ccrs.Mercator(
                       globe=ccrs.Globe(semimajor_axis=math.degrees(1)),
                       min_latitude=-85., max_latitude=85.),
                   ccrs.LambertCylindrical(),
                   ccrs.Miller(),
                   ccrs.Gnomonic(),
                   ccrs.Stereographic(),
                   ccrs.NorthPolarStereo(),
                   ccrs.SouthPolarStereo(),
                   ccrs.Orthographic(),
                   ccrs.Mollweide(),
                   ccrs.InterruptedGoodeHomolosine(),
                   ccrs.EckertI(),
                   ccrs.EckertII(),
                   ccrs.EckertIII(),
                   ccrs.EckertIV(),
                   ccrs.EckertV(),
                   ccrs.EckertVI(),
                   ]
    rows = np.ceil(len(projections) / 5)
    fig = plt.figure(figsize=(10, 2 * rows))
    for i, prj in enumerate(projections, 1):
        ax = fig.add_subplot(rows, 5, i, projection=prj)
        ax.set_global()
        ax.coastlines()
        plt.plot(-0.08, 51.53, 'o', transform=ccrs.PlateCarree())
        plt.plot([-0.08, 132], [51.53, 43.17], color='red',
                 transform=ccrs.PlateCarree())
        plt.plot([-0.08, 132], [51.53, 43.17], color='blue',
                 transform=ccrs.Geodetic())
@pytest.mark.skipif(ccrs.PROJ4_VERSION < (5, 2, 0),
                    reason='Proj is too old.')
@pytest.mark.natural_earth
@ImageTesting(['multiple_projections520'])
def test_multiple_projections_520():
    """EqualEarth smoke test (projection added in Proj 5.2.0)."""
    # Test projections added in Proj 5.2.0.
    fig = plt.figure(figsize=(2, 2))
    ax = fig.add_subplot(1, 1, 1, projection=ccrs.EqualEarth())
    ax.set_global()
    ax.coastlines()
    ax.plot(-0.08, 51.53, 'o', transform=ccrs.PlateCarree())
    ax.plot([-0.08, 132], [51.53, 43.17], color='red',
            transform=ccrs.PlateCarree())
    ax.plot([-0.08, 132], [51.53, 43.17], color='blue',
            transform=ccrs.Geodetic())
def test_cursor_values():
    """format_coord reports both projected coords and lat/lon for several CRSs."""
    ax = plt.axes(projection=ccrs.NorthPolarStereo())
    x, y = np.array([-969100.]), np.array([-4457000.])
    r = ax.format_coord(x, y)
    assert (r.encode('ascii', 'ignore') ==
            six.b('-9.691e+05, -4.457e+06 (50.716617N, 12.267069W)'))
    ax = plt.axes(projection=ccrs.PlateCarree())
    x, y = np.array([-181.5]), np.array([50.])
    r = ax.format_coord(x, y)
    # -181.5 degrees wraps around the dateline to 178.5E.
    assert (r.encode('ascii', 'ignore') ==
            six.b('-181.5, 50 (50.000000N, 178.500000E)'))
    ax = plt.axes(projection=ccrs.Robinson())
    x, y = np.array([16060595.2]), np.array([2363093.4])
    r = ax.format_coord(x, y)
    assert re.search(six.b('1.606e\\+07, 2.363e\\+06 '
                           '\\(22.09[0-9]{4}N, 173.70[0-9]{4}E\\)'),
                     r.encode('ascii', 'ignore'))
    plt.close()
@pytest.mark.natural_earth
@ImageTesting(['natural_earth_interface'], tolerance=_ROB_TOL)
def test_axes_natural_earth_interface():
    """The deprecated natural_earth_shp helper still draws but must warn."""
    rob = ccrs.Robinson()
    ax = plt.axes(projection=rob)
    with warnings.catch_warnings(record=True) as all_warnings:
        warnings.simplefilter('always')
        ax.natural_earth_shp('rivers_lake_centerlines', edgecolor='black',
                             facecolor='none')
        ax.natural_earth_shp('lakes', facecolor='blue')
    # One deprecation warning per call, each pointing users at add_feature.
    assert len(all_warnings) == 2
    for warning in all_warnings:
        msg = str(warning.message)
        assert 'deprecated' in msg
        assert 'add_feature' in msg
@pytest.mark.natural_earth
@ImageTesting(['pcolormesh_global_wrap1'])
def test_pcolormesh_global_with_wrap1():
    """Global pcolormesh from 1-d bounds on two PlateCarree central longitudes."""
    # make up some realistic data with bounds (such as data from the UM)
    nx, ny = 36, 18
    xbnds = np.linspace(0, 360, nx, endpoint=True)
    ybnds = np.linspace(-90, 90, ny, endpoint=True)
    x, y = np.meshgrid(xbnds, ybnds)
    data = np.exp(np.sin(np.deg2rad(x)) + np.cos(np.deg2rad(y)))
    # trim to one value per cell (bounds arrays have one extra point each way)
    data = data[:-1, :-1]
    ax = plt.subplot(211, projection=ccrs.PlateCarree())
    plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
    ax.coastlines()
    ax.set_global()  # make sure everything is visible
    ax = plt.subplot(212, projection=ccrs.PlateCarree(180))
    plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
    ax.coastlines()
    ax.set_global()  # make sure everything is visible
@pytest.mark.natural_earth
@ImageTesting(
    ['pcolormesh_global_wrap2'],
    tolerance=1.8 if (5, 0, 0) <= ccrs.PROJ4_VERSION < (5, 1, 0) else 0.5)
def test_pcolormesh_global_with_wrap2():
    """Global pcolormesh with cell-centred bounds extended by half a step."""
    # make up some realistic data with bounds (such as data from the UM)
    nx, ny = 36, 18
    xbnds, xstep = np.linspace(0, 360, nx - 1, retstep=True, endpoint=True)
    ybnds, ystep = np.linspace(-90, 90, ny - 1, retstep=True, endpoint=True)
    # shift to cell edges and append the final upper bound
    xbnds -= xstep / 2
    ybnds -= ystep / 2
    xbnds = np.append(xbnds, xbnds[-1] + xstep)
    ybnds = np.append(ybnds, ybnds[-1] + ystep)
    x, y = np.meshgrid(xbnds, ybnds)
    data = np.exp(np.sin(np.deg2rad(x)) + np.cos(np.deg2rad(y)))
    data = data[:-1, :-1]
    ax = plt.subplot(211, projection=ccrs.PlateCarree())
    plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
    ax.coastlines()
    ax.set_global()  # make sure everything is visible
    ax = plt.subplot(212, projection=ccrs.PlateCarree(180))
    plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
    ax.coastlines()
    ax.set_global()  # make sure everything is visible
@pytest.mark.natural_earth
@ImageTesting(
    ['pcolormesh_global_wrap3'],
    tolerance=2.4 if (5, 0, 0) <= ccrs.PROJ4_VERSION < (5, 1, 0) else _ROB_TOL)
def test_pcolormesh_global_with_wrap3():
    """Masked mesh with a duplicated wrap column on three target projections."""
    nx, ny = 33, 17
    xbnds = np.linspace(-1.875, 358.125, nx, endpoint=True)
    ybnds = np.linspace(91.25, -91.25, ny, endpoint=True)
    xbnds, ybnds = np.meshgrid(xbnds, ybnds)
    data = np.exp(np.sin(np.deg2rad(xbnds)) + np.cos(np.deg2rad(ybnds)))
    # this step is not necessary, but makes the plot even harder to do (i.e.
    # it really puts cartopy through its paces)
    ybnds = np.append(ybnds, ybnds[:, 1:2], axis=1)
    xbnds = np.append(xbnds, xbnds[:, 1:2] + 360, axis=1)
    data = np.ma.concatenate([data, data[:, 0:1]], axis=1)
    data = data[:-1, :-1]
    data = np.ma.masked_greater(data, 2.6)
    ax = plt.subplot(311, projection=ccrs.PlateCarree(-45))
    c = plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
    # cartopy attaches this attribute when it splits cells across the seam
    assert c._wrapped_collection_fix is not None, \
        'No pcolormesh wrapping was done when it should have been.'
    ax.coastlines()
    ax.set_global()  # make sure everything is visible
    ax = plt.subplot(312, projection=ccrs.PlateCarree(-1.87499952))
    plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
    ax.coastlines()
    ax.set_global()  # make sure everything is visible
    ax = plt.subplot(313, projection=ccrs.Robinson(-2))
    plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
    ax.coastlines()
    ax.set_global()  # make sure everything is visible
@pytest.mark.natural_earth
@ImageTesting(['pcolormesh_limited_area_wrap'],
              tolerance=1.41 if MPL_VERSION >= '2.1.0' else 0.7)
def test_pcolormesh_limited_area_wrap():
    """Rotated-pole limited-area mesh on several target projections (1d and 2d coords)."""
    # make up some realistic data with bounds (such as data from the UM's North
    # Atlantic Europe model)
    nx, ny = 22, 36
    xbnds = np.linspace(311.91998291, 391.11999512, nx, endpoint=True)
    ybnds = np.linspace(-23.59000015, 24.81000137, ny, endpoint=True)
    x, y = np.meshgrid(xbnds, ybnds)
    data = ((np.sin(np.deg2rad(x))) / 10. + np.exp(np.cos(np.deg2rad(y))))
    data = data[:-1, :-1]
    rp = ccrs.RotatedPole(pole_longitude=177.5, pole_latitude=37.5)
    plt.figure(figsize=(10, 6))
    ax = plt.subplot(221, projection=ccrs.PlateCarree())
    plt.pcolormesh(xbnds, ybnds, data, transform=rp, cmap='Spectral')
    ax.coastlines()
    ax = plt.subplot(222, projection=ccrs.PlateCarree(180))
    plt.pcolormesh(xbnds, ybnds, data, transform=rp, cmap='Spectral')
    ax.coastlines()
    ax.set_global()
    # draw the same plot, only more zoomed in, and using the 2d versions
    # of the coordinates (just to test that 1d and 2d are both suitably
    # being fixed)
    ax = plt.subplot(223, projection=ccrs.PlateCarree())
    plt.pcolormesh(x, y, data, transform=rp, cmap='Spectral')
    ax.coastlines()
    ax.set_extent([-70, 0, 0, 80])
    ax = plt.subplot(224, projection=rp)
    plt.pcolormesh(xbnds, ybnds, data, transform=rp, cmap='Spectral')
    ax.coastlines()
@pytest.mark.natural_earth
@ImageTesting(['pcolormesh_single_column_wrap'], tolerance=0.7)
def test_pcolormesh_single_column_wrap():
    """Wrapped mesh reduced to a single column of cells."""
    # Check a wrapped mesh like test_pcolormesh_limited_area_wrap, but only use
    # a single column, which could break depending on how wrapping is
    # determined.
    ny = 36
    xbnds = np.array([360.9485619, 364.71999105])
    ybnds = np.linspace(-23.59000015, 24.81000137, ny, endpoint=True)
    x, y = np.meshgrid(xbnds, ybnds)
    data = ((np.sin(np.deg2rad(x))) / 10. + np.exp(np.cos(np.deg2rad(y))))
    data = data[:-1, :-1]
    rp = ccrs.RotatedPole(pole_longitude=177.5, pole_latitude=37.5)
    plt.figure(figsize=(10, 6))
    ax = plt.subplot(111, projection=ccrs.PlateCarree(180))
    plt.pcolormesh(xbnds, ybnds, data, transform=rp, cmap='Spectral')
    ax.coastlines()
    ax.set_global()
@pytest.mark.natural_earth
@ImageTesting(['pcolormesh_goode_wrap'])
def test_pcolormesh_goode_wrap():
    """Global mesh must stay inside the interrupted Goode projection boundary."""
    # global data on an Interrupted Goode Homolosine projection
    # shouldn't spill outside projection boundary
    x = np.linspace(0, 360, 73)
    y = np.linspace(-87.5, 87.5, 36)
    X, Y = np.meshgrid(*[np.deg2rad(c) for c in (x, y)])
    Z = np.cos(Y) + 0.375 * np.sin(2. * X)
    Z = Z[:-1, :-1]
    ax = plt.axes(projection=ccrs.InterruptedGoodeHomolosine())
    ax.coastlines()
    ax.pcolormesh(x, y, Z, transform=ccrs.PlateCarree())
@pytest.mark.natural_earth
@ImageTesting(['pcolormesh_mercator_wrap'])
def test_pcolormesh_mercator_wrap():
    """Global mesh wrapping on a Mercator target projection."""
    x = np.linspace(0, 360, 73)
    y = np.linspace(-87.5, 87.5, 36)
    X, Y = np.meshgrid(*[np.deg2rad(c) for c in (x, y)])
    Z = np.cos(Y) + 0.375 * np.sin(2. * X)
    Z = Z[:-1, :-1]
    ax = plt.axes(projection=ccrs.Mercator())
    ax.coastlines()
    ax.pcolormesh(x, y, Z, transform=ccrs.PlateCarree())
@pytest.mark.natural_earth
@ImageTesting(['quiver_plate_carree'])
def test_quiver_plate_carree():
    """Quiver field on its native projection and re-projected to polar stereo."""
    x = np.arange(-60, 42.5, 2.5)
    y = np.arange(30, 72.5, 2.5)
    x2d, y2d = np.meshgrid(x, y)
    u = np.cos(np.deg2rad(y2d))
    v = np.cos(2. * np.deg2rad(x2d))
    mag = (u**2 + v**2)**.5
    plot_extent = [-60, 40, 30, 70]
    plt.figure(figsize=(6, 6))
    # plot on native projection
    ax = plt.subplot(211, projection=ccrs.PlateCarree())
    ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
    ax.coastlines()
    ax.quiver(x, y, u, v, mag)
    # plot on a different projection
    ax = plt.subplot(212, projection=ccrs.NorthPolarStereo())
    ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
    ax.coastlines()
    ax.quiver(x, y, u, v, mag, transform=ccrs.PlateCarree())
@pytest.mark.natural_earth
@ImageTesting(['quiver_rotated_pole'])
def test_quiver_rotated_pole():
    """Quiver field defined on a rotated-pole grid, native and re-projected."""
    nx, ny = 22, 36
    x = np.linspace(311.91998291, 391.11999512, nx, endpoint=True)
    y = np.linspace(-23.59000015, 24.81000137, ny, endpoint=True)
    x2d, y2d = np.meshgrid(x, y)
    u = np.cos(np.deg2rad(y2d))
    v = -2. * np.cos(2. * np.deg2rad(y2d)) * np.sin(np.deg2rad(x2d))
    mag = (u**2 + v**2)**.5
    rp = ccrs.RotatedPole(pole_longitude=177.5, pole_latitude=37.5)
    plot_extent = [x[0], x[-1], y[0], y[-1]]
    # plot on native projection
    plt.figure(figsize=(6, 6))
    ax = plt.subplot(211, projection=rp)
    ax.set_extent(plot_extent, crs=rp)
    ax.coastlines()
    ax.quiver(x, y, u, v, mag)
    # plot on different projection
    ax = plt.subplot(212, projection=ccrs.PlateCarree())
    ax.set_extent(plot_extent, crs=rp)
    ax.coastlines()
    ax.quiver(x, y, u, v, mag, transform=rp)
@pytest.mark.natural_earth
@ImageTesting(['quiver_regrid'], tolerance=1.3)
def test_quiver_regrid():
    """Quiver with regrid_shape: vectors resampled onto the target projection."""
    x = np.arange(-60, 42.5, 2.5)
    y = np.arange(30, 72.5, 2.5)
    x2d, y2d = np.meshgrid(x, y)
    u = np.cos(np.deg2rad(y2d))
    v = np.cos(2. * np.deg2rad(x2d))
    mag = (u**2 + v**2)**.5
    plot_extent = [-60, 40, 30, 70]
    plt.figure(figsize=(6, 3))
    ax = plt.axes(projection=ccrs.NorthPolarStereo())
    ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
    ax.coastlines()
    ax.quiver(x, y, u, v, mag, transform=ccrs.PlateCarree(),
              regrid_shape=30)
@pytest.mark.natural_earth
@ImageTesting(['quiver_regrid_with_extent'])
def test_quiver_regrid_with_extent():
    """Quiver regridding restricted to an explicit target_extent."""
    x = np.arange(-60, 42.5, 2.5)
    y = np.arange(30, 72.5, 2.5)
    x2d, y2d = np.meshgrid(x, y)
    u = np.cos(np.deg2rad(y2d))
    v = np.cos(2. * np.deg2rad(x2d))
    mag = (u**2 + v**2)**.5
    plot_extent = [-60, 40, 30, 70]
    # extent in target (NorthPolarStereo) projection coordinates
    target_extent = [-3e6, 2e6, -6e6, -2.5e6]
    plt.figure(figsize=(6, 3))
    ax = plt.axes(projection=ccrs.NorthPolarStereo())
    ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
    ax.coastlines()
    ax.quiver(x, y, u, v, mag, transform=ccrs.PlateCarree(),
              regrid_shape=10, target_extent=target_extent)
@pytest.mark.natural_earth
@ImageTesting(['barbs_plate_carree'])
def test_barbs():
    """Wind barbs on the native projection and re-projected to polar stereo."""
    x = np.arange(-60, 45, 5)
    y = np.arange(30, 75, 5)
    x2d, y2d = np.meshgrid(x, y)
    u = 40 * np.cos(np.deg2rad(y2d))
    v = 40 * np.cos(2. * np.deg2rad(x2d))
    plot_extent = [-60, 40, 30, 70]
    plt.figure(figsize=(6, 6))
    # plot on native projection
    ax = plt.subplot(211, projection=ccrs.PlateCarree())
    ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
    ax.coastlines()
    ax.barbs(x, y, u, v, length=4, linewidth=.25)
    # plot on a different projection
    ax = plt.subplot(212, projection=ccrs.NorthPolarStereo())
    ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
    ax.coastlines()
    ax.barbs(x, y, u, v, transform=ccrs.PlateCarree(), length=4, linewidth=.25)
@pytest.mark.natural_earth
@ImageTesting(['barbs_regrid'])
def test_barbs_regrid():
    """Wind barbs resampled onto the target projection via regrid_shape."""
    x = np.arange(-60, 42.5, 2.5)
    y = np.arange(30, 72.5, 2.5)
    x2d, y2d = np.meshgrid(x, y)
    u = 40 * np.cos(np.deg2rad(y2d))
    v = 40 * np.cos(2. * np.deg2rad(x2d))
    mag = (u**2 + v**2)**.5
    plot_extent = [-60, 40, 30, 70]
    plt.figure(figsize=(6, 3))
    ax = plt.axes(projection=ccrs.NorthPolarStereo())
    ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
    ax.coastlines()
    ax.barbs(x, y, u, v, mag, transform=ccrs.PlateCarree(),
             length=4, linewidth=.4, regrid_shape=20)
@pytest.mark.natural_earth
@ImageTesting(['barbs_regrid_with_extent'])
def test_barbs_regrid_with_extent():
    """Wind-barb regridding restricted to an explicit target_extent."""
    x = np.arange(-60, 42.5, 2.5)
    y = np.arange(30, 72.5, 2.5)
    x2d, y2d = np.meshgrid(x, y)
    u = 40 * np.cos(np.deg2rad(y2d))
    v = 40 * np.cos(2. * np.deg2rad(x2d))
    mag = (u**2 + v**2)**.5
    plot_extent = [-60, 40, 30, 70]
    # extent in target (NorthPolarStereo) projection coordinates
    target_extent = [-3e6, 2e6, -6e6, -2.5e6]
    plt.figure(figsize=(6, 3))
    ax = plt.axes(projection=ccrs.NorthPolarStereo())
    ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
    ax.coastlines()
    ax.barbs(x, y, u, v, mag, transform=ccrs.PlateCarree(),
             length=4, linewidth=.25, regrid_shape=10,
             target_extent=target_extent)
@pytest.mark.natural_earth
@ImageTesting(['barbs_1d'])
def test_barbs_1d():
    """Wind barbs from scattered 1-d point data on the native projection."""
    x = np.array([20., 30., -17., 15.])
    y = np.array([-1., 35., 11., 40.])
    u = np.array([23., -18., 2., -11.])
    v = np.array([5., -4., 19., 11.])
    plot_extent = [-21, 40, -5, 45]
    plt.figure(figsize=(6, 5))
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
    ax.coastlines()
    ax.barbs(x, y, u, v, transform=ccrs.PlateCarree(),
             length=8, linewidth=1, color='#7f7f7f')
@pytest.mark.natural_earth
@ImageTesting(['barbs_1d_transformed'])
def test_barbs_1d_transformed():
    """Wind barbs from scattered 1-d point data re-projected to polar stereo."""
    x = np.array([20., 30., -17., 15.])
    y = np.array([-1., 35., 11., 40.])
    u = np.array([23., -18., 2., -11.])
    v = np.array([5., -4., 19., 11.])
    plot_extent = [-20, 31, -5, 45]
    plt.figure(figsize=(6, 5))
    ax = plt.axes(projection=ccrs.NorthPolarStereo())
    ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
    ax.coastlines()
    ax.barbs(x, y, u, v, transform=ccrs.PlateCarree(),
             length=8, linewidth=1, color='#7f7f7f')
@pytest.mark.natural_earth
@ImageTesting([_STREAMPLOT_IMAGE], style=_STREAMPLOT_STYLE)
def test_streamplot():
    """Streamlines of a lat/lon vector field drawn on a polar stereo axes."""
    x = np.arange(-60, 42.5, 2.5)
    y = np.arange(30, 72.5, 2.5)
    x2d, y2d = np.meshgrid(x, y)
    u = np.cos(np.deg2rad(y2d))
    v = np.cos(2. * np.deg2rad(x2d))
    mag = (u**2 + v**2)**.5
    plot_extent = [-60, 40, 30, 70]
    plt.figure(figsize=(6, 3))
    ax = plt.axes(projection=ccrs.NorthPolarStereo())
    ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
    ax.coastlines()
    ax.streamplot(x, y, u, v, transform=ccrs.PlateCarree(),
                  density=(1.5, 2), color=mag, linewidth=2*mag)
| lgpl-3.0 |
jmighion/ansible | test/units/modules/network/ios/test_ios_vrf.py | 20 | 6195 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.ios import ios_vrf
from .ios_module import TestIosModule, load_fixture, set_module_args
class TestIosVrfModule(TestIosModule):
    """Unit tests asserting the CLI commands generated by the ios_vrf module."""

    module = ios_vrf

    def setUp(self):
        # Patch the module's connection helpers so no real device is contacted.
        self.mock_get_config = patch('ansible.modules.network.ios.ios_vrf.get_config')
        self.get_config = self.mock_get_config.start()
        self.mock_load_config = patch('ansible.modules.network.ios.ios_vrf.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_exec_command = patch('ansible.modules.network.ios.ios_vrf.exec_command')
        self.exec_command = self.mock_exec_command.start()

    def tearDown(self):
        self.mock_get_config.stop()
        self.mock_load_config.stop()
        self.mock_exec_command.stop()

    def load_fixtures(self, commands=None):
        # All tests diff their arguments against the same canned running config.
        self.get_config.return_value = load_fixture('ios_vrf_config.cfg')
        self.exec_command.return_value = (0, load_fixture('ios_vrf_config.cfg').strip(), None)
        self.load_config.return_value = None

    def test_ios_vrf_name(self):
        # New VRF name -> full definition plus both address families.
        set_module_args(dict(name='test_4'))
        commands = ['vrf definition test_4', 'address-family ipv4', 'exit', 'address-family ipv6', 'exit']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_ios_vrf_name_unchanged(self):
        # Arguments matching the fixture exactly must produce no commands.
        set_module_args(dict(name='test_1', rd='1:100', description='test vrf 1'))
        self.execute_module()

    def test_ios_vrf_description(self):
        set_module_args(dict(name='test_1', description='test string'))
        commands = ['vrf definition test_1', 'address-family ipv4', 'exit', 'address-family ipv6', 'exit', 'description test string']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_ios_vrf_rd(self):
        set_module_args(dict(name='test_1', rd='2:100'))
        commands = ['vrf definition test_1', 'address-family ipv4', 'exit', 'address-family ipv6', 'exit', 'rd 2:100']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_ios_vrf_interfaces(self):
        # Moving the VRF from Ethernet2 to Ethernet1 removes and re-adds it.
        set_module_args(dict(name='test_1', interfaces=['Ethernet1']))
        commands = ['interface Ethernet2', 'no vrf forwarding test_1',
                    'interface Ethernet1', 'vrf forwarding test_1',
                    'ip address 1.2.3.4/5']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_ios_vrf_state_absent(self):
        set_module_args(dict(name='test_1', state='absent'))
        commands = ['no vrf definition test_1']
        self.execute_module(changed=True, commands=commands)

    def test_ios_vrf_purge_all(self):
        set_module_args(dict(purge=True))
        commands = ['no vrf definition test_1', 'no vrf definition test_2',
                    'no vrf definition test_3']
        self.execute_module(changed=True, commands=commands)

    def test_ios_vrf_purge_all_but_one(self):
        set_module_args(dict(name='test_1', purge=True))
        commands = ['no vrf definition test_2', 'no vrf definition test_3']
        self.execute_module(changed=True, commands=commands)

    def test_ios_vrfs_no_purge(self):
        vrfs = [{'name': 'test_1'}, {'name': 'test_4'}]
        set_module_args(dict(vrfs=vrfs))
        commands = ['vrf definition test_4',
                    'address-family ipv4', 'exit',
                    'address-family ipv6', 'exit']
        self.execute_module(changed=True, commands=commands)

    def test_ios_vrfs_purge(self):
        vrfs = [{'name': 'test_1'}, {'name': 'test_4'}]
        set_module_args(dict(vrfs=vrfs, purge=True))
        commands = ['vrf definition test_4',
                    'address-family ipv4', 'exit',
                    'address-family ipv6', 'exit',
                    'no vrf definition test_2',
                    'no vrf definition test_3']
        self.execute_module(changed=True, commands=commands)

    def test_ios_vrfs_global_arg(self):
        # A top-level arg is applied to every entry in the vrfs list.
        vrfs = [{'name': 'test_1'}, {'name': 'test_2'}]
        set_module_args(dict(vrfs=vrfs, description='test string'))
        commands = ['vrf definition test_1', 'address-family ipv4', 'exit', 'address-family ipv6', 'exit', 'description test string',
                    'vrf definition test_2', 'address-family ipv4', 'exit', 'address-family ipv6', 'exit', 'description test string']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_ios_vrfs_local_override_description(self):
        # A per-entry value takes precedence over the top-level arg.
        vrfs = [{'name': 'test_1', 'description': 'test vrf 1'},
                {'name': 'test_2'}]
        set_module_args(dict(vrfs=vrfs, description='test string'))
        commands = ['vrf definition test_2', 'address-family ipv4', 'exit', 'address-family ipv6', 'exit', 'description test string']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_ios_vrfs_local_override_state(self):
        vrfs = [{'name': 'test_1', 'state': 'absent'},
                {'name': 'test_2'}]
        set_module_args(dict(vrfs=vrfs, description='test string'))
        commands = ['no vrf definition test_1', 'vrf definition test_2', 'address-family ipv4', 'exit', 'address-family ipv6', 'exit',
                    'description test string']
        self.execute_module(changed=True, commands=commands, sort=False)
| gpl-3.0 |
dannyperry571/theapprentice | script.module.livestreamer/lib/livestreamer/plugins/euronews.py | 34 | 1340 | import re
from itertools import chain
from livestreamer.compat import urlparse
from livestreamer.plugin import Plugin
from livestreamer.plugin.api import http
from livestreamer.stream import HLSStream, HTTPStream
from livestreamer.plugin.api.support_plugin import common_jwplayer as jwplayer
_url_re = re.compile("http(s)?://(\w+\.)?euronews.com")
class Euronews(Plugin):
    """Livestreamer plugin extracting Euronews streams from its JWPlayer playlist."""

    @classmethod
    def can_handle_url(self, url):
        # Truthy match object when the URL belongs to euronews.com.
        return _url_re.match(url)

    def _create_stream(self, source):
        """Yield (name, stream) pairs for a single playlist source entry."""
        url = source["file"]
        if urlparse(url).path.endswith("m3u8"):
            # HLS playlist: expand into one stream per quality variant.
            streams = HLSStream.parse_variant_playlist(self.session, url)
            # TODO: Replace with "yield from" when dropping Python 2.
            for stream in streams.items():
                yield stream
        else:
            # Plain progressive download; label defaults to "vod".
            name = source.get("label", "vod")
            yield name, HTTPStream(self.session, url)

    def _get_streams(self):
        """Fetch the page, parse its JWPlayer playlist and yield all streams."""
        res = http.get(self.url)
        playlist = jwplayer.parse_playlist(res)
        if not playlist:
            return
        for item in playlist:
            streams = map(self._create_stream, item["sources"])
            # TODO: Replace with "yield from" when dropping Python 2.
            for stream in chain.from_iterable(streams):
                yield stream
__plugin__ = Euronews
| gpl-2.0 |
702nADOS/sumo | tools/createVehTypeDistribution.py | 1 | 9350 | #!/usr/bin/env python
"""
@file createVehTypeDistribution.py
@author Mirko Barthauer (Technische Universitaet Braunschweig, Institut fuer Verkehr und Stadtbauwesen)
@author Jakob Erdmann
@author Michael Behrisch
@date 2016-06-09
@version $Id: createVehTypeDistribution.py 22929 2017-02-13 14:38:39Z behrisch $
Creates a vehicle type distribution with a number of representative car-following parameter sets. Optional parameters can be viewed by using the --help switch.
Mandatory input:
path to config file - defines the car-following model parameter distributions for one single vehicle type distribution
In the config file, one line is used per vehicle type attribute. The syntax is:
nameOfAttribute; valueOfAttribute [; limits]
ValueOfAttribute can be a string, a scalar value or a distribution definition. Available distributions and its syntax are:
"normal(mu,sd)" with mu and sd being floating numbers: Normal distribution with mean mu and standard deviation sd.
"uniform(a,b)" with limits a and b being floating numbers: Uniform distribution between a and b.
"gamma(alpha,beta)" with parameters alpha and beta: Gamma distribution.
Limits are optional and defined as the allowed interval: e.g. "[0,1]" or "[3.5,5.0]". By default, no negative values are accepted but have to be enabled by
a negative lower limit.
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2010-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
import os
import sys
import csv
import re
import xml.dom.minidom
import random
import argparse
class FixDistribution(object):
    """Degenerate "distribution" that always yields the configured value.

    Also the base class for the random distributions below, which override
    :meth:`_sampleValue`. Sampled numeric values are clamped to the
    configured limits; by default the lower limit is 0, so no negative
    values are produced unless a negative lower limit is set.
    """

    def __init__(self, params, isNumeric=True):
        if isNumeric:
            self._params = tuple(float(p) for p in params)
        else:
            self._params = params
        # (lower, upper) clamp interval; None means unbounded on that side.
        self._limits = (0, None)
        self._isNumeric = isNumeric

    def setLimits(self, limits):
        """Set the (lower, upper) interval used to clamp sampled values."""
        self._limits = limits

    def sampleValue(self):
        """Draw one value, clamped to the limits when the value is numeric."""
        value = self._sampleValue()
        if self._isNumeric:
            lower, upper = self._limits
            if lower is not None and value < lower:
                value = lower
            elif upper is not None and value > upper:
                value = upper
        return value

    def sampleValueString(self, decimalPlaces):
        """Return a sampled value formatted with the given number of decimals."""
        if not self._isNumeric:
            return self.sampleValue()
        return ("%." + str(decimalPlaces) + "f") % self.sampleValue()

    def _sampleValue(self):
        # Fixed "distribution": always the first configured parameter.
        return self._params[0]
class NormalDistribution(FixDistribution):
    """Normal distribution with mean *loc* and standard deviation *scale*."""

    def __init__(self, loc, scale):
        FixDistribution.__init__(self, (loc, scale))

    def _sampleValue(self):
        return random.normalvariate(self._params[0], self._params[1])
class UniformDistribution(FixDistribution):
    """Uniform distribution on the interval [*lower*, *upper*]."""

    def __init__(self, lower, upper):
        FixDistribution.__init__(self, (lower, upper))

    def _sampleValue(self):
        return random.uniform(self._params[0], self._params[1])
class GammaDistribution(FixDistribution):
    """Gamma distribution parameterised as gamma(alpha, beta) in the config.

    NOTE(review): the second parameter is stored as 1.0/scale and passed to
    random.gammavariate as its beta argument — confirm whether the config's
    "beta" is intended as a rate (1/scale) or a scale parameter.
    """

    def __init__(self, loc, scale):
        FixDistribution.__init__(self, (loc, 1.0 / scale))

    def _sampleValue(self):
        return random.gammavariate(self._params[0], self._params[1])
def get_options(args=None):
    """Parse command-line options for the vehicle type distribution generator.

    NOTE(review): *args* is accepted but never forwarded to parse_args(), so
    the parser always reads sys.argv[1:] — confirm before relying on
    programmatic invocation with an explicit argument list.
    """
    argParser = argparse.ArgumentParser()
    argParser.add_argument(
        "configFile", help="file path of the config file which defines the car-following parameter distributions")
    argParser.add_argument(
        "-o", "--output-file", dest="outputFile", default="vTypeDistributions.add.xml", help="file path of the output file (if the file already exists, the script tries to insert the distribution node into it)")
    argParser.add_argument(
        "-n", "--name", dest="vehDistName", default="vehDist", help="alphanumerical ID used for the created vehicle type distribution")
    argParser.add_argument(
        "-s", "--size", type=int, default=100, dest="vehicleCount", help="number of vTypes in the distribution")
    argParser.add_argument(
        "-d", "--decimal-places", type=int, default=3, dest="decimalPlaces", help="number of decimal places for numeric attribute values")
    argParser.add_argument("--seed", type=int, help="random seed", default=42)
    options = argParser.parse_args()
    return options
def readConfigFile(filePath):
    """Parse the config file into a dict mapping attribute name -> distribution.

    Each non-empty line has the form "name; value [; [lower,upper]]", where
    value is either a fixed value or one of the distribution expressions
    normal(mu,sd), uniform(a,b) or gamma(alpha,beta). The optional third
    field sets the clamp interval for sampled values.
    """
    result = {}
    # Fix: the patterns are now raw strings — sequences such as "\(" and
    # "\s" are invalid escape sequences in ordinary string literals
    # (DeprecationWarning since Python 3.6, SyntaxError in future versions).
    # The matched text is unchanged.
    distSyntaxes = {'normal': r'normal\(\s*(-?[0-9]+(\.[0-9]+)?)\s*,\s*([0-9]+(\.[0-9]+)?)\s*\)',
                    'uniform': r'uniform\(\s*(-?[0-9]+(\.[0-9]+)?)\s*,\s*(-?[0-9]+(\.[0-9]+)?)\s*\)',
                    'gamma': r'gamma\(\s*([0-9]+(\.[0-9]+)?)\s*,\s*([0-9]+(\.[0-9]+)?)\s*\)'}
    with open(filePath, 'rb') as f:
        reader = csv.reader(f, delimiter=';')
        for row in reader:
            parName = None
            lowerLimit = 0
            upperLimit = None
            value = None
            if len(row) >= 2:
                if len(row[0].strip()) > 0:
                    parName = row[0].strip()
                    # check if attribute value matches given distribution
                    # syntax
                    attValue = row[1].strip()
                    distFound = False
                    for distName, distSyntax in distSyntaxes.items():
                        items = re.findall(distSyntax, attValue)
                        distFound = len(items) > 0
                        if distFound:  # found distribution
                            distPar1 = float(items[0][0])
                            distPar2 = float(items[0][2])
                            if distName == 'normal':
                                value = NormalDistribution(distPar1, distPar2)
                            elif distName == 'uniform':
                                value = UniformDistribution(distPar1, distPar2)
                            elif distName == 'gamma':
                                value = GammaDistribution(distPar1, distPar2)
                            break
                    if not distFound:
                        # fixed value; treat it as numeric if it contains a number
                        isNumeric = len(re.findall(
                            r'(-?[0-9]+(\.[0-9]+)?)', attValue)) > 0
                        value = FixDistribution((attValue,), isNumeric)
                    # get optional limits
                    if len(row) == 3:
                        limitValue = row[2].strip()
                        items = re.findall(
                            r'\[\s*(-?[0-9]+(\.[0-9]+)?)\s*,\s*(-?[0-9]+(\.[0-9]+)?)\s*\]', limitValue)
                        if len(items) > 0:
                            lowerLimit = float(items[0][0])
                            upperLimit = float(items[0][2])
                            value.setLimits((lowerLimit, upperLimit))
                    result[parName] = value
    return result
def main(options):
    """Sample vehicle types and write/merge the vTypeDistribution XML output."""
    if options.seed:
        random.seed(options.seed)
    vTypeParameters = readConfigFile(options.configFile)
    useExistingFile = False
    if os.path.exists(options.outputFile):
        # Merge into an existing additional file instead of overwriting it.
        try:
            domTree = xml.dom.minidom.parse(options.outputFile)
        except Exception as e:
            sys.exit("Cannot parse existing %s. Error: %s" %
                     (options.outputFile, str(e)))
        useExistingFile = True
    else:
        domTree = xml.dom.minidom.Document()
    vTypeDistNode = domTree.createElement("vTypeDistribution")
    vTypeDistNode.setAttribute("id", options.vehDistName)
    # Draw one independent sample per attribute for each generated vType.
    for i in range(0, options.vehicleCount):
        vTypeNode = domTree.createElement("vType")
        vTypeNode.setAttribute("id", options.vehDistName + str(i))
        for attName, attValue in vTypeParameters.items():
            vTypeNode.setAttribute(
                attName, attValue.sampleValueString(options.decimalPlaces))
        vTypeDistNode.appendChild(vTypeNode)
    # If a distribution with the same id already exists, replace it in place.
    existingDistNodes = domTree.getElementsByTagName("vTypeDistribution")
    replaceNode = None
    for existingDistNode in existingDistNodes:
        if existingDistNode.hasAttribute("id") and existingDistNode.getAttribute("id") == options.vehDistName:
            replaceNode = existingDistNode
            break
    if useExistingFile:
        if replaceNode is not None:
            replaceNode.parentNode.replaceChild(vTypeDistNode, replaceNode)
        else:
            domTree.documentElement.appendChild(vTypeDistNode)
    else:
        # Fresh file: wrap the distribution in a schema-referencing root node.
        additionalNode = domTree.createElement("additional")
        additionalNode.setAttribute(
            "xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance")
        additionalNode.setAttribute(
            "xsi:noNamespaceSchemaLocation", "http://sumo.dlr.de/xsd/additional_file.xsd")
        additionalNode.appendChild(vTypeDistNode)
        domTree.appendChild(additionalNode)
    try:
        fileHandle = open(options.outputFile, "wb")
        domTree.documentElement.writexml(
            fileHandle, addindent="    ", newl="\n")
        fileHandle.close()
    except Exception as e:
        sys.exit(str(e))
    sys.stdout.write("Output written to %s" % options.outputFile)
if __name__ == "__main__":
options = get_options(sys.argv)
main(options)
| gpl-3.0 |
amitsela/beam | sdks/python/apache_beam/examples/cookbook/coders.py | 9 | 3380 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A workflow using custom JSON-based coders for text sources and sinks.
The input file contains a JSON string on each line describing a match
record using the following schema:
{'guest': [TEAM_NAME, GOALS], 'host': [TEAM_NAME, GOALS]}
The output file will contain the computed points for each team with one team
per line in the following format:
[TEAM_NAME, POINTS]
"""
from __future__ import absolute_import
import argparse
import json
import logging
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.utils.pipeline_options import PipelineOptions
from apache_beam.utils.pipeline_options import SetupOptions
class JsonCoder(object):
  """A JSON coder interpreting each line as a JSON string."""

  def encode(self, x):
    # Python object -> JSON text (one record per line).
    return json.dumps(x)

  def decode(self, x):
    # JSON text -> Python object.
    return json.loads(x)
def compute_points(record):
    """Compute points based on the record containing the match result.

    Yields (team_name, points) pairs: 3 points for a win, 1 point for a
    draw, and 0 points for a loss
    (see http://en.wikipedia.org/wiki/Three_points_for_a_win).
    """
    host_name, host_goals = record['host']
    guest_name, guest_goals = record['guest']
    if host_goals > guest_goals:
        outcome = [(host_name, 3), (guest_name, 0)]
    elif host_goals < guest_goals:
        outcome = [(host_name, 0), (guest_name, 3)]
    else:
        outcome = [(host_name, 1), (guest_name, 1)]
    # Host result is always emitted before the guest result.
    for pair in outcome:
        yield pair
def run(argv=None):
    """Runs the workflow computing total points from a collection of matches.

    Args:
      argv: command-line arguments (defaults to sys.argv when None).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input',
                        required=True,
                        help='Input file to process.')
    parser.add_argument('--output',
                        required=True,
                        help='Output file to write results to.')
    known_args, pipeline_args = parser.parse_known_args(argv)
    # We use the save_main_session option because one or more DoFn's in this
    # workflow rely on global context (e.g., a module imported at module level).
    pipeline_options = PipelineOptions(pipeline_args)
    pipeline_options.view_as(SetupOptions).save_main_session = True
    # BUG FIX: the original constructed the pipeline twice, the second call
    # (beam.Pipeline(argv=pipeline_args)) overwriting the first and throwing
    # away the configured options (including save_main_session). Build it once.
    p = beam.Pipeline(options=pipeline_options)

    (p  # pylint: disable=expression-not-assigned
     | 'read' >> ReadFromText(known_args.input, coder=JsonCoder())
     | 'points' >> beam.FlatMap(compute_points)
     | beam.CombinePerKey(sum)
     | 'write' >> WriteToText(known_args.output, coder=JsonCoder()))
    p.run()
if __name__ == '__main__':
    # Enable INFO-level logging before launching the pipeline.
    logging.getLogger().setLevel(logging.INFO)
    run()
| apache-2.0 |
PourroyJean/performance_modelisation | script/data visualisation/venv/lib/python3.6/encodings/cp1140.py | 272 | 13105 | """ Python Character Mapping Codec cp1140 generated from 'python-mappings/CP1140.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless CP1140 codec delegating to the generated charmap tables."""

    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap codecs are stateless, so incremental encoding needs no buffering.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Charmap codecs are stateless, so incremental decoding needs no buffering.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
    # Inherits encode() from Codec; no stream-specific state required.
    pass
class StreamReader(Codec, codecs.StreamReader):
    # Inherits decode() from Codec; no stream-specific state required.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record the encodings package uses to register
    this codec under the name 'cp1140'."""
    return codecs.CodecInfo(
        name='cp1140',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table

# CP1140 is EBCDIC code page 037 with the currency sign at byte 0x9F replaced
# by the euro sign (U+20AC). One string entry per byte value 0x00-0xFF;
# generated by gencodec.py from CP1140.TXT — do not edit by hand.
decoding_table = (
    '\x00'     # 0x00 -> NULL
    '\x01'     # 0x01 -> START OF HEADING
    '\x02'     # 0x02 -> START OF TEXT
    '\x03'     # 0x03 -> END OF TEXT
    '\x9c'     # 0x04 -> CONTROL
    '\t'       # 0x05 -> HORIZONTAL TABULATION
    '\x86'     # 0x06 -> CONTROL
    '\x7f'     # 0x07 -> DELETE
    '\x97'     # 0x08 -> CONTROL
    '\x8d'     # 0x09 -> CONTROL
    '\x8e'     # 0x0A -> CONTROL
    '\x0b'     # 0x0B -> VERTICAL TABULATION
    '\x0c'     # 0x0C -> FORM FEED
    '\r'       # 0x0D -> CARRIAGE RETURN
    '\x0e'     # 0x0E -> SHIFT OUT
    '\x0f'     # 0x0F -> SHIFT IN
    '\x10'     # 0x10 -> DATA LINK ESCAPE
    '\x11'     # 0x11 -> DEVICE CONTROL ONE
    '\x12'     # 0x12 -> DEVICE CONTROL TWO
    '\x13'     # 0x13 -> DEVICE CONTROL THREE
    '\x9d'     # 0x14 -> CONTROL
    '\x85'     # 0x15 -> CONTROL
    '\x08'     # 0x16 -> BACKSPACE
    '\x87'     # 0x17 -> CONTROL
    '\x18'     # 0x18 -> CANCEL
    '\x19'     # 0x19 -> END OF MEDIUM
    '\x92'     # 0x1A -> CONTROL
    '\x8f'     # 0x1B -> CONTROL
    '\x1c'     # 0x1C -> FILE SEPARATOR
    '\x1d'     # 0x1D -> GROUP SEPARATOR
    '\x1e'     # 0x1E -> RECORD SEPARATOR
    '\x1f'     # 0x1F -> UNIT SEPARATOR
    '\x80'     # 0x20 -> CONTROL
    '\x81'     # 0x21 -> CONTROL
    '\x82'     # 0x22 -> CONTROL
    '\x83'     # 0x23 -> CONTROL
    '\x84'     # 0x24 -> CONTROL
    '\n'       # 0x25 -> LINE FEED
    '\x17'     # 0x26 -> END OF TRANSMISSION BLOCK
    '\x1b'     # 0x27 -> ESCAPE
    '\x88'     # 0x28 -> CONTROL
    '\x89'     # 0x29 -> CONTROL
    '\x8a'     # 0x2A -> CONTROL
    '\x8b'     # 0x2B -> CONTROL
    '\x8c'     # 0x2C -> CONTROL
    '\x05'     # 0x2D -> ENQUIRY
    '\x06'     # 0x2E -> ACKNOWLEDGE
    '\x07'     # 0x2F -> BELL
    '\x90'     # 0x30 -> CONTROL
    '\x91'     # 0x31 -> CONTROL
    '\x16'     # 0x32 -> SYNCHRONOUS IDLE
    '\x93'     # 0x33 -> CONTROL
    '\x94'     # 0x34 -> CONTROL
    '\x95'     # 0x35 -> CONTROL
    '\x96'     # 0x36 -> CONTROL
    '\x04'     # 0x37 -> END OF TRANSMISSION
    '\x98'     # 0x38 -> CONTROL
    '\x99'     # 0x39 -> CONTROL
    '\x9a'     # 0x3A -> CONTROL
    '\x9b'     # 0x3B -> CONTROL
    '\x14'     # 0x3C -> DEVICE CONTROL FOUR
    '\x15'     # 0x3D -> NEGATIVE ACKNOWLEDGE
    '\x9e'     # 0x3E -> CONTROL
    '\x1a'     # 0x3F -> SUBSTITUTE
    ' '        # 0x40 -> SPACE
    '\xa0'     # 0x41 -> NO-BREAK SPACE
    '\xe2'     # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
    '\xe4'     # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
    '\xe0'     # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
    '\xe1'     # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
    '\xe3'     # 0x46 -> LATIN SMALL LETTER A WITH TILDE
    '\xe5'     # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
    '\xe7'     # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
    '\xf1'     # 0x49 -> LATIN SMALL LETTER N WITH TILDE
    '\xa2'     # 0x4A -> CENT SIGN
    '.'        # 0x4B -> FULL STOP
    '<'        # 0x4C -> LESS-THAN SIGN
    '('        # 0x4D -> LEFT PARENTHESIS
    '+'        # 0x4E -> PLUS SIGN
    '|'        # 0x4F -> VERTICAL LINE
    '&'        # 0x50 -> AMPERSAND
    '\xe9'     # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
    '\xea'     # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
    '\xeb'     # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
    '\xe8'     # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
    '\xed'     # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
    '\xee'     # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
    '\xef'     # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
    '\xec'     # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
    '\xdf'     # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
    '!'        # 0x5A -> EXCLAMATION MARK
    '$'        # 0x5B -> DOLLAR SIGN
    '*'        # 0x5C -> ASTERISK
    ')'        # 0x5D -> RIGHT PARENTHESIS
    ';'        # 0x5E -> SEMICOLON
    '\xac'     # 0x5F -> NOT SIGN
    '-'        # 0x60 -> HYPHEN-MINUS
    '/'        # 0x61 -> SOLIDUS
    '\xc2'     # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    '\xc4'     # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
    '\xc0'     # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
    '\xc1'     # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
    '\xc3'     # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
    '\xc5'     # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
    '\xc7'     # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
    '\xd1'     # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
    '\xa6'     # 0x6A -> BROKEN BAR
    ','        # 0x6B -> COMMA
    '%'        # 0x6C -> PERCENT SIGN
    '_'        # 0x6D -> LOW LINE
    '>'        # 0x6E -> GREATER-THAN SIGN
    '?'        # 0x6F -> QUESTION MARK
    '\xf8'     # 0x70 -> LATIN SMALL LETTER O WITH STROKE
    '\xc9'     # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
    '\xca'     # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    '\xcb'     # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
    '\xc8'     # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
    '\xcd'     # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
    '\xce'     # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    '\xcf'     # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
    '\xcc'     # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
    '`'        # 0x79 -> GRAVE ACCENT
    ':'        # 0x7A -> COLON
    '#'        # 0x7B -> NUMBER SIGN
    '@'        # 0x7C -> COMMERCIAL AT
    "'"        # 0x7D -> APOSTROPHE
    '='        # 0x7E -> EQUALS SIGN
    '"'        # 0x7F -> QUOTATION MARK
    '\xd8'     # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
    'a'        # 0x81 -> LATIN SMALL LETTER A
    'b'        # 0x82 -> LATIN SMALL LETTER B
    'c'        # 0x83 -> LATIN SMALL LETTER C
    'd'        # 0x84 -> LATIN SMALL LETTER D
    'e'        # 0x85 -> LATIN SMALL LETTER E
    'f'        # 0x86 -> LATIN SMALL LETTER F
    'g'        # 0x87 -> LATIN SMALL LETTER G
    'h'        # 0x88 -> LATIN SMALL LETTER H
    'i'        # 0x89 -> LATIN SMALL LETTER I
    '\xab'     # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\xbb'     # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\xf0'     # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
    '\xfd'     # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
    '\xfe'     # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
    '\xb1'     # 0x8F -> PLUS-MINUS SIGN
    '\xb0'     # 0x90 -> DEGREE SIGN
    'j'        # 0x91 -> LATIN SMALL LETTER J
    'k'        # 0x92 -> LATIN SMALL LETTER K
    'l'        # 0x93 -> LATIN SMALL LETTER L
    'm'        # 0x94 -> LATIN SMALL LETTER M
    'n'        # 0x95 -> LATIN SMALL LETTER N
    'o'        # 0x96 -> LATIN SMALL LETTER O
    'p'        # 0x97 -> LATIN SMALL LETTER P
    'q'        # 0x98 -> LATIN SMALL LETTER Q
    'r'        # 0x99 -> LATIN SMALL LETTER R
    '\xaa'     # 0x9A -> FEMININE ORDINAL INDICATOR
    '\xba'     # 0x9B -> MASCULINE ORDINAL INDICATOR
    '\xe6'     # 0x9C -> LATIN SMALL LIGATURE AE
    '\xb8'     # 0x9D -> CEDILLA
    '\xc6'     # 0x9E -> LATIN CAPITAL LIGATURE AE
    '\u20ac'   # 0x9F -> EURO SIGN (replaces CURRENCY SIGN of cp037)
    '\xb5'     # 0xA0 -> MICRO SIGN
    '~'        # 0xA1 -> TILDE
    's'        # 0xA2 -> LATIN SMALL LETTER S
    't'        # 0xA3 -> LATIN SMALL LETTER T
    'u'        # 0xA4 -> LATIN SMALL LETTER U
    'v'        # 0xA5 -> LATIN SMALL LETTER V
    'w'        # 0xA6 -> LATIN SMALL LETTER W
    'x'        # 0xA7 -> LATIN SMALL LETTER X
    'y'        # 0xA8 -> LATIN SMALL LETTER Y
    'z'        # 0xA9 -> LATIN SMALL LETTER Z
    '\xa1'     # 0xAA -> INVERTED EXCLAMATION MARK
    '\xbf'     # 0xAB -> INVERTED QUESTION MARK
    '\xd0'     # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
    '\xdd'     # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
    '\xde'     # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
    '\xae'     # 0xAF -> REGISTERED SIGN
    '^'        # 0xB0 -> CIRCUMFLEX ACCENT
    '\xa3'     # 0xB1 -> POUND SIGN
    '\xa5'     # 0xB2 -> YEN SIGN
    '\xb7'     # 0xB3 -> MIDDLE DOT
    '\xa9'     # 0xB4 -> COPYRIGHT SIGN
    '\xa7'     # 0xB5 -> SECTION SIGN
    '\xb6'     # 0xB6 -> PILCROW SIGN
    '\xbc'     # 0xB7 -> VULGAR FRACTION ONE QUARTER
    '\xbd'     # 0xB8 -> VULGAR FRACTION ONE HALF
    '\xbe'     # 0xB9 -> VULGAR FRACTION THREE QUARTERS
    '['        # 0xBA -> LEFT SQUARE BRACKET
    ']'        # 0xBB -> RIGHT SQUARE BRACKET
    '\xaf'     # 0xBC -> MACRON
    '\xa8'     # 0xBD -> DIAERESIS
    '\xb4'     # 0xBE -> ACUTE ACCENT
    '\xd7'     # 0xBF -> MULTIPLICATION SIGN
    '{'        # 0xC0 -> LEFT CURLY BRACKET
    'A'        # 0xC1 -> LATIN CAPITAL LETTER A
    'B'        # 0xC2 -> LATIN CAPITAL LETTER B
    'C'        # 0xC3 -> LATIN CAPITAL LETTER C
    'D'        # 0xC4 -> LATIN CAPITAL LETTER D
    'E'        # 0xC5 -> LATIN CAPITAL LETTER E
    'F'        # 0xC6 -> LATIN CAPITAL LETTER F
    'G'        # 0xC7 -> LATIN CAPITAL LETTER G
    'H'        # 0xC8 -> LATIN CAPITAL LETTER H
    'I'        # 0xC9 -> LATIN CAPITAL LETTER I
    '\xad'     # 0xCA -> SOFT HYPHEN
    '\xf4'     # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
    '\xf6'     # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
    '\xf2'     # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
    '\xf3'     # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
    '\xf5'     # 0xCF -> LATIN SMALL LETTER O WITH TILDE
    '}'        # 0xD0 -> RIGHT CURLY BRACKET
    'J'        # 0xD1 -> LATIN CAPITAL LETTER J
    'K'        # 0xD2 -> LATIN CAPITAL LETTER K
    'L'        # 0xD3 -> LATIN CAPITAL LETTER L
    'M'        # 0xD4 -> LATIN CAPITAL LETTER M
    'N'        # 0xD5 -> LATIN CAPITAL LETTER N
    'O'        # 0xD6 -> LATIN CAPITAL LETTER O
    'P'        # 0xD7 -> LATIN CAPITAL LETTER P
    'Q'        # 0xD8 -> LATIN CAPITAL LETTER Q
    'R'        # 0xD9 -> LATIN CAPITAL LETTER R
    '\xb9'     # 0xDA -> SUPERSCRIPT ONE
    '\xfb'     # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
    '\xfc'     # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
    '\xf9'     # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
    '\xfa'     # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
    '\xff'     # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
    '\\'       # 0xE0 -> REVERSE SOLIDUS
    '\xf7'     # 0xE1 -> DIVISION SIGN
    'S'        # 0xE2 -> LATIN CAPITAL LETTER S
    'T'        # 0xE3 -> LATIN CAPITAL LETTER T
    'U'        # 0xE4 -> LATIN CAPITAL LETTER U
    'V'        # 0xE5 -> LATIN CAPITAL LETTER V
    'W'        # 0xE6 -> LATIN CAPITAL LETTER W
    'X'        # 0xE7 -> LATIN CAPITAL LETTER X
    'Y'        # 0xE8 -> LATIN CAPITAL LETTER Y
    'Z'        # 0xE9 -> LATIN CAPITAL LETTER Z
    '\xb2'     # 0xEA -> SUPERSCRIPT TWO
    '\xd4'     # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    '\xd6'     # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
    '\xd2'     # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
    '\xd3'     # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
    '\xd5'     # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
    '0'        # 0xF0 -> DIGIT ZERO
    '1'        # 0xF1 -> DIGIT ONE
    '2'        # 0xF2 -> DIGIT TWO
    '3'        # 0xF3 -> DIGIT THREE
    '4'        # 0xF4 -> DIGIT FOUR
    '5'        # 0xF5 -> DIGIT FIVE
    '6'        # 0xF6 -> DIGIT SIX
    '7'        # 0xF7 -> DIGIT SEVEN
    '8'        # 0xF8 -> DIGIT EIGHT
    '9'        # 0xF9 -> DIGIT NINE
    '\xb3'     # 0xFA -> SUPERSCRIPT THREE
    '\xdb'     # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
    '\xdc'     # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
    '\xd9'     # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
    '\xda'     # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
    '\x9f'     # 0xFF -> CONTROL
)
### Encoding table

# Inverse of decoding_table: maps Unicode ordinals back to CP1140 byte values.
encoding_table = codecs.charmap_build(decoding_table)
| gpl-3.0 |
unclev/vk.unclev.ru | extensions/sticker.py | 1 | 1937 | # coding: utf-8
# This file is a part of VK4XMPP transport
# © simpleApps, 2015.
import base64
from tempfile import mktemp
from cStringIO import StringIO
# Matches message bodies of the exact form "Sticker: <url>"; group 1 captures
# the URL. NOTE(review): `re` is not imported in this file — presumably the
# transport execs extensions in a prepared namespace; confirm against the host.
sticker_url = re.compile(r"^Sticker\:\s(http[s]?\:\/\/[a-zA-Z0-9\._\/]+)$")

try:
    from PIL import Image
except ImportError:
    # Without PIL we cannot re-encode stickers, so force RGB conversion off.
    logger.warning("sticker: not enabling RGB conversion because PIL is not installed")
    ENABLE_RGB_CONVERSION = False

# Sticker edge size (kept as a string because it is substituted into the URL),
# unless the host configuration already defined one.
if not isdef("STICKER_SIZE"):
    STICKER_SIZE = "128"

# Per-user toggle surfaced in the transport's settings UI; off by default.
GLOBAL_USER_SETTINGS["send_stickers"] = {"label": "Send stickers with XHTML-IM",
    "desc": "If set, transport would send images for stickers instead of URLs (requires client-side support)", "value": 0}
def convertImage(data):
    """Re-encode raw image bytes (typically PNG) as RGB JPEG and return them.

    Writes through a temporary file because PIL's save() targets a path and the
    host's rFile() helper reads one back.

    :param data: raw image bytes as downloaded.
    :returns: the JPEG-encoded bytes (or the original object if rFile fails
        before reassignment).
    """
    import tempfile
    # BUG FIX: mktemp() only *names* a file, which is race-prone (another
    # process can claim the path first). mkstemp() creates it atomically.
    fd, outfile = tempfile.mkstemp()
    os.close(fd)
    try:
        io = StringIO(data)
        image = Image.open(io)
        # RGB_CONVERSION_QUALITY is supplied by the host configuration.
        image.convert("RGB").save(outfile, "JPEG", quality=RGB_CONVERSION_QUALITY)
        data = rFile(outfile)
    finally:
        # Always clean up the temp file, even if conversion raised
        # (the original leaked it on error).
        try:
            os.remove(outfile)
        except Exception:
            crashLog("convertImage")
    return data
def sendSticker(msg, destination, source):
    """Message hook: if the body is a VK sticker URL and the receiving user
    opted in, attach the image inline as an XHTML-IM <img> data URI."""
    text = msg.getBody()
    if not text:
        return
    # Resolve the settings owner: MUC occupants and direct users live in
    # different registries.
    if msg.getType() == "groupchat":
        user = Chat.getUserObject(destination)
    else:
        user = Transport.get(destination)
    if not (user and user.settings.send_stickers):
        return
    match = sticker_url.search(text)
    if not match:
        return
    # Swap the default 256px variant for the configured size.
    url = match.group(1).replace("256b", STICKER_SIZE)
    data = urllib.urlopen(url).read()
    if not data:
        return
    mime = "png"
    if isdef("ENABLE_RGB_CONVERSION") and ENABLE_RGB_CONVERSION:
        data = convertImage(data)
        mime = "jpeg"
    encoded = base64.b64encode(data)
    xhtml = msg.setTag("html", namespace=xmpp.NS_XHTML_IM)
    xbody = xhtml.setTag("body", namespace="http://www.w3.org/1999/xhtml")
    xbody.setTag("br")
    xbody.setTag("img", {"src": "data:image/%s;base64,%s" % (mime, encoded), "alt": "img"})
def initStickerSender():
    """Register the groupchat sticker hook, but only when MUC is enabled."""
    if xmpp.NS_GROUPCHAT not in TransportFeatures:
        return
    registerHandler("msg03g", sendSticker)
# evt01 fires at transport startup: defer the groupchat hook until then
# (it depends on TransportFeatures being populated). Direct messages are
# hooked immediately.
registerHandler("evt01", initStickerSender)
registerHandler("msg03", sendSticker)
vine/buck | third-party/py/pex/pex/pex_bootstrapper.py | 50 | 3196 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import contextlib
import os
import sys
import zipfile
__all__ = ('bootstrap_pex',)
def pex_info_name(entry_point):
    """Return the path of the PEX-INFO file inside *entry_point*."""
    return os.path.join(entry_point, 'PEX-INFO')
def is_compressed(entry_point):
    """True when *entry_point* exists but has no loose PEX-INFO file,
    i.e. it is a zipped pex rather than an unpacked directory."""
    if not os.path.exists(entry_point):
        return False
    return not os.path.exists(pex_info_name(entry_point))
def read_pexinfo_from_directory(entry_point):
    """Read the raw PEX-INFO bytes from an unpacked (directory) pex."""
    with open(pex_info_name(entry_point), 'rb') as stream:
        return stream.read()
def read_pexinfo_from_zip(entry_point):
    """Read the raw PEX-INFO bytes from a zipped pex archive."""
    with zipfile.ZipFile(entry_point) as archive:
        return archive.read('PEX-INFO')
def read_pex_info_content(entry_point):
    """Return the raw content of a PEX-INFO, whether zipped or unpacked."""
    if is_compressed(entry_point):
        return read_pexinfo_from_zip(entry_point)
    return read_pexinfo_from_directory(entry_point)
def get_pex_info(entry_point):
    """Return the PexInfo object for an entry point.

    Raises ValueError when the entry point yields no PEX-INFO content.
    """
    from . import pex_info

    pex_info_content = read_pex_info_content(entry_point)
    if not pex_info_content:
        raise ValueError('Invalid entry_point: %s' % entry_point)
    return pex_info.PexInfo.from_json(pex_info_content)
# TODO(wickman) Remove once resolved (#91):
# https://bitbucket.org/pypa/setuptools/issue/154/build_zipmanifest-results-should-be
def monkeypatch_build_zipmanifest():
    """Wrap pkg_resources.build_zipmanifest with a per-archive memoizer.

    Works around setuptools issue #154 where zip manifests were rebuilt
    on every call; a no-op when the attribute does not exist.
    """
    import pkg_resources
    original = getattr(pkg_resources, 'build_zipmanifest', None)
    if original is None:
        return

    def memoized_build_zipmanifest(archive, memo={}):
        # Deliberate mutable default: the dict is the cross-call cache.
        try:
            return memo[archive]
        except KeyError:
            memo[archive] = original(archive)
            return memo[archive]

    pkg_resources.build_zipmanifest = memoized_build_zipmanifest
def find_in_path(target_interpreter):
    """Resolve *target_interpreter* to an existing path: either as given,
    or by scanning the PATH environment variable. Returns None if absent."""
    if os.path.exists(target_interpreter):
        return target_interpreter
    candidates = (os.path.join(directory, target_interpreter)
                  for directory in os.getenv('PATH', '').split(os.pathsep))
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return None
def maybe_reexec_pex():
    """Re-exec this process under the interpreter named by PEX_PYTHON.

    No-op when PEX_PYTHON is unset or already points at the running
    interpreter; dies when the requested interpreter cannot be found.
    """
    from .variables import ENV
    if not ENV.PEX_PYTHON:
        return

    from .common import die
    from .tracer import TRACER

    target_python = ENV.PEX_PYTHON
    target = find_in_path(target_python)
    if not target:
        # BUG FIX: the original interpolated `target`, which is always None on
        # this path; report the interpreter the user actually asked for.
        die('Failed to find interpreter specified by PEX_PYTHON: %s' % target_python)
    if os.path.exists(target) and os.path.realpath(target) != os.path.realpath(sys.executable):
        TRACER.log('Detected PEX_PYTHON, re-exec to %s' % target)
        # Drop PEX_PYTHON before exec so the child does not re-exec again.
        ENV.delete('PEX_PYTHON')
        os.execve(target, [target_python] + sys.argv, ENV.copy())
def bootstrap_pex(entry_point):
    """Execute the pex at *entry_point* after preparing the runtime.

    Order matters: pkg_resources is patched and the zip import finders are
    installed before a possible PEX_PYTHON re-exec, so the child starts clean.
    """
    from .finders import register_finders
    monkeypatch_build_zipmanifest()
    register_finders()
    maybe_reexec_pex()

    from . import pex
    pex.PEX(entry_point).execute()
def bootstrap_pex_env(entry_point):
    """Bootstrap the current runtime environment using a given pex.

    Unlike bootstrap_pex, this activates the pex's environment in-process
    instead of executing its entry point.
    """
    from .environment import PEXEnvironment
    from .finders import register_finders
    from .pex_info import PexInfo

    monkeypatch_build_zipmanifest()
    register_finders()

    PEXEnvironment(entry_point, PexInfo.from_pex(entry_point)).activate()
| apache-2.0 |
Elandril/SickRage | lib/dateutil/parser.py | 48 | 34280 | # -*- coding:iso-8859-1 -*-
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard Python
datetime module.
"""
from __future__ import unicode_literals
__license__ = "Simplified BSD"
import datetime
import string
import time
import sys
import os
import collections
try:
from io import StringIO
except ImportError:
from io import StringIO
from six import text_type, binary_type, integer_types
from . import relativedelta
from . import tz
__all__ = ["parse", "parserinfo"]
# Some pointers:
#
# http://www.cl.cam.ac.uk/~mgk25/iso-time.html
# http://www.iso.ch/iso/en/prods-services/popstds/datesandtime.html
# http://www.w3.org/TR/NOTE-datetime
# http://ringmaster.arc.nasa.gov/tools/time_formats.html
# http://search.cpan.org/author/MUIR/Time-modules-2003.0211/lib/Time/ParseDate.pm
# http://stein.cshl.org/jade/distrib/docs/java.text.SimpleDateFormat.html
class _timelex(object):
    """Tokenizer splitting a date/time string into lexical units.

    Emits runs of letters, runs of digits (each possibly containing dots),
    and single whitespace/punctuation characters. Pushed-back characters and
    pre-split tokens are buffered on charstack/tokenstack.
    """

    def __init__(self, instream):
        # Accept either a ready file-like object or a plain string.
        if isinstance(instream, text_type):
            instream = StringIO(instream)
        self.instream = instream
        # Letter set: ASCII alphabet plus Latin-1 accented letters; '_'
        # counts as a word character. (The "dfe" ordering is as upstream —
        # harmless, since this is only used for membership tests.)
        self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
                          'ABCDEFGHIJKLMNOPQRSTUVWXYZ_'
                          'ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
                          'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
        self.numchars = '0123456789'
        self.whitespace = ' \t\r\n'
        self.charstack = []    # characters pushed back by the state machine
        self.tokenstack = []   # tokens produced ahead of time (see get_token)
        self.eof = False

    def get_token(self):
        """Return the next token, or None once the input is exhausted."""
        if self.tokenstack:
            return self.tokenstack.pop(0)
        seenletters = False
        token = None
        state = None    # None, 'a' (alpha), '0' (digit), 'a.', '0.'
        wordchars = self.wordchars
        numchars = self.numchars
        whitespace = self.whitespace
        while not self.eof:
            # Prefer characters previously pushed back onto charstack.
            if self.charstack:
                nextchar = self.charstack.pop(0)
            else:
                nextchar = self.instream.read(1)
                # Skip NUL bytes embedded in the stream.
                while nextchar == '\x00':
                    nextchar = self.instream.read(1)
            if not nextchar:
                self.eof = True
                break
            elif not state:
                # First character decides the token class.
                token = nextchar
                if nextchar in wordchars:
                    state = 'a'
                elif nextchar in numchars:
                    state = '0'
                elif nextchar in whitespace:
                    token = ' '
                    break  # emit token
                else:
                    break  # emit token
            elif state == 'a':
                seenletters = True
                if nextchar in wordchars:
                    token += nextchar
                elif nextchar == '.':
                    token += nextchar
                    state = 'a.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token
            elif state == '0':
                if nextchar in numchars:
                    token += nextchar
                elif nextchar == '.':
                    token += nextchar
                    state = '0.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token
            elif state == 'a.':
                seenletters = True
                if nextchar == '.' or nextchar in wordchars:
                    token += nextchar
                elif nextchar in numchars and token[-1] == '.':
                    token += nextchar
                    state = '0.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token
            elif state == '0.':
                if nextchar == '.' or nextchar in numchars:
                    token += nextchar
                elif nextchar in wordchars and token[-1] == '.':
                    token += nextchar
                    state = 'a.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token
        # A dotted token that is not a plain decimal number (letters mixed
        # in, more than one dot, or a trailing dot) is split on the dots;
        # the remaining pieces are queued on tokenstack for later calls.
        if (state in ('a.', '0.') and
            (seenletters or token.count('.') > 1 or token[-1] == '.')):
            l = token.split('.')
            token = l[0]
            for tok in l[1:]:
                self.tokenstack.append('.')
                if tok:
                    self.tokenstack.append(tok)
        return token

    def __iter__(self):
        return self

    def __next__(self):
        token = self.get_token()
        if token is None:
            raise StopIteration
        return token

    def next(self):
        return self.__next__()  # Python 2.x support

    def split(cls, s):
        """Tokenize string *s* completely and return the token list."""
        return list(cls(s))
    split = classmethod(split)
class _resultbase(object):
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def _repr(self, classname):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (classname, ", ".join(l))
def __repr__(self):
return self._repr(self.__class__.__name__)
class parserinfo(object):
    """Vocabulary and policy object consulted by parser.

    Subclass and override the class-level word lists (or the
    dayfirst/yearfirst defaults) to localize or customize parsing.
    """

    # m from a.m/p.m, t from ISO T separator
    JUMP = [" ", ".", ",", ";", "-", "/", "'",
            "at", "on", "and", "ad", "m", "t", "of",
            "st", "nd", "rd", "th"]

    WEEKDAYS = [("Mon", "Monday"),
                ("Tue", "Tuesday"),
                ("Wed", "Wednesday"),
                ("Thu", "Thursday"),
                ("Fri", "Friday"),
                ("Sat", "Saturday"),
                ("Sun", "Sunday")]
    MONTHS = [("Jan", "January"),
              ("Feb", "February"),
              ("Mar", "March"),
              ("Apr", "April"),
              ("May", "May"),
              ("Jun", "June"),
              ("Jul", "July"),
              ("Aug", "August"),
              ("Sep", "Sept", "September"),
              ("Oct", "October"),
              ("Nov", "November"),
              ("Dec", "December")]
    HMS = [("h", "hour", "hours"),
           ("m", "minute", "minutes"),
           ("s", "second", "seconds")]
    AMPM = [("am", "a"),
            ("pm", "p")]
    UTCZONE = ["UTC", "GMT", "Z"]
    PERTAIN = ["of"]
    # Optional mapping of timezone name -> UTC offset in seconds.
    TZOFFSET = {}

    def __init__(self, dayfirst=False, yearfirst=False):
        # Lower-cased lookup tables derived from the class-level lists.
        self._jump = self._convert(self.JUMP)
        self._weekdays = self._convert(self.WEEKDAYS)
        self._months = self._convert(self.MONTHS)
        self._hms = self._convert(self.HMS)
        self._ampm = self._convert(self.AMPM)
        self._utczone = self._convert(self.UTCZONE)
        self._pertain = self._convert(self.PERTAIN)

        self.dayfirst = dayfirst
        self.yearfirst = yearfirst

        # Current year/century, used for expanding two-digit years.
        self._year = time.localtime().tm_year
        self._century = self._year//100*100

    def _convert(self, lst):
        # Map every (lower-cased) spelling of an entry to its index in lst.
        dct = {}
        for i in range(len(lst)):
            v = lst[i]
            if isinstance(v, tuple):
                for v in v:
                    dct[v.lower()] = i
            else:
                dct[v.lower()] = i
        return dct

    def jump(self, name):
        return name.lower() in self._jump

    def weekday(self, name):
        # Require at least 3 characters so short fragments never match.
        if len(name) >= 3:
            try:
                return self._weekdays[name.lower()]
            except KeyError:
                pass
        return None

    def month(self, name):
        # Returns a 1-based month number, or None when unknown.
        if len(name) >= 3:
            try:
                return self._months[name.lower()]+1
            except KeyError:
                pass
        return None

    def hms(self, name):
        # 0 = hour, 1 = minute, 2 = second; None if not an H/M/S marker.
        try:
            return self._hms[name.lower()]
        except KeyError:
            return None

    def ampm(self, name):
        # 0 = am, 1 = pm; None otherwise.
        try:
            return self._ampm[name.lower()]
        except KeyError:
            return None

    def pertain(self, name):
        return name.lower() in self._pertain

    def utczone(self, name):
        return name.lower() in self._utczone

    def tzoffset(self, name):
        # NOTE(review): _utczone keys are lower-cased by _convert, so this
        # membership test is case-sensitive and e.g. 'UTC' falls through to
        # TZOFFSET — upstream behaviour preserved.
        if name in self._utczone:
            return 0
        return self.TZOFFSET.get(name)

    def convertyear(self, year):
        # Expand a two-digit year into the century nearest the current year.
        if year < 100:
            year += self._century
            if abs(year-self._year) >= 50:
                if year < self._year:
                    year += 100
                else:
                    year -= 100
        return year

    def validate(self, res):
        # move to info
        if res.year is not None:
            res.year = self.convertyear(res.year)
        # Normalize UTC spellings: zero offset with no name, or 'Z'.
        if res.tzoffset == 0 and not res.tzname or res.tzname == 'Z':
            res.tzname = "UTC"
            res.tzoffset = 0
        elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
            res.tzoffset = 0
        return True
class parser(object):
    """Date/time string parser driven by a parserinfo vocabulary."""

    def __init__(self, info=None):
        self.info = info or parserinfo()

    def parse(self, timestr, default=None,
              ignoretz=False, tzinfos=None,
              **kwargs):
        """Parse *timestr* into a datetime.

        Fields missing from the string are taken from *default* (today at
        midnight when None). *tzinfos* may be a mapping or callable
        resolving timezone names; extra keyword arguments are forwarded to
        _parse(). With fuzzy_with_tokens=True a (datetime, skipped_tokens)
        tuple is returned instead.
        """
        if not default:
            default = datetime.datetime.now().replace(hour=0, minute=0,
                                                      second=0, microsecond=0)
        res, skipped_tokens = self._parse(timestr, **kwargs)

        if res is None:
            raise ValueError("unknown string format")

        # Overlay every parsed field onto the default datetime.
        repl = {}
        for attr in ["year", "month", "day", "hour",
                     "minute", "second", "microsecond"]:
            value = getattr(res, attr)
            if value is not None:
                repl[attr] = value
        ret = default.replace(**repl)

        # A weekday with no explicit day means "the next such weekday".
        if res.weekday is not None and not res.day:
            ret = ret+relativedelta.relativedelta(weekday=res.weekday)

        if not ignoretz:
            # Precedence: explicit tzinfos override, then local zone names,
            # then a bare numeric offset.
            if isinstance(tzinfos, collections.Callable) or tzinfos and res.tzname in tzinfos:
                if isinstance(tzinfos, collections.Callable):
                    tzdata = tzinfos(res.tzname, res.tzoffset)
                else:
                    tzdata = tzinfos.get(res.tzname)
                if isinstance(tzdata, datetime.tzinfo):
                    tzinfo = tzdata
                elif isinstance(tzdata, text_type):
                    tzinfo = tz.tzstr(tzdata)
                elif isinstance(tzdata, integer_types):
                    tzinfo = tz.tzoffset(res.tzname, tzdata)
                else:
                    raise ValueError("offset must be tzinfo subclass, "
                                     "tz string, or int offset")
                ret = ret.replace(tzinfo=tzinfo)
            elif res.tzname and res.tzname in time.tzname:
                ret = ret.replace(tzinfo=tz.tzlocal())
            elif res.tzoffset == 0:
                ret = ret.replace(tzinfo=tz.tzutc())
            elif res.tzoffset:
                ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))

        if skipped_tokens:
            return ret, skipped_tokens
        return ret

    class _result(_resultbase):
        # Container for the fields recognized by _parse.
        __slots__ = ["year", "month", "day", "weekday",
                     "hour", "minute", "second", "microsecond",
                     "tzname", "tzoffset"]

    def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False,
               fuzzy_with_tokens=False):
        """Tokenize *timestr* and populate a _result.

        Returns (result, skipped_tokens_tuple_or_None), or None for result
        when the string cannot be interpreted.
        """
        if fuzzy_with_tokens:
            fuzzy = True

        info = self.info

        if dayfirst is None:
            dayfirst = info.dayfirst
        if yearfirst is None:
            yearfirst = info.yearfirst

        res = self._result()
        l = _timelex.split(timestr)

        # keep up with the last token skipped so we can recombine
        # consecutively skipped tokens (-2 for when i begins at 0).
        last_skipped_token_i = -2
        skipped_tokens = list()

        try:
            # year/month/day list
            ymd = []

            # Index of the month string in ymd
            mstridx = -1

            len_l = len(l)
            i = 0
            while i < len_l:

                # Check if it's a number
                try:
                    value_repr = l[i]
                    value = float(value_repr)
                except ValueError:
                    value = None

                if value is not None:
                    # Token is a number
                    len_li = len(l[i])
                    i += 1
                    if (len(ymd) == 3 and len_li in (2, 4)
                            and (i >= len_l or (l[i] != ':' and
                                                info.hms(l[i]) is None))):
                        # 19990101T23[59]
                        s = l[i-1]
                        res.hour = int(s[:2])
                        if len_li == 4:
                            res.minute = int(s[2:])
                    elif len_li == 6 or (len_li > 6 and l[i-1].find('.') == 6):
                        # YYMMDD or HHMMSS[.ss]
                        s = l[i-1]
                        if not ymd and l[i-1].find('.') == -1:
                            ymd.append(info.convertyear(int(s[:2])))
                            ymd.append(int(s[2:4]))
                            ymd.append(int(s[4:]))
                        else:
                            # 19990101T235959[.59]
                            res.hour = int(s[:2])
                            res.minute = int(s[2:4])
                            res.second, res.microsecond = _parsems(s[4:])
                    elif len_li == 8:
                        # YYYYMMDD
                        s = l[i-1]
                        ymd.append(int(s[:4]))
                        ymd.append(int(s[4:6]))
                        ymd.append(int(s[6:]))
                    elif len_li in (12, 14):
                        # YYYYMMDDhhmm[ss]
                        s = l[i-1]
                        ymd.append(int(s[:4]))
                        ymd.append(int(s[4:6]))
                        ymd.append(int(s[6:8]))
                        res.hour = int(s[8:10])
                        res.minute = int(s[10:12])
                        if len_li == 14:
                            res.second = int(s[12:])
                    elif ((i < len_l and info.hms(l[i]) is not None) or
                          (i+1 < len_l and l[i] == ' ' and
                           info.hms(l[i+1]) is not None)):
                        # HH[ ]h or MM[ ]m or SS[.ss][ ]s
                        if l[i] == ' ':
                            i += 1
                        idx = info.hms(l[i])
                        while True:
                            if idx == 0:
                                res.hour = int(value)
                                if value%1:
                                    res.minute = int(60*(value%1))
                            elif idx == 1:
                                res.minute = int(value)
                                if value%1:
                                    res.second = int(60*(value%1))
                            elif idx == 2:
                                res.second, res.microsecond = \
                                    _parsems(value_repr)
                            i += 1
                            if i >= len_l or idx == 2:
                                break
                            # 12h00
                            try:
                                value_repr = l[i]
                                value = float(value_repr)
                            except ValueError:
                                break
                            else:
                                i += 1
                                idx += 1
                                if i < len_l:
                                    newidx = info.hms(l[i])
                                    if newidx is not None:
                                        idx = newidx
                    elif i == len_l and l[i-2] == ' ' and info.hms(l[i-3]) is not None:
                        # X h MM or X m SS
                        idx = info.hms(l[i-3]) + 1
                        if idx == 1:
                            res.minute = int(value)
                            if value%1:
                                res.second = int(60*(value%1))
                        elif idx == 2:
                            res.second, res.microsecond = \
                                _parsems(value_repr)
                            i += 1
                    elif i+1 < len_l and l[i] == ':':
                        # HH:MM[:SS[.ss]]
                        res.hour = int(value)
                        i += 1
                        value = float(l[i])
                        res.minute = int(value)
                        if value%1:
                            res.second = int(60*(value%1))
                        i += 1
                        if i < len_l and l[i] == ':':
                            res.second, res.microsecond = _parsems(l[i+1])
                            i += 2
                    elif i < len_l and l[i] in ('-', '/', '.'):
                        sep = l[i]
                        ymd.append(int(value))
                        i += 1
                        if i < len_l and not info.jump(l[i]):
                            try:
                                # 01-01[-01]
                                ymd.append(int(l[i]))
                            except ValueError:
                                # 01-Jan[-01]
                                value = info.month(l[i])
                                if value is not None:
                                    ymd.append(value)
                                    assert mstridx == -1
                                    mstridx = len(ymd)-1
                                else:
                                    return None
                            i += 1
                            if i < len_l and l[i] == sep:
                                # We have three members
                                i += 1
                                value = info.month(l[i])
                                if value is not None:
                                    ymd.append(value)
                                    mstridx = len(ymd)-1
                                    # NOTE(review): this assert follows the
                                    # assignment, so it trips (and is caught
                                    # below, returning None) when a second
                                    # month name appears — upstream behaviour.
                                    assert mstridx == -1
                                else:
                                    ymd.append(int(l[i]))
                                i += 1
                    elif i >= len_l or info.jump(l[i]):
                        if i+1 < len_l and info.ampm(l[i+1]) is not None:
                            # 12 am
                            res.hour = int(value)
                            if res.hour < 12 and info.ampm(l[i+1]) == 1:
                                res.hour += 12
                            elif res.hour == 12 and info.ampm(l[i+1]) == 0:
                                res.hour = 0
                            i += 1
                        else:
                            # Year, month or day
                            ymd.append(int(value))
                        i += 1
                    elif info.ampm(l[i]) is not None:
                        # 12am
                        res.hour = int(value)
                        if res.hour < 12 and info.ampm(l[i]) == 1:
                            res.hour += 12
                        elif res.hour == 12 and info.ampm(l[i]) == 0:
                            res.hour = 0
                        i += 1
                    elif not fuzzy:
                        return None
                    else:
                        i += 1
                    continue

                # Check weekday
                value = info.weekday(l[i])
                if value is not None:
                    res.weekday = value
                    i += 1
                    continue

                # Check month name
                value = info.month(l[i])
                if value is not None:
                    ymd.append(value)
                    assert mstridx == -1
                    mstridx = len(ymd)-1
                    i += 1
                    if i < len_l:
                        if l[i] in ('-', '/'):
                            # Jan-01[-99]
                            sep = l[i]
                            i += 1
                            ymd.append(int(l[i]))
                            i += 1
                            if i < len_l and l[i] == sep:
                                # Jan-01-99
                                i += 1
                                ymd.append(int(l[i]))
                                i += 1
                        elif (i+3 < len_l and l[i] == l[i+2] == ' '
                              and info.pertain(l[i+1])):
                            # Jan of 01
                            # In this case, 01 is clearly year
                            try:
                                value = int(l[i+3])
                            except ValueError:
                                # Wrong guess
                                pass
                            else:
                                # Convert it here to become unambiguous
                                ymd.append(info.convertyear(value))
                            i += 4
                    continue

                # Check am/pm
                value = info.ampm(l[i])
                if value is not None:
                    if value == 1 and res.hour < 12:
                        res.hour += 12
                    elif value == 0 and res.hour == 12:
                        res.hour = 0
                    i += 1
                    continue

                # Check for a timezone name
                if (res.hour is not None and len(l[i]) <= 5 and
                        res.tzname is None and res.tzoffset is None and
                        not [x for x in l[i] if x not in string.ascii_uppercase]):
                    res.tzname = l[i]
                    res.tzoffset = info.tzoffset(res.tzname)
                    i += 1

                    # Check for something like GMT+3, or BRST+3. Notice
                    # that it doesn't mean "I am 3 hours after GMT", but
                    # "my time +3 is GMT". If found, we reverse the
                    # logic so that timezone parsing code will get it
                    # right.
                    if i < len_l and l[i] in ('+', '-'):
                        l[i] = ('+', '-')[l[i] == '+']
                        res.tzoffset = None
                        if info.utczone(res.tzname):
                            # With something like GMT+3, the timezone
                            # is *not* GMT.
                            res.tzname = None
                    continue

                # Check for a numbered timezone
                if res.hour is not None and l[i] in ('+', '-'):
                    signal = (-1, 1)[l[i] == '+']
                    i += 1
                    len_li = len(l[i])
                    if len_li == 4:
                        # -0300
                        res.tzoffset = int(l[i][:2])*3600+int(l[i][2:])*60
                    elif i+1 < len_l and l[i+1] == ':':
                        # -03:00
                        res.tzoffset = int(l[i])*3600+int(l[i+2])*60
                        i += 2
                    elif len_li <= 2:
                        # -[0]3
                        res.tzoffset = int(l[i][:2])*3600
                    else:
                        return None
                    i += 1

                    res.tzoffset *= signal

                    # Look for a timezone name between parenthesis
                    if (i+3 < len_l and
                            info.jump(l[i]) and l[i+1] == '(' and l[i+3] == ')' and
                            3 <= len(l[i+2]) <= 5 and
                            not [x for x in l[i+2]
                                 if x not in string.ascii_uppercase]):
                        # -0300 (BRST)
                        res.tzname = l[i+2]
                        i += 4
                    continue

                # Check jumps
                if not (info.jump(l[i]) or fuzzy):
                    return None

                if last_skipped_token_i == i - 1:
                    # recombine the tokens
                    skipped_tokens[-1] += l[i]
                else:
                    # just append
                    skipped_tokens.append(l[i])
                last_skipped_token_i = i
                i += 1

            # Process year/month/day
            len_ymd = len(ymd)
            if len_ymd > 3:
                # More than three members!?
                return None
            elif len_ymd == 1 or (mstridx != -1 and len_ymd == 2):
                # One member, or two members with a month string
                if mstridx != -1:
                    res.month = ymd[mstridx]
                    del ymd[mstridx]
                if len_ymd > 1 or mstridx == -1:
                    if ymd[0] > 31:
                        res.year = ymd[0]
                    else:
                        res.day = ymd[0]
            elif len_ymd == 2:
                # Two members with numbers
                if ymd[0] > 31:
                    # 99-01
                    res.year, res.month = ymd
                elif ymd[1] > 31:
                    # 01-99
                    res.month, res.year = ymd
                elif dayfirst and ymd[1] <= 12:
                    # 13-01
                    res.day, res.month = ymd
                else:
                    # 01-13
                    res.month, res.day = ymd
            if len_ymd == 3:
                # Three members
                if mstridx == 0:
                    res.month, res.day, res.year = ymd
                elif mstridx == 1:
                    if ymd[0] > 31 or (yearfirst and ymd[2] <= 31):
                        # 99-Jan-01
                        res.year, res.month, res.day = ymd
                    else:
                        # 01-Jan-01
                        # Give precedence to day-first, since
                        # two-digit years is usually hand-written.
                        res.day, res.month, res.year = ymd
                elif mstridx == 2:
                    # WTF!?
                    if ymd[1] > 31:
                        # 01-99-Jan
                        res.day, res.year, res.month = ymd
                    else:
                        # 99-01-Jan
                        res.year, res.day, res.month = ymd
                else:
                    if ymd[0] > 31 or \
                       (yearfirst and ymd[1] <= 12 and ymd[2] <= 31):
                        # 99-01-01
                        res.year, res.month, res.day = ymd
                    elif ymd[0] > 12 or (dayfirst and ymd[1] <= 12):
                        # 13-01-01
                        res.day, res.month, res.year = ymd
                    else:
                        # 01-13-01
                        res.month, res.day, res.year = ymd

        except (IndexError, ValueError, AssertionError):
            return None

        if not info.validate(res):
            return None

        if fuzzy_with_tokens:
            return res, tuple(skipped_tokens)

        return res, None
# Shared module-level parser instance, reused by parse() whenever the caller
# does not supply a custom parserinfo.
DEFAULTPARSER = parser()


def parse(timestr, parserinfo=None, **kwargs):
    """Parse *timestr* into a datetime.

    When *parserinfo* is given, a fresh ``parser`` wrapping it is used;
    otherwise the shared module-level ``DEFAULTPARSER`` handles the call.
    Remaining keyword arguments are forwarded to ``parser.parse``.
    """
    # Python 2.x support: datetimes return their string presentation as
    # bytes in 2.x and unicode in 3.x, so it's reasonable to expect that
    # the parser will get both kinds. Internally we use unicode only.
    if isinstance(timestr, binary_type):
        timestr = timestr.decode()
    chosen = parser(parserinfo) if parserinfo else DEFAULTPARSER
    return chosen.parse(timestr, **kwargs)
class _tzparser(object):
    """Parser for POSIX-style ``TZ`` environment variable strings such as
    ``EST5EDT,M3.2.0/2,M11.1.0/2`` or ``GMT0BST,3,0,30,3600,10,0,26,7200``.

    ``parse`` returns a ``_result`` carrying the standard/DST abbreviations,
    their offsets (in seconds), and the DST start/end rules, or ``None`` on
    any malformed input.
    """

    class _result(_resultbase):
        # Slots: std/dst abbreviations, their offsets, and the two
        # transition-rule attribute bundles (start and end of DST).
        __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
                     "start", "end"]

        class _attr(_resultbase):
            # One DST transition rule: month/week/weekday form, a
            # year-day form (yday 0-based, jyday 1-based skipping Feb 29),
            # a plain day-of-month, plus the transition time in seconds.
            __slots__ = ["month", "week", "weekday",
                         "yday", "jyday", "day", "time"]

        def __repr__(self):
            return self._repr("")

        def __init__(self):
            _resultbase.__init__(self)
            self.start = self._attr()
            self.end = self._attr()

    def parse(self, tzstr):
        """Tokenize *tzstr* and walk the token list with a manual cursor.

        Any IndexError/ValueError/AssertionError during the walk means the
        string is malformed and ``None`` is returned.
        """
        res = self._result()
        l = _timelex.split(tzstr)
        try:
            len_l = len(l)
            i = 0
            # Phase 1: abbreviations and their offsets, e.g. BRST+3[BRDT[+2]]
            while i < len_l:
                # BRST+3[BRDT[+2]]
                j = i
                # Consume alphabetic tokens (the abbreviation proper).
                while j < len_l and not [x for x in l[j]
                                         if x in "0123456789:,-+"]:
                    j += 1
                if j != i:
                    # First abbreviation is standard time, second is DST.
                    if not res.stdabbr:
                        offattr = "stdoffset"
                        res.stdabbr = "".join(l[i:j])
                    else:
                        offattr = "dstoffset"
                        res.dstabbr = "".join(l[i:j])
                    i = j
                    if (i < len_l and
                            (l[i] in ('+', '-') or l[i][0] in "0123456789")):
                        if l[i] in ('+', '-'):
                            # Yes, that's right. See the TZ variable
                            # documentation.
                            signal = (1, -1)[l[i] == '+']
                            i += 1
                        else:
                            signal = -1
                        len_li = len(l[i])
                        if len_li == 4:
                            # -0300
                            setattr(res, offattr,
                                    (int(l[i][:2])*3600+int(l[i][2:])*60)*signal)
                        elif i+1 < len_l and l[i+1] == ':':
                            # -03:00
                            setattr(res, offattr,
                                    (int(l[i])*3600+int(l[i+2])*60)*signal)
                            i += 2
                        elif len_li <= 2:
                            # -[0]3
                            setattr(res, offattr,
                                    int(l[i][:2])*3600*signal)
                        else:
                            return None
                        i += 1
                    if res.dstabbr:
                        break
                else:
                    break
            # Phase 2: optional DST transition rules after a comma.
            if i < len_l:
                # Normalize ';' separators to ',' before the format checks.
                for j in range(i, len_l):
                    if l[j] == ';': l[j] = ','
                assert l[i] == ','
                i += 1
            if i >= len_l:
                pass
            elif (8 <= l.count(',') <= 9 and
                  not [y for x in l[i:] if x != ','
                       for y in x if y not in "0123456789"]):
                # GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
                for x in (res.start, res.end):
                    x.month = int(l[i])
                    i += 2
                    if l[i] == '-':
                        value = int(l[i+1])*-1
                        i += 1
                    else:
                        value = int(l[i])
                    i += 2
                    if value:
                        # Non-zero week selects week/weekday form;
                        # weekday is converted to a 0-based Monday start.
                        x.week = value
                        x.weekday = (int(l[i])-1)%7
                    else:
                        x.day = int(l[i])
                    i += 2
                    x.time = int(l[i])
                    i += 2
                if i < len_l:
                    if l[i] in ('-', '+'):
                        signal = (-1, 1)[l[i] == "+"]
                        i += 1
                    else:
                        signal = 1
                    res.dstoffset = (res.stdoffset+int(l[i]))*signal
            elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
                  not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
                                                     '.', '-', ':')
                       for y in x if y not in "0123456789"]):
                # POSIX rule forms: Jn (1-based year day), Mm.w.d, or n.
                for x in (res.start, res.end):
                    if l[i] == 'J':
                        # non-leap year day (1 based)
                        i += 1
                        x.jyday = int(l[i])
                    elif l[i] == 'M':
                        # month[-.]week[-.]weekday
                        i += 1
                        x.month = int(l[i])
                        i += 1
                        assert l[i] in ('-', '.')
                        i += 1
                        x.week = int(l[i])
                        if x.week == 5:
                            # Week 5 means "last <weekday> of the month".
                            x.week = -1
                        i += 1
                        assert l[i] in ('-', '.')
                        i += 1
                        x.weekday = (int(l[i])-1)%7
                    else:
                        # year day (zero based)
                        x.yday = int(l[i])+1
                    i += 1
                    # Optional "/time" suffix giving the transition time.
                    if i < len_l and l[i] == '/':
                        i += 1
                        # start time
                        len_li = len(l[i])
                        if len_li == 4:
                            # -0300
                            x.time = (int(l[i][:2])*3600+int(l[i][2:])*60)
                        elif i+1 < len_l and l[i+1] == ':':
                            # -03:00
                            x.time = int(l[i])*3600+int(l[i+2])*60
                            i += 2
                            if i+1 < len_l and l[i+1] == ':':
                                i += 2
                                x.time += int(l[i])
                        elif len_li <= 2:
                            # -[0]3
                            x.time = (int(l[i][:2])*3600)
                        else:
                            return None
                        i += 1
                    assert i == len_l or l[i] == ','
                    i += 1
                assert i >= len_l
        except (IndexError, ValueError, AssertionError):
            return None
        return res
# Shared module-level TZ-string parser used by _parsetz().
DEFAULTTZPARSER = _tzparser()
def _parsetz(tzstr):
    """Parse a POSIX TZ-style string with the shared ``_tzparser``."""
    return DEFAULTTZPARSER.parse(tzstr)
def _parsems(value):
"""Parse a I[.F] seconds value into (seconds, microseconds)."""
if "." not in value:
return int(value), 0
else:
i, f = value.split(".")
return int(i), int(f.ljust(6, "0")[:6])
# vim:ts=4:sw=4:et
| gpl-3.0 |
tsufiev/horizon | openstack_dashboard/dashboards/project/access_and_security/keypairs/views.py | 2 | 4176 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing keypairs.
"""
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django import http
from django.template.defaultfilters import slugify # noqa
from django.utils.translation import ugettext_lazy as _
from django.views.generic import View # noqa
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from horizon import views
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.access_and_security.keypairs \
import forms as project_forms
class CreateView(forms.ModalFormView):
    """Modal form view for generating a brand-new key pair."""
    form_class = project_forms.CreateKeypair
    template_name = 'project/access_and_security/keypairs/create.html'
    success_url = 'horizon:project:access_and_security:keypairs:download'
    page_title = _("Create Key Pair")

    def get_success_url(self):
        # After creation, send the user straight to the download page for
        # the key pair they just named in the form.
        keypair_name = self.request.POST['name']
        return reverse(self.success_url,
                       kwargs={"keypair_name": keypair_name})
class ImportView(forms.ModalFormView):
    """Modal form view for importing an externally generated public key."""
    form_class = project_forms.ImportKeypair
    template_name = 'project/access_and_security/keypairs/import.html'
    success_url = reverse_lazy('horizon:project:access_and_security:index')
    page_title = _("Import Key Pair")
    def get_object_id(self, keypair):
        # Key pairs have no numeric id; the name is the unique identifier.
        return keypair.name
class DetailView(views.HorizonTemplateView):
    """Read-only detail page for a single key pair."""
    template_name = 'project/access_and_security/keypairs/detail.html'
    page_title = _("Key Pair Details")
    @memoized.memoized_method
    def _get_data(self):
        # Fetch the key pair named in the URL kwargs; memoized so repeated
        # context lookups hit the Nova API at most once per request.
        try:
            keypair = api.nova.keypair_get(self.request,
                                           self.kwargs['keypair_name'])
        except Exception:
            redirect = reverse('horizon:project:access_and_security:index')
            msg = _('Unable to retrieve details for keypair "%s".')\
                % (self.kwargs['keypair_name'])
            # NOTE(review): `keypair` is unbound here if the lookup failed;
            # presumably exceptions.handle(redirect=...) aborts the request
            # before the return below — confirm against horizon.exceptions.
            exceptions.handle(self.request, msg,
                              redirect=redirect)
        return keypair
    def get_context_data(self, **kwargs):
        """Gets the context data for keypair."""
        context = super(DetailView, self).get_context_data(**kwargs)
        context['keypair'] = self._get_data()
        return context
class DownloadView(views.HorizonTemplateView):
    """Page that offers the newly generated private key for download."""
    template_name = 'project/access_and_security/keypairs/download.html'
    page_title = _("Download Key Pair")

    def get_context_data(self, keypair_name=None):
        # The template only needs the key pair's name.
        return dict(keypair_name=keypair_name)
class GenerateView(View):
    """Create (or regenerate) a key pair and stream its private key
    back as a ``.pem`` attachment."""
    def get(self, request, keypair_name=None, optional=None):
        try:
            # "regenerate" deletes the existing key pair of the same name
            # first, so keypair_create below issues a fresh key.
            if optional == "regenerate":
                api.nova.keypair_delete(request, keypair_name)
            keypair = api.nova.keypair_create(request, keypair_name)
        except Exception:
            redirect = reverse('horizon:project:access_and_security:index')
            # NOTE(review): the message contains a %(exc)s placeholder but no
            # mapping is passed here — presumably exceptions.handle fills it
            # in from the active exception; confirm against horizon.exceptions.
            exceptions.handle(self.request,
                              _('Unable to create key pair: %(exc)s'),
                              redirect=redirect)
        # Serve the private key as a downloadable PEM file named after the
        # (slugified) key pair.
        response = http.HttpResponse(content_type='application/binary')
        response['Content-Disposition'] = ('attachment; filename=%s.pem'
                                           % slugify(keypair.name))
        response.write(keypair.private_key)
        response['Content-Length'] = str(len(response.content))
        return response
| apache-2.0 |
wlamond/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves int the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <g.louppe@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50       # Number of iterations for computing expectations
n_train = 50        # Size of the training set
n_test = 1000       # Size of the test set
noise = 0.1         # Standard deviation of the noise
# Fixed seed so every run draws the same datasets (reproducible figures).
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
              ("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
    """Ground-truth regression target: a mixture of two Gaussian bumps."""
    flat = x.ravel()
    return np.exp(-flat ** 2) + 1.5 * np.exp(-(flat - 2) ** 2)


def generate(n_samples, noise, n_repeat=1):
    """Draw a random dataset ``(X, y)`` from ``f`` plus Gaussian noise.

    ``X`` is sorted and returned with shape ``(n_samples, 1)``.  ``y`` has
    shape ``(n_samples,)`` when ``n_repeat == 1``, otherwise
    ``(n_samples, n_repeat)`` with fresh noise drawn per repetition.
    """
    X = np.sort(np.random.rand(n_samples) * 10 - 5)
    if n_repeat == 1:
        y = f(X) + np.random.normal(0.0, noise, n_samples)
    else:
        y = np.zeros((n_samples, n_repeat))
        for rep in range(n_repeat):
            y[:, rep] = f(X) + np.random.normal(0.0, noise, n_samples)
    return X.reshape((n_samples, 1)), y
# Draw n_repeat independent training sets and one large shared test set
# (the test set keeps n_repeat noisy copies of y for the noise estimate).
X_train = []
y_train = []
for i in range(n_repeat):
    X, y = generate(n_samples=n_train, noise=noise)
    X_train.append(X)
    y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
    # Compute predictions: one column per training set the model was fit on.
    y_predict = np.zeros((n_test, n_repeat))
    for i in range(n_repeat):
        estimator.fit(X_train[i], y_train[i])
        y_predict[:, i] = estimator.predict(X_test)
    # Bias^2 + Variance + Noise decomposition of the mean squared error
    # (averaged over all pairs of noisy targets and fitted predictors).
    y_error = np.zeros(n_test)
    for i in range(n_repeat):
        for j in range(n_repeat):
            y_error += (y_test[:, j] - y_predict[:, i]) ** 2
    y_error /= (n_repeat * n_repeat)
    y_noise = np.var(y_test, axis=1)
    y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
    y_var = np.var(y_predict, axis=1)
    print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
          " + {3:.4f} (var) + {4:.4f} (noise)".format(name,
                                                      np.mean(y_error),
                                                      np.mean(y_bias),
                                                      np.mean(y_var),
                                                      np.mean(y_noise)))
    # Plot figures
    # Top row: the true function, one training set, and the beam of
    # individual predictions plus their average.
    plt.subplot(2, n_estimators, n + 1)
    plt.plot(X_test, f(X_test), "b", label="$f(x)$")
    plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
    for i in range(n_repeat):
        if i == 0:
            plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
        else:
            plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
    plt.plot(X_test, np.mean(y_predict, axis=1), "c",
             label="$\mathbb{E}_{LS} \^y(x)$")
    plt.xlim([-5, 5])
    plt.title(name)
    if n == 0:
        plt.legend(loc="upper left", prop={"size": 11})
    # Bottom row: pointwise decomposition of the expected squared error.
    plt.subplot(2, n_estimators, n_estimators + n + 1)
    plt.plot(X_test, y_error, "r", label="$error(x)$")
    plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
    plt.plot(X_test, y_var, "g", label="$variance(x)$"),
    plt.plot(X_test, y_noise, "c", label="$noise(x)$")
    plt.xlim([-5, 5])
    plt.ylim([0, 0.1])
    if n == 0:
        plt.legend(loc="upper left", prop={"size": 11})
plt.show()
| bsd-3-clause |
airbnb/knowledge-repo | knowledge_repo/app/routes/index.py | 1 | 11569 | """ Define the routes that show all the posts.
This includes:
- /feed
- /cluster
- /table
- /favorites
"""
import os
import json
from collections import namedtuple
from flask import request, render_template, redirect, Blueprint, current_app, make_response
from flask_login import login_required
from sqlalchemy import case, desc
from .. import permissions
from ..proxies import db_session, current_repo
from ..utils.posts import get_posts
from ..models import Post, Tag, User, PageView
from ..utils.requests import from_request_get_feed_params
from ..utils.render import render_post_tldr
blueprint = Blueprint(
'index', __name__, template_folder='../templates', static_folder='../static')
def has_no_empty_params(rule):
    """Return True when every URL argument of *rule* has a default, i.e.
    the rule could be reversed without supplying any parameters."""
    defaults = () if rule.defaults is None else rule.defaults
    arguments = () if rule.arguments is None else rule.arguments
    return len(defaults) >= len(arguments)
@blueprint.route("/site-map")
@PageView.logged
def site_map():
links = []
for rule in current_app.url_map.iter_rules():
# Filter out rules we can't navigate to in a browser
# and rules that require parameters
# if "GET" in rule.methods and has_no_empty_params(rule):
# url = url_for(rule.endpoint, **(rule.defaults or {}))
links.append((str(rule), rule.endpoint))
# links is now a list of url, endpoint tuples
return '<br />'.join(str(link) for link in links)
@blueprint.route('/')
@PageView.logged
def render_index():
    """The root URL simply forwards to the knowledge feed."""
    return redirect('/feed')
@blueprint.route('/favorites')
@PageView.logged
@login_required
def render_favorites():
    """ Renders the index-feed view for posts that are liked """
    feed_params = from_request_get_feed_params(request)
    user_id = feed_params['user_id']
    # Look up the current user so we can read their liked posts.
    user = (db_session.query(User)
            .filter(User.id == user_id)
            .first())
    posts = user.liked_posts
    # Per-post engagement stats, keyed by post path for the template.
    post_stats = {post.path: {'all_views': post.view_count,
                              'distinct_views': post.view_user_count,
                              'total_likes': post.vote_count,
                              'total_comments': post.comment_count} for post in posts}
    return render_template("index-feed.html",
                           feed_params=feed_params,
                           posts=posts,
                           post_stats=post_stats,
                           top_header='Favorites')
@blueprint.route('/feed')
@PageView.logged
@permissions.index_view.require()
def render_feed():
    """ Renders the index-feed view """
    feed_params = from_request_get_feed_params(request)
    posts, post_stats = get_posts(feed_params)
    # Attach a rendered TL;DR snippet to each post for the feed cards.
    for post in posts:
        post.tldr = render_post_tldr(post)
    return render_template("index-feed.html",
                           feed_params=feed_params,
                           posts=posts,
                           post_stats=post_stats,
                           top_header='Knowledge Feed')
@blueprint.route('/table')
@PageView.logged
@permissions.index_view.require()
def render_table():
    """Renders the index-table view"""
    # Same feed parameters / post query as the feed view, tabular template.
    feed_params = from_request_get_feed_params(request)
    posts, post_stats = get_posts(feed_params)
    # TODO reference stats inside the template
    return render_template("index-table.html",
                           posts=posts,
                           post_stats=post_stats,
                           top_header="Knowledge Table",
                           feed_params=feed_params)
@blueprint.route('/cluster')
@PageView.logged
@permissions.index_view.require()
def render_cluster():
    """Render the cluster view: posts grouped into a tree by folder,
    author, or tag (``group_by``), each group sorted alphabetically or by
    child count (``sort_by``)."""
    # we don't use the from_request_get_feed_params because some of the
    # defaults are different
    filters = request.args.get('filters', '')
    sort_by = request.args.get('sort_by', 'alpha')
    group_by = request.args.get('group_by', 'folder')
    request_tag = request.args.get('tag')
    sort_desc = not bool(request.args.get('sort_asc', ''))
    excluded_tags = current_app.config.get('EXCLUDED_TAGS', [])
    # Base query: published posts that carry no excluded tag.
    post_query = (db_session.query(Post)
                  .filter(Post.is_published)
                  .filter(~Post.tags.any(Tag.name.in_(excluded_tags))))
    if filters:
        # Each space-separated filter term must appear in the post keywords.
        filter_set = filters.split(" ")
        for elem in filter_set:
            elem_regexp = "%," + elem + ",%"
            post_query = post_query.filter(Post.keywords.like(elem_regexp))
    # Tree node: a post leaf (is_post=True, content=Post) or a group
    # (is_post=False, content=list of child ClusterPosts).
    ClusterPost = namedtuple(
        'ClusterPost',
        ['name', 'is_post', 'children_count', 'content']
    )
    if group_by == "author":
        author_to_posts = {}
        authors = (db_session.query(User).all())
        for author in authors:
            author_posts = [
                ClusterPost(name=post.title, is_post=True,
                            children_count=0, content=post)
                for post in author.posts
                if post.is_published and not post.contains_excluded_tag
            ]
            if author_posts:
                author_to_posts[author.format_name] = author_posts
        grouped_data = [
            ClusterPost(name=k, is_post=False,
                        children_count=len(v), content=v)
            for (k, v) in author_to_posts.items()
        ]
    elif group_by == "tags":
        tags_to_posts = {}
        all_tags = (db_session.query(Tag)
                    .filter(~Tag.name.in_(excluded_tags))
                    .all())
        for tag in all_tags:
            tag_posts = [
                ClusterPost(name=post.title, is_post=True,
                            children_count=0, content=post)
                for post in tag.posts
                if post.is_published and not post.contains_excluded_tag
            ]
            if tag_posts:
                tags_to_posts[tag.name] = tag_posts
        grouped_data = [
            ClusterPost(name=k, is_post=False,
                        children_count=len(v), content=v)
            for (k, v) in tags_to_posts.items()
        ]
    elif group_by == "folder":
        posts = post_query.all()
        # group by folder
        # Build a nested dict mirroring the path hierarchy; leaves are Posts.
        folder_to_posts = {}
        for post in posts:
            folder_hierarchy = post.path.split('/')
            cursor = folder_to_posts
            for folder in folder_hierarchy[:-1]:
                if folder not in cursor:
                    cursor[folder] = {}
                cursor = cursor[folder]
            cursor[folder_hierarchy[-1]] = post
        def unpack(d):
            """
            Recursively convert the nested folder dict into ClusterPost
            nodes, returning (total leaf count, children list).
            """
            children = []
            count = 0
            for k, v in d.items():
                if isinstance(v, dict):
                    l, contents = unpack(v)
                    count += l
                    children.append(
                        ClusterPost(name=k, is_post=False,
                                    children_count=l, content=contents)
                    )
                else:
                    count += 1
                    children.append(
                        ClusterPost(name=k, is_post=True,
                                    children_count=0, content=v)
                    )
            return count, children
        _, grouped_data = unpack(folder_to_posts)
    else:
        raise ValueError("Group by `{}` not understood.".format(group_by))
    def rec_sort(content, sort_by):
        # Recursively sort the tree; groups are listed before posts at each
        # level. "alpha" sorts by name (always ascending); any other value
        # sorts by children_count honouring sort_desc.
        sorted_content = []
        for c in content:
            if c.is_post:
                sorted_content.append(c)
            else:
                sorted_content.append(ClusterPost(
                    name=c.name,
                    is_post=c.is_post,
                    children_count=c.children_count,
                    content=rec_sort(c.content, sort_by)
                ))
        # put folders above posts
        clusters = [c for c in sorted_content if not c.is_post]
        posts = [c for c in sorted_content if c.is_post]
        if sort_by == "alpha":
            return (
                sorted(clusters, key=lambda x: x.name) +
                sorted(posts, key=lambda x: x.name)
            )
        else:
            return (
                sorted(clusters, key=lambda x: x.children_count, reverse=sort_desc) +
                sorted(posts, key=lambda x: x.children_count, reverse=sort_desc)
            )
    grouped_data = rec_sort(grouped_data, sort_by)
    return render_template("index-cluster.html",
                           grouped_data=grouped_data,
                           filters=filters,
                           sort_by=sort_by,
                           group_by=group_by,
                           tag=request_tag)
@blueprint.route('/create')
@blueprint.route('/create/<knowledge_format>')
@PageView.logged
@permissions.post_view.require()
def create(knowledge_format=None):
    """Render the create-knowledge view, or serve a post template.

    Without a format, render the interactive "create" page.  With a format
    (e.g. ``ipynb``), send the matching ``knowledge_template.<format>`` file
    from the templates directory as a download attachment.
    """
    if knowledge_format is None:
        return render_template("create-knowledge.html",
                               web_editor_enabled=current_app.config['WEB_EDITOR_PREFIXES'] != [])
    cur_dir = os.path.dirname(os.path.realpath(__file__))
    knowledge_template = "knowledge_template.{}".format(knowledge_format)
    filename = os.path.join(cur_dir, '../../templates', knowledge_template)
    # Use a context manager so the template file handle is always closed
    # (previously the handle from open() was never closed explicitly).
    with open(filename) as template_file:
        response = make_response(template_file.read())
    response.headers["Content-Disposition"] = "attachment; filename=" + knowledge_template
    return response
@blueprint.route('/ajax/index/typeahead', methods=['GET', 'POST'])
def ajax_post_typeahead():
    """Return up to five published posts best matching the search terms,
    as a JSON list of {author, title, path, keywords} entries."""
    if not permissions.index_view.can():
        return '[]'
    # `search` is a single space-separated string of search terms.
    search_terms = request.args.get('search', '')
    search_terms = search_terms.split(" ")
    # Score each post by how many of the terms its keywords contain:
    # one CASE expression per term, summed into match_score.
    case_statements = []
    for term in search_terms:
        case_stmt = case([(Post.keywords.ilike('%' + term.strip() + '%'), 1)], else_=0)
        case_statements += [case_stmt]
    match_score = sum(case_statements).label("match_score")
    posts = (db_session.query(Post, match_score)
             .filter(Post.status == current_repo.PostStatus.PUBLISHED.value)
             .order_by(desc(match_score))
             .limit(5)
             .all())
    matches = []
    for (post, count) in posts:
        authors_str = [author.format_name for author in post.authors]
        typeahead_entry = {'author': authors_str,
                           'title': str(post.title),
                           'path': str(post.path),
                           'keywords': str(post.keywords)}
        matches += [typeahead_entry]
    return json.dumps(matches)
@blueprint.route('/ajax/index/typeahead_tags')
@blueprint.route('/ajax_tags_typeahead', methods=['GET'])
def generate_tags_typeahead():
    """Return a JSON array of all tag names for typeahead widgets."""
    if not permissions.index_view.can():
        return '[]'
    return json.dumps([name for (name,) in db_session.query(Tag.name).all()])
@blueprint.route('/ajax/index/typeahead_users')
@blueprint.route('/ajax_users_typeahead', methods=['GET'])
def generate_users_typeahead():
    """Return a JSON array of all user identifiers for typeahead widgets."""
    if not permissions.index_view.can():
        return '[]'
    return json.dumps([ident for (ident,) in db_session.query(User.identifier).all()])
@blueprint.route('/ajax/index/typeahead_paths')
@blueprint.route('/ajax_paths_typeahead', methods=['GET'])
def generate_projects_typeahead():
    """Return a JSON array of distinct directory stubs across repositories."""
    if not permissions.index_view.can():
        return '[]'
    # Drop the final path component of every post path and de-duplicate.
    stubs = {'/'.join(path.split('/')[:-1]) for path in current_repo.dir()}
    return json.dumps(list(stubs))
| apache-2.0 |
vialectrum/vialectrum-server | src/deserialize.py | 19 | 14027 | # this code comes from ABE. it can probably be simplified
#
#
import mmap
import string
import struct
import types
from utils import hash_160_to_pubkey_address, hash_160_to_script_address, public_key_to_pubkey_address, hash_encode,\
hash_160
class SerializationError(Exception):
    """Thrown when there's a problem deserializing or serializing.

    Raised by BCDataStream on reads past the end of the buffer, on
    deserializing before any data was written, and on negative sizes.
    """
class BCDataStream(object):
    """Workalike python implementation of Bitcoin's CDataStream class.

    NOTE: this is Python 2 code — the buffer is a byte *string* (or mmap)
    and single bytes are handled via ord()/chr().
    """
    def __init__(self):
        self.input = None       # byte buffer (str or mmap) being parsed
        self.read_cursor = 0    # current read offset into self.input
    def clear(self):
        self.input = None
        self.read_cursor = 0
    def write(self, bytes): # Initialize with string of bytes
        if self.input is None:
            self.input = bytes
        else:
            self.input += bytes
    def map_file(self, file, start): # Initialize with bytes from file
        self.input = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ)
        self.read_cursor = start
    def seek_file(self, position):
        self.read_cursor = position
    def close_file(self):
        self.input.close()
    def read_string(self):
        # Strings are encoded depending on length:
        # 0 to 252 : 1-byte-length followed by bytes (if any)
        # 253 to 65,535 : byte'253' 2-byte-length followed by bytes
        # 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
        # ... and the Bitcoin client is coded to understand:
        # greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
        # ... but I don't think it actually handles any strings that big.
        if self.input is None:
            raise SerializationError("call write(bytes) before trying to deserialize")
        try:
            length = self.read_compact_size()
        except IndexError:
            raise SerializationError("attempt to read past end of buffer")
        return self.read_bytes(length)
    def write_string(self, string):
        # Length-encoded as with read-string
        self.write_compact_size(len(string))
        self.write(string)
    def read_bytes(self, length):
        """Return the next *length* bytes and advance the cursor."""
        try:
            result = self.input[self.read_cursor:self.read_cursor+length]
            self.read_cursor += length
            return result
        except IndexError:
            raise SerializationError("attempt to read past end of buffer")
        # NOTE(review): unreachable — the try block always returns or raises.
        return ''
    def read_boolean(self):
        return self.read_bytes(1)[0] != chr(0)
    def read_int16(self):
        return self._read_num('<h')
    def read_uint16(self):
        return self._read_num('<H')
    def read_int32(self):
        return self._read_num('<i')
    def read_uint32(self):
        return self._read_num('<I')
    def read_int64(self):
        return self._read_num('<q')
    def read_uint64(self):
        return self._read_num('<Q')
    def write_boolean(self, val):
        return self.write(chr(1) if val else chr(0))
    def write_int16(self, val):
        return self._write_num('<h', val)
    def write_uint16(self, val):
        return self._write_num('<H', val)
    def write_int32(self, val):
        return self._write_num('<i', val)
    def write_uint32(self, val):
        return self._write_num('<I', val)
    def write_int64(self, val):
        return self._write_num('<q', val)
    def write_uint64(self, val):
        return self._write_num('<Q', val)
    def read_compact_size(self):
        """Read a Bitcoin CompactSize (variable-length) integer."""
        size = ord(self.input[self.read_cursor])
        self.read_cursor += 1
        if size == 253:
            size = self._read_num('<H')
        elif size == 254:
            size = self._read_num('<I')
        elif size == 255:
            size = self._read_num('<Q')
        return size
    def write_compact_size(self, size):
        """Write a Bitcoin CompactSize (variable-length) integer."""
        if size < 0:
            raise SerializationError("attempt to write size < 0")
        elif size < 253:
            self.write(chr(size))
        elif size < 2**16:
            self.write('\xfd')
            self._write_num('<H', size)
        elif size < 2**32:
            self.write('\xfe')
            self._write_num('<I', size)
        elif size < 2**64:
            self.write('\xff')
            self._write_num('<Q', size)
    def _read_num(self, format):
        # Unpack one little-endian number described by the struct format.
        (i,) = struct.unpack_from(format, self.input, self.read_cursor)
        self.read_cursor += struct.calcsize(format)
        return i
    def _write_num(self, format, num):
        s = struct.pack(format, num)
        self.write(s)
class EnumException(Exception):
    """Raised by Enumeration for duplicate or ill-typed enum definitions."""
    pass
class Enumeration:
    """enum-like type
    From the Python Cookbook, downloaded from http://code.activestate.com/recipes/67107/

    NOTE: Python 2 only (uses types.TupleType/StringType/IntType).
    Entries are names or (name, value) tuples; values auto-increment from
    the last explicit value.
    """
    def __init__(self, name, enumList):
        self.__doc__ = name
        lookup = {}         # name -> value
        reverseLookup = {}  # value -> name
        i = 0
        uniqueNames = []
        uniqueValues = []
        for x in enumList:
            # A (name, value) tuple resets the running counter.
            if isinstance(x, types.TupleType):
                x, i = x
            if not isinstance(x, types.StringType):
                raise EnumException("enum name is not a string: %r" % x)
            if not isinstance(i, types.IntType):
                raise EnumException("enum value is not an integer: %r" % i)
            if x in uniqueNames:
                raise EnumException("enum name is not unique: %r" % x)
            if i in uniqueValues:
                raise EnumException("enum value is not unique for %r" % x)
            uniqueNames.append(x)
            uniqueValues.append(i)
            lookup[x] = i
            reverseLookup[i] = x
            i = i + 1
        self.lookup = lookup
        self.reverseLookup = reverseLookup
    def __getattr__(self, attr):
        # Attribute access resolves enum names, e.g. opcodes.OP_DUP.
        if attr not in self.lookup:
            raise AttributeError
        return self.lookup[attr]
    def whatis(self, value):
        """Return the name for *value* (KeyError if unknown)."""
        return self.reverseLookup[value]
# This function comes from bitcointools, bct-LICENSE.txt.
def long_hex(bytes):
    """Hex-encode the full byte string (Python 2 str.encode('hex_codec'))."""
    return bytes.encode('hex_codec')
# This function comes from bitcointools, bct-LICENSE.txt.
def short_hex(bytes):
    """Hex-encode *bytes*, abbreviating long values as 'head...tail'."""
    encoded = bytes.encode('hex_codec')
    if len(encoded) >= 11:
        encoded = encoded[:4] + "..." + encoded[-4:]
    return encoded
def parse_TxIn(vds):
    """Deserialize one transaction input from *vds* into a dict with
    prevout_hash, prevout_n, sequence, address and signatures."""
    d = {}
    d['prevout_hash'] = hash_encode(vds.read_bytes(32))
    d['prevout_n'] = vds.read_uint32()
    scriptSig = vds.read_bytes(vds.read_compact_size())
    d['sequence'] = vds.read_uint32()
    if scriptSig:
        pubkeys, signatures, address = get_address_from_input_script(scriptSig)
    else:
        # Empty scriptSig (e.g. unsigned input): nothing to extract.
        pubkeys = []
        signatures = []
        address = None
    # NOTE(review): pubkeys is computed but not included in the result.
    d['address'] = address
    d['signatures'] = signatures
    return d
def parse_TxOut(vds, i):
    """Deserialize one transaction output (at index *i*) from *vds* into a
    dict with value, address, raw_output_script (hex) and index."""
    d = {}
    d['value'] = vds.read_int64()
    scriptPubKey = vds.read_bytes(vds.read_compact_size())
    d['address'] = get_address_from_output_script(scriptPubKey)
    d['raw_output_script'] = scriptPubKey.encode('hex')
    d['index'] = i
    return d
def parse_Transaction(vds, is_coinbase):
    """Deserialize a full transaction from *vds*.

    Returns a dict with version, inputs, outputs and lockTime.  For a
    coinbase transaction the (single, synthetic) input is parsed but not
    kept in the result.
    """
    d = {}
    start = vds.read_cursor  # NOTE(review): recorded but currently unused
    d['version'] = vds.read_int32()
    n_vin = vds.read_compact_size()
    d['inputs'] = []
    for i in xrange(n_vin):
        o = parse_TxIn(vds)
        if not is_coinbase:
            d['inputs'].append(o)
    n_vout = vds.read_compact_size()
    d['outputs'] = []
    for i in xrange(n_vout):
        o = parse_TxOut(vds, i)
        #if o['address'] == "None" and o['value']==0:
        #    print("skipping strange tx output with zero value")
        #    continue
        # if o['address'] != "None":
        d['outputs'].append(o)
    d['lockTime'] = vds.read_uint32()
    return d
opcodes = Enumeration("Opcodes", [
("OP_0", 0), ("OP_PUSHDATA1", 76), "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE", "OP_RESERVED",
"OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7",
"OP_8", "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16",
"OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF", "OP_ELSE", "OP_ENDIF", "OP_VERIFY",
"OP_RETURN", "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP", "OP_2OVER", "OP_2ROT", "OP_2SWAP",
"OP_IFDUP", "OP_DEPTH", "OP_DROP", "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT",
"OP_SWAP", "OP_TUCK", "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE", "OP_INVERT", "OP_AND",
"OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY", "OP_RESERVED1", "OP_RESERVED2", "OP_1ADD", "OP_1SUB", "OP_2MUL",
"OP_2DIV", "OP_NEGATE", "OP_ABS", "OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV",
"OP_MOD", "OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR",
"OP_NUMEQUAL", "OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN",
"OP_GREATERTHAN", "OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQUAL", "OP_MIN", "OP_MAX",
"OP_WITHIN", "OP_RIPEMD160", "OP_SHA1", "OP_SHA256", "OP_HASH160",
"OP_HASH256", "OP_CODESEPARATOR", "OP_CHECKSIG", "OP_CHECKSIGVERIFY", "OP_CHECKMULTISIG",
"OP_CHECKMULTISIGVERIFY",
"OP_NOP1", "OP_NOP2", "OP_NOP3", "OP_NOP4", "OP_NOP5", "OP_NOP6", "OP_NOP7", "OP_NOP8", "OP_NOP9", "OP_NOP10",
("OP_INVALIDOPCODE", 0xFF),
])
def script_GetOp(bytes):
    """Generator over a serialized script, yielding (opcode, data, cursor).

    *data* is the pushed payload for push opcodes (None otherwise); a push
    whose declared size overruns the buffer yields the remainder prefixed
    with "_INVALID_".  Python 2: *bytes* is a byte string, read via ord().
    """
    i = 0
    while i < len(bytes):
        vch = None
        opcode = ord(bytes[i])
        i += 1
        if opcode <= opcodes.OP_PUSHDATA4:
            # Opcodes up to OP_PUSHDATA4 push data; the payload size is the
            # opcode itself, or an explicit 1/2/4-byte little-endian length.
            nSize = opcode
            if opcode == opcodes.OP_PUSHDATA1:
                nSize = ord(bytes[i])
                i += 1
            elif opcode == opcodes.OP_PUSHDATA2:
                (nSize,) = struct.unpack_from('<H', bytes, i)
                i += 2
            elif opcode == opcodes.OP_PUSHDATA4:
                (nSize,) = struct.unpack_from('<I', bytes, i)
                i += 4
            if i+nSize > len(bytes):
                vch = "_INVALID_"+bytes[i:]
                i = len(bytes)
            else:
                vch = bytes[i:i+nSize]
                i += nSize
        yield (opcode, vch, i)
def script_GetOpName(opcode):
    """Human-readable opcode name, without the 'OP_' prefix."""
    try:
        name = opcodes.whatis(opcode)
    except KeyError:
        return "InvalidOp_" + str(opcode)
    return name.replace("OP_", "")
def decode_script(bytes):
    """Render a serialized script as a space-separated human-readable
    string: "<size>:<hex>" for data pushes, opcode names otherwise."""
    result = ''
    for (opcode, vch, i) in script_GetOp(bytes):
        if len(result) > 0:
            result += " "
        if opcode <= opcodes.OP_PUSHDATA4:
            result += "%d:" % (opcode,)
            result += short_hex(vch)
        else:
            result += script_GetOpName(opcode)
    return result
def match_decoded(decoded, to_match):
    """Return True when the decoded script matches the opcode template.

    In the template, OP_PUSHDATA4 acts as a wildcard for any push opcode,
    since every opcode <= OP_PUSHDATA4 just pushes data onto the stack.
    """
    if len(decoded) != len(to_match):
        return False
    for wanted, item in zip(to_match, decoded):
        actual = item[0]
        if wanted == opcodes.OP_PUSHDATA4 and actual <= opcodes.OP_PUSHDATA4:
            continue
        if wanted != actual:
            return False
    return True
def get_address_from_input_script(bytes):
    """Classify a TxIn script and extract signer information.

    :return: a ``(pubkeys, signatures, address)`` triple:
        * pay-to-pubkey-hash inputs -> ``(None, None, address)``;
        * recognised 2-of-2 / 2-of-3 p2sh multisig inputs -> redeem-script
          public keys (hex), pushed signatures (hex), and the p2sh address;
        * anything unparseable (e.g. coinbase inputs) -> ``([], [], None)``.
    """
    try:
        decoded = [ x for x in script_GetOp(bytes) ]
    except Exception:
        # coinbase transactions raise an exception while decoding; narrowed
        # from a bare `except:` so KeyboardInterrupt/SystemExit still propagate
        return [], [], None
    # non-generated TxIn transactions push a signature
    # (seventy-something bytes) and then their public key
    # (33 or 65 bytes) onto the stack:
    match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ]
    if match_decoded(decoded, match):
        return None, None, public_key_to_pubkey_address(decoded[1][1])
    # p2sh transaction, 2 of n: OP_0 followed only by data pushes,
    # the last push being the redeem script
    match = [ opcodes.OP_0 ]
    while len(match) < len(decoded):
        match.append(opcodes.OP_PUSHDATA4)
    if match_decoded(decoded, match):
        redeemScript = decoded[-1][1]
        signatures = map(lambda x:x[1].encode('hex'), decoded[1:-1])
        dec2 = [ x for x in script_GetOp(redeemScript) ]
        # 2 of 2
        match2 = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_2, opcodes.OP_CHECKMULTISIG ]
        if match_decoded(dec2, match2):
            pubkeys = [ dec2[1][1].encode('hex'), dec2[2][1].encode('hex') ]
            return pubkeys, signatures, hash_160_to_script_address(hash_160(redeemScript))
        # 2 of 3
        match2 = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_3, opcodes.OP_CHECKMULTISIG ]
        if match_decoded(dec2, match2):
            pubkeys = [ dec2[1][1].encode('hex'), dec2[2][1].encode('hex'), dec2[3][1].encode('hex') ]
            return pubkeys, signatures, hash_160_to_script_address(hash_160(redeemScript))
    return [], [], None
def get_address_from_output_script(bytes):
    """Extract the destination address from a TxOut script, if recognised.

    Handles pay-to-pubkey (genesis-style), pay-to-pubkey-hash (including a
    trailing-OP_NOP variant seen in the wild), p2sh, and a "black hole"
    OP_0-hash form (which has no address).  Returns the address string, or
    None when the script is unparseable or carries no address.
    """
    try:
        decoded = [ x for x in script_GetOp(bytes) ]
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed while decoding malformed scripts
        return None
    # The Genesis Block, self-payments, and pay-by-IP-address payments look like:
    # 65 BYTES:... CHECKSIG
    match = [opcodes.OP_PUSHDATA4, opcodes.OP_CHECKSIG]
    if match_decoded(decoded, match):
        return public_key_to_pubkey_address(decoded[0][1])
    # coins sent to black hole
    # DUP HASH160 20 BYTES:... EQUALVERIFY CHECKSIG
    match = [opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_0, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG]
    if match_decoded(decoded, match):
        return None
    # Pay-by-Bitcoin-address TxOuts look like:
    # DUP HASH160 20 BYTES:... EQUALVERIFY CHECKSIG
    match = [opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG]
    if match_decoded(decoded, match):
        return hash_160_to_pubkey_address(decoded[2][1])
    # strange tx: standard pay-to-pubkey-hash with a trailing OP_NOP
    match = [opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG, opcodes.OP_NOP]
    if match_decoded(decoded, match):
        return hash_160_to_pubkey_address(decoded[2][1])
    # p2sh
    match = [ opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUAL ]
    if match_decoded(decoded, match):
        addr = hash_160_to_script_address(decoded[1][1])
        return addr
    return None
| agpl-3.0 |
synconics/odoo | addons/lunch/wizard/lunch_order.py | 440 | 1299 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class lunch_order_order(osv.TransientModel):
    """Transient wizard model that places an order for the selected meals."""
    _name = 'lunch.order.order'
    _description = 'Wizard to order a meal'

    def order(self, cr, uid, ids, context=None):
        """Delegate ordering of the selected lines to ``lunch.order.line``."""
        line_model = self.pool.get('lunch.order.line')
        return line_model.order(cr, uid, ids, context=context)
| agpl-3.0 |
Just-D/chromium-1 | tools/chrome_proxy/integration_tests/chrome_proxy_pagesets/block_once.py | 14 | 1488 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class BlockOncePage(page_module.Page):
  # Telemetry page that exercises the data-reduction proxy "block-once"
  # directive via an injected XHR after navigation.
  def __init__(self, url, page_set):
    super(BlockOncePage, self).__init__(url=url, page_set=page_set)
  def RunNavigateSteps(self, action_runner):
    super(BlockOncePage, self).RunNavigateSteps(action_runner)
    # Test block-once on a POST request.
    # Ensure that a subsequent request uses the data reduction proxy.
    # The POST asks the test server to reply with a block-once directive;
    # its onload handler then issues a GET that should go via the proxy.
    action_runner.ExecuteJavaScript('''
        (function() {
          var request = new XMLHttpRequest();
          request.open("POST",
              "http://chromeproxy-test.appspot.com/default?respBody=T0s=&respStatus=200&flywheelAction=block-once");
          request.onload = function() {
            var viaProxyRequest = new XMLHttpRequest();
            viaProxyRequest.open("GET",
                "http://check.googlezip.net/image.png");
            viaProxyRequest.send();
          };
          request.send();
        })();
    ''')
    # Give the asynchronous XHR chain a moment to complete.
    action_runner.Wait(1)
class BlockOnceStorySet(story.StorySet):
  """Story set exercising the Chrome proxy block-once test sites."""

  def __init__(self):
    super(BlockOnceStorySet, self).__init__()
    # Test block-once for a GET request.
    for test_url in ('http://check.googlezip.net/blocksingle/',):
      self.AddStory(BlockOncePage(test_url, self))
| bsd-3-clause |
Rzaporozhets/robot_tests | setup.py | 3 | 1056 | from setuptools import find_packages, setup
# Packaging metadata for the OpenProcurement robot test-suite.
version = '2.4.dev0'

# Runtime dependencies of the test-suite.
requires = [
    'Faker',
    'Pillow',
    'PyYAML',
    'barbecue',
    'dateutils',
    'dpath',
    'haversine',
    'iso8601',
    'jsonpath-rw',
    'munch',
    'parse',
    'pytz',
    'robotframework',
    'robotframework-selenium2library',
    'selenium < 3.0.dev0',
]

setup(
    name='op_robot_tests',
    version=version,
    description="",
    long_description="""\
""",
    # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[],
    keywords='',
    author='',
    author_email='',
    url='',
    license='',
    packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
    include_package_data=True,
    zip_safe=False,
    install_requires=requires,
    entry_points={
        'console_scripts': [
            'op_tests = op_robot_tests.runner:runner',
        ],
    },
)
| apache-2.0 |
dimarkov/pyBefit | agents/dynamic_programming.py | 1 | 14476 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 5 19:14:01 2019
This module contains active inference agents for various experimental tasks.
@author: Dimitrije Markovic
"""
import torch
from torch import zeros, ones, tensor, eye
from torch.distributions import Categorical, Uniform
from numpy import nan_to_num, indices, ravel_multi_index
from itertools import product
from .agent import Discrete
# Public API of this module.
__all__ = [
    'BIBanditsFlat',
    'BIBanditsDeep'
]
def ntn(x):
    """Replace NaN/inf entries of array *x* and wrap the result as a torch tensor."""
    cleaned = nan_to_num(x)
    return torch.from_numpy(cleaned)
def mli(x, L):
    """Flatten a 2-d multi-index *x* into a linear index on an (L, L) grid."""
    return ravel_multi_index(x, (L, L))
def psi(x):
    """Elementwise digamma (derivative of log-gamma) of tensor *x*."""
    result = torch.digamma(x)
    return result
class BIBanditsFlat(Discrete):
    '''Agent using backward induction to compute optimal choice.

    "Flat" variant: beliefs about arm-outcome probabilities are kept as
    Dirichlet counts in ``self.a`` with no higher-level (context) state.
    '''
    def __init__(self, pars, runs=1, blocks=1, trials=10):
        # number of planning iterations (not used within this class's visible code)
        self.nit = 10
        na = pars['na'] # number of choices
        self.nl = pars['nl'] # number of arms
        self.nf = pars['nf'] # number of features
        self.ni = pars['ni'] # number of internal states
        super(BIBanditsFlat, self).__init__(runs, blocks, trials, na, None, None)
        self.priors = {}
        self.initiate_policies()
        self.initiate_prior_beliefs()
        self.initiate_beliefs_and_expectations()
    def initiate_beliefs_and_expectations(self):
        # Allocate per-block/per-trial belief tensors.
        self.beliefs = {
            'internals': zeros(self.nb, self.runs, self.ni), # internal states
            'locations': zeros(self.nb, self.nt + 1, self.runs, self.nl), # selected location/ arm number
            'points': zeros(self.nb, self.nt + 1, self.runs, self.nf, dtype=torch.long),
        }
        self.expectations = {}
    def set_parameters(self, x=None, priors=None, set_variables=True):
        # alpha: response precision (fixed high -> near-deterministic choices).
        self.alpha = 100*ones(self.runs)
        self.beta = ones(self.runs)
        # self.a holds a history of Dirichlet count tensors (runs, na, nf);
        # x, when given, supplies the latest counts.
        if x is None:
            self.a = [ones(self.runs, self.na, self.nf)]
        else:
            self.a = [x[-1]]
        if priors is not None:
            self.initiate_prior_beliefs(**priors)
        else:
            self.initiate_prior_beliefs()
        # forgetting rate applied between blocks in update_prior_beliefs
        self.eps = 1/20
        # choice dependent location transitions
        self.Bl = eye(self.na).repeat(self.na, 1, 1)
        self.logits = []
    def initiate_policies(self):
        # All action sequences of length nt over na choices.
        self.policies = torch.tensor(list(product(range(self.na), repeat=self.nt)))
        self.npi = self.policies.shape[0]
    def initiate_prior_beliefs(self, **kwargs):
        # Uniform priors over locations and policies, appended per call.
        self.priors.setdefault('locations', [])
        self.priors.setdefault('policies', [])
        self.priors['locations'].append(ones(self.na)/self.na)
        self.priors['policies'].append(ones(self.npi)/self.npi)
    def update_prior_beliefs(self, b, t, **kwargs):
        a = self.a[-1]
        a0 = a.sum(-1, keepdim=True)
        A = a/a0
        # increase uncertainty between segments but keep expectations the same
        if b > 0:
            v = a0*(1 - self.eps) + self.eps
            self.a.append(v*A)
            self.update_observation_likelihood()
    def update_observation_likelihood(self):
        # Posterior-mean outcome probabilities from the Dirichlet counts.
        a = self.a[-1]
        a0 = self.a[-1].sum(-1, keepdim=True)
        self.A = a/a0
    def update_beliefs(self, b, t, response_outcomes):
        # response_outcomes: (..., responses, (out1, out2)) — out2 unused here.
        res = response_outcomes[-2]
        out1, out2 = response_outcomes[-1]
        # Add the observed outcome to the chosen arm's counts.
        a = self.a[-1].clone()
        a[range(self.runs), res] += out1
        self.a.append(a)
        self.beliefs['locations'][b, t] = torch.eye(self.na)[res]
        self.update_observation_likelihood()
    def backward_induction(self, b, t):
        # Compute, for each run and first action, the best achievable success
        # probability over all remaining action sequences (exact enumeration).
        runs = self.runs
        nt = self.nt
        mpt = nt # maximum number of points
        np = self.nf - 1 # number of different types of points
        thr = 2*mpt//3 # success threshold
        # predictions
        # L enumerates all feature-outcome sequences for the remaining trials.
        L = torch.tensor(list(product(range(self.nf), repeat= nt-t)))
        bonus1 = torch.sum(L == 1, -1)
        bonus2 = torch.sum(L == 2, -1)
        actions = torch.arange(self.na)
        log_probs = 0.
        for k in range(t+1, nt+1):
            actions = self.policies[:, k-1]
            outcomes = self.A[:, actions]
            log_probs += outcomes[..., L[:, k - t - 1]].log()
        probs = log_probs.exp()
        # Points accumulated so far plus every possible future bonus.
        p1 = self.beliefs['points'][b, t][:, 1].reshape(-1, 1) + bonus1.reshape(1, -1)
        p2 = self.beliefs['points'][b, t][:, 2].reshape(-1, 1) + bonus2.reshape(1, -1)
        # l marks outcome sequences that do NOT reach the threshold.
        l = (p1 <= thr)*(p2 <= thr)
        Q = zeros(runs, self.na)
        actions = self.policies[:, t]
        for n in range(runs):
            for a in range(self.na):
                # success prob = 1 - P(all below-threshold sequences)
                r = 1 - probs[n, :, l[n]].sum(-1)
                Q[n, a] = r[a == actions].max()
        return Q
    def planning(self, b, t, **kwargs):
        """Compute log probability of responses from stimuli values for the given offers.
        Here offers encode location of stimuli A and B.
        """
        if t == 0:
            self.update_prior_beliefs(b, t)
        if 'locations' in kwargs:
            locations = kwargs['locations']
            self.beliefs['locations'][b, t] = 0.
            self.beliefs['locations'][b, t, range(self.runs), locations] = 1.
        if 'points' in kwargs:
            points = kwargs['points']
            self.beliefs['points'][b, t] = points
        Q = self.backward_induction(b, t)
        # Map Q in [0, 1] to symmetric logits scaled by response precision.
        self.logits.append(self.alpha.reshape(-1, 1)*(2*Q-1))
    def sample_responses(self, b, t):
        # Sample a choice from the softmax over the latest planning logits.
        logits = self.logits[-1]
        cat = Categorical(logits=logits)
        return cat.sample()
class BIBanditsDeep(Discrete):
    '''Agent using backward induction to compute optimal choice.

    "Deep" variant: adds a second (hierarchical) level with latent contexts
    that govern offer-state transitions; beliefs about offers are tracked
    per trial and updated with a forward-backward style smoothing pass.
    '''
    def __init__(self, pars, runs=1, blocks=1, trials=10):
        # number of planning iterations (not used within this class's visible code)
        self.nit = 10
        na = pars['na'] # number of choices
        no = pars['no'] # number of offers
        self.nf = pars['nf'] # number of features
        self.nc = pars['nc'] # number of contexts
        self.ni = pars['ni'] # number of internal states
        super(BIBanditsDeep, self).__init__(runs, blocks, trials, na, None, no)
        self.priors = {}
        self.initiate_policies()
        self.initiate_beliefs_and_expectations()
    def initiate_beliefs_and_expectations(self):
        # Allocate per-block/per-trial belief tensors.
        self.beliefs = {
            'context': zeros(self.nb, self.runs, self.nc), # context states 2nd level
            'internals': zeros(self.nb, self.runs, self.ni), # internal states 2nd level
            'offers': zeros(self.nb, self.nt + 1, self.runs, self.no), # offer state
            'locations': zeros(self.nb, self.nt + 1, self.runs, self.na), # selected location/ arm number
            'points': zeros(self.nb, self.nt + 1, self.runs, self.nf, dtype=torch.long),
        }
        self.expectations = {}
    def set_parameters(self, x=None, priors=None, set_variables=True):
        # alpha: response precision (fixed high -> near-deterministic choices).
        self.alpha = 100*ones(self.runs)
        self.beta = ones(self.runs)
        # Dirichlet counts over (offer, location, feature) outcomes.
        if x is None:
            self.a = [ones(self.runs, self.no, self.na, self.nf)]
        else:
            self.a = [x[-1]]
        if priors is not None:
            self.initiate_prior_beliefs(**priors)
        else:
            self.initiate_prior_beliefs()
        # context dependent offer transitions
        # rho = 1. makes cBo exactly identity per context; M would mix in
        # uniform off-diagonal transitions for rho < 1.
        self.cBo = eye(self.no).repeat(self.nc, 1, 1)
        M = (ones(self.no, self.no) - eye(self.no)).repeat(self.nc, 1, 1)/(self.no-1)
        rho = 1.
        self.cBo = rho*self.cBo + (1-rho)*M
        # Context transition matrix Bc: mostly persistent (rho2 = .95) with a
        # specific off-diagonal structure (superdiag +1 and subdiag -2 bands).
        rho2 = .95
        M = (torch.diag(ones(self.nc-1), 1) + torch.diag(ones(self.nc-2), -2))/(self.nc-1)
        self.Bc = rho2*eye(self.nc) + (1-rho2)*M
        # choice dependent location transitions
        self.Bl = eye(self.na).repeat(self.na, 1, 1)
        self.logits = []
    def initiate_policies(self):
        # All action sequences of length nt over na choices.
        self.policies = torch.tensor(list(product(range(self.na), repeat=self.nt)))
        self.npi = self.policies.shape[0]
    def initiate_prior_beliefs(self, **kwargs):
        self.priors.setdefault('context', [])
        self.priors.setdefault('offers', [])
        self.priors.setdefault('locations', [])
        self.priors.setdefault('probs', [])
        self.priors.setdefault('policies', [])
        # Default context prior puts all mass on the first context;
        # NOTE(review): hard-coded 3-vector assumes nc == 3 — confirm.
        if 'context' in kwargs:
            self.priors['context'].append(kwargs['context'])
        else:
            self.priors['context'].append(torch.tensor([1., 0., 0.]).repeat(self.runs, 1))
        # Dirichlet prior over offers per context (identity-biased counts).
        self.priors['offers'].append(2*eye(self.nc).repeat(self.runs, 1, 1) + 1)
        self.priors['locations'].append(ones(self.na)/self.na)
        self.priors['policies'].append(ones(self.npi)/self.npi)
    def update_state_transition_matrix(self):
        # Offer transition matrix marginalised over the context prior.
        prior = self.priors['context'][-1]
        self.Bo = torch.einsum('nj,jkl->nkl', prior, self.cBo)
    def update_observation_likelihood(self):
        # Posterior-mean outcome probabilities from the Dirichlet counts.
        a = self.a[-1]
        a0 = self.a[-1].sum(-1, keepdim=True)
        self.A = a/a0
    def update_prior_beliefs(self, b, t, **kwargs):
        # Optionally overwrite the context prior with an observed context index.
        if 'context' in kwargs:
            cnt = kwargs['context']
            context = torch.eye(self.nc)[cnt]
            self.priors['context'].append(context)
        else:
            context = self.priors['context'][-1]
        f = self.priors['offers'][-1]
        f0 = f.sum(-1, keepdim=True)
        self.D = f/f0
        # set prior over offers as a marginal over contexts
        self.beliefs['offers'][b, t] = torch.einsum('ni,nij->nj', context, self.D)
        self.update_state_transition_matrix()
        self.update_observation_likelihood()
    def update_second_level(self, b, t, out1, out2):
        # Smooth offer beliefs back to the block start (backward pass over
        # the filtered beliefs), then update context and Dirichlet priors.
        # NOTE(review): out2 is accepted but unused here — confirm intended.
        context = self.priors['context'][-1]
        offers = self.beliefs['offers'][b, t]
        locations = self.beliefs['locations'][b, t]
        alpha = self.beliefs['offers'][b, :t+1]
        gamma = self.beliefs['offers'][b, :t+2].clone()
        tm = self.Bo
        for k in range(t+1):
            pred = torch.einsum('ni,nij->nj', alpha[-k-1], tm)
            gamma[-k-2] = torch.einsum('nj,nij,ni->ni', gamma[-k-1]/pred, tm, alpha[-k-1])
        cp = self.priors['context'][-1]
        op = self.priors['offers'][-1]/self.priors['offers'][-1].sum(-1, keepdim=True)
        # Posterior over contexts given the smoothed initial offer belief.
        context = (torch.einsum('nco,no->nc', op.log(), gamma[0]) + cp.log()).softmax(-1)
        f = torch.einsum('nc,nf->ncf', context, gamma[0]) + self.priors['offers'][-1]
        a = torch.einsum('no,nl,nf->nolf', offers, locations, out1) + self.a[-1]
        self.a.append(a)
        self.priors['offers'].append(f)
        self.update_observation_likelihood()
        # At the block's final trial, propagate the context through Bc.
        if t == self.nt-1:
            self.priors['context'].append(context@self.Bc)
    def update_beliefs(self, b, t, response_outcomes):
        res = response_outcomes[-2]
        out1, out2 = response_outcomes[-1]
        offers = self.beliefs['offers'][b, t]
        # Likelihood of the observed feature under each offer/location.
        A = self.A[range(self.runs), :, :, out1.argmax(-1)]
        post = torch.einsum('ni,nij->nij', offers, A)[range(self.runs), :, res]
        post /= post.sum(-1, keepdim=True)
        self.beliefs['offers'][b, t] = post
        # One-step offer prediction for the next trial.
        self.beliefs['offers'][b, t + 1] = torch.einsum('nij,ni->nj', self.Bo, self.beliefs['offers'][b, t])
        self.beliefs['locations'][b, t] = torch.eye(self.na)[res]
        self.update_second_level(b, t, out1, out2)
    def backward_induction(self, b, t):
        # Same exact-enumeration planner as BIBanditsFlat, but outcome
        # probabilities are marginalised over predicted offer states.
        runs = self.runs
        nt = self.nt
        mpt = nt # maximum number of points
        np = self.nf - 1 # number of different types of points
        thr = 2*mpt//3 # success threshold
        # predictions
        L = torch.tensor(list(product(range(self.nf), repeat= nt-t)))
        bonus1 = torch.sum(L == 1, -1)
        bonus2 = torch.sum(L == 2, -1)
        actions = torch.arange(self.na)
        log_probs = 0.
        for k in range(t+1, nt+1):
            actions = self.policies[:, k-1]
            beliefs = self.beliefs['offers'][b, k-1]
            predictions = torch.einsum('ni,nij->nj', beliefs, self.Bo)
            # Side effect: forward offer predictions are stored as they are computed.
            self.beliefs['offers'][b, k] = predictions
            outcomes = torch.einsum('ni,nijk->njk', predictions, self.A)[:, actions]
            log_probs += outcomes[..., L[:, k - t - 1]].log()
        probs = log_probs.exp()
        p1 = self.beliefs['points'][b, t][:, 1].reshape(-1, 1) + bonus1.reshape(1, -1)
        p2 = self.beliefs['points'][b, t][:, 2].reshape(-1, 1) + bonus2.reshape(1, -1)
        # l marks outcome sequences that do NOT reach the threshold.
        l = (p1 <= thr)*(p2 <= thr)
        Q = zeros(runs, self.na)
        actions = self.policies[:, t]
        for n in range(runs):
            for a in range(self.na):
                r = 1 - probs[n, :, l[n]].sum(-1)
                Q[n, a] = r[a == actions].max()
        return Q
    def planning(self, b, t, **kwargs):
        """Compute log probability of responses from stimuli values for the given offers.
        Here offers encode location of stimuli A and B.
        """
        if 'context' in kwargs:
            self.update_prior_beliefs(b, t, **kwargs)
        elif t == 0:
            self.update_prior_beliefs(b, t)
        if 'locations' in kwargs:
            locations = kwargs['locations']
            self.beliefs['locations'][b, t] = 0.
            self.beliefs['locations'][b, t, range(self.runs), locations] = 1.
        if 'points' in kwargs:
            points = kwargs['points']
            self.beliefs['points'][b, t] = points
        if t == 0:
            self.update_state_transition_matrix()
        Q = self.backward_induction(b, t)
        # Map Q in [0, 1] to symmetric logits scaled by response precision.
        self.logits.append(self.alpha.reshape(-1, 1)*(2*Q-1))
    def sample_responses(self, b, t):
        # Sample a choice from the softmax over the latest planning logits.
        logits = self.logits[-1]
        cat = Categorical(logits=logits)
        return cat.sample()
| mit |
chenyyx/scikit-learn-doc-zh | doc/en/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
    """Register a fresh temporary data home in the doctest namespace."""
    global custom_data_home
    # Temporary folder standing in for the scikit-learn data directory,
    # pre-populated with an empty 'mldata' subfolder for the fetcher.
    custom_data_home = tempfile.mkdtemp()
    mldata_dir = join(custom_data_home, 'mldata')
    makedirs(mldata_dir)
    globs['custom_data_home'] = custom_data_home
    return globs
def setup_module():
    """Install a mock mldata.org backend so doctests never hit the network."""
    mock_datasets = {
        'mnist-original': {
            'data': np.empty((70000, 784)),
            'label': np.repeat(np.arange(10, dtype='d'), 7000),
        },
        'iris': {
            'data': np.empty((150, 4)),
        },
        'datasets-uci-iris': {
            'double0': np.empty((150, 4)),
            'class': np.empty((150,)),
        },
    }
    install_mldata_mock(mock_datasets)
def teardown_module():
    # Undo the mldata mock and delete the temporary data home created by
    # globs() (stored in the module-level `custom_data_home`).
    uninstall_mldata_mock()
    shutil.rmtree(custom_data_home)
| gpl-3.0 |
249550148/sports-tv | crawler/sports_tv/spiders/zhibo8.py | 1 | 1727 | # encoding: utf-8
import time
import urlparse
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from sports_tv.items.sport import Sport
class Zhiobo8Spider(BaseSpider):
    # Scrapy spider scraping the zhibo8.cc front page for sports broadcast
    # listings.  NOTE(review): Python 2 code (print statements, str.decode).
    name = "zhibo8"
    allowed_domains = ["www.zhibo8.cc"]
    start_urls = [
        "http://www.zhibo8.cc/",
    ]
    item_type = "sport"
    src = "直播吧"
    def parse(self, response):
        """Parse the front page into a list of Sport items.

        Each day's listings live in a div.box; every li under its content
        ul is one race/broadcast entry.
        """
        hxs = HtmlXPathSelector(response)
        boxes = hxs.select("//div[@class='box']")
        print "get %s days" % len(boxes)
        items = []
        for box in boxes:
            # Boxes without a title bar are not day listings; skip them.
            title_bar = box.select("./div[@class='titlebar']")
            if not title_bar:
                continue
            date = title_bar.select("./h2/text()").extract()
            content = box.select("./div[@class='content']/ul")
            print "date %s" % date
            races = content.select("./li")
            print "get %s reaces for date %s" % (len(races), date)
            for race in races:
                item = Sport()
                link = race.select("./a[1]/@href").extract()
                # Entries without a link cannot be followed; skip them.
                if len(link) < 1:
                    continue
                link = link[0]
                title = race.select("./a[1]/text()").extract()
                teams = race.select("./text()").extract()
                item["url"] = urlparse.urljoin(response.url, link)
                item["title"] = title
                item["date"] = date
                item['src'] = self.src.decode("utf-8")
                item['race_type'] = self.item_type
                item['teams_time'] = teams
                item['base_url'] = response.url
                items.append(item)
        return items
| mit |
Lab603/PicEncyclopedias | jni-build/jni/include/tensorflow/python/training/basic_loops.py | 54 | 2245 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic loop for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import errors
def basic_train_loop(supervisor, train_step_fn, args=None,
                     kwargs=None, master=""):
  """Run `train_step_fn` repeatedly inside a supervised session.

  Each iteration invokes `train_step_fn(session, *args, **kwargs)` with a
  `tf.Session` created by `supervisor`; the callable is expected to perform
  one training step.  If a distributed TensorFlow server restarts
  (signalled by `errors.AbortedError`), the session is recreated and
  training resumes.

  Args:
    supervisor: `tf.Supervisor` providing the managed training session.
    train_step_fn: Callable executing one training step; called repeatedly
      as `train_step_fn(session, *args, **kwargs)`.
    args: Optional positional arguments for `train_step_fn`.
    kwargs: Optional keyword arguments for `train_step_fn`.
    master: Target used to create the training session.  The default `""`
      creates the session in the local process.
  """
  step_args = args if args is not None else []
  step_kwargs = kwargs if kwargs is not None else {}
  while True:
    try:
      with supervisor.managed_session(master) as sess:
        while not supervisor.should_stop():
          train_step_fn(sess, *step_args, **step_kwargs)
      return
    except errors.AbortedError:
      # An AbortedError indicates a restart of one of the distributed
      # tensorflow servers; retry with a fresh session.
      continue
| mit |
bakercp/ofxIpVideoServer | example/bin/data/jQuery-File-Upload-master/server/gae-python/main.py | 223 | 5173 | # -*- coding: utf-8 -*-
#
# jQuery File Upload Plugin GAE Python Example 2.0
# https://github.com/blueimp/jQuery-File-Upload
#
# Copyright 2011, Sebastian Tschan
# https://blueimp.net
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT
#
from __future__ import with_statement
from google.appengine.api import files, images
from google.appengine.ext import blobstore, deferred
from google.appengine.ext.webapp import blobstore_handlers
import json, re, urllib, webapp2
# Upload policy: accepted size range and MIME types, thumbnail sizing,
# and how long blobs live before the deferred cleanup deletes them.
WEBSITE = 'http://blueimp.github.com/jQuery-File-Upload/'
MIN_FILE_SIZE = 1 # bytes
MAX_FILE_SIZE = 5000000 # bytes
IMAGE_TYPES = re.compile('image/(gif|p?jpeg|(x-)?png)')
ACCEPT_FILE_TYPES = IMAGE_TYPES
THUMBNAIL_MODIFICATOR = '=s80' # max width / height
EXPIRATION_TIME = 300 # seconds
def cleanup(blob_keys):
    # Deferred task: delete the uploaded blobs once EXPIRATION_TIME has passed.
    blobstore.delete(blob_keys)
class UploadHandler(webapp2.RequestHandler):
    # Handles jQuery-File-Upload requests on App Engine: stores uploads in
    # the blobstore, validates them, and answers with JSON metadata.
    # NOTE(review): Python 2 / legacy GAE code (`unicode`, files API).
    def initialize(self, request, response):
        # Allow cross-origin requests from any site (demo service).
        super(UploadHandler, self).initialize(request, response)
        self.response.headers['Access-Control-Allow-Origin'] = '*'
        self.response.headers[
            'Access-Control-Allow-Methods'
        ] = 'OPTIONS, HEAD, GET, POST, PUT, DELETE'
    def validate(self, file):
        # Check size limits and MIME type; on failure, records the reason
        # in file['error'] and returns False.
        if file['size'] < MIN_FILE_SIZE:
            file['error'] = 'File is too small'
        elif file['size'] > MAX_FILE_SIZE:
            file['error'] = 'File is too big'
        elif not ACCEPT_FILE_TYPES.match(file['type']):
            file['error'] = 'Filetype not allowed'
        else:
            return True
        return False
    def get_file_size(self, file):
        file.seek(0, 2) # Seek to the end of the file
        size = file.tell() # Get the position of EOF
        file.seek(0) # Reset the file position to the beginning
        return size
    def write_blob(self, data, info):
        # Write `data` into a new blobstore file and return its blob key.
        blob = files.blobstore.create(
            mime_type=info['type'],
            _blobinfo_uploaded_filename=info['name']
        )
        with files.open(blob, 'a') as f:
            f.write(data)
        files.finalize(blob)
        return files.blobstore.get_blob_key(blob)
    def handle_upload(self):
        # Process every file field in the POST body; returns a list of
        # per-file result dicts (name/type/size plus URLs or an error).
        results = []
        blob_keys = []
        for name, fieldStorage in self.request.POST.items():
            # Plain (non-file) form fields arrive as unicode; skip them.
            if type(fieldStorage) is unicode:
                continue
            result = {}
            # Strip any Windows-style path prefix from the client filename.
            result['name'] = re.sub(r'^.*\\', '',
                fieldStorage.filename)
            result['type'] = fieldStorage.type
            result['size'] = self.get_file_size(fieldStorage.file)
            if self.validate(result):
                blob_key = str(
                    self.write_blob(fieldStorage.value, result)
                )
                blob_keys.append(blob_key)
                result['delete_type'] = 'DELETE'
                result['delete_url'] = self.request.host_url +\
                    '/?key=' + urllib.quote(blob_key, '')
                if (IMAGE_TYPES.match(result['type'])):
                    try:
                        result['url'] = images.get_serving_url(
                            blob_key,
                            secure_url=self.request.host_url\
                                .startswith('https')
                        )
                        result['thumbnail_url'] = result['url'] +\
                            THUMBNAIL_MODIFICATOR
                    except: # Could not get an image serving url
                        pass
                if not 'url' in result:
                    # Fall back to this app's own download handler URL.
                    result['url'] = self.request.host_url +\
                        '/' + blob_key + '/' + urllib.quote(
                            result['name'].encode('utf-8'), '')
                results.append(result)
        # Schedule deletion of all blobs written in this request.
        deferred.defer(
            cleanup,
            blob_keys,
            _countdown=EXPIRATION_TIME
        )
        return results
    def options(self):
        pass
    def head(self):
        pass
    def get(self):
        self.redirect(WEBSITE)
    def post(self):
        # Browsers without DELETE support tunnel it through POST.
        if (self.request.get('_method') == 'DELETE'):
            return self.delete()
        result = {'files': self.handle_upload()}
        s = json.dumps(result, separators=(',',':'))
        redirect = self.request.get('redirect')
        if redirect:
            # Legacy iframe transport: redirect with the JSON substituted
            # into the '%s' placeholder of the supplied URL template.
            return self.redirect(str(
                redirect.replace('%s', urllib.quote(s, ''), 1)
            ))
        if 'application/json' in self.request.headers.get('Accept'):
            self.response.headers['Content-Type'] = 'application/json'
        self.response.write(s)
    def delete(self):
        blobstore.delete(self.request.get('key') or '')
class DownloadHandler(blobstore_handlers.BlobstoreDownloadHandler):
    """Serves previously uploaded blobs back to the client by blob key."""

    def get(self, key, filename):
        if not blobstore.get(key):
            self.error(404)
            return
        # Cache for the expiration time:
        self.response.headers['Cache-Control'] =\
            'public,max-age=%d' % EXPIRATION_TIME
        self.send_blob(key, save_as=filename)
# WSGI routing: uploads at the root, downloads at /<blob_key>/<filename>.
app = webapp2.WSGIApplication(
    [
        ('/', UploadHandler),
        ('/([^/]+)/([^/]+)', DownloadHandler)
    ],
    debug=True
)
shi2wei3/virt-test | virttest/remote_commander/messenger.py | 22 | 7889 | #!/usr/bin/env python
'''
Created on Dec 6, 2013
:author: jzupka
'''
import os
import logging
import select
import cPickle
import time
import remote_interface
import cStringIO
import base64
class IOWrapper(object):
    """
    Encapsulates I/O operations behind a uniform interface so different
    transports (stdio, sockets, etc.) can be used interchangeably.
    """
    def __init__(self, obj):
        """
        :param obj: Underlying I/O object, for example a file descriptor.
        """
        self._obj = obj
    def close(self):
        raise NotImplementedError()
    def read(self, max_len, timeout=None):
        """
        Subclasses must implement this as a blocking read from the data
        source when timeout is None, and a non-blocking (timed) read when
        a timeout is given.  Implementation example: StdIOWrapperIn.
        :param max_len: Maximum length of data to read.
        :type max_len: int
        :param timeout: Timeout of the reading operation, in seconds.
        :type timeout: float
        :return: Data read.
        """
        raise NotImplementedError()
    def write(self, data):
        """
        Subclasses used for writing must implement this.
        :param data: Data to write.
        :type data: str.
        """
        raise NotImplementedError()
    def fileno(self):
        """
        Return the file descriptor number, so the object can take part in
        standard I/O operations such as select().
        :return: File descriptor number.
        """
        raise NotImplementedError()
    def _wait_for_data(self, max_len, timeout):
        """
        Wait up to `timeout` seconds for data, then read it.
        :param max_len: Maximum length of data to read.
        :type max_len: int
        :param timeout: Timeout of the reading operation, in seconds.
        :type timeout: float
        :return: Data read, or None if the wait timed out.
        """
        r, _, _ = select.select([self.fileno()], [], [], timeout)
        if r:
            return self.read(max_len, None)
        return None
class DataWrapper(object):
    """
    Identity codec: passes data through unchanged.  Subclasses override
    encode/decode to transform data on the wire (see DataWrapperBase64).
    """
    def decode(self, data):
        """
        Decode data which was read.
        :return: decoded data.
        """
        return data
    def encode(self, data):
        """
        Encode data before writing.
        :return: encoded data.
        """
        return data
class DataWrapperBase64(DataWrapper):
    """
    Codec that base64-encodes written data and base64-decodes read data,
    making arbitrary binary payloads safe for text-only channels.
    """
    def decode(self, data):
        return base64.b64decode(data)
    def encode(self, data):
        return base64.b64encode(data)
class StdIOWrapper(IOWrapper, DataWrapper):
    """
    Common base for stdio wrappers: the wrapped object is a raw file
    descriptor, closed with os.close().
    """
    def close(self):
        os.close(self._obj)
    def fileno(self):
        return self._obj
class StdIOWrapperIn(StdIOWrapper):
    """
    Stdio wrapper for reading from a file descriptor (e.g. stdin).
    """
    def read(self, max_len, timeout=None):
        if timeout is None:
            # Blocking read straight from the descriptor.
            return os.read(self._obj, max_len)
        # Timed read: wait at most `timeout` seconds for data.
        return self._wait_for_data(max_len, timeout)
class StdIOWrapperOut(StdIOWrapper):
    """
    Stdio wrapper for writing to a file descriptor (e.g. stdout).
    """
    def write(self, data):
        os.write(self._obj, data)
class StdIOWrapperInBase64(StdIOWrapperIn, DataWrapperBase64):
    """
    Stdin wrapper whose payloads are base64-decoded after reading.
    """
class StdIOWrapperOutBase64(StdIOWrapperOut, DataWrapperBase64):
    """
    Stdout wrapper whose payloads are base64-encoded before writing.
    """
class MessengerError(Exception):
    # Raised/transported when messenger communication fails; keeps the
    # original message text in self.msg for remote reconstruction.
    def __init__(self, msg):
        super(MessengerError, self).__init__(msg)
        self.msg = msg
    def __str__(self):
        return "Messenger ERROR %s" % (self.msg)
def _map_path(mod_name, kls_name):
    # Resolve a (module, class) pair while unpickling: any module path
    # ending in 'remote_interface' is mapped onto the local
    # remote_interface module so old pickles keep loading.
    if mod_name.endswith('remote_interface'): # catch all old module names
        mod = remote_interface
        return getattr(mod, kls_name)
    else:
        mod = __import__(mod_name)
        return getattr(mod, kls_name)
class Messenger(object):
    """
    Communication between two Python processes connected by a channel
    wrapped in IOWrapper objects.  Messages are pickled, so any picklable
    object can be exchanged.  NOTE(review): Python 2 code (cPickle,
    cStringIO, `except Exception, e` syntax).
    """
    def __init__(self, stdin, stdout):
        """
        :param stdin: Object for reading data from the communication interface.
        :type stdin: IOWrapper
        :param stdout: Object for writing data to the communication interface.
        :type stdout: IOWrapper
        """
        self.stdin = stdin
        self.stdout = stdout
        # Unfortunately only static length of data length is supported.
        # Length of the encoded, fixed-width (10 char) length prefix.
        self.enc_len_length = len(stdout.encode("0" * 10))
    def close(self):
        self.stdin.close()
        self.stdout.close()
    def format_msg(self, data):
        """
        Format a message: the first 10 characters carry the length of the
        (encoded) pickled message, the rest is the message itself.
        """
        pdata = cPickle.dumps(data, cPickle.HIGHEST_PROTOCOL)
        pdata = self.stdout.encode(pdata)
        len_enc = self.stdout.encode("%10d" % len(pdata))
        return "%s%s" % (len_enc, pdata)
    def flush_stdin(self):
        """
        Flush all pending input data from the communication interface.
        """
        const = 16384
        r, _, _ = select.select([self.stdin.fileno()], [], [], 1)
        while r:
            if len(self.stdin.read(const)) < const:
                break
            r, _, _ = select.select([self.stdin.fileno()], [], [], 1)
    def write_msg(self, data):
        """
        Write a formatted message to the communication interface.
        """
        self.stdout.write(self.format_msg(data))
    def _read_until_len(self, timeout=None):
        """
        Deal with terminal interfaces... Read input one byte at a time
        until the fixed-width length prefix has been collected.
        :param timeout: timeout of reading.
        :return: decoded length prefix, "" if the peer closed, or None on
                 timeout / incomplete prefix.
        """
        data = ""
        endtime = None
        if timeout is not None:
            endtime = time.time() + timeout
        while (len(data) < self.enc_len_length and
               (endtime is None or time.time() < endtime)):
            d = self.stdin.read(1, timeout)
            if d is None:
                return None
            if len(d) == 0:
                return d
            data += d
        if len(data) < self.enc_len_length:
            return None
        return self.stdout.decode(data)
    def read_msg(self, timeout=None):
        """
        Read data from the communication interface.
        :param timeout: timeout for reading data.
        :type timeout: float
        :return: (True, data) when reading is successful.
                 (False, None) when other side is closed.
                 (None, None) when reading is timeouted.
        """
        data = self._read_until_len(timeout)
        if data is None:
            return (None, None)
        if len(data) == 0:
            return (False, None)
        rdata = None
        try:
            cmd_len = int(data)
            rdata = ""
            rdata_len = 0
            # Accumulate until the announced payload length is reached.
            while (rdata_len < cmd_len):
                rdata += self.stdin.read(cmd_len - rdata_len)
                rdata_len = len(rdata)
            rdataIO = cStringIO.StringIO(self.stdin.decode(rdata))
            unp = cPickle.Unpickler(rdataIO)
            # Remap legacy module paths during unpickling.
            unp.find_global = _map_path
            data = unp.load()
        except Exception, e:
            # On any decode failure: log, tell the peer, drain the channel,
            # then re-raise for the caller.
            logging.error("ERROR data:%s rdata:%s" % (data, rdata))
            try:
                self.write_msg(remote_interface.MessengerError("Communication "
                               "failed.%s" % (e)))
            except OSError:
                pass
            self.flush_stdin()
            raise
        # Debugging commands.
        # if (isinstance(data, remote_interface.BaseCmd)):
        #    print data.func
        return (True, data)
| gpl-2.0 |
kevalds51/sympy | sympy/utilities/misc.py | 42 | 6363 | """Miscellaneous stuff that doesn't really fit anywhere else."""
from __future__ import print_function, division
import sys
import os
from textwrap import fill, dedent
from sympy.core.compatibility import get_function_name, range
def filldedent(s, w=70):
    """
    Return ``str(s)`` with surrounding blank lines removed, common leading
    whitespace dedented, and the text re-wrapped to width ``w``, preceded
    by a single newline.

    Stripping the outer blank lines makes this convenient for docstrings
    that begin with a newline right after the opening triple quote.
    """
    text = dedent(str(s))
    text = text.strip('\n')
    return '\n' + fill(text, width=w)
def rawlines(s):
    """Return a cut-and-pastable string that, when printed, is equivalent
    to the input. The string returned is formatted so it can be indented
    nicely within tests; in some cases it is wrapped in the dedent
    function which has to be imported from textwrap.

    Examples
    ========

    Note: because there are characters in the examples below that need
    to be escaped because they are themselves within a triple quoted
    docstring, expressions below look more complicated than they would
    be if they were printed in an interpreter window.

    >>> from sympy.utilities.misc import rawlines
    >>> from sympy import TableForm
    >>> s = str(TableForm([[1, 10]], headings=(None, ['a', 'bee'])))
    >>> print(rawlines(s)) # the \\ appears as \ when printed
    (
        'a bee\\n'
        '-----\\n'
        '1 10 '
    )
    >>> print(rawlines('''this
    ... that'''))
    dedent('''\\
        this
        that''')

    >>> print(rawlines('''this
    ... that
    ... '''))
    dedent('''\\
        this
        that
        ''')

    >>> s = \"\"\"this
    ... is a triple '''
    ... \"\"\"
    >>> print(rawlines(s))
    dedent(\"\"\"\\
        this
        is a triple '''
        \"\"\")

    >>> print(rawlines('''this
    ... that
    ...     '''))
    (
        'this\\n'
        'that\\n'
        '    '
    )
    """
    lines = s.split('\n')
    # Single-line input needs no wrapping at all -- just a repr.
    if len(lines) == 1:
        return repr(lines[0])
    # Which triple-quote styles already appear in the text; if both do, a
    # triple-quoted literal cannot represent the string safely.
    triple = ["'''" in s, '"""' in s]
    # Trailing spaces, backslashes, or both triple-quote styles force the
    # explicit per-line repr form; otherwise a dedent(...) literal is used.
    if any(li.endswith(' ') for li in lines) or '\\' in s or all(triple):
        rv = ["("]
        # add on the newlines
        trailing = s.endswith('\n')
        last = len(lines) - 1
        for i, li in enumerate(lines):
            if i != last or trailing:
                # Re-open the repr to append an explicit \n escape:
                # drop the closing quote, add "\n'" back on.
                rv.append(repr(li)[:-1] + '\\n\'')
            else:
                rv.append(repr(li))
        return '\n    '.join(rv) + '\n)'
    else:
        rv = '\n    '.join(lines)
        # Pick whichever triple-quote style does not occur in the text.
        if triple[0]:
            return 'dedent("""\\\n    %s""")' % rv
        else:
            return "dedent('''\\\n    %s''')" % rv
# Pointer-width detection: Python 2 exposes sys.maxint, Python 3 only
# sys.maxsize; a value above 2**32 indicates a 64-bit interpreter.
size = getattr(sys, "maxint", None)
if size is None:  # Python 3 doesn't have maxint
    size = sys.maxsize
if size > 2**32:
    ARCH = "64-bit"
else:
    ARCH = "32-bit"

# XXX: PyPy doesn't support hash randomization
HASH_RANDOMIZATION = getattr(sys.flags, 'hash_randomization', False)

# Shared mutable state used by debug_decorator's execution-tree printer:
# _debug_tmp accumulates rendered subtrees, _debug_iter tracks call depth.
_debug_tmp = []
_debug_iter = 0
def debug_decorator(func):
    """If SYMPY_DEBUG is True, it will print a nice execution tree with
    arguments and results of all decorated functions, else do nothing.
    """
    from sympy import SYMPY_DEBUG

    # When debugging is off, return the function untouched (zero overhead).
    if not SYMPY_DEBUG:
        return func

    def maketree(f, *args, **kw):
        # Uses the module-level _debug_tmp/_debug_iter pair to build a tree
        # of "name(args) = result" strings across nested decorated calls.
        global _debug_tmp
        global _debug_iter
        oldtmp = _debug_tmp
        _debug_tmp = []
        _debug_iter += 1

        def tree(subtrees):
            # Render a list of child summaries with +- / | box-drawing.
            def indent(s, type=1):
                x = s.split("\n")
                r = "+-%s\n" % x[0]
                for a in x[1:]:
                    if a == "":
                        continue
                    if type == 1:
                        r += "| %s\n" % a
                    else:
                        r += "  %s\n" % a
                return r
            if len(subtrees) == 0:
                return ""
            f = []
            for a in subtrees[:-1]:
                f.append(indent(a))
            # The last child uses plain indentation (no trailing "|" rail).
            f.append(indent(subtrees[-1], 2))
            return ''.join(f)

        # If there is a bug and the algorithm enters an infinite loop, enable the
        # following lines. It will print the names and parameters of all major functions
        # that are called, *before* they are called
        #from sympy.core.compatibility import reduce
        #print("%s%s %s%s" % (_debug_iter, reduce(lambda x, y: x + y, \
        #    map(lambda x: '-', range(1, 2 + _debug_iter))), get_function_name(f), args))

        r = f(*args, **kw)

        _debug_iter -= 1
        s = "%s%s = %s\n" % (get_function_name(f), args, r)
        if _debug_tmp != []:
            s += tree(_debug_tmp)
        # Restore the parent's accumulator and append this call's summary.
        _debug_tmp = oldtmp
        _debug_tmp.append(s)
        # Back at the outermost call: print the finished tree and reset.
        if _debug_iter == 0:
            print((_debug_tmp[0]))
            _debug_tmp = []
        return r

    def decorated(*args, **kwargs):
        return maketree(func, *args, **kwargs)

    return decorated
def debug(*args):
    """
    Print ``*args`` to stderr when SYMPY_DEBUG is enabled; otherwise do
    nothing.
    """
    from sympy import SYMPY_DEBUG
    if not SYMPY_DEBUG:
        return
    print(*args, file=sys.stderr)
def find_executable(executable, path=None):
    """Search the directories listed in 'path' for 'executable'.

    'path' is a string of directories separated by os.pathsep and defaults
    to os.environ['PATH']. On OS/2 a missing extension becomes ".exe"; on
    win32 every PATHEXT extension is tried when the given one is not
    already executable. Returns the complete filename or None if not
    found.
    """
    if path is None:
        path = os.environ['PATH']
    search_dirs = path.split(os.pathsep)

    candidate_exts = ['']
    if os.name == 'os2':
        base, ext = os.path.splitext(executable)
        # executable files on OS/2 can have an arbitrary extension, but
        # .exe is automatically appended if no dot is present in the name
        if not ext:
            executable = executable + ".exe"
    elif sys.platform == 'win32':
        pathext = os.environ['PATHEXT'].lower().split(os.pathsep)
        base, ext = os.path.splitext(executable)
        if ext.lower() not in pathext:
            candidate_exts = pathext

    for ext in candidate_exts:
        candidate = executable + ext
        # A direct (absolute or cwd-relative) hit wins over PATH entries.
        if os.path.isfile(candidate):
            return candidate
        for directory in search_dirs:
            full_path = os.path.join(directory, candidate)
            if os.path.isfile(full_path):
                return full_path
    return None
| bsd-3-clause |
startcode/apollo | modules/tools/mapshow/subplot_traj_acc.py | 2 | 3050 | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import matplotlib.pyplot as plt
from matplotlib import cm as cmx
from matplotlib import colors as mcolors
class TrajAccSubplot:
    """Matplotlib subplot drawing planned-trajectory acceleration (m/s^2)
    against time for the most recent planning cycles.

    A fixed-size pool of line artists is pre-allocated; each planning
    cycle's acceleration profile is drawn in its own color, with the
    newest data written into the tail of the pool.
    """

    def __init__(self, ax):
        # ax: the matplotlib axes this subplot draws on.
        self.ax = ax
        self.acc_lines = []
        # Maximum number of acceleration curves kept on screen at once.
        self.acc_lines_size = 30
        self.colors = []
        self.init_colors()
        #self.colors = ['b','r', 'y', 'k']
        # Pre-create all line artists once; show() only updates their data.
        for i in range(self.acc_lines_size):
            line, = ax.plot(
                [0], [0],
                c=self.colors[i % len(self.colors)],
                ls="-",
                marker='',
                lw=3,
                alpha=0.8)
            self.acc_lines.append(line)

        ax.set_xlabel("t (second)")
        #ax.set_xlim([-2, 10])
        ax.set_ylim([-6, 6])
        self.ax.autoscale_view()
        #self.ax.relim()
        ax.set_ylabel("acc (m/s^2)")
        ax.set_title("PLANNING ACC")
        self.set_visible(False)

    def init_colors(self):
        """Build one distinct RGBA color per line from the 'brg' colormap."""
        self.colors = []
        values = range(self.acc_lines_size)
        jet = plt.get_cmap('brg')
        color_norm = mcolors.Normalize(vmin=0, vmax=values[-1])
        scalar_map = cmx.ScalarMappable(norm=color_norm, cmap=jet)
        for val in values:
            color_val = scalar_map.to_rgba(val)
            self.colors.append(color_val)

    def set_visible(self, visible):
        """Show or hide every acceleration line at once."""
        for line in self.acc_lines:
            line.set_visible(visible)

    def show(self, planning):
        """Redraw acceleration history from a planning data source.

        planning must provide traj_acc_t_history / traj_acc_a_history
        lists plus a traj_data_lock guarding them.
        """
        planning.traj_data_lock.acquire()
        # NOTE(review): the loop bound uses traj_speed_t_history while the
        # data read is traj_acc_* -- presumably the histories are kept the
        # same length; verify against the planning data provider.
        for i in range(len(planning.traj_speed_t_history)):
            if i >= self.acc_lines_size:
                print "WARNING: number of path lines is more than " \
                      + str(self.acc_lines_size)
                continue
            # Newest cycle is written to the last artists in the pool.
            speed_line = self.acc_lines[self.acc_lines_size-i-1]
            speed_line.set_xdata(planning.traj_acc_t_history[i])
            speed_line.set_ydata(planning.traj_acc_a_history[i])
            #speed_line.set_xdata([1,2,3,4])
            #speed_line.set_ydata([1,2,3,4])
            #speed_line.set_label(name[0:5])
            speed_line.set_visible(True)
        #self.ax.legend(loc="upper left", borderaxespad=0., ncol=5)
        #self.ax.axis('equal')
        planning.traj_data_lock.release()
        self.ax.autoscale_view()
        self.ax.relim()
clayz/crazy-quiz-web | lib/oauthlib/oauth2/rfc6749/grant_types/authorization_code.py | 35 | 19927 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import unicode_literals, absolute_import
import json
import logging
from oauthlib import common
from oauthlib.uri_validate import is_absolute_uri
from .base import GrantTypeBase
from .. import errors
from ..request_validator import RequestValidator
log = logging.getLogger(__name__)
class AuthorizationCodeGrant(GrantTypeBase):
    """`Authorization Code Grant`_

    The authorization code grant type is used to obtain both access
    tokens and refresh tokens and is optimized for confidential clients.
    Since this is a redirection-based flow, the client must be capable of
    interacting with the resource owner's user-agent (typically a web
    browser) and capable of receiving incoming requests (via redirection)
    from the authorization server.

    The flow has five steps:

    (A) The client directs the resource owner's user-agent to the
        authorization endpoint with its client identifier, requested
        scope, local state and a redirection URI.
    (B) The authorization server authenticates the resource owner (via
        the user-agent) and establishes whether access is granted or
        denied.
    (C) On approval, the server redirects the user-agent back to the
        client's redirection URI, including an authorization code and
        any state provided earlier.
    (D) The client exchanges the authorization code (together with the
        same redirection URI) for tokens at the token endpoint,
        authenticating itself in the process.
    (E) The server authenticates the client, validates the code and the
        redirection URI, and responds with an access token and,
        optionally, a refresh token.

    .. _`Authorization Code Grant`: http://tools.ietf.org/html/rfc6749#section-4.1
    """

    def __init__(self, request_validator=None):
        # Fall back to the (abstract, always-denying) base validator when
        # none is supplied.
        self.request_validator = request_validator or RequestValidator()

    def create_authorization_code(self, request):
        """Generates an authorization grant represented as a dictionary.

        The grant always carries a freshly generated ``code`` and echoes
        back the client's ``state`` when one was supplied.
        """
        grant = {'code': common.generate_token()}
        if hasattr(request, 'state') and request.state:
            grant['state'] = request.state
        log.debug('Created authorization code grant %r for request %r.',
                  grant, request)
        return grant

    def create_authorization_response(self, request, token_handler):
        """Validate the authorization request and redirect with a code.

        The client constructs the request URI by adding the following
        parameters to the query component of the authorization endpoint
        URI using the "application/x-www-form-urlencoded" format, per
        `Appendix B`_: ``response_type`` (REQUIRED, must be "code"),
        ``client_id`` (REQUIRED, `Section 2.2`_), ``redirect_uri``
        (OPTIONAL, `Section 3.1.2`_), ``scope`` (OPTIONAL,
        `Section 3.3`_) and ``state`` (RECOMMENDED for CSRF protection,
        `Section 10.12`_).

        :param request: oauthlib.common.Request
        :param token_handler: A token handler instance, for example of type
                              oauthlib.oauth2.BearerToken.
        :returns: headers, body, status
        :raises: FatalClientError on invalid redirect URI or client id.
                 ValueError if scopes are not set on the request object.

        .. _`Appendix B`: http://tools.ietf.org/html/rfc6749#appendix-B
        .. _`Section 2.2`: http://tools.ietf.org/html/rfc6749#section-2.2
        .. _`Section 3.1.2`: http://tools.ietf.org/html/rfc6749#section-3.1.2
        .. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3
        .. _`Section 10.12`: http://tools.ietf.org/html/rfc6749#section-10.12
        """
        try:
            # request.scopes is only mandated in post auth and both pre and
            # post auth use validate_authorization_request
            if not request.scopes:
                raise ValueError('Scopes must be set on post auth.')

            self.validate_authorization_request(request)
            log.debug('Pre resource owner authorization validation ok for %r.',
                      request)

        # If the request fails due to a missing, invalid, or mismatching
        # redirection URI, or if the client identifier is missing or invalid,
        # the authorization server SHOULD inform the resource owner of the
        # error and MUST NOT automatically redirect the user-agent to the
        # invalid redirection URI.
        except errors.FatalClientError as e:
            log.debug('Fatal client error during validation of %r. %r.',
                      request, e)
            raise

        # If the resource owner denies the access request or if the request
        # fails for reasons other than a missing or invalid redirection URI,
        # the authorization server informs the client by adding the following
        # parameters to the query component of the redirection URI using the
        # "application/x-www-form-urlencoded" format, per Appendix B:
        # http://tools.ietf.org/html/rfc6749#appendix-B
        except errors.OAuth2Error as e:
            log.debug('Client error during validation of %r. %r.', request, e)
            request.redirect_uri = request.redirect_uri or self.error_uri
            return {'Location': common.add_params_to_uri(request.redirect_uri, e.twotuples)}, None, 302

        grant = self.create_authorization_code(request)
        log.debug('Saving grant %r for %r.', grant, request)
        self.request_validator.save_authorization_code(
            request.client_id, grant, request)
        return {'Location': common.add_params_to_uri(request.redirect_uri, grant.items())}, None, 302

    def create_token_response(self, request, token_handler):
        """Validate the authorization code and issue tokens.

        The client MUST NOT use the authorization code more than once. If an
        authorization code is used more than once, the authorization server
        MUST deny the request and SHOULD revoke (when possible) all tokens
        previously issued based on that authorization code. The authorization
        code is bound to the client identifier and redirection URI.
        """
        # Token responses must never be cached by intermediaries.
        headers = {
            'Content-Type': 'application/json',
            'Cache-Control': 'no-store',
            'Pragma': 'no-cache',
        }
        try:
            self.validate_token_request(request)
            log.debug('Token request validation ok for %r.', request)
        except errors.OAuth2Error as e:
            log.debug('Client error during validation of %r. %r.', request, e)
            return headers, e.json, e.status_code

        token = token_handler.create_token(request, refresh_token=True)
        # Single-use guarantee: the code is invalidated as soon as a token
        # has been issued for it.
        self.request_validator.invalidate_authorization_code(
            request.client_id, request.code, request)
        return headers, json.dumps(token), 200

    def validate_authorization_request(self, request):
        """Check the authorization request for normal and fatal errors.

        A normal error could be a missing response_type parameter or the client
        attempting to access scope it is not allowed to ask authorization for.
        Normal errors can safely be included in the redirection URI and
        sent back to the client.

        Fatal errors occur when the client_id or redirect_uri is invalid or
        missing. These must be caught by the provider and handled, how this
        is done is outside of the scope of OAuthLib but showing an error
        page describing the issue is a good idea.
        """

        # First check for fatal errors

        # If the request fails due to a missing, invalid, or mismatching
        # redirection URI, or if the client identifier is missing or invalid,
        # the authorization server SHOULD inform the resource owner of the
        # error and MUST NOT automatically redirect the user-agent to the
        # invalid redirection URI.

        # REQUIRED. The client identifier as described in Section 2.2.
        # http://tools.ietf.org/html/rfc6749#section-2.2
        if not request.client_id:
            raise errors.MissingClientIdError(request=request)

        if not self.request_validator.validate_client_id(request.client_id, request):
            raise errors.InvalidClientIdError(request=request)

        # OPTIONAL. As described in Section 3.1.2.
        # http://tools.ietf.org/html/rfc6749#section-3.1.2
        log.debug('Validating redirection uri %s for client %s.',
                  request.redirect_uri, request.client_id)
        if request.redirect_uri is not None:
            request.using_default_redirect_uri = False
            log.debug('Using provided redirect_uri %s', request.redirect_uri)
            if not is_absolute_uri(request.redirect_uri):
                raise errors.InvalidRedirectURIError(request=request)

            if not self.request_validator.validate_redirect_uri(
                    request.client_id, request.redirect_uri, request):
                raise errors.MismatchingRedirectURIError(request=request)
        else:
            # No redirect_uri supplied: fall back to the one registered for
            # the client; its absence is fatal.
            request.redirect_uri = self.request_validator.get_default_redirect_uri(
                request.client_id, request)
            request.using_default_redirect_uri = True
            log.debug('Using default redirect_uri %s.', request.redirect_uri)
            if not request.redirect_uri:
                raise errors.MissingRedirectURIError(request=request)

        # Then check for normal errors.

        # If the resource owner denies the access request or if the request
        # fails for reasons other than a missing or invalid redirection URI,
        # the authorization server informs the client by adding the following
        # parameters to the query component of the redirection URI using the
        # "application/x-www-form-urlencoded" format, per Appendix B.
        # http://tools.ietf.org/html/rfc6749#appendix-B

        # Note that the correct parameters to be added are automatically
        # populated through the use of specific exceptions.
        if request.response_type is None:
            raise errors.InvalidRequestError(description='Missing response_type parameter.', request=request)

        for param in ('client_id', 'response_type', 'redirect_uri', 'scope', 'state'):
            if param in request.duplicate_params:
                raise errors.InvalidRequestError(description='Duplicate %s parameter.' % param, request=request)

        if not self.request_validator.validate_response_type(request.client_id,
                request.response_type, request.client, request):
            log.debug('Client %s is not authorized to use response_type %s.',
                      request.client_id, request.response_type)
            raise errors.UnauthorizedClientError(request=request)

        # REQUIRED. Value MUST be set to "code".
        if request.response_type != 'code':
            raise errors.UnsupportedResponseTypeError(request=request)

        # OPTIONAL. The scope of the access request as described by Section 3.3
        # http://tools.ietf.org/html/rfc6749#section-3.3
        self.validate_scopes(request)

        return request.scopes, {
            'client_id': request.client_id,
            'redirect_uri': request.redirect_uri,
            'response_type': request.response_type,
            'state': request.state,
            'request': request,
        }

    def validate_token_request(self, request):
        """Validate an authorization-code token request per RFC 6749 4.1.3.

        Checks grant type, presence and uniqueness of parameters, client
        authentication, the code itself and the redirect_uri binding.
        """
        # REQUIRED. Value MUST be set to "authorization_code".
        if request.grant_type != 'authorization_code':
            raise errors.UnsupportedGrantTypeError(request=request)

        if request.code is None:
            raise errors.InvalidRequestError(
                description='Missing code parameter.', request=request)

        for param in ('client_id', 'grant_type', 'redirect_uri'):
            if param in request.duplicate_params:
                raise errors.InvalidRequestError(description='Duplicate %s parameter.' % param,
                                                 request=request)

        if self.request_validator.client_authentication_required(request):
            # If the client type is confidential or the client was issued client
            # credentials (or assigned other authentication requirements), the
            # client MUST authenticate with the authorization server as described
            # in Section 3.2.1.
            # http://tools.ietf.org/html/rfc6749#section-3.2.1
            if not self.request_validator.authenticate_client(request):
                log.debug('Client authentication failed, %r.', request)
                raise errors.InvalidClientError(request=request)
        elif not self.request_validator.authenticate_client_id(request.client_id, request):
            # REQUIRED, if the client is not authenticating with the
            # authorization server as described in Section 3.2.1.
            # http://tools.ietf.org/html/rfc6749#section-3.2.1
            log.debug('Client authentication failed, %r.', request)
            raise errors.InvalidClientError(request=request)

        if not hasattr(request.client, 'client_id'):
            raise NotImplementedError('Authenticate client must set the '
                                      'request.client.client_id attribute '
                                      'in authenticate_client.')

        # Ensure client is authorized use of this grant type
        self.validate_grant_type(request)

        # REQUIRED. The authorization code received from the
        # authorization server.
        if not self.request_validator.validate_code(request.client_id,
                request.code, request.client, request):
            log.debug('Client, %r (%r), is not allowed access to scopes %r.',
                      request.client_id, request.client, request.scopes)
            raise errors.InvalidGrantError(request=request)

        # validate_code is expected to populate these; a missing one is
        # logged but tolerated.
        for attr in ('user', 'state', 'scopes'):
            if getattr(request, attr) is None:
                log.debug('request.%s was not set on code validation.', attr)

        # REQUIRED, if the "redirect_uri" parameter was included in the
        # authorization request as described in Section 4.1.1, and their
        # values MUST be identical.
        if not self.request_validator.confirm_redirect_uri(request.client_id, request.code,
                                                           request.redirect_uri, request.client):
            log.debug('Redirect_uri (%r) invalid for client %r (%r).',
                      request.redirect_uri, request.client_id, request.client)
            raise errors.AccessDeniedError(request=request)
| apache-2.0 |
coronary/RandomEpisode | depends/Lib/site-packages/pip/_vendor/requests/api.py | 362 | 5794 | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
    """Construct and send a :class:`Request <Request>`.

    :param method: HTTP method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param kwargs: optional arguments forwarded to
        :meth:`Session.request <requests.sessions.Session.request>`,
        including ``params``, ``data``, ``json``, ``headers``, ``cookies``,
        ``files``, ``auth``, ``timeout``, ``allow_redirects``, ``proxies``,
        ``verify``, ``stream`` and ``cert``.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'http://httpbin.org/get')
      <Response [200]>
    """
    # A throwaway session is created per call and closed unconditionally so
    # the underlying sockets are released immediately; leaving them open
    # can trigger a ResourceWarning in some cases, and look like a memory
    # leak in others.
    session = sessions.Session()
    try:
        return session.request(method=method, url=url, **kwargs)
    finally:
        session.close()
def get(url, params=None, **kwargs):
    """Send a GET request.

    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes sent in the query string.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # GET follows redirects unless the caller explicitly opted out.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('get', url, params=params, **kwargs)
def options(url, **kwargs):
    """Send an OPTIONS request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # OPTIONS follows redirects unless the caller explicitly opted out.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('options', url, **kwargs)
def head(url, **kwargs):
    """Send a HEAD request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Unlike GET, HEAD does not follow redirects by default.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = False
    return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
    """Sends a POST request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param json: (optional) json-serializable Python object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
    """Sends a PUT request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes (e.g. ``json``).
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
    """Sends a PATCH request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes (e.g. ``json``).
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
    """Sends a DELETE request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('delete', url, **kwargs)
| mit |
yunit-io/yunit | tests/autopilot/unity8/application_lifecycle/tests/__init__.py | 2 | 1853 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
#
# Unity Autopilot Test Suite
# Copyright (C) 2014, 2015 Canonical
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from autopilot.matchers import Eventually
from testtools.matchers import Equals
from ubuntuuitoolkit import fixture_setup
from unity8 import process_helpers
from unity8.shell import tests
class ApplicationLifeCycleTestCase(tests.UnityTestCase):
    """Base test case for application life-cycle tests.

    Launches and unlocks Unity in ``setUp`` and provides helpers to
    register a fake single-instance application and to assert which
    application currently has focus.
    """

    def setUp(self):
        super().setUp()
        self._qml_mock_enabled = False
        self._data_dirs_mock_enabled = False
        self.launch_unity()
        process_helpers.unlock_unity()

    def create_test_application(self):
        """Register a fake single-instance application fixture.

        :return: tuple of (qml file path, desktop file path) for the
            registered fake application.
        """
        # Copy before updating: DEFAULT_DESKTOP_FILE_DICT is a shared
        # module-level constant, and mutating it in place would leak the
        # 'X-Ubuntu-Single-Instance' key into every other user of it.
        desktop_file_dict = dict(fixture_setup.DEFAULT_DESKTOP_FILE_DICT)
        desktop_file_dict.update({'X-Ubuntu-Single-Instance': 'true'})
        fake_application = fixture_setup.FakeApplication(
            desktop_file_dict=desktop_file_dict)
        self.useFixture(fake_application)
        return (
            fake_application.qml_file_path, fake_application.desktop_file_path)

    def assert_current_focused_application(self, application_name):
        """Assert that *application_name* eventually becomes the focused app."""
        self.assertThat(
            self.main_window.get_current_focused_app_id,
            Eventually(Equals(application_name)))
Wuguanping/Server_Manage_Plugin | Openstack_Plugin/ironic-plugin-pike/ironic/drivers/modules/ilo/management.py | 3 | 17982 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
iLO Management Interface
"""
from ironic_lib import metrics_utils
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
import six
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.conductor import task_manager
from ironic.conf import CONF
from ironic.drivers import base
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.drivers.modules.ilo import firmware_processor
from ironic.drivers.modules import ipmitool
LOG = logging.getLogger(__name__)
METRICS = metrics_utils.get_metrics_logger(__name__)
# proliantutils is an optional dependency; ilo_error is None when the
# package is not installed.
ilo_error = importutils.try_import('proliantutils.exception')
# Maps ironic's generic boot device names to the iLO vocabulary.
BOOT_DEVICE_MAPPING_TO_ILO = {
    boot_devices.PXE: 'NETWORK',
    boot_devices.DISK: 'HDD',
    boot_devices.CDROM: 'CDROM'
}
# Reverse map: iLO boot device names back to the generic ironic ones.
BOOT_DEVICE_ILO_TO_GENERIC = {
    v: k for k, v in BOOT_DEVICE_MAPPING_TO_ILO.items()}
# Properties advertised by the management interface: required iLO driver
# info plus the optional cleaning-related properties.
MANAGEMENT_PROPERTIES = ilo_common.REQUIRED_PROPERTIES.copy()
MANAGEMENT_PROPERTIES.update(ilo_common.CLEAN_PROPERTIES)
def _execute_ilo_clean_step(node, step, *args, **kwargs):
    """Run a single named clean step against a node's iLO.

    :param node: an Ironic node object.
    :param step: name of the proliantutils client method implementing
        the clean step.
    :param args: positional arguments forwarded to the step method.
    :param kwargs: keyword arguments forwarded to the step method.
    :raises: NodeCleaningFailure if the step is unknown to proliantutils
        or the iLO reports an error while executing it.
    """
    client = ilo_common.get_ilo_object(node)
    try:
        step_method = getattr(client, step)
    except AttributeError:
        # The installed proliantutils package does not know this step,
        # so it is too old to support it.
        raise exception.NodeCleaningFailure(
            _("Clean step '%s' not found. 'proliantutils' package needs to be "
              "updated.") % step)
    try:
        step_method(*args, **kwargs)
    except ilo_error.IloCommandNotSupportedError:
        # The step is unsupported on this hardware generation (Gen8 and
        # below); warn and treat it as a no-op so cleaning can continue.
        LOG.warning("'%(step)s' clean step is not supported on node "
                    "%(uuid)s. Skipping the clean step.",
                    {'step': step, 'uuid': node.uuid})
    except ilo_error.IloError as ilo_exception:
        raise exception.NodeCleaningFailure(_(
            "Clean step %(step)s failed "
            "on node %(node)s with error: %(err)s") %
            {'node': node.uuid, 'step': step, 'err': ilo_exception})
class IloManagement(base.ManagementInterface):
    """Management interface for HP ProLiant servers using iLO.

    Implements boot device control, sensor data retrieval (delegated to
    IPMI) and a set of out-of-band clean steps (iLO/BIOS/secure-boot
    resets, license activation, firmware update) via the proliantutils
    iLO client.
    """
    def get_properties(self):
        """Return the properties of the interface.

        :returns: dictionary of <property name>:<property description>
            entries.
        """
        return MANAGEMENT_PROPERTIES
    @METRICS.timer('IloManagement.validate')
    def validate(self, task):
        """Check that 'driver_info' contains required ILO credentials.
        Validates whether the 'driver_info' property of the supplied
        task's node contains the required credentials information.
        :param task: a task from TaskManager.
        :raises: InvalidParameterValue if required iLO parameters
            are not valid.
        :raises: MissingParameterValue if a required parameter is missing.
        """
        ilo_common.parse_driver_info(task.node)
    @METRICS.timer('IloManagement.get_supported_boot_devices')
    def get_supported_boot_devices(self, task):
        """Get a list of the supported boot devices.
        :param task: a task from TaskManager.
        :returns: A list with the supported boot devices defined
                  in :mod:`ironic.common.boot_devices`.
        """
        return list(BOOT_DEVICE_MAPPING_TO_ILO.keys())
    @METRICS.timer('IloManagement.get_boot_device')
    def get_boot_device(self, task):
        """Get the current boot device for a node.
        Returns the current boot device of the node.
        :param task: a task from TaskManager.
        :raises: MissingParameterValue if a required iLO parameter is missing.
        :raises: IloOperationError on an error from IloClient library.
        :returns: a dictionary containing:
            :boot_device:
                the boot device, one of the supported devices listed in
                :mod:`ironic.common.boot_devices` or None if it is unknown.
            :persistent:
                Whether the boot device will persist to all future boots or
                not, None if it is unknown.
        """
        ilo_object = ilo_common.get_ilo_object(task.node)
        persistent = False
        try:
            # Return one time boot device if set, else return
            # the persistent boot device
            next_boot = ilo_object.get_one_time_boot()
            if next_boot == 'Normal':
                # One time boot is not set. Check for persistent boot.
                persistent = True
                next_boot = ilo_object.get_persistent_boot_device()
        except ilo_error.IloError as ilo_exception:
            operation = _("Get boot device")
            raise exception.IloOperationError(operation=operation,
                                              error=ilo_exception)
        boot_device = BOOT_DEVICE_ILO_TO_GENERIC.get(next_boot, None)
        # An iLO device name we cannot map implies we also cannot tell
        # whether the setting is persistent.
        if boot_device is None:
            persistent = None
        return {'boot_device': boot_device, 'persistent': persistent}
    @METRICS.timer('IloManagement.set_boot_device')
    @task_manager.require_exclusive_lock
    def set_boot_device(self, task, device, persistent=False):
        """Set the boot device for a node.
        Set the boot device to use on next reboot of the node.
        :param task: a task from TaskManager.
        :param device: the boot device, one of the supported devices
            listed in :mod:`ironic.common.boot_devices`.
        :param persistent: Boolean value. True if the boot device will
            persist to all future boots, False if not.
            Default: False.
        :raises: InvalidParameterValue if an invalid boot device is
            specified.
        :raises: MissingParameterValue if a required parameter is missing.
        :raises: IloOperationError on an error from IloClient library.
        """
        try:
            boot_device = BOOT_DEVICE_MAPPING_TO_ILO[device]
        except KeyError:
            raise exception.InvalidParameterValue(_(
                "Invalid boot device %s specified.") % device)
        try:
            ilo_object = ilo_common.get_ilo_object(task.node)
            if not persistent:
                ilo_object.set_one_time_boot(boot_device)
            else:
                ilo_object.update_persistent_boot([boot_device])
        except ilo_error.IloError as ilo_exception:
            operation = _("Setting %s as boot device") % device
            raise exception.IloOperationError(operation=operation,
                                              error=ilo_exception)
        LOG.debug("Node %(uuid)s set to boot from %(device)s.",
                  {'uuid': task.node.uuid, 'device': device})
    @METRICS.timer('IloManagement.get_sensors_data')
    def get_sensors_data(self, task):
        """Get sensors data.
        Sensor data is collected over IPMI rather than the iLO interface;
        the node's IPMI credentials are refreshed from the iLO driver info
        first.
        :param task: a TaskManager instance.
        :raises: FailedToGetSensorData when getting the sensor data fails.
        :raises: FailedToParseSensorData when parsing sensor data fails.
        :raises: InvalidParameterValue if required ipmi parameters
            are missing.
        :raises: MissingParameterValue if a required parameter is missing.
        :returns: returns a dict of sensor data group by sensor type.
        """
        ilo_common.update_ipmi_properties(task)
        ipmi_management = ipmitool.IPMIManagement()
        return ipmi_management.get_sensors_data(task)
    @METRICS.timer('IloManagement.reset_ilo')
    @base.clean_step(priority=CONF.ilo.clean_priority_reset_ilo)
    def reset_ilo(self, task):
        """Resets the iLO.
        :param task: a task from TaskManager.
        :raises: NodeCleaningFailure, on failure to execute step.
        """
        return _execute_ilo_clean_step(task.node, 'reset_ilo')
    @METRICS.timer('IloManagement.reset_ilo_credential')
    @base.clean_step(priority=CONF.ilo.clean_priority_reset_ilo_credential)
    def reset_ilo_credential(self, task):
        """Resets the iLO password.
        The new password is taken from the 'ilo_change_password' entry in
        driver_info; on success it replaces 'ilo_password' so subsequent
        iLO operations use the new credential.
        :param task: a task from TaskManager.
        :raises: NodeCleaningFailure, on failure to execute step.
        """
        info = task.node.driver_info
        # pop() also removes the one-shot 'ilo_change_password' entry so it
        # is not persisted back to the node.
        password = info.pop('ilo_change_password', None)
        if not password:
            LOG.info("Missing 'ilo_change_password' parameter in "
                     "driver_info. Clean step 'reset_ilo_credential' is "
                     "not performed on node %s.", task.node.uuid)
            return
        _execute_ilo_clean_step(task.node, 'reset_ilo_credential', password)
        info['ilo_password'] = password
        task.node.driver_info = info
        task.node.save()
    @METRICS.timer('IloManagement.reset_bios_to_default')
    @base.clean_step(priority=CONF.ilo.clean_priority_reset_bios_to_default)
    def reset_bios_to_default(self, task):
        """Resets the BIOS settings to default values.
        Resets BIOS to default settings. This operation is currently supported
        only on HP Proliant Gen9 and above servers.
        :param task: a task from TaskManager.
        :raises: NodeCleaningFailure, on failure to execute step.
        """
        return _execute_ilo_clean_step(task.node, 'reset_bios_to_default')
    @METRICS.timer('IloManagement.reset_secure_boot_keys_to_default')
    @base.clean_step(priority=CONF.ilo.
                     clean_priority_reset_secure_boot_keys_to_default)
    def reset_secure_boot_keys_to_default(self, task):
        """Reset secure boot keys to manufacturing defaults.
        Resets the secure boot keys to manufacturing defaults. This
        operation is supported only on HP Proliant Gen9 and above servers.
        :param task: a task from TaskManager.
        :raises: NodeCleaningFailure, on failure to execute step.
        """
        return _execute_ilo_clean_step(task.node, 'reset_secure_boot_keys')
    @METRICS.timer('IloManagement.clear_secure_boot_keys')
    @base.clean_step(priority=CONF.ilo.clean_priority_clear_secure_boot_keys)
    def clear_secure_boot_keys(self, task):
        """Clear all secure boot keys.
        Clears all the secure boot keys. This operation is supported only
        on HP Proliant Gen9 and above servers.
        :param task: a task from TaskManager.
        :raises: NodeCleaningFailure, on failure to execute step.
        """
        return _execute_ilo_clean_step(task.node, 'clear_secure_boot_keys')
    @METRICS.timer('IloManagement.activate_license')
    @base.clean_step(priority=0, abortable=False, argsinfo={
        'ilo_license_key': {
            'description': (
                'The HPE iLO Advanced license key to activate enterprise '
                'features.'
            ),
            'required': True
        }
    })
    def activate_license(self, task, **kwargs):
        """Activates iLO Advanced license.
        :param task: a TaskManager object.
        :raises: InvalidParameterValue, if any of the arguments are invalid.
        :raises: NodeCleaningFailure, on failure to execute clean step.
        """
        ilo_license_key = kwargs.get('ilo_license_key')
        node = task.node
        if not isinstance(ilo_license_key, six.string_types):
            msg = (_("Value of 'ilo_license_key' must be a string instead of "
                     "'%(value)s'. Step 'activate_license' is not executed "
                     "for %(node)s.")
                   % {'value': ilo_license_key, 'node': node.uuid})
            LOG.error(msg)
            raise exception.InvalidParameterValue(msg)
        LOG.debug("Activating iLO license for node %(node)s ...",
                  {'node': node.uuid})
        _execute_ilo_clean_step(node, 'activate_license', ilo_license_key)
        LOG.info("iLO license activated for node %s.", node.uuid)
    @METRICS.timer('IloManagement.update_firmware')
    @base.clean_step(priority=0, abortable=False, argsinfo={
        'firmware_update_mode': {
            'description': (
                "This argument indicates the mode (or mechanism) of firmware "
                "update procedure. Supported value is 'ilo'."
            ),
            'required': True
        },
        'firmware_images': {
            'description': (
                "This argument represents the ordered list of JSON "
                "dictionaries of firmware images. Each firmware image "
                "dictionary consists of three mandatory fields, namely 'url', "
                "'checksum' and 'component'. These fields represent firmware "
                "image location URL, md5 checksum of image file and firmware "
                "component type respectively. The supported firmware URL "
                "schemes are 'file', 'http', 'https' and 'swift'. The "
                "supported values for firmware component are 'ilo', 'cpld', "
                "'power_pic', 'bios' and 'chassis'. The firmware images will "
                "be applied (in the order given) one by one on the baremetal "
                "server. For more information, see "
                "https://docs.openstack.org/ironic/latest/admin/drivers/ilo.html#initiating-firmware-update-as-manual-clean-step" # noqa
            ),
            'required': True
        }
    })
    @firmware_processor.verify_firmware_update_args
    def update_firmware(self, task, **kwargs):
        """Updates the firmware.
        All images are downloaded and checksum-validated first; only when
        every image has been processed successfully is the actual firmware
        update applied, in the given order. Processed files are removed
        again in all cases (success or failure).
        :param task: a TaskManager object.
        :raises: InvalidParameterValue if update firmware mode is not 'ilo'.
            Even applicable for invalid input cases.
        :raises: NodeCleaningFailure, on failure to execute step.
        """
        node = task.node
        fw_location_objs_n_components = []
        firmware_images = kwargs['firmware_images']
        # Note(deray): Processing of firmware images happens here. As part
        # of processing checksum validation is also done for the firmware file.
        # Processing of firmware file essentially means downloading the file
        # on the conductor, validating the checksum of the downloaded content,
        # extracting the raw firmware file from its compact format, if it is,
        # and hosting the file on a web server or a swift store based on the
        # need of the baremetal server iLO firmware update method.
        try:
            for firmware_image_info in firmware_images:
                url, checksum, component = (
                    firmware_processor.get_and_validate_firmware_image_info(
                        firmware_image_info))
                LOG.debug("Processing of firmware file: %(firmware_file)s on "
                          "node: %(node)s ... in progress",
                          {'firmware_file': url, 'node': node.uuid})
                fw_processor = firmware_processor.FirmwareProcessor(url)
                fw_location_obj = fw_processor.process_fw_on(node, checksum)
                fw_location_objs_n_components.append(
                    (fw_location_obj, component))
                LOG.debug("Processing of firmware file: %(firmware_file)s on "
                          "node: %(node)s ... done",
                          {'firmware_file': url, 'node': node.uuid})
        except exception.IronicException as ilo_exc:
            # delete all the files extracted so far from the extracted list
            # and re-raise the exception
            for fw_loc_obj_n_comp_tup in fw_location_objs_n_components:
                fw_loc_obj_n_comp_tup[0].remove()
            LOG.error("Processing of firmware image: %(firmware_image)s "
                      "on node: %(node)s ... failed",
                      {'firmware_image': firmware_image_info,
                       'node': node.uuid})
            raise exception.NodeCleaningFailure(node=node.uuid, reason=ilo_exc)
        # Updating of firmware images happen here.
        try:
            for fw_location_obj, component in fw_location_objs_n_components:
                fw_location = fw_location_obj.fw_image_location
                LOG.debug("Firmware update for %(firmware_file)s on "
                          "node: %(node)s ... in progress",
                          {'firmware_file': fw_location, 'node': node.uuid})
                _execute_ilo_clean_step(
                    node, 'update_firmware', fw_location, component)
                LOG.debug("Firmware update for %(firmware_file)s on "
                          "node: %(node)s ... done",
                          {'firmware_file': fw_location, 'node': node.uuid})
        except exception.NodeCleaningFailure:
            with excutils.save_and_reraise_exception():
                LOG.error("Firmware update for %(firmware_file)s on "
                          "node: %(node)s failed.",
                          {'firmware_file': fw_location, 'node': node.uuid})
        finally:
            # Always clean up the processed firmware files, whether the
            # update succeeded or not.
            for fw_loc_obj_n_comp_tup in fw_location_objs_n_components:
                fw_loc_obj_n_comp_tup[0].remove()
        LOG.info("All Firmware update operations completed successfully "
                 "for node: %s.", node.uuid)
| apache-2.0 |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/virtual_network_gateway_connection_py3.py | 1 | 6865 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualNetworkGatewayConnection(Resource):
    """A common class for general resource information.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    All required parameters must be populated in order to send to Azure.
    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param authorization_key: The authorizationKey.
    :type authorization_key: str
    :param virtual_network_gateway1: Required.
    :type virtual_network_gateway1:
     ~azure.mgmt.network.v2016_09_01.models.VirtualNetworkGateway
    :param virtual_network_gateway2:
    :type virtual_network_gateway2:
     ~azure.mgmt.network.v2016_09_01.models.VirtualNetworkGateway
    :param local_network_gateway2:
    :type local_network_gateway2:
     ~azure.mgmt.network.v2016_09_01.models.LocalNetworkGateway
    :param connection_type: Required. Gateway connection type. Possible values
     are: 'Ipsec','Vnet2Vnet','ExpressRoute', and 'VPNClient. Possible values
     include: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', 'VPNClient'
    :type connection_type: str or
     ~azure.mgmt.network.v2016_09_01.models.VirtualNetworkGatewayConnectionType
    :param routing_weight: The routing weight.
    :type routing_weight: int
    :param shared_key: The IPSec shared key.
    :type shared_key: str
    :ivar connection_status: Virtual network Gateway connection status.
     Possible values are 'Unknown', 'Connecting', 'Connected' and
     'NotConnected'. Possible values include: 'Unknown', 'Connecting',
     'Connected', 'NotConnected'
    :vartype connection_status: str or
     ~azure.mgmt.network.v2016_09_01.models.VirtualNetworkGatewayConnectionStatus
    :ivar tunnel_connection_status: Collection of all tunnels' connection
     health status.
    :vartype tunnel_connection_status:
     list[~azure.mgmt.network.v2016_09_01.models.TunnelConnectionHealth]
    :ivar egress_bytes_transferred: The egress bytes transferred in this
     connection.
    :vartype egress_bytes_transferred: long
    :ivar ingress_bytes_transferred: The ingress bytes transferred in this
     connection.
    :vartype ingress_bytes_transferred: long
    :param peer: The reference to peerings resource.
    :type peer: ~azure.mgmt.network.v2016_09_01.models.SubResource
    :param enable_bgp: EnableBgp flag
    :type enable_bgp: bool
    :param resource_guid: The resource GUID property of the
     VirtualNetworkGatewayConnection resource.
    :type resource_guid: str
    :ivar provisioning_state: The provisioning state of the
     VirtualNetworkGatewayConnection resource. Possible values are: 'Updating',
     'Deleting', and 'Failed'.
    :vartype provisioning_state: str
    :param etag: Gets a unique read-only string that changes whenever the
     resource is updated.
    :type etag: str
    """
    # Serialization constraints: 'readonly' attributes are populated by the
    # service and ignored on input; 'required' ones must be set by callers.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'virtual_network_gateway1': {'required': True},
        'connection_type': {'required': True},
        'connection_status': {'readonly': True},
        'tunnel_connection_status': {'readonly': True},
        'egress_bytes_transferred': {'readonly': True},
        'ingress_bytes_transferred': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }
    # Maps Python attribute names to their wire (JSON) key paths and types
    # for the msrest serializer/deserializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'},
        'virtual_network_gateway1': {'key': 'properties.virtualNetworkGateway1', 'type': 'VirtualNetworkGateway'},
        'virtual_network_gateway2': {'key': 'properties.virtualNetworkGateway2', 'type': 'VirtualNetworkGateway'},
        'local_network_gateway2': {'key': 'properties.localNetworkGateway2', 'type': 'LocalNetworkGateway'},
        'connection_type': {'key': 'properties.connectionType', 'type': 'str'},
        'routing_weight': {'key': 'properties.routingWeight', 'type': 'int'},
        'shared_key': {'key': 'properties.sharedKey', 'type': 'str'},
        'connection_status': {'key': 'properties.connectionStatus', 'type': 'str'},
        'tunnel_connection_status': {'key': 'properties.tunnelConnectionStatus', 'type': '[TunnelConnectionHealth]'},
        'egress_bytes_transferred': {'key': 'properties.egressBytesTransferred', 'type': 'long'},
        'ingress_bytes_transferred': {'key': 'properties.ingressBytesTransferred', 'type': 'long'},
        'peer': {'key': 'properties.peer', 'type': 'SubResource'},
        'enable_bgp': {'key': 'properties.enableBgp', 'type': 'bool'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }
    def __init__(self, *, virtual_network_gateway1, connection_type, id: str=None, location: str=None, tags=None, authorization_key: str=None, virtual_network_gateway2=None, local_network_gateway2=None, routing_weight: int=None, shared_key: str=None, peer=None, enable_bgp: bool=None, resource_guid: str=None, etag: str=None, **kwargs) -> None:
        super(VirtualNetworkGatewayConnection, self).__init__(id=id, location=location, tags=tags, **kwargs)
        self.authorization_key = authorization_key
        self.virtual_network_gateway1 = virtual_network_gateway1
        self.virtual_network_gateway2 = virtual_network_gateway2
        self.local_network_gateway2 = local_network_gateway2
        self.connection_type = connection_type
        self.routing_weight = routing_weight
        self.shared_key = shared_key
        # Read-only (server-populated) attributes are initialized to None.
        self.connection_status = None
        self.tunnel_connection_status = None
        self.egress_bytes_transferred = None
        self.ingress_bytes_transferred = None
        self.peer = peer
        self.enable_bgp = enable_bgp
        self.resource_guid = resource_guid
        # Read-only; set by the service during resource operations.
        self.provisioning_state = None
        self.etag = etag
| mit |
JioCloud/oslo.messaging | oslo/messaging/_drivers/matchmaker.py | 1 | 9371 | # Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The MatchMaker classes should except a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""
import contextlib
import logging
import eventlet
from oslo.config import cfg
# FIXME(markmc): remove this
_ = lambda s: s  # no-op translation marker until real i18n is wired in
# Configuration knobs controlling the heartbeat loop of heartbeat-capable
# matchmakers (frequency of sends, and how long a registration lives).
matchmaker_opts = [
    cfg.IntOpt('matchmaker_heartbeat_freq',
               default=300,
               help='Heartbeat frequency.'),
    cfg.IntOpt('matchmaker_heartbeat_ttl',
               default=600,
               help='Heartbeat time-to-live.'),
]
CONF = cfg.CONF
CONF.register_opts(matchmaker_opts)
LOG = logging.getLogger(__name__)
# Convenience alias so users of this module need not import contextlib.
contextmanager = contextlib.contextmanager
class MatchMakerException(Exception):
    """Signifies a match could not be found."""
    message = _("Match not found by MatchMaker.")
class Exchange(object):
    """Implements lookups.
    Subclass this to support hashtables, dns, etc.
    """
    def __init__(self):
        pass
    def run(self, key):
        # Subclasses must return a list of (key, host) worker tuples.
        raise NotImplementedError()
class Binding(object):
    """A binding on which to perform a lookup."""
    def __init__(self):
        pass
    def test(self, key):
        # Subclasses must return True when this binding applies to ``key``.
        raise NotImplementedError()
class MatchMakerBase(object):
    """Match Maker Base Class.

    Build off HeartbeatMatchMakerBase if building a heartbeat-capable
    MatchMaker.
    """

    def __init__(self):
        # Each binding rule is a (binding, exchange, negate, last) tuple.
        # ``negate`` is reserved for future negative bindings; ``last``
        # stops rule evaluation once the binding matches.
        self.bindings = []
        self.no_heartbeat_msg = _('Matchmaker does not implement '
                                  'registration or heartbeat.')

    def register(self, key, host):
        """Register a host on a backend.

        Heartbeats, if applicable, may keepalive registration.
        """
        pass

    def ack_alive(self, key, host):
        """Acknowledge that a key.host is alive.

        Used internally for updating heartbeats, but may also be used
        publicly to acknowledge a system is alive (i.e. rpc message
        successfully sent to host)
        """
        pass

    def is_alive(self, topic, host):
        """Checks if a host is alive."""
        pass

    def expire(self, topic, host):
        """Explicitly expire a host's registration."""
        pass

    def send_heartbeats(self):
        """Send all heartbeats.

        Use start_heartbeat to spawn a heartbeat greenthread,
        which loops this method.
        """
        pass

    def unregister(self, key, host):
        """Unregister a topic."""
        pass

    def start_heartbeat(self):
        """Spawn heartbeat greenthread."""
        pass

    def stop_heartbeat(self):
        """Destroys the heartbeat greenthread."""
        pass

    def add_binding(self, binding, rule, last=True):
        """Append a (binding, exchange) rule to the lookup chain."""
        self.bindings.append((binding, rule, False, last))

    # NOTE(ewindisch): kept in case negative-binding support lands:
    # def add_negate_binding(self, binding, rule, last=True):
    #     self.bindings.append((binding, rule, True, last))

    def queues(self, key):
        """Return the worker queues for ``key`` across matching bindings."""
        matched = []
        for binding, exchange, _negate, stop_here in self.bindings:
            if not binding.test(key):
                continue
            matched.extend(exchange.run(key))
            # A binding flagged as ``last`` short-circuits rule processing.
            if stop_here:
                break
        return matched
class HeartbeatMatchMakerBase(MatchMakerBase):
    """Base for a heart-beat capable MatchMaker.
    Provides common methods for registering, unregistering, and maintaining
    heartbeats.
    """
    def __init__(self):
        # Hosts that currently have at least one registration.
        self.hosts = set()
        # Greenthread handle for the heartbeat loop; None until started.
        self._heart = None
        # Maps (topic, host) tuples to the registered host name.
        self.host_topic = {}
        super(HeartbeatMatchMakerBase, self).__init__()
    def send_heartbeats(self):
        """Send all heartbeats.
        Use start_heartbeat to spawn a heartbeat greenthread,
        which loops this method.
        """
        # Iterating the dict yields its (key, host) tuple keys.
        for key, host in self.host_topic:
            self.ack_alive(key, host)
    def ack_alive(self, key, host):
        """Acknowledge that a host.topic is alive.
        Used internally for updating heartbeats, but may also be used
        publicly to acknowledge a system is alive (i.e. rpc message
        successfully sent to host)
        """
        raise NotImplementedError("Must implement ack_alive")
    def backend_register(self, key, host):
        """Implements registration logic.
        Called by register(self,key,host)
        """
        raise NotImplementedError("Must implement backend_register")
    def backend_unregister(self, key, key_host):
        """Implements de-registration logic.
        Called by unregister(self,key,host)
        """
        raise NotImplementedError("Must implement backend_unregister")
    def register(self, key, host):
        """Register a host on a backend.
        Heartbeats, if applicable, may keepalive registration.
        """
        self.hosts.add(host)
        self.host_topic[(key, host)] = host
        # Backends store the combined "topic.host" key.
        key_host = '.'.join((key, host))
        self.backend_register(key, key_host)
        self.ack_alive(key, host)
    def unregister(self, key, host):
        """Unregister a topic."""
        if (key, host) in self.host_topic:
            del self.host_topic[(key, host)]
        self.hosts.discard(host)
        self.backend_unregister(key, '.'.join((key, host)))
        LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"),
                 {'key': key, 'host': host})
    def start_heartbeat(self):
        """Implementation of MatchMakerBase.start_heartbeat.
        Launches greenthread looping send_heartbeats(),
        yielding for CONF.matchmaker_heartbeat_freq seconds
        between iterations.
        """
        if not self.hosts:
            raise MatchMakerException(
                _("Register before starting heartbeat."))
        def do_heartbeat():
            while True:
                self.send_heartbeats()
                eventlet.sleep(CONF.matchmaker_heartbeat_freq)
        self._heart = eventlet.spawn(do_heartbeat)
    def stop_heartbeat(self):
        """Destroys the heartbeat greenthread."""
        if self._heart:
            self._heart.kill()
class DirectBinding(Binding):
    """Specifies a host in the key via a '.' character.
    Although dots are used in the key, the behavior here is
    that it maps directly to a host, thus direct.
    """
    def test(self, key):
        # A dot anywhere in the key marks it as a direct "topic.host" key.
        return '.' in key
class TopicBinding(Binding):
    """Matches a 'bare' key, i.e. one without dots.
    AMQP generally considers topic exchanges to be those *with* dots,
    but we deviate here in terminology as the behavior here matches
    that of a topic exchange (whereas where there are dots, behavior
    matches that of a direct exchange.
    """
    def test(self, key):
        return '.' not in key
class FanoutBinding(Binding):
    """Match on fanout keys, where key starts with 'fanout~' string."""
    def test(self, key):
        # Note: the fanout prefix uses '~', not '.', so these keys are not
        # mistaken for direct "topic.host" keys.
        return key.startswith('fanout~')
class StubExchange(Exchange):
    """Exchange that does nothing."""
    def run(self, key):
        # Pass the key through unchanged with no resolved host.
        return [(key, None)]
class LocalhostExchange(Exchange):
    """Exchange where all direct topics are local."""
    def __init__(self, host='localhost'):
        """:param host: hostname substituted into every returned key."""
        self.host = host
        # Bug fix: the original called super(Exchange, self).__init__(),
        # naming the *base* class and thereby skipping Exchange.__init__
        # in the MRO. super() must name the current class.
        super(LocalhostExchange, self).__init__()
    def run(self, key):
        # "topic.anything" (or bare "topic") -> [("topic.<host>", <host>)]
        return [('.'.join((key.split('.')[0], self.host)), self.host)]
class DirectExchange(Exchange):
    """Exchange where all topic keys are split, sending to second half.
    i.e. "compute.host" sends a message to "compute.host" running on "host"
    """
    def __init__(self):
        # Bug fix: the original called super(Exchange, self).__init__(),
        # naming the *base* class, which resolves one level too high in
        # the MRO. super() must name the current class.
        super(DirectExchange, self).__init__()
    def run(self, key):
        # Everything after the first '.' is the target host.
        e = key.split('.', 1)[1]
        return [(key, e)]
class MatchMakerLocalhost(MatchMakerBase):
    """Match Maker where all bare topics resolve to localhost.
    Useful for testing.
    """
    def __init__(self, host='localhost'):
        super(MatchMakerLocalhost, self).__init__()
        # Binding order matters: fanout and direct keys are matched before
        # bare topic keys fall through to the localhost exchange.
        self.add_binding(FanoutBinding(), LocalhostExchange(host))
        self.add_binding(DirectBinding(), DirectExchange())
        self.add_binding(TopicBinding(), LocalhostExchange(host))
class MatchMakerStub(MatchMakerBase):
    """Match Maker where topics are untouched.
    Useful for testing, or for AMQP/brokered queues.
    Will not work where knowledge of hosts is known (i.e. zeromq)
    """
    def __init__(self):
        super(MatchMakerStub, self).__init__()
        # Every binding routes to a StubExchange, so keys pass through
        # unchanged with no host resolution.
        self.add_binding(FanoutBinding(), StubExchange())
        self.add_binding(DirectBinding(), StubExchange())
        self.add_binding(TopicBinding(), StubExchange())
| apache-2.0 |
adamwiggins/cocos2d | cocos/__init__.py | 2 | 3184 | # ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008 Daniel Moisset, Ricardo Quesada, Rayentray Tappa, Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''cocos2d
cocos2d is a framework for building 2D games, demos, and other graphical/interactive applications.
Main Features:
--------------
* Flow control: Manage the flow control between different scenes in an easy way
* Sprites: Fast and easy sprites
* Actions: Just tell sprites what you want them to do. Composable actions like move, rotate, scale and much more
* Effects: Effects like waves, twirl, lens and much more
* Tiled Maps: Support for rectangular and hexagonal tiled maps
* Transitions: Move from scene to scene with style
* Menus: Built in classes to create menus
* Text Rendering: Label and HTMLLabel with action support
* Documentation: Programming Guide + API Reference + Video Tutorials + Lots of simple tests showing how to use it
* Built-in Python Interpreter: For debugging purposes
* BSD License: Just use it
* Pyglet Based: No external dependencies
* OpenGL Based: Hardware Acceleration
http://cocos2d.org
'''
__version__ = "0.3.0"
__author__ = "cocos2d team"
version = __version__
# add the cocos resources path
import os, pyglet
pyglet.resource.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), "resources"))
pyglet.resource.reindex()
del os, pyglet
import actions
import director
import layer
import menu
import sprite
import path
import scene
import grid
import text
import camera
import draw
import skeleton
| bsd-3-clause |
srsman/odoo | addons/sale/edi/__init__.py | 454 | 1065 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_order
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mmasaki/trove | trove/common/i18n.py | 8 | 1460 | # Copyright 2014 Tesora, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See http://docs.openstack.org/developer/oslo.i18n/usage.html
"""
import oslo_i18n
# NOTE(dhellmann): This reference to o-s-l-o will be replaced by the
# application name when this module is synced into the separate
# repository. It is OK to have more than one translation function
# using the same domain, since there will still only be one message
# catalog.
# Factory that produces gettext-style translation callables scoped to the
# 'trove' message domain.
_translators = oslo_i18n.TranslatorFactory(domain='trove')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
PetePriority/home-assistant | tests/components/duckdns/test_init.py | 14 | 3203 | """Test the DuckDNS component."""
import asyncio
from datetime import timedelta
import pytest
from homeassistant.loader import bind_hass
from homeassistant.setup import async_setup_component
from homeassistant.components import duckdns
from homeassistant.util.dt import utcnow
from tests.common import async_fire_time_changed
# Dummy DuckDNS subdomain and access token shared by all tests below.
DOMAIN = 'bla'
TOKEN = 'abcdefgh'
@bind_hass
@asyncio.coroutine
def async_set_txt(hass, txt):
    """Update the DuckDNS TXT record; pass ``None`` to remove it.

    This is a legacy helper method. Do not use it for new tests.
    """
    service_data = {duckdns.ATTR_TXT: txt}
    yield from hass.services.async_call(
        duckdns.DOMAIN, duckdns.SERVICE_SET_TXT, service_data,
        blocking=True)
@pytest.fixture
def setup_duckdns(hass, aioclient_mock):
    """Fixture that sets up DuckDNS."""
    # Mock a successful update so component setup can complete.
    aioclient_mock.get(
        duckdns.UPDATE_URL,
        params={'domains': DOMAIN, 'token': TOKEN},
        text='OK')
    config = {'duckdns': {'domain': DOMAIN, 'access_token': TOKEN}}
    hass.loop.run_until_complete(
        async_setup_component(hass, duckdns.DOMAIN, config))
@asyncio.coroutine
def test_setup(hass, aioclient_mock):
    """Test setup works if update passes."""
    aioclient_mock.get(
        duckdns.UPDATE_URL,
        params={'domains': DOMAIN, 'token': TOKEN},
        text='OK')
    config = {'duckdns': {'domain': DOMAIN, 'access_token': TOKEN}}
    result = yield from async_setup_component(hass, duckdns.DOMAIN, config)
    assert result
    # Setup itself performs the first update.
    assert aioclient_mock.call_count == 1
    # Advancing past the update interval must trigger a second update.
    async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
    yield from hass.async_block_till_done()
    assert aioclient_mock.call_count == 2
@asyncio.coroutine
def test_setup_fails_if_update_fails(hass, aioclient_mock):
    """Test setup fails if first update fails."""
    # DuckDNS signals failure with a 'KO' body.
    aioclient_mock.get(
        duckdns.UPDATE_URL,
        params={'domains': DOMAIN, 'token': TOKEN},
        text='KO')
    config = {'duckdns': {'domain': DOMAIN, 'access_token': TOKEN}}
    result = yield from async_setup_component(hass, duckdns.DOMAIN, config)
    assert not result
    assert aioclient_mock.call_count == 1
@asyncio.coroutine
def test_service_set_txt(hass, aioclient_mock, setup_duckdns):
    """Test set txt service call."""
    # Drop the request recorded by the setup fixture.
    aioclient_mock.clear_requests()
    aioclient_mock.get(
        duckdns.UPDATE_URL,
        params={'domains': DOMAIN, 'token': TOKEN, 'txt': 'some-txt'},
        text='OK')
    assert aioclient_mock.call_count == 0
    yield from async_set_txt(hass, 'some-txt')
    assert aioclient_mock.call_count == 1
@asyncio.coroutine
def test_service_clear_txt(hass, aioclient_mock, setup_duckdns):
    """Test clear txt service call."""
    # Drop the request recorded by the setup fixture.
    aioclient_mock.clear_requests()
    # Clearing sends an empty txt plus the 'clear' flag.
    aioclient_mock.get(
        duckdns.UPDATE_URL,
        params={'domains': DOMAIN, 'token': TOKEN, 'txt': '',
                'clear': 'true'},
        text='OK')
    assert aioclient_mock.call_count == 0
    yield from async_set_txt(hass, None)
    assert aioclient_mock.call_count == 1
BonexGu/Blik2D-SDK | Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/contrib/factorization/python/ops/factorization_ops_test_utils.py | 116 | 6182 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utils for factorization_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
# 5x7 ratings-style example matrix shared by the factorization tests;
# zero entries represent unobserved values.
INPUT_MATRIX = np.array(
    [[0.1, 0.0, 0.2, 0.0, 0.4, 0.5, 0.0],
     [0.0, 1.1, 0.0, 1.3, 1.4, 0.0, 1.6],
     [2.0, 0.0, 0.0, 2.3, 0.0, 2.5, 0.0],
     [3.0, 0.0, 3.2, 3.3, 0.0, 3.5, 0.0],
     [0.0, 4.1, 0.0, 0.0, 4.4, 0.0, 4.6]]).astype(np.float32)
def remove_empty_rows_columns(np_matrix):
  """Simple util to remove empty rows and columns of a matrix.

  A row/column is considered empty when its entries sum to zero (this
  matches the original behavior; it is not the same as "all zeros" for
  rows with cancelling entries).

  Args:
    np_matrix: A numpy array.
  Returns:
    A tuple consisting of:
    mat: A numpy matrix obtained by removing empty rows and columns from
      np_matrix.
    nz_row_ids: A numpy array of the ids of non-empty rows, such that
      nz_row_ids[i] is the old row index corresponding to new index i.
    nz_col_ids: A numpy array of the ids of non-empty columns, such that
      nz_col_ids[j] is the old column index corresponding to new index j.
  """
  row_nonempty = np_matrix.sum(axis=1) != 0
  col_nonempty = np_matrix.sum(axis=0) != 0
  nz_row_ids = np.flatnonzero(row_nonempty)
  nz_col_ids = np.flatnonzero(col_nonempty)
  mat = np_matrix[np.ix_(nz_row_ids, nz_col_ids)]
  return mat, nz_row_ids, nz_col_ids
def np_matrix_to_tf_sparse(np_matrix,
                           row_slices=None,
                           col_slices=None,
                           transpose=False,
                           shuffle=False):
  """Simple util to slice non-zero np matrix elements as tf.SparseTensor.

  Only the non-zero entries of ``np_matrix`` are emitted. At most one of
  ``row_slices`` / ``col_slices`` may be given to restrict the output to
  whole rows or whole columns. ``transpose`` swaps the (row, col) index
  order and the shape; ``shuffle`` randomizes the entry order.

  NOTE(review): the dense shape is derived from the *maximum surviving
  index* + 1, not from ``np_matrix.shape`` — trailing empty rows/columns
  shrink the reported shape. Callers appear to rely on this.
  """
  indices = np.nonzero(np_matrix)
  # Only allow slices of whole rows or whole columns.
  assert not (row_slices is not None and col_slices is not None)
  if row_slices is not None:
    # Keep only the positions whose row index is in row_slices.
    selected_ind = np.concatenate(
        [np.where(indices[0] == r)[0] for r in row_slices], 0)
    indices = (indices[0][selected_ind], indices[1][selected_ind])
  if col_slices is not None:
    # Keep only the positions whose column index is in col_slices.
    selected_ind = np.concatenate(
        [np.where(indices[1] == c)[0] for c in col_slices], 0)
    indices = (indices[0][selected_ind], indices[1][selected_ind])
  if shuffle:
    # Randomize entry order (uses the global `random` module state).
    shuffled_ind = [x for x in range(len(indices[0]))]
    random.shuffle(shuffled_ind)
    indices = (indices[0][shuffled_ind], indices[1][shuffled_ind])
  # Build an N x 2 int64 index matrix; (col, row) order when transposed.
  ind = (np.concatenate((np.expand_dims(indices[1], 1),
                         np.expand_dims(indices[0], 1)), 1).astype(np.int64) if
         transpose else np.concatenate((np.expand_dims(indices[0], 1),
                                        np.expand_dims(indices[1], 1)),
                                       1).astype(np.int64))
  val = np_matrix[indices].astype(np.float32)
  # Dense shape from the max surviving index (see NOTE above).
  shape = (np.array([max(indices[1]) + 1, max(indices[0]) + 1]).astype(np.int64)
           if transpose else np.array(
               [max(indices[0]) + 1, max(indices[1]) + 1]).astype(np.int64))
  return sparse_tensor.SparseTensor(ind, val, shape)
def calculate_loss(input_mat, row_factors, col_factors, regularization=None,
                   w0=1., row_weights=None, col_weights=None):
  """Calculates the loss of a given factorization.

  Using a non distributed method, different than the one implemented in the
  WALS model. The weight of an observed entry (i, j) (i.e. such that
  input_mat[i, j] is non zero) is (w0 + row_weights[i]col_weights[j]).

  Args:
    input_mat: The input matrix, a SparseTensor of rank 2.
    row_factors: The row factors, a dense Tensor of rank 2.
    col_factors: The col factors, a dense Tensor of rank 2.
    regularization: the regularization coefficient, a scalar.
    w0: the weight of unobserved entries. A scalar.
    row_weights: A dense tensor of rank 1.
    col_weights: A dense tensor of rank 1.

  Returns:
    The total loss.

  NOTE(review): this calls loss.eval(), so it must run inside an active
  TF session context.
  """
  # Per-row / per-column weights as broadcastable column / row vectors;
  # scalar 1. when no weights were supplied.
  wr = (array_ops.expand_dims(row_weights, 1) if row_weights is not None
        else constant_op.constant(1.))
  wc = (array_ops.expand_dims(col_weights, 0) if col_weights is not None
        else constant_op.constant(1.))
  reg = (regularization if regularization is not None
         else constant_op.constant(0.))
  # Split the N x 2 sparse index matrix into row and column index vectors.
  row_indices, col_indices = array_ops.split(input_mat.indices,
                                             axis=1,
                                             num_or_size_splits=2)
  # Approximation at each observed position: <row_factor_i, col_factor_j>.
  gathered_row_factors = array_ops.gather(row_factors, row_indices)
  gathered_col_factors = array_ops.gather(col_factors, col_indices)
  sp_approx_vals = array_ops.squeeze(math_ops.matmul(
      gathered_row_factors, gathered_col_factors, adjoint_b=True))
  sp_approx = sparse_tensor.SparseTensor(
      indices=input_mat.indices,
      values=sp_approx_vals,
      dense_shape=input_mat.dense_shape)
  sp_approx_sq = math_ops.square(sp_approx)
  row_norm = math_ops.reduce_sum(math_ops.square(row_factors))
  col_norm = math_ops.reduce_sum(math_ops.square(col_factors))
  # Frobenius norm of the full (dense) approximation, used for the
  # unobserved-entry part of the loss.
  row_col_norm = math_ops.reduce_sum(math_ops.square(math_ops.matmul(
      row_factors, col_factors, transpose_b=True)))
  # Residual at observed positions: input - approximation.
  resid = sparse_ops.sparse_add(input_mat, sp_approx * (-1))
  resid_sq = math_ops.square(resid)
  # w0 term: squared error at observed entries minus the approximation
  # mass there (compensated below by the full-norm term).
  loss = w0 * (
      sparse_ops.sparse_reduce_sum(resid_sq) -
      sparse_ops.sparse_reduce_sum(sp_approx_sq)
  )
  # Weighted observed error + unobserved mass + L2 regularization.
  loss += (sparse_ops.sparse_reduce_sum(wr * (resid_sq * wc)) +
           w0 * row_col_norm + reg * (row_norm + col_norm))
  return loss.eval()
| mit |
thumbimigwe/echorizr | lib/python2.7/site-packages/django/contrib/gis/sitemaps/views.py | 144 | 2365 | from __future__ import unicode_literals
from django.apps import apps
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.functions import AsKML, Transform
from django.contrib.gis.shortcuts import render_to_kml, render_to_kmz
from django.core.exceptions import FieldDoesNotExist
from django.db import DEFAULT_DB_ALIAS, connections
from django.http import Http404
def kml(request, label, model, field_name=None, compress=False, using=DEFAULT_DB_ALIAS):
    """
    This view generates KML for the given app label, model, and field name.

    The field name must be that of a geographic field. Raises Http404 for
    an unknown model or a non-geometric ``field_name``. When ``compress``
    is true the response is rendered as KMZ instead of KML.
    """
    placemarks = []
    try:
        klass = apps.get_model(label, model)
    except LookupError:
        raise Http404('You must supply a valid app label and module name. Got "%s.%s"' % (label, model))
    if field_name:
        try:
            field = klass._meta.get_field(field_name)
            if not isinstance(field, GeometryField):
                # Treat a non-geometry field the same as a missing one.
                raise FieldDoesNotExist
        except FieldDoesNotExist:
            raise Http404('Invalid geometry field.')
    connection = connections[using]
    if connection.features.has_AsKML_function:
        # Database will take care of transformation.
        placemarks = klass._default_manager.using(using).annotate(kml=AsKML(field_name))
    else:
        # If the database offers no KML method, we use the `kml`
        # attribute of the lazy geometry instead.
        placemarks = []
        if connection.features.has_Transform_function:
            # Reproject to WGS84 (SRID 4326) in the database, exposing the
            # result under '<field_name>_4326'.
            qs = klass._default_manager.using(using).annotate(
                **{'%s_4326' % field_name: Transform(field_name, 4326)})
            field_name += '_4326'
        else:
            qs = klass._default_manager.using(using).all()
        for mod in qs:
            mod.kml = getattr(mod, field_name).kml
            placemarks.append(mod)
    # Getting the render function and rendering to the correct.
    if compress:
        render = render_to_kmz
    else:
        render = render_to_kml
    return render('gis/kml/placemarks.kml', {'places': placemarks})
def kmz(request, label, model, field_name=None, using=DEFAULT_DB_ALIAS):
    """
    This view returns KMZ for the given app label, model, and field name.
    """
    # KMZ is just the KML view with compression switched on.
    return kml(request, label, model, field_name=field_name, compress=True,
               using=using)
| mit |
CamelBackNotation/CarnotKE | jyhton/lib-python/2.7/plat-os2emx/pwd.py | 67 | 6415 | # this module is an OS/2 oriented replacement for the pwd standard
# extension module.
# written by Andrew MacIntyre, April 2001.
# updated July 2003, adding field accessor support
# note that this implementation checks whether ":" or ";" as used as
# the field separator character. Path conversions are are applied when
# the database uses ":" as the field separator character.
"""Replacement for pwd standard extension module, intended for use on
OS/2 and similar systems which don't normally have an /etc/passwd file.
The standard Unix password database is an ASCII text file with 7 fields
per record (line), separated by a colon:
- user name (string)
- password (encrypted string, or "*" or "")
- user id (integer)
- group id (integer)
- description (usually user's name)
- home directory (path to user's home directory)
- shell (path to the user's login shell)
(see the section 8.1 of the Python Library Reference)
This implementation differs from the standard Unix implementation by
allowing use of the platform's native path separator character - ';' on OS/2,
DOS and MS-Windows - as the field separator in addition to the Unix
standard ":". Additionally, when ":" is the separator path conversions
are applied to deal with any munging of the drive letter reference.
The module looks for the password database at the following locations
(in order first to last):
- ${ETC_PASSWD} (or %ETC_PASSWD%)
- ${ETC}/passwd (or %ETC%/passwd)
- ${PYTHONHOME}/Etc/passwd (or %PYTHONHOME%/Etc/passwd)
Classes
-------
None
Functions
---------
getpwuid(uid) - return the record for user-id uid as a 7-tuple
getpwnam(name) - return the record for user 'name' as a 7-tuple
getpwall() - return a list of 7-tuples, each tuple being one record
(NOTE: the order is arbitrary)
Attributes
----------
passwd_file - the path of the password database file
"""
import os
# try and find the passwd file
# Candidate locations for the password database, checked in priority
# order: $ETC_PASSWD, $ETC/passwd, $PYTHONHOME/Etc/passwd.
__passwd_path = []
if os.environ.has_key('ETC_PASSWD'):
    __passwd_path.append(os.environ['ETC_PASSWD'])
if os.environ.has_key('ETC'):
    __passwd_path.append('%s/passwd' % os.environ['ETC'])
if os.environ.has_key('PYTHONHOME'):
    __passwd_path.append('%s/Etc/passwd' % os.environ['PYTHONHOME'])

# First candidate that can actually be opened wins; None if none can.
passwd_file = None
for __i in __passwd_path:
    try:
        __f = open(__i, 'r')
        __f.close()
        passwd_file = __i
        break
    except:
        # Unreadable/missing candidate -- try the next location.
        pass
# path conversion handlers
def __nullpathconv(path):
    # Used when the field separator is the platform path separator: no
    # drive-letter munging to undo, just normalize the alternate path
    # separator. NOTE(review): assumes os.altsep is a string on this
    # platform (it is on OS/2/DOS/Windows); on platforms where it is
    # None, str.replace would raise -- confirm if ported.
    return path.replace(os.altsep, os.sep)
def __unixpathconv(path):
    """Undo drive-letter munging used when ':' is the field separator."""
    # two known drive letter variations: "x;" and "$x"
    if path[0] == '$':
        path = path[1] + ':' + path[2:]
    elif path[1] == ';':
        path = path[0] + ':' + path[2:]
    return path.replace(os.altsep, os.sep)
# decide what field separator we can try to use - Unix standard, with
# the platform's path separator as an option. No special field conversion
# handler is required when using the platform's path separator as field
# separator, but are required for the home directory and shell fields when
# using the standard Unix (":") field separator.
# Map of accepted field separators to the path-conversion handler used
# for the home-directory and shell fields of records using it.
__field_sep = {':': __unixpathconv}
if os.pathsep:
    if os.pathsep != ':':
        __field_sep[os.pathsep] = __nullpathconv
# helper routine to identify which separator character is in use
def __get_field_sep(record):
    """Return the field separator used by `record`, or raise KeyError."""
    fs = None
    for c in __field_sep.keys():
        # there should be 6 delimiter characters (for 7 fields)
        if record.count(c) == 6:
            fs = c
            break
    if fs:
        return fs
    else:
        raise KeyError, '>> passwd database fields not delimited <<'
# class to match the new record field name accessors.
# the resulting object is intended to behave like a read-only tuple,
# with each member also accessible by a field name.
class Passwd:
    """A single passwd record.

    Behaves like a read-only 7-tuple (len, indexing, comparison) while
    also exposing the standard pw_* field-name accessors.
    """
    def __init__(self, name, passwd, uid, gid, gecos, dir, shell):
        # Assign through __dict__ to bypass the read-only __setattr__.
        self.__dict__['pw_name'] = name
        self.__dict__['pw_passwd'] = passwd
        self.__dict__['pw_uid'] = uid
        self.__dict__['pw_gid'] = gid
        self.__dict__['pw_gecos'] = gecos
        self.__dict__['pw_dir'] = dir
        self.__dict__['pw_shell'] = shell
        # Tuple view backing the sequence protocol below.
        self.__dict__['_record'] = (self.pw_name, self.pw_passwd,
                                    self.pw_uid, self.pw_gid,
                                    self.pw_gecos, self.pw_dir,
                                    self.pw_shell)

    def __len__(self):
        return 7

    def __getitem__(self, key):
        return self._record[key]

    def __setattr__(self, name, value):
        # All attributes are immutable after construction.
        raise AttributeError('attribute read-only: %s' % name)

    def __repr__(self):
        return str(self._record)

    def __cmp__(self, other):
        # Compare by the string form of the record tuple (Python 2 only).
        this = str(self._record)
        if this == other:
            return 0
        elif this < other:
            return -1
        else:
            return 1
# read the whole file, parsing each entry into tuple form
# with dictionaries to speed recall by UID or passwd name
def __read_passwd_file():
    """Parse the password database into (uid->record, name->record) dicts.

    Raises KeyError if no database was found or it contains no records.
    """
    if passwd_file:
        passwd = open(passwd_file, 'r')
    else:
        raise KeyError, '>> no password database <<'
    uidx = {}
    namx = {}
    sep = None
    while 1:
        entry = passwd.readline().strip()
        if len(entry) > 6:
            # Detect the separator from the first plausible record and
            # reuse it for the rest of the file.
            if sep is None:
                sep = __get_field_sep(entry)
            fields = entry.split(sep)
            # uid and gid are numeric fields.
            for i in (2, 3):
                fields[i] = int(fields[i])
            # home dir and shell may need drive-letter/path conversion.
            for i in (5, 6):
                fields[i] = __field_sep[sep](fields[i])
            record = Passwd(*fields)
            # First occurrence wins for duplicate uids/names.
            if not uidx.has_key(fields[2]):
                uidx[fields[2]] = record
            if not namx.has_key(fields[0]):
                namx[fields[0]] = record
        elif len(entry) > 0:
            pass # skip empty or malformed records
        else:
            break
    passwd.close()
    if len(uidx) == 0:
        raise KeyError
    return (uidx, namx)
# return the passwd database entry by UID
def getpwuid(uid):
    """Return the passwd database entry for the numeric user id `uid`."""
    by_uid = __read_passwd_file()[0]
    return by_uid[uid]
# return the passwd database entry by passwd name
def getpwnam(name):
    """Return the passwd database entry for the user named `name`."""
    by_name = __read_passwd_file()[1]
    return by_name[name]
# return all the passwd database entries
def getpwall():
    """Return all passwd database entries (arbitrary order)."""
    return __read_passwd_file()[1].values()
# test harness
if __name__ == '__main__':
    # Smoke test: parse the database once; raises if missing/malformed.
    getpwall()
| apache-2.0 |
HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/ThirdParty/Twisted/twisted/internet/test/test_sigchld.py | 59 | 3935 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet._sigchld}, an alternate, superior SIGCHLD
monitoring API.
"""
from __future__ import division, absolute_import
import os, signal, errno
from twisted.python.runtime import platformType
from twisted.python.log import msg
from twisted.trial.unittest import SynchronousTestCase
# Only import the POSIX-specific helpers on POSIX; elsewhere setting the
# module-level ``skip`` attribute makes trial skip this whole module.
if platformType == "posix":
    from twisted.internet.fdesc import setNonBlocking
    from twisted.internet._signals import installHandler, isDefaultHandler
else:
    skip = "These tests can only run on POSIX platforms."
class SetWakeupSIGCHLDTests(SynchronousTestCase):
    """
    Tests for the L{signal.set_wakeup_fd} implementation of the
    L{installHandler} and L{isDefaultHandler} APIs.
    """

    def pipe(self):
        """
        Create a non-blocking pipe which will be closed after the currently
        running test.
        """
        read, write = os.pipe()
        self.addCleanup(os.close, read)
        self.addCleanup(os.close, write)
        setNonBlocking(read)
        setNonBlocking(write)
        return read, write

    def setUp(self):
        """
        Save the current SIGCHLD handler as reported by L{signal.signal} and
        the current file descriptor registered with L{installHandler}.
        """
        handler = signal.getsignal(signal.SIGCHLD)
        if handler != signal.SIG_DFL:
            # Remember the Python-level handler and reset to the default
            # so each test starts from a known state.
            self.signalModuleHandler = handler
            signal.signal(signal.SIGCHLD, signal.SIG_DFL)
        else:
            self.signalModuleHandler = None

        # installHandler(-1) both uninstalls any wakeup fd and returns
        # the previously registered one.
        self.oldFD = installHandler(-1)

        if self.signalModuleHandler is not None and self.oldFD != -1:
            msg("Previous test didn't clean up after its SIGCHLD setup: %r %r"
                % (self.signalModuleHandler, self.oldFD))

    def tearDown(self):
        """
        Restore whatever signal handler was present when setUp ran.
        """
        # If tests set up any kind of handlers, clear them out.
        installHandler(-1)
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)

        # Now restore whatever the setup was before the test ran.
        if self.signalModuleHandler is not None:
            signal.signal(signal.SIGCHLD, self.signalModuleHandler)
        elif self.oldFD != -1:
            installHandler(self.oldFD)

    def test_isDefaultHandler(self):
        """
        L{isDefaultHandler} returns true if the SIGCHLD handler is SIG_DFL,
        false otherwise.
        """
        self.assertTrue(isDefaultHandler())
        signal.signal(signal.SIGCHLD, signal.SIG_IGN)
        self.assertFalse(isDefaultHandler())
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
        self.assertTrue(isDefaultHandler())
        signal.signal(signal.SIGCHLD, lambda *args: None)
        self.assertFalse(isDefaultHandler())

    def test_returnOldFD(self):
        """
        L{installHandler} returns the previously registered file descriptor.
        """
        read, write = self.pipe()
        oldFD = installHandler(write)
        self.assertEqual(installHandler(oldFD), write)

    def test_uninstallHandler(self):
        """
        C{installHandler(-1)} removes the SIGCHLD handler completely.
        """
        read, write = self.pipe()
        self.assertTrue(isDefaultHandler())
        installHandler(write)
        self.assertFalse(isDefaultHandler())
        installHandler(-1)
        self.assertTrue(isDefaultHandler())

    def test_installHandler(self):
        """
        The file descriptor passed to L{installHandler} has a byte written to
        it when SIGCHLD is delivered to the process.
        """
        read, write = self.pipe()
        installHandler(write)

        # The pipe is non-blocking, so an empty pipe raises EAGAIN
        # rather than blocking the read.
        exc = self.assertRaises(OSError, os.read, read, 1)
        self.assertEqual(exc.errno, errno.EAGAIN)

        os.kill(os.getpid(), signal.SIGCHLD)

        self.assertEqual(len(os.read(read, 5)), 1)
| gpl-3.0 |
rhurkes/chasegame | venv/lib/python2.7/site-packages/pyasn1_modules/rfc3447.py | 127 | 1400 | #
# PKCS#1 syntax
#
# ASN.1 source from:
# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1.asn
#
# Sample captures could be obtained with "openssl genrsa" command
#
from pyasn1_modules.rfc2437 import *
class OtherPrimeInfo(univ.Sequence):
    """PKCS#1 OtherPrimeInfo: CRT parameters for one additional prime."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('prime', univ.Integer()),
        namedtype.NamedType('exponent', univ.Integer()),
        namedtype.NamedType('coefficient', univ.Integer())
    )
class OtherPrimeInfos(univ.SequenceOf):
    """SEQUENCE OF OtherPrimeInfo, constrained to 1..MAX elements."""
    componentType = OtherPrimeInfo()
    subtypeSpec = univ.SequenceOf.subtypeSpec + \
                  constraint.ValueSizeConstraint(1, MAX)
class RSAPrivateKey(univ.Sequence):
    """PKCS#1 RSAPrivateKey; version 'multi' adds otherPrimeInfos."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('two-prime', 0), ('multi', 1)))),
        namedtype.NamedType('modulus', univ.Integer()),
        namedtype.NamedType('publicExponent', univ.Integer()),
        namedtype.NamedType('privateExponent', univ.Integer()),
        namedtype.NamedType('prime1', univ.Integer()),
        namedtype.NamedType('prime2', univ.Integer()),
        namedtype.NamedType('exponent1', univ.Integer()),
        namedtype.NamedType('exponent2', univ.Integer()),
        namedtype.NamedType('coefficient', univ.Integer()),
        namedtype.OptionalNamedType('otherPrimeInfos', OtherPrimeInfos())
    )
| mit |
knipknap/SpiffWorkflow | SpiffWorkflow/specs/Celery.py | 1 | 10972 | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import
# Copyright (C) 2007 Samuel Abels
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import logging
import json
from ..task import Task
from ..exceptions import WorkflowException
from .base import TaskSpec
from ..operators import valueof, Attrib, PathAttrib
from ..util import merge_dictionary
# Celery is an optional dependency: remember whether it imported so the
# Celery task spec can fail loudly at construction time rather than at
# module import time.
try:
    from celery.app import default_app
except ImportError:
    have_celery = False
else:
    have_celery = True

LOG = logging.getLogger(__name__)
def _eval_args(args, my_task):
    """Parses args and evaluates any Attrib entries"""
    return [valueof(my_task, arg)
            if isinstance(arg, (Attrib, PathAttrib)) else arg
            for arg in args]
def _eval_kwargs(kwargs, my_task):
    """Parses kwargs and evaluates any Attrib entries"""
    return {key: (valueof(my_task, value)
                  if isinstance(value, (Attrib, PathAttrib)) else value)
            for key, value in list(kwargs.items())}
def Serializable(o):
    """Make sure an object is JSON-serializable

    Use this to return errors and other info that does not need to be
    deserialized or does not contain important app data. Best for returning
    error info and such"""
    if isinstance(o, (str, dict, int)):
        return o
    try:
        json.dumps(o)
    except Exception:
        LOG.debug("Got a non-serilizeable object: %s" % o)
        return o.__repr__()
    return o
class Celery(TaskSpec):

    """This class implements a celeryd task that is sent to the celery queue
    for completion."""

    def __init__(self, wf_spec, name, call, call_args=None, result_key=None,
                 merge_results=False, **kwargs):
        """Constructor.

        The args/kwargs arguments support Attrib classes in the parameters for
        delayed evaluation of inputs until run-time. Example usage:

        task = Celery(wfspec, 'MyTask', 'celery.call',
                 call_args=['hello', 'world', Attrib('number')],
                 any_param=Attrib('result'))

        For serialization, the celery task_id is stored in internal_data,
        but the celery async call is only stored as an attr of the task (since
        it is not always serializable). When deserialized, the async_call attr
        is reset in the _start call.

        :type wf_spec: WorkflowSpec
        :param wf_spec: A reference to the workflow specification.
        :type name: str
        :param name: The name of the task spec.
        :type call: str
        :param call: The name of the celery task that needs to be called.
        :type call_args: list
        :param call_args: args to pass to celery task.
        :type result_key: str
        :param result_key: The key to use to store the results of the call in
                task.internal_data. If None, then dicts are expanded into
                internal_data and values are stored in 'result'.
        :type merge_results: bool
        :param merge_results: merge the results in instead of overwriting
                existing fields.
        :type kwargs: dict
        :param kwargs: kwargs to pass to celery task.
        """
        if not have_celery:
            raise Exception("Unable to import python-celery imports.")
        assert wf_spec is not None
        assert name is not None
        assert call is not None
        TaskSpec.__init__(self, wf_spec, name, **kwargs)
        self.description = kwargs.pop('description', '')
        self.call = call or []
        self.args = call_args or {}
        self.merge_results = merge_results
        # Everything in kwargs except TaskSpec's own configuration keys is
        # forwarded to the celery task as keyword arguments.
        skip = 'data', 'defines', 'pre_assign', 'post_assign', 'lock'
        self.kwargs = dict(i for i in list(kwargs.items()) if i[0] not in skip)
        self.result_key = result_key
        LOG.debug("Celery task '%s' created to call '%s'" % (name, call))

    def _send_call(self, my_task):
        """Sends Celery asynchronous call and stores async call information
        for retrieval later"""
        args, kwargs = None, None
        if self.args:
            # Resolve any Attrib/PathAttrib placeholders at send time.
            args = _eval_args(self.args, my_task)
        if self.kwargs:
            kwargs = _eval_kwargs(self.kwargs, my_task)
        LOG.debug(
            "%s (task id %s) calling %s" % (self.name, my_task.id, self.call),
            extra=dict(data=dict(args=args, kwargs=kwargs)))
        async_call = default_app.send_task(self.call, args=args, kwargs=kwargs)
        # Persist only the task_id (serializable); keep the AsyncResult
        # itself as a plain attribute.
        my_task._set_internal_data(task_id=async_call.task_id)
        my_task.async_call = async_call
        LOG.debug("'%s' called: %s" % (self.call, my_task.async_call.task_id))

    def _restart(self, my_task):
        """ Abort celery task and retry it"""
        if not my_task._has_state(Task.WAITING):
            raise WorkflowException(my_task, "Cannot refire a task that is not"
                                    "in WAITING state")
        # Check state of existing call and abort it (save history)
        if my_task._get_internal_data('task_id') is not None:
            if not hasattr(my_task, 'async_call'):
                # Rebuild the AsyncResult handle from the stored task_id
                # (happens after deserialization).
                task_id = my_task._get_internal_data('task_id')
                my_task.async_call = default_app.AsyncResult(task_id)
                my_task.deserialized = True
                my_task.async_call.state  # manually refresh
            async_call = my_task.async_call
            if async_call.state == 'FAILED':
                # Already failed; nothing to revoke.
                pass
            elif async_call.state in ['RETRY', 'PENDING', 'STARTED']:
                async_call.revoke()
                LOG.info("Celery task '%s' was in %s state and was revoked" % (
                    async_call.state, async_call))
            elif async_call.state == 'SUCCESS':
                LOG.warning("Celery task '%s' succeeded, but a refire was "
                            "requested" % async_call)
            self._clear_celery_task_data(my_task)
        # Retrigger
        return self._start(my_task)

    def _clear_celery_task_data(self, my_task):
        """ Clear celery task data """
        # Save history
        if 'task_id' in my_task.internal_data:
            # Save history for diagnostics/forensics
            history = my_task._get_internal_data('task_history', [])
            history.append(my_task._get_internal_data('task_id'))
            del my_task.internal_data['task_id']
            my_task._set_internal_data(task_history=history)
        if 'task_state' in my_task.internal_data:
            del my_task.internal_data['task_state']
        if 'error' in my_task.internal_data:
            del my_task.internal_data['error']
        if hasattr(my_task, 'async_call'):
            delattr(my_task, 'async_call')
        if hasattr(my_task, 'deserialized'):
            delattr(my_task, 'deserialized')

    def _start(self, my_task, force=False):
        """Fire (or re-check) the celery call for this task.

        Returns True when the call has completed successfully and its
        results were stored on the task; False when the call failed, was
        retried, or is still pending.
        """
        # Deserialize async call if necessary
        if not hasattr(my_task, 'async_call') and \
                my_task._get_internal_data('task_id') is not None:
            task_id = my_task._get_internal_data('task_id')
            my_task.async_call = default_app.AsyncResult(task_id)
            my_task.deserialized = True
            LOG.debug("Reanimate AsyncCall %s" % task_id)

        # Make the call if not already done
        if not hasattr(my_task, 'async_call'):
            self._send_call(my_task)

        # Get call status (and manually refresh if deserialized)
        if getattr(my_task, "deserialized", False):
            my_task.async_call.state  # must manually refresh if deserialized
        if my_task.async_call.state == 'FAILURE':
            LOG.debug("Async Call for task '%s' failed: %s" % (
                my_task.get_name(), my_task.async_call.info))
            # Record failure details on the task for later inspection.
            info = {}
            info['traceback'] = my_task.async_call.traceback
            info['info'] = Serializable(my_task.async_call.info)
            info['state'] = my_task.async_call.state
            my_task._set_internal_data(task_state=info)
        elif my_task.async_call.state == 'RETRY':
            info = {}
            info['traceback'] = my_task.async_call.traceback
            info['info'] = Serializable(my_task.async_call.info)
            info['state'] = my_task.async_call.state
            my_task._set_internal_data(task_state=info)
        elif my_task.async_call.ready():
            result = my_task.async_call.result
            if isinstance(result, Exception):
                # Celery reports task exceptions as the result value.
                LOG.warn("Celery call %s failed: %s" % (self.call, result))
                my_task._set_internal_data(error=Serializable(result))
                return False
            LOG.debug("Completed celery call %s with result=%s" % (self.call,
                                                                   result))
            # Format result
            if self.result_key:
                data = {self.result_key: result}
            else:
                if isinstance(result, dict):
                    data = result
                else:
                    data = {'result': result}
            # Load formatted result into internal_data
            if self.merge_results:
                merge_dictionary(my_task.internal_data, data)
            else:
                my_task.set_data(**data)
            return True
        else:
            LOG.debug("async_call.ready()=%s. TryFire for '%s' "
                      "returning False" % (my_task.async_call.ready(),
                                           my_task.get_name()))
            return False

    def _update_hook(self, my_task):
        # Keep the task in WAITING until the celery call has delivered
        # its results, then proceed with the normal TaskSpec flow.
        if not self._start(my_task):
            if not my_task._has_state(Task.WAITING):
                LOG.debug("'%s' going to WAITING state" % my_task.get_name())
                my_task.state = Task.WAITING
            return
        super(Celery, self)._update_hook(my_task)

    def serialize(self, serializer):
        return serializer.serialize_celery(self)

    @classmethod
    def deserialize(self, serializer, wf_spec, s_state):
        spec = serializer.deserialize_celery(wf_spec, s_state)
        return spec
| lgpl-3.0 |
dendisuhubdy/tensorflow | tensorflow/examples/tutorials/layers/cnn_mnist.py | 43 | 5711 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
def cnn_model_fn(features, labels, mode):
  """Model function for CNN.

  Args:
    features: dict of input tensors; key "x" holds flattened MNIST images,
      reshaped below to [batch_size, 28, 28, 1].
    labels: integer class labels in [0, 10); used only in TRAIN/EVAL modes.
    mode: a `tf.estimator.ModeKeys` value selecting which spec to build.

  Returns:
    A `tf.estimator.EstimatorSpec` configured for the requested mode.
  """
  # Input Layer
  # Reshape X to 4-D tensor: [batch_size, width, height, channels]
  # MNIST images are 28x28 pixels, and have one color channel
  input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])
  # Convolutional Layer #1
  # Computes 32 features using a 5x5 filter with ReLU activation.
  # Padding is added to preserve width and height.
  # Input Tensor Shape: [batch_size, 28, 28, 1]
  # Output Tensor Shape: [batch_size, 28, 28, 32]
  conv1 = tf.layers.conv2d(
      inputs=input_layer,
      filters=32,
      kernel_size=[5, 5],
      padding="same",
      activation=tf.nn.relu)
  # Pooling Layer #1
  # First max pooling layer with a 2x2 filter and stride of 2
  # Input Tensor Shape: [batch_size, 28, 28, 32]
  # Output Tensor Shape: [batch_size, 14, 14, 32]
  pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
  # Convolutional Layer #2
  # Computes 64 features using a 5x5 filter.
  # Padding is added to preserve width and height.
  # Input Tensor Shape: [batch_size, 14, 14, 32]
  # Output Tensor Shape: [batch_size, 14, 14, 64]
  conv2 = tf.layers.conv2d(
      inputs=pool1,
      filters=64,
      kernel_size=[5, 5],
      padding="same",
      activation=tf.nn.relu)
  # Pooling Layer #2
  # Second max pooling layer with a 2x2 filter and stride of 2
  # Input Tensor Shape: [batch_size, 14, 14, 64]
  # Output Tensor Shape: [batch_size, 7, 7, 64]
  pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
  # Flatten tensor into a batch of vectors
  # Input Tensor Shape: [batch_size, 7, 7, 64]
  # Output Tensor Shape: [batch_size, 7 * 7 * 64]
  pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
  # Dense Layer
  # Densely connected layer with 1024 neurons
  # Input Tensor Shape: [batch_size, 7 * 7 * 64]
  # Output Tensor Shape: [batch_size, 1024]
  dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
  # Add dropout operation; 0.6 probability that element will be kept.
  # Dropout is only active in TRAIN mode (see `training=` below).
  dropout = tf.layers.dropout(
      inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
  # Logits layer
  # Input Tensor Shape: [batch_size, 1024]
  # Output Tensor Shape: [batch_size, 10]
  logits = tf.layers.dense(inputs=dropout, units=10)
  predictions = {
      # Generate predictions (for PREDICT and EVAL mode)
      "classes": tf.argmax(input=logits, axis=1),
      # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
      # `logging_hook` (main() looks this tensor up by name).
      "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
  }
  if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
  # Calculate Loss (for both TRAIN and EVAL modes)
  loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
  # Configure the Training Op (for TRAIN mode)
  if mode == tf.estimator.ModeKeys.TRAIN:
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
    train_op = optimizer.minimize(
        loss=loss,
        global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
  # Add evaluation metrics (for EVAL mode)
  eval_metric_ops = {
      "accuracy": tf.metrics.accuracy(
          labels=labels, predictions=predictions["classes"])}
  return tf.estimator.EstimatorSpec(
      mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
  """Train the MNIST CNN estimator, then evaluate it and print the metrics."""
  # Load training and eval data
  mnist = tf.contrib.learn.datasets.load_dataset("mnist")
  train_data = mnist.train.images  # Returns np.array
  train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
  eval_data = mnist.test.images  # Returns np.array
  eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
  # Create the Estimator
  mnist_classifier = tf.estimator.Estimator(
      model_fn=cnn_model_fn, model_dir="/tmp/mnist_convnet_model")
  # Set up logging for predictions
  # Log the values in the "Softmax" tensor with label "probabilities"
  # (the name must match the `name=` given in cnn_model_fn).
  tensors_to_log = {"probabilities": "softmax_tensor"}
  logging_hook = tf.train.LoggingTensorHook(
      tensors=tensors_to_log, every_n_iter=50)
  # Train the model
  # num_epochs=None cycles over the data indefinitely; `steps` bounds it.
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={"x": train_data},
      y=train_labels,
      batch_size=100,
      num_epochs=None,
      shuffle=True)
  mnist_classifier.train(
      input_fn=train_input_fn,
      steps=20000,
      hooks=[logging_hook])
  # Evaluate the model and print results (single deterministic pass)
  eval_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={"x": eval_data},
      y=eval_labels,
      num_epochs=1,
      shuffle=False)
  eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
  print(eval_results)
if __name__ == "__main__":
  tf.app.run()
| apache-2.0 |
ThoughtWorksInc/treadmill | treadmill/rest/api/identity_group.py | 3 | 2623 | """
Treadmill Identity Group REST api.
"""
import flask
import flask_restplus as restplus
from flask_restplus import fields
# Disable E0611: No 'name' in module
from treadmill import webutils # pylint: disable=E0611
# Old style classes, no init method.
#
# pylint: disable=W0232
def init(api, cors, impl):
    """Configures REST handlers for the identity group resource.

    Args:
        api: ``flask_restplus`` Api on which namespace, model and routes
            are registered.
        cors: CORS settings consumed by the ``webutils`` API decorators.
        impl: backend implementation exposing ``list``, ``get``,
            ``create``, ``update`` and ``delete``.
    """
    namespace = webutils.namespace(
        api, __name__, 'Identity Group REST operations'
    )

    model = {
        '_id': fields.String(description='Name'),
        # NOTE: description typo fixed ('Identiy' -> 'Identity').
        'count': fields.Integer(
            description='Identity Group Count',
            required=True),
    }

    identity_group_model = api.model(
        'IdentityGroup', model
    )

    # Help text fixed: this endpoint matches identity groups, not app
    # monitors (the old text was a copy/paste from the monitor module).
    match_parser = api.parser()
    match_parser.add_argument('match',
                              help='A glob match on an identity group',
                              location='args', required=False,)

    @namespace.route(
        '/',
    )
    class _IdentityGroupList(restplus.Resource):
        """Treadmill identity group list resource."""

        @webutils.get_api(api, cors,
                          marshal=api.marshal_list_with,
                          resp_model=identity_group_model,
                          parser=match_parser)
        def get(self):
            """Returns list of configured identity groups."""
            args = match_parser.parse_args()
            return impl.list(args.get('match'))

    # Swagger param key fixed: must be 'app_id' (was 'app_ip') to match
    # the <app_id> placeholder in the route below.
    @namespace.route('/<app_id>')
    @api.doc(params={'app_id': 'App ID/name'})
    class _IdentityGroupResource(restplus.Resource):
        """Treadmill identity group resource."""

        @webutils.get_api(api, cors,
                          marshal=api.marshal_with,
                          resp_model=identity_group_model)
        def get(self, app_id):
            """Return identity group configuration."""
            return impl.get(app_id)

        @webutils.post_api(api, cors,
                           req_model=identity_group_model,
                           resp_model=identity_group_model)
        def post(self, app_id):
            """Creates identity group."""
            return impl.create(app_id, flask.request.json)

        @webutils.put_api(api, cors,
                          req_model=identity_group_model,
                          resp_model=identity_group_model)
        def put(self, app_id):
            """Updates identity group configuration."""
            return impl.update(app_id, flask.request.json)

        @webutils.delete_api(api, cors)
        def delete(self, app_id):
            """Deletes identity group."""
            return impl.delete(app_id)
| apache-2.0 |
ville-k/tensorflow | tensorflow/python/estimator/canned/head_test.py | 5 | 76329 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.core.framework import summary_pb2
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import queue_runner_impl
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
def _initialize_variables(test_case, scaffold):
  """Finalize `scaffold` and run its initialization ops in the default session.

  Also asserts the scaffold relies purely on graph-level initialization
  (no feed dict, no init_fn) and that a saver was created.  The op order
  below mirrors the scaffold contract: global init, local-init readiness,
  local init, then the overall readiness check.
  """
  scaffold.finalize()
  test_case.assertIsNone(scaffold.init_feed_dict)
  test_case.assertIsNone(scaffold.init_fn)
  scaffold.init_op.run()
  scaffold.ready_for_local_init_op.eval()
  scaffold.local_init_op.run()
  scaffold.ready_op.eval()
  test_case.assertIsNotNone(scaffold.saver)
def _assert_simple_summaries(test_case, expected_summaries, summary_str,
                             tol=1e-6):
  """Check that `summary_str` decodes to exactly the expected simple values.

  Args:
    test_case: test case.
    expected_summaries: Dict mapping summary tags to expected simple values.
    summary_str: Serialized `summary_pb2.Summary`.
    tol: Relative and absolute tolerance used for the comparison.
  """
  summary = summary_pb2.Summary()
  summary.ParseFromString(summary_str)
  actual_summaries = {}
  for value in summary.value:
    actual_summaries[value.tag] = value.simple_value
  test_case.assertAllClose(
      expected_summaries, actual_summaries, rtol=tol, atol=tol)
def _assert_no_hooks(test_case, spec):
test_case.assertAllEqual([], spec.training_chief_hooks)
test_case.assertAllEqual([], spec.training_hooks)
def _sigmoid(logits):
return 1 / (1 + np.exp(-logits))
# TODO(roumposg): Reuse the code from dnn_testing_utils.
def _assert_close(expected, actual, rtol=1e-04, message='',
                  name='assert_close'):
  """Return an op asserting element-wise |expected - actual|/|expected| < rtol.

  The returned assert op fails with a diagnostic message (including the
  offending values and the relative diff) when the check does not hold.
  NOTE(review): the relative diff divides by `expected`, so callers are
  assumed to pass nonzero expected values.
  """
  with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
    expected = ops.convert_to_tensor(expected, name='expected')
    actual = ops.convert_to_tensor(actual, name='actual')
    rdiff = math_ops.abs((expected - actual) / expected, 'diff')
    rtol = ops.convert_to_tensor(rtol, name='rtol')
    return check_ops.assert_less(
        rdiff,
        rtol,
        data=(message, 'Condition expected =~ actual did not hold element-wise:'
              'expected = ', expected, 'actual = ', actual, 'rdiff = ', rdiff,
              'rtol = ', rtol,),
        summarize=expected.get_shape().num_elements(),
        name=scope)
class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
def test_n_classes_is_none(self):
with self.assertRaisesRegexp(ValueError, 'n_classes must be > 2'):
head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=None)
def test_n_classes_is_2(self):
with self.assertRaisesRegexp(ValueError, 'n_classes must be > 2'):
head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes=2)
def test_invalid_logits_shape(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
self.assertEqual(n_classes, head.logits_dimension)
# Logits should be shape (batch_size, 3).
logits_2x2 = np.array(((45., 44.), (41., 42.),))
# Static shape.
with self.assertRaisesRegexp(ValueError, 'logits shape'):
head.create_estimator_spec(
features={'x': np.array(((30.,), (42.,),))},
mode=model_fn.ModeKeys.PREDICT,
logits=logits_2x2)
# Dynamic shape.
logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
spec = head.create_estimator_spec(
features={'x': np.array(((30.,), (42.,),))},
mode=model_fn.ModeKeys.PREDICT,
logits=logits_placeholder)
with self.test_session():
with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
spec.predictions[prediction_keys.PredictionKeys.PROBABILITIES].eval({
logits_placeholder: logits_2x2
})
def test_invalid_labels_shape(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
self.assertEqual(n_classes, head.logits_dimension)
# Logits should be shape (batch_size, 3).
# Labels should be shape (batch_size, 1).
labels_2x2 = np.array(((45, 44), (41, 42),), dtype=np.int)
logits_2x3 = np.array(((1., 2., 3.), (1., 2., 3.),))
# Static shape.
with self.assertRaisesRegexp(ValueError, 'labels shape'):
head.create_estimator_spec(
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.EVAL,
logits=logits_2x3,
labels=labels_2x2)
# Dynamic shape.
labels_placeholder = array_ops.placeholder(dtype=dtypes.int64)
logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
spec = head.create_estimator_spec(
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)
with self.test_session():
with self.assertRaisesRegexp(errors.OpError, 'labels shape'):
spec.loss.eval({
logits_placeholder: logits_2x3,
labels_placeholder: labels_2x2
})
def test_invalid_labels_type(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
self.assertEqual(n_classes, head.logits_dimension)
# Logits should be shape (batch_size, 3).
# Labels should be shape (batch_size, 1).
labels_2x1 = np.array(((1.,), (1.,),))
logits_2x3 = np.array(((1., 2., 3.), (1., 2., 3.),))
# Static shape.
with self.assertRaisesRegexp(ValueError, 'Labels dtype'):
head.create_estimator_spec(
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.EVAL,
logits=logits_2x3,
labels=labels_2x1)
# Dynamic shape.
labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)
logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError, 'Labels dtype'):
head.create_estimator_spec(
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)
def test_invalid_labels_values(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
self.assertEqual(n_classes, head.logits_dimension)
labels_2x1_with_large_id = np.array(((45,), (1,),), dtype=np.int)
labels_2x1_with_negative_id = np.array(((-5,), (1,),), dtype=np.int)
logits_2x3 = np.array(((1., 2., 4.), (1., 2., 3.),))
labels_placeholder = array_ops.placeholder(dtype=dtypes.int64)
logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
spec = head.create_estimator_spec(
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)
with self.test_session():
with self.assertRaisesOpError('Label IDs must < n_classes'):
spec.loss.eval({
labels_placeholder: labels_2x1_with_large_id,
logits_placeholder: logits_2x3
})
with self.test_session():
with self.assertRaisesOpError('Label IDs must >= 0'):
spec.loss.eval({
labels_placeholder: labels_2x1_with_negative_id,
logits_placeholder: logits_2x3
})
def test_invalid_labels_sparse_tensor(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
self.assertEqual(n_classes, head.logits_dimension)
labels_2x1 = sparse_tensor.SparseTensor(
values=['english', 'italian'],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1])
logits_2x3 = np.array(((1., 2., 4.), (1., 2., 3.),))
with self.assertRaisesRegexp(
ValueError, 'SparseTensor labels are not supported.'):
head.create_estimator_spec(
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.EVAL,
logits=logits_2x3,
labels=labels_2x1)
def test_incompatible_labels_shape(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
self.assertEqual(n_classes, head.logits_dimension)
# Logits should be shape (batch_size, 3).
# Labels should be shape (batch_size, 1).
# Here batch sizes are different.
values_3x1 = np.array(((1,), (1,), (1,),))
values_2x3 = np.array(((1., 2., 3.), (1., 2., 3.),))
# Static shape.
with self.assertRaisesRegexp(ValueError, 'Dimensions must be equal'):
head.create_estimator_spec(
features={'x': values_2x3},
mode=model_fn.ModeKeys.EVAL,
logits=values_2x3,
labels=values_3x1)
# Dynamic shape.
labels_placeholder = array_ops.placeholder(dtype=dtypes.int64)
logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
spec = head.create_estimator_spec(
features={'x': values_2x3},
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)
with self.test_session():
with self.assertRaisesRegexp(
errors.OpError,
'logits and labels must have the same first dimension'):
spec.loss.eval({
labels_placeholder: values_3x1,
logits_placeholder: values_2x3
})
def test_predict(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
self.assertEqual(n_classes, head.logits_dimension)
logits = [[1., 0., 0.], [0., 0., 1.]]
expected_probabilities = [[0.576117, 0.2119416, 0.2119416],
[0.2119416, 0.2119416, 0.576117]]
expected_class_ids = [[0], [2]]
expected_classes = [[b'0'], [b'2']]
expected_export_classes = [[b'0', b'1', b'2']] * 2
spec = head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
self.assertItemsEqual(
('', _DEFAULT_SERVING_KEY), spec.export_outputs.keys())
# Assert predictions and export_outputs.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
predictions = sess.run(spec.predictions)
self.assertAllClose(logits,
predictions[prediction_keys.PredictionKeys.LOGITS])
self.assertAllClose(
expected_probabilities,
predictions[prediction_keys.PredictionKeys.PROBABILITIES])
self.assertAllClose(expected_class_ids,
predictions[prediction_keys.PredictionKeys.CLASS_IDS])
self.assertAllEqual(expected_classes,
predictions[prediction_keys.PredictionKeys.CLASSES])
self.assertAllClose(
expected_probabilities,
sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].scores))
self.assertAllEqual(
expected_export_classes,
sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].classes))
def test_predict_with_vocabulary_list(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes, label_vocabulary=['aang', 'iroh', 'zuko'])
logits = [[1., 0., 0.], [0., 0., 1.]]
expected_classes = [[b'aang'], [b'zuko']]
expected_export_classes = [[b'aang', b'iroh', b'zuko']] * 2
spec = head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertAllEqual(
expected_classes,
sess.run(spec.predictions[prediction_keys.PredictionKeys.CLASSES]))
self.assertAllEqual(
expected_export_classes,
sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].classes))
def test_weight_should_not_impact_prediction(self):
n_classes = 3
logits = [[1., 0., 0.], [0., 0., 1.]]
expected_probabilities = [[0.576117, 0.2119416, 0.2119416],
[0.2119416, 0.2119416, 0.576117]]
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes, weight_column='label_weights')
weights_2x1 = [[1.], [2.]]
spec = head.create_estimator_spec(
features={
'x': np.array(((42,),), dtype=np.int32),
'label_weights': weights_2x1,
},
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
predictions = sess.run(spec.predictions)
self.assertAllClose(logits,
predictions[prediction_keys.PredictionKeys.LOGITS])
self.assertAllClose(
expected_probabilities,
predictions[prediction_keys.PredictionKeys.PROBABILITIES])
def test_eval(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
# Create estimator spec.
logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)
labels = np.array(((1,), (1,)), dtype=np.int64)
# loss = sum(cross_entropy(labels, logits)) = sum(10, 0) = 10.
expected_loss = 10.
spec = head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)
keys = metric_keys.MetricKeys
expected_metrics = {
keys.LOSS_MEAN: expected_loss / 2,
keys.ACCURACY: 0.5, # 1 of 2 labels is correct.
}
# Assert spec contains expected tensors.
self.assertIsNotNone(spec.loss)
self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
self.assertIsNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, and metrics.
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
loss, metrics = sess.run((spec.loss, update_ops))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
# Check results of both update (in `metrics`) and value ops.
self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)
self.assertAllClose(
expected_metrics, {k: value_ops[k].eval()
for k in value_ops},
rtol=tol,
atol=tol)
def test_eval_with_label_vocabulary(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes, label_vocabulary=['aang', 'iroh', 'zuko'])
logits = [[10., 0, 0], [0, 10, 0]]
labels = [[b'iroh'], [b'iroh']]
# loss = sum(cross_entropy(labels, logits)) = sum(10, 0) = 10.
expected_loss = 10.
spec = head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)
keys = metric_keys.MetricKeys
expected_metrics = {
keys.LOSS_MEAN: expected_loss / 2,
keys.ACCURACY: 0.5, # 1 of 2 labels is correct.
}
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
loss, metrics = sess.run((spec.loss, update_ops))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
# Check results of both update (in `metrics`) and value ops.
self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)
self.assertAllClose(
expected_metrics, {k: value_ops[k].eval() for k in value_ops},
rtol=tol, atol=tol)
def test_weighted_multi_example_eval(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes, weight_column='label_weights')
# Create estimator spec.
logits = np.array(((10, 0, 0), (0, 10, 0), (0, 0, 10),), dtype=np.float32)
labels = np.array(((1,), (2,), (2,)), dtype=np.int64)
weights_3x1 = np.array(((1.,), (2.,), (3.,)), dtype=np.float64)
# loss = sum(cross_entropy(labels, logits) * [1, 2, 3])
# = sum([10, 10, 0] * [1, 2, 3]) = 30
expected_loss = 30.
spec = head.create_estimator_spec(
features={
'x': np.array(((42,),), dtype=np.int32),
'label_weights': weights_3x1,
},
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)
keys = metric_keys.MetricKeys
expected_metrics = {
keys.LOSS_MEAN: expected_loss / np.sum(weights_3x1),
# Weighted accuracy is 1 * 3.0 / sum weights = 0.5
keys.ACCURACY: 0.5,
}
# Assert spec contains expected tensors.
self.assertIsNotNone(spec.loss)
self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
self.assertIsNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert loss, and metrics.
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
loss, metrics = sess.run((spec.loss, update_ops))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
# Check results of both update (in `metrics`) and value ops.
self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)
self.assertAllClose(
expected_metrics, {k: value_ops[k].eval() for k in value_ops},
rtol=tol, atol=tol)
def test_train(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)
logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)
labels = np.array(((1,), (1,)), dtype=np.int64)
expected_train_result = 'my_train_op'
# loss = sum(cross_entropy(labels, logits)) = sum(10, 0) = 10.
expected_loss = 10.
def _train_op_fn(loss):
return string_ops.string_join(
[constant_op.constant(expected_train_result),
string_ops.as_string(loss, precision=2)])
spec = head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.float32)},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
self.assertIsNotNone(spec.loss)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.2f}'.format(expected_train_result, expected_loss)),
train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS: expected_loss,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
}, summary_str, tol)
def test_train_with_one_dim_label_and_weights(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes, weight_column='label_weights')
logits = np.array(((10, 0, 0), (0, 10, 0), (0, 0, 10),), dtype=np.float32)
labels_rank_1 = np.array((1, 2, 2,), dtype=np.int64)
weights_rank_1 = np.array((1., 2., 3.,), dtype=np.float64)
self.assertEqual((3,), labels_rank_1.shape)
self.assertEqual((3,), weights_rank_1.shape)
expected_train_result = 'my_train_op'
# loss = sum(cross_entropy(labels, logits) * [1, 2, 3])
# = sum([10, 10, 0] * [1, 2, 3]) = 30
expected_loss = 30.
def _train_op_fn(loss):
return string_ops.string_join(
[constant_op.constant(expected_train_result),
string_ops.as_string(loss, precision=2)])
spec = head.create_estimator_spec(
features={
'x': np.array(((42,),), dtype=np.float32),
'label_weights': weights_rank_1,
},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels_rank_1,
train_op_fn=_train_op_fn)
self.assertIsNotNone(spec.loss)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.2f}'.format(expected_train_result, expected_loss)),
train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS: expected_loss,
metric_keys.MetricKeys.LOSS_MEAN: (
expected_loss / np.sum(weights_rank_1)),
}, summary_str, tol)
def test_train_with_vocabulary(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes, label_vocabulary=['aang', 'iroh', 'zuko'])
logits = [[10., 0, 0], [0, 10, 0]]
labels = [[b'iroh'], [b'iroh']]
# loss = sum(cross_entropy(labels, logits)) = sum(10, 0) = 10.
expected_loss = 10.
def _train_op_fn(loss):
del loss
return control_flow_ops.no_op()
spec = head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.float32)},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
loss = sess.run(spec.loss)
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
def test_weighted_multi_example_train(self):
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes, weight_column='label_weights')
# Create estimator spec.
logits = np.array(((10, 0, 0), (0, 10, 0), (0, 0, 10),), dtype=np.float32)
labels = np.array(((1,), (2,), (2,)), dtype=np.int64)
weights_3x1 = np.array(((1.,), (2.,), (3.,)), dtype=np.float64)
expected_train_result = 'my_train_op'
# loss = sum(cross_entropy(labels, logits) * [1, 2, 3])
# = sum([10, 10, 0] * [1, 2, 3]) = 30
expected_loss = 30.
def _train_op_fn(loss):
return string_ops.string_join(
[constant_op.constant(expected_train_result),
string_ops.as_string(loss, precision=2)])
spec = head.create_estimator_spec(
features={
'x': np.array(((42,),), dtype=np.float32),
'label_weights': weights_3x1,
},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
self.assertIsNotNone(spec.loss)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
tol = 1e-2
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.2f}'.format(expected_train_result, expected_loss)),
train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS: expected_loss,
# loss mean = sum(cross_entropy(labels, logits) * [1,2,3]) / (1+2+3)
# = sum([10, 10, 0] * [1, 2, 3]) / 6 = 30 / 6
metric_keys.MetricKeys.LOSS_MEAN:
expected_loss / np.sum(weights_3x1),
}, summary_str, tol)
# TODO(ptucker): Add thresholds tests.
class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
def test_threshold_too_small(self):
with self.assertRaisesRegexp(ValueError, r'thresholds not in \(0, 1\)'):
head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
thresholds=(0., 0.5))
def test_threshold_too_large(self):
with self.assertRaisesRegexp(ValueError, r'thresholds not in \(0, 1\)'):
head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
thresholds=(0.5, 1.))
def test_invalid_logits_shape(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
self.assertEqual(1, head.logits_dimension)
# Logits should be shape (batch_size, 1).
logits_2x2 = np.array(((45., 44.), (41., 42.),))
# Static shape.
with self.assertRaisesRegexp(ValueError, 'logits shape'):
head.create_estimator_spec(
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.PREDICT,
logits=logits_2x2)
# Dynamic shape.
logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
spec = head.create_estimator_spec(
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.PREDICT,
logits=logits_placeholder)
with self.test_session():
with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
spec.predictions[prediction_keys.PredictionKeys.PROBABILITIES].eval({
logits_placeholder: logits_2x2
})
def test_invalid_labels_shape(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
self.assertEqual(1, head.logits_dimension)
# Labels and logits should be shape (batch_size, 1).
labels_2x2 = np.array(((45., 44.), (41., 42.),))
logits_2x1 = np.array(((45.,), (41.,),))
# Static shape.
with self.assertRaisesRegexp(ValueError, 'labels shape'):
head.create_estimator_spec(
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.EVAL,
logits=logits_2x1,
labels=labels_2x2)
# Dynamic shape.
labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)
logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
spec = head.create_estimator_spec(
features={'x': np.array(((42.,),))},
mode=model_fn.ModeKeys.EVAL,
logits=logits_placeholder,
labels=labels_placeholder)
with self.test_session():
with self.assertRaisesRegexp(errors.OpError, 'labels shape'):
spec.loss.eval({
logits_placeholder: logits_2x1,
labels_placeholder: labels_2x2
})
  def test_incompatible_labels_shape(self):
    """Logits and labels with mismatched batch sizes fail in both directions."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
    self.assertEqual(1, head.logits_dimension)
    # Both logits and labels should be shape (batch_size, 1).
    values_2x1 = np.array(((0.,), (1.,),))
    values_3x1 = np.array(((0.,), (1.,), (0.,),))
    # Static shape: checked at graph-construction time, both orderings.
    with self.assertRaisesRegexp(
        ValueError, 'logits and labels must have the same shape'):
      head.create_estimator_spec(
          features={'x': values_2x1},
          mode=model_fn.ModeKeys.EVAL,
          logits=values_2x1,
          labels=values_3x1)
    with self.assertRaisesRegexp(
        ValueError, 'logits and labels must have the same shape'):
      head.create_estimator_spec(
          features={'x': values_2x1},
          mode=model_fn.ModeKeys.EVAL,
          logits=values_3x1,
          labels=values_2x1)
    # Dynamic shape: placeholders defer the check to session run time.
    labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    spec = head.create_estimator_spec(
        features={'x': values_2x1},
        mode=model_fn.ModeKeys.EVAL,
        logits=logits_placeholder,
        labels=labels_placeholder)
    with self.test_session():
      with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'):
        spec.loss.eval({
            labels_placeholder: values_2x1,
            logits_placeholder: values_3x1
        })
    with self.test_session():
      with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'):
        spec.loss.eval({
            labels_placeholder: values_3x1,
            logits_placeholder: values_2x1
        })
  def test_predict(self):
    """PREDICT mode yields consistent predictions and export outputs."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
    self.assertEqual(1, head.logits_dimension)
    # Create estimator spec.
    logits = [[0.3], [-0.4]]
    # logistic = sigmoid(logits); probabilities = [1-logistic, logistic].
    expected_logistics = [[0.574443], [0.401312]]
    expected_probabilities = [[0.425557, 0.574443], [0.598688, 0.401312]]
    expected_class_ids = [[1], [0]]
    expected_classes = [[b'1'], [b'0']]
    expected_export_classes = [[b'0', b'1']] * 2
    spec = head.create_estimator_spec(
        features={'x': np.array(((42,),), dtype=np.int32)},
        mode=model_fn.ModeKeys.PREDICT,
        logits=logits)
    # Assert spec contains expected tensors; no loss/metrics/train_op in
    # PREDICT mode, but all four export signatures are present.
    self.assertIsNone(spec.loss)
    self.assertEqual({}, spec.eval_metric_ops)
    self.assertIsNone(spec.train_op)
    self.assertItemsEqual(('', 'classification', 'regression',
                           _DEFAULT_SERVING_KEY), spec.export_outputs.keys())
    _assert_no_hooks(self, spec)
    # Assert predictions.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNone(spec.scaffold.summary_op)
      predictions = sess.run(spec.predictions)
      self.assertAllClose(logits,
                          predictions[prediction_keys.PredictionKeys.LOGITS])
      self.assertAllClose(expected_logistics,
                          predictions[prediction_keys.PredictionKeys.LOGISTIC])
      self.assertAllClose(
          expected_probabilities,
          predictions[prediction_keys.PredictionKeys.PROBABILITIES])
      self.assertAllClose(expected_class_ids,
                          predictions[prediction_keys.PredictionKeys.CLASS_IDS])
      self.assertAllEqual(expected_classes,
                          predictions[prediction_keys.PredictionKeys.CLASSES])
      self.assertAllClose(
          expected_probabilities,
          sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].scores))
      self.assertAllEqual(
          expected_export_classes,
          sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].classes))
      self.assertAllClose(expected_logistics,
                          sess.run(spec.export_outputs['regression'].value))
def test_predict_with_vocabulary_list(self):
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
label_vocabulary=['aang', 'iroh'])
logits = [[1.], [0.]]
expected_classes = [[b'iroh'], [b'aang']]
spec = head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertAllEqual(
expected_classes,
sess.run(spec.predictions[prediction_keys.PredictionKeys.CLASSES]))
  def test_eval(self):
    """EVAL mode computes loss and the full binary-classification metrics."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
    # Create estimator spec.
    logits = np.array(((45,), (-41,),), dtype=np.float32)
    spec = head.create_estimator_spec(
        features={'x': np.array(((42,),), dtype=np.float32)},
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=np.array(((1,), (1,),), dtype=np.int32))
    keys = metric_keys.MetricKeys
    expected_metrics = {
        # loss = sum(cross_entropy(labels, logits)) = sum(0, 41) = 41
        # loss_mean = loss/2 = 41./2 = 20.5
        keys.LOSS_MEAN: 20.5,
        keys.ACCURACY: 1./2,
        keys.PREDICTION_MEAN: 1./2,
        keys.LABEL_MEAN: 2./2,
        keys.ACCURACY_BASELINE: 2./2,
        keys.AUC: 0.,
        keys.AUC_PR: 1.,
    }
    # Assert spec contains expected tensors; EVAL has no train_op or exports.
    self.assertIsNotNone(spec.loss)
    self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
    self.assertIsNone(spec.train_op)
    self.assertIsNone(spec.export_outputs)
    _assert_no_hooks(self, spec)
    # Assert predictions, loss, and metrics.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNone(spec.scaffold.summary_op)
      # Each eval_metric_op is a (value_op, update_op) pair.
      value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
      update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
      loss, metrics = sess.run((spec.loss, update_ops))
      self.assertAllClose(41., loss)
      # Check results of both update (in `metrics`) and value ops.
      self.assertAllClose(expected_metrics, metrics)
      self.assertAllClose(
          expected_metrics, {k: value_ops[k].eval() for k in value_ops})
  def test_eval_with_vocabulary_list(self):
    """String labels drawn from the vocabulary feed the accuracy metric."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        label_vocabulary=['aang', 'iroh'])
    # Create estimator spec.
    logits = np.array(((45,), (-41,),), dtype=np.float32)
    spec = head.create_estimator_spec(
        features={'x': np.array(((42,),), dtype=np.float32)},
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=[[b'iroh'], [b'iroh']])
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNone(spec.scaffold.summary_op)
      # Each eval_metric_op is a (value_op, update_op) pair.
      value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
      update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
      sess.run(update_ops)
      # Only the first prediction matches 'iroh' (id 1) => accuracy 1/2.
      self.assertAllClose(1. / 2,
                          value_ops[metric_keys.MetricKeys.ACCURACY].eval())
  def test_eval_with_thresholds(self):
    """Per-threshold accuracy/precision/recall metrics are registered."""
    thresholds = [0.25, 0.5, 0.75]
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        thresholds=thresholds)
    # Create estimator spec.
    spec = head.create_estimator_spec(
        features={'x': np.array(((42,),), dtype=np.float32)},
        mode=model_fn.ModeKeys.EVAL,
        logits=np.array(((-1,), (1,),), dtype=np.float32),
        labels=np.array(((1,), (1,),), dtype=np.int32))
    # probabilities[i] = 1/(1 + exp(-logits[i])) =>
    # probabilities = [1/(1 + exp(1)), 1/(1 + exp(-1))] = [0.269, 0.731]
    # loss = -sum(ln(probabilities[label[i]])) = -ln(0.269) -ln(0.731)
    #      = 1.62652338
    keys = metric_keys.MetricKeys
    expected_metrics = {
        keys.LOSS_MEAN: 1.62652338 / 2.,
        keys.ACCURACY: 1./2,
        keys.PREDICTION_MEAN: 1./2,
        keys.LABEL_MEAN: 2./2,
        keys.ACCURACY_BASELINE: 2./2,
        keys.AUC: 0.,
        keys.AUC_PR: 1.,
        # One accuracy/precision/recall metric key per configured threshold.
        keys.ACCURACY_AT_THRESHOLD % thresholds[0]: 1.,
        keys.PRECISION_AT_THRESHOLD % thresholds[0]: 1.,
        keys.RECALL_AT_THRESHOLD % thresholds[0]: 1.,
        keys.ACCURACY_AT_THRESHOLD % thresholds[1]: .5,
        keys.PRECISION_AT_THRESHOLD % thresholds[1]: 1.,
        keys.RECALL_AT_THRESHOLD % thresholds[1]: .5,
        keys.ACCURACY_AT_THRESHOLD % thresholds[2]: 0.,
        keys.PRECISION_AT_THRESHOLD % thresholds[2]: 0.,
        keys.RECALL_AT_THRESHOLD % thresholds[2]: 0.,
    }
    # Only the metric *keys* are checked here, not the metric values.
    self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
  def test_train(self):
    """TRAIN mode: loss matches the hand-computed value; train_op_fn is used."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
    # Create estimator spec.
    logits = np.array(((45,), (-41,),), dtype=np.float32)
    expected_train_result = b'my_train_op'
    # loss = sum(cross_entropy(labels, logits)) = sum(0, 41) = 41
    expected_loss = 41.
    def _train_op_fn(loss):
      # Verifies the loss passed to train_op_fn before returning the marker.
      with ops.control_dependencies((check_ops.assert_equal(
          math_ops.to_float(expected_loss), math_ops.to_float(loss),
          name='assert_loss'),)):
        return constant_op.constant(expected_train_result)
    spec = head.create_estimator_spec(
        features={'x': np.array(((42,),), dtype=np.float32)},
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=np.array(((1,), (1,),), dtype=np.float64),
        train_op_fn=_train_op_fn)
    # Assert spec contains expected tensors; TRAIN has no metrics or exports.
    self.assertIsNotNone(spec.loss)
    self.assertEqual({}, spec.eval_metric_ops)
    self.assertIsNotNone(spec.train_op)
    self.assertIsNone(spec.export_outputs)
    _assert_no_hooks(self, spec)
    # Assert predictions, loss, train_op, and summaries.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNotNone(spec.scaffold.summary_op)
      loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
                                                  spec.scaffold.summary_op))
      self.assertAllClose(expected_loss, loss)
      self.assertEqual(expected_train_result, train_result)
      _assert_simple_summaries(self, {
          metric_keys.MetricKeys.LOSS: expected_loss,
          # loss_mean = loss/2 = 41/2 = 20.5
          metric_keys.MetricKeys.LOSS_MEAN: 20.5,
      }, summary_str)
  def test_float_labels_train(self):
    """Fractional (soft) labels are supported in TRAIN mode."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
    # Create estimator spec.
    logits = np.array([[0.5], [-0.3]], dtype=np.float32)
    expected_train_result = b'my_train_op'
    # loss = sum(cross_entropy(labels, logits))
    #      = sum(-label[i]*sigmoid(logit[i]) -(1-label[i])*sigmoid(-logit[i]))
    #      = -0.8 * log(sigmoid(0.5)) -0.2 * log(sigmoid(-0.5))
    #        -0.4 * log(sigmoid(-0.3)) -0.6 * log(sigmoid(0.3))
    #      = 1.2484322
    expected_loss = 1.2484322
    def _train_op_fn(loss):
      # Uses approximate comparison since the expected loss is a float.
      with ops.control_dependencies((_assert_close(
          math_ops.to_float(expected_loss), math_ops.to_float(loss)),)):
        return constant_op.constant(expected_train_result)
    spec = head.create_estimator_spec(
        features={'x': np.array([[42]], dtype=np.float32)},
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=np.array([[0.8], [0.4]], dtype=np.float32),
        train_op_fn=_train_op_fn)
    # Assert predictions, loss, train_op, and summaries.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      loss, train_result = sess.run((spec.loss, spec.train_op))
      self.assertAlmostEqual(expected_loss, loss, delta=1.e-5)
      self.assertEqual(expected_train_result, train_result)
  def test_float_labels_eval(self):
    """Fractional (soft) labels are supported in EVAL mode."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()
    # Create estimator spec.
    logits = np.array([[0.5], [-0.3]], dtype=np.float32)
    spec = head.create_estimator_spec(
        features={'x': np.array([[42]], dtype=np.float32)},
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=np.array([[0.8], [0.4]], dtype=np.float32))
    # loss = sum(cross_entropy(labels, logits))
    #      = sum(-label[i]*sigmoid(logit[i]) -(1-label[i])*sigmoid(-logit[i]))
    #      = -0.8 * log(sigmoid(0.5)) -0.2 * log(sigmoid(-0.5))
    #        -0.4 * log(sigmoid(-0.3)) -0.6 * log(sigmoid(0.3))
    #      = 1.2484322
    expected_loss = 1.2484322
    # Assert loss.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNone(spec.scaffold.summary_op)
      # Each eval_metric_op is a (value_op, update_op) pair; run the updates.
      update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
      loss, metrics = sess.run((spec.loss, update_ops))
      self.assertAlmostEqual(expected_loss, loss, delta=1.e-5)
      self.assertAlmostEqual(
          expected_loss / 2., metrics[metric_keys.MetricKeys.LOSS_MEAN])
  def test_weighted_multi_example_predict(self):
    """Predictions with a weight column configured; 3 examples, 1 batch."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        weight_column='label_weights')
    # Create estimator spec.
    logits = np.array(((45,), (-41,), (44,)), dtype=np.int32)
    spec = head.create_estimator_spec(
        features={
            'x': np.array(((42,), (43,), (44,)), dtype=np.int32),
            'label_weights': np.array(((1.,), (.1,), (1.5,)), dtype=np.float32),
        },
        mode=model_fn.ModeKeys.PREDICT,
        logits=logits)
    # Assert predictions, loss, and metrics.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      predictions = sess.run(spec.predictions)
      # Prediction tensors are float32 even though the logits are int32.
      self.assertAllClose(
          logits.astype(np.float32),
          predictions[prediction_keys.PredictionKeys.LOGITS])
      self.assertAllClose(
          _sigmoid(logits).astype(np.float32),
          predictions[prediction_keys.PredictionKeys.LOGISTIC])
      self.assertAllClose(
          [[0., 1.], [1., 0.],
           [0., 1.]], predictions[prediction_keys.PredictionKeys.PROBABILITIES])
      self.assertAllClose([[1], [0], [1]],
                          predictions[prediction_keys.PredictionKeys.CLASS_IDS])
      self.assertAllEqual([[b'1'], [b'0'], [b'1']],
                          predictions[prediction_keys.PredictionKeys.CLASSES])
  def test_weighted_multi_example_eval(self):
    """Weighted loss and metrics in EVAL mode; 3 examples, 1 batch."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        weight_column='label_weights')
    # Create estimator spec.
    logits = np.array(((45,), (-41,), (44,)), dtype=np.int32)
    spec = head.create_estimator_spec(
        features={
            'x': np.array(((42,), (43,), (44,)), dtype=np.int32),
            'label_weights': np.array(((1.,), (.1,), (1.5,)), dtype=np.float32),
        },
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=np.array(((1,), (1,), (0,)), dtype=np.int32))
    # label_mean = (1*1 + .1*1 + 1.5*0)/(1 + .1 + 1.5) = 1.1/2.6
    #            = .42307692307
    expected_label_mean = .42307692307
    keys = metric_keys.MetricKeys
    expected_metrics = {
        # losses = label_weights*cross_entropy(labels, logits)
        #        = (1*0, .1*41, 1.5*44) = (0, 4.1, 66)
        # loss = sum(losses) = 0 + 4.1 + 66 = 70.1
        # loss_mean = loss/sum(label_weights) = 70.1/(1 + .1 + 1.5)
        #           = 70.1/2.6 = 26.9615384615
        keys.LOSS_MEAN: 26.9615384615,
        # accuracy = (1*1 + .1*0 + 1.5*0)/(1 + .1 + 1.5) = 1/2.6 = .38461538461
        keys.ACCURACY: .38461538461,
        # prediction_mean = (1*1 + .1*0 + 1.5*1)/(1 + .1 + 1.5) = 2.5/2.6
        #                 = .96153846153
        keys.PREDICTION_MEAN: .96153846153,
        keys.LABEL_MEAN: expected_label_mean,
        keys.ACCURACY_BASELINE: 1 - expected_label_mean,
        keys.AUC: .45454565,
        keys.AUC_PR: .6737757325172424,
    }
    # Assert spec contains expected tensors.
    self.assertIsNotNone(spec.loss)
    self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
    # Assert predictions, loss, and metrics.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      # Each eval_metric_op is a (value_op, update_op) pair.
      value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
      update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
      loss, metrics = sess.run((spec.loss, update_ops))
      self.assertAllClose(70.1, loss)
      # Check results of both update (in `metrics`) and value ops.
      self.assertAllClose(expected_metrics, metrics)
      self.assertAllClose(
          expected_metrics, {k: value_ops[k].eval() for k in value_ops})
  def test_train_with_one_dim_labels_and_weights(self):
    """Rank-1 labels and weights are accepted; 3 examples, 1 batch."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        weight_column='label_weights')
    # Create estimator spec.
    logits = np.array(((45,), (-41,), (44,)), dtype=np.float32)
    labels_rank_1 = np.array((1., 1., 0.,))
    weights_rank_1 = np.array(((1., .1, 1.5,)), dtype=np.float64)
    self.assertEqual((3,), labels_rank_1.shape)
    self.assertEqual((3,), weights_rank_1.shape)
    expected_train_result = b'my_train_op'
    # losses = label_weights*cross_entropy(labels, logits)
    #        = (1*0, .1*41, 1.5*44) = (0, 4.1, 66)
    # loss = sum(losses) = 0 + 4.1 + 66 = 70.1
    expected_loss = 70.1
    def _train_op_fn(loss):
      # Verifies the loss passed to train_op_fn before returning the marker.
      with ops.control_dependencies((check_ops.assert_equal(
          math_ops.to_float(expected_loss), math_ops.to_float(loss),
          name='assert_loss'),)):
        return constant_op.constant(expected_train_result)
    spec = head.create_estimator_spec(
        features={
            'x': np.array(((42.,), (43.,), (44.,)), dtype=np.float32),
            'label_weights': weights_rank_1,
        },
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels_rank_1,
        train_op_fn=_train_op_fn)
    # Assert spec contains expected tensors.
    self.assertIsNotNone(spec.loss)
    self.assertIsNotNone(spec.train_op)
    # Assert predictions, loss, and metrics.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNotNone(spec.scaffold.summary_op)
      loss, train_result, summary_str = sess.run((
          spec.loss, spec.train_op, spec.scaffold.summary_op))
      self.assertAllClose(expected_loss, loss)
      self.assertEqual(expected_train_result, train_result)
      _assert_simple_summaries(self, {
          metric_keys.MetricKeys.LOSS: expected_loss,
          # loss_mean = loss/sum(label_weights) = 70.1/(1 + .1 + 1.5)
          #           = 70.1/2.6 = 26.9615384615
          metric_keys.MetricKeys.LOSS_MEAN: 26.9615384615,
      }, summary_str)
  def test_weighted_multi_example_train(self):
    """Weighted loss in TRAIN mode; 3 examples, 1 batch."""
    head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        weight_column='label_weights')
    # Create estimator spec.
    logits = np.array(((45,), (-41,), (44,)), dtype=np.float32)
    expected_train_result = b'my_train_op'
    # losses = label_weights*cross_entropy(labels, logits)
    #        = (1*0, .1*41, 1.5*44) = (0, 4.1, 66)
    # loss = sum(losses) = 0 + 4.1 + 66 = 70.1
    expected_loss = 70.1
    def _train_op_fn(loss):
      # Verifies the loss passed to train_op_fn before returning the marker.
      with ops.control_dependencies((check_ops.assert_equal(
          math_ops.to_float(expected_loss), math_ops.to_float(loss),
          name='assert_loss'),)):
        return constant_op.constant(expected_train_result)
    spec = head.create_estimator_spec(
        features={
            'x': np.array(((42.,), (43.,), (44.,)), dtype=np.float32),
            'label_weights': np.array(((1.,), (.1,), (1.5,)), dtype=np.float64),
        },
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=np.array(((1.,), (1.,), (0.,))),
        train_op_fn=_train_op_fn)
    # Assert spec contains expected tensors.
    self.assertIsNotNone(spec.loss)
    self.assertIsNotNone(spec.train_op)
    # Assert predictions, loss, and metrics.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNotNone(spec.scaffold.summary_op)
      loss, train_result, summary_str = sess.run((
          spec.loss, spec.train_op, spec.scaffold.summary_op))
      self.assertAllClose(expected_loss, loss)
      self.assertEqual(expected_train_result, train_result)
      _assert_simple_summaries(self, {
          metric_keys.MetricKeys.LOSS: expected_loss,
          # loss_mean = loss/sum(label_weights) = 70.1/(1 + .1 + 1.5)
          #           = 70.1/2.6 = 26.9615384615
          metric_keys.MetricKeys.LOSS_MEAN: 26.9615384615,
      }, summary_str)
class RegressionHeadWithMeanSquaredErrorLossTest(test.TestCase):
def test_invalid_label_dimension(self):
with self.assertRaisesRegexp(ValueError, r'Invalid label_dimension'):
head_lib._regression_head_with_mean_squared_error_loss(label_dimension=-1)
with self.assertRaisesRegexp(ValueError, r'Invalid label_dimension'):
head_lib._regression_head_with_mean_squared_error_loss(label_dimension=0)
  def test_invalid_logits(self):
    """Logits narrower than label_dimension fail, statically and dynamically."""
    head = head_lib._regression_head_with_mean_squared_error_loss(
        label_dimension=3)
    self.assertEqual(3, head.logits_dimension)
    logits_1d = np.array(((45.,), (41.,),))
    # Static shape: the mismatch is caught at graph-construction time.
    with self.assertRaisesRegexp(ValueError, 'logits shape'):
      head.create_estimator_spec(
          features={'x': np.array(((42.,),))},
          mode=model_fn.ModeKeys.PREDICT,
          logits=logits_1d)
    # Dynamic shape: a placeholder defers the check to session run time.
    logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    spec = head.create_estimator_spec(
        features={'x': np.array(((42.,),))},
        mode=model_fn.ModeKeys.PREDICT,
        logits=logits_placeholder)
    with self.test_session():
      with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
        spec.predictions[prediction_keys.PredictionKeys.PREDICTIONS].eval({
            logits_placeholder: logits_1d
        })
  def test_incompatible_labels_eval(self):
    """EVAL rejects labels/logits whose width mismatches label_dimension."""
    head = head_lib._regression_head_with_mean_squared_error_loss(
        label_dimension=3)
    self.assertEqual(3, head.logits_dimension)
    values_3d = np.array(((45., 46., 47.), (41., 42., 43.),))
    values_1d = np.array(((43.,), (44.,),))
    # Static shape: checked at graph-construction time, both orderings.
    with self.assertRaisesRegexp(ValueError, 'labels shape'):
      head.create_estimator_spec(
          features={'x': values_1d},
          mode=model_fn.ModeKeys.EVAL,
          logits=values_3d,
          labels=values_1d)
    with self.assertRaisesRegexp(ValueError, 'logits shape'):
      head.create_estimator_spec(
          features={'x': values_3d}, labels=values_3d,
          mode=model_fn.ModeKeys.EVAL, logits=values_1d, train_op_fn=None)
    # Dynamic shape: placeholders defer the check to session run time.
    labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    spec = head.create_estimator_spec(
        features={'x': values_1d},
        mode=model_fn.ModeKeys.EVAL,
        logits=logits_placeholder,
        labels=labels_placeholder)
    with self.test_session():
      with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
        spec.loss.eval({
            labels_placeholder: values_3d,
            logits_placeholder: values_1d
        })
    with self.test_session():
      with self.assertRaisesRegexp(errors.OpError, 'labels shape'):
        spec.loss.eval({
            labels_placeholder: values_1d,
            logits_placeholder: values_3d
        })
  def test_incompatible_labels_train(self):
    """TRAIN rejects labels/logits whose width mismatches label_dimension."""
    head = head_lib._regression_head_with_mean_squared_error_loss(
        label_dimension=3)
    self.assertEqual(3, head.logits_dimension)
    values_3d = np.array(((45., 46., 47.), (41., 42., 43.),))
    values_1d = np.array(((43.,), (44.,),))
    # Static shape: checked at graph-construction time, both orderings.
    with self.assertRaisesRegexp(ValueError, 'labels shape'):
      head.create_estimator_spec(
          features={'x': values_1d},
          mode=model_fn.ModeKeys.TRAIN,
          logits=values_3d,
          labels=values_1d,
          train_op_fn=lambda x: x)
    with self.assertRaisesRegexp(ValueError, 'logits shape'):
      head.create_estimator_spec(
          features={'x': values_3d},
          mode=model_fn.ModeKeys.TRAIN,
          logits=values_1d,
          labels=values_3d,
          train_op_fn=lambda x: x)
    # Dynamic shape: placeholders defer the check to session run time.
    labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    spec = head.create_estimator_spec(
        features={'x': values_1d},
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits_placeholder,
        labels=labels_placeholder,
        train_op_fn=lambda x: x)
    with self.test_session():
      with self.assertRaisesRegexp(errors.OpError, 'logits shape'):
        spec.loss.eval({
            labels_placeholder: values_3d,
            logits_placeholder: values_1d
        })
    with self.test_session():
      with self.assertRaisesRegexp(errors.OpError, 'labels shape'):
        spec.loss.eval({
            labels_placeholder: values_1d,
            logits_placeholder: values_3d
        })
  def test_predict(self):
    """PREDICT mode exposes logits as float32 predictions plus exports."""
    head = head_lib._regression_head_with_mean_squared_error_loss()
    self.assertEqual(1, head.logits_dimension)
    # Create estimator spec.
    logits = np.array(((45,), (41,),), dtype=np.int32)
    spec = head.create_estimator_spec(
        features={'x': np.array(((42.,),), dtype=np.int32)},
        mode=model_fn.ModeKeys.PREDICT,
        logits=logits)
    # Assert spec contains expected tensors; no loss/metrics/train_op in
    # PREDICT mode.
    prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
    self.assertItemsEqual((prediction_key,), spec.predictions.keys())
    self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
    self.assertIsNone(spec.loss)
    self.assertEqual({}, spec.eval_metric_ops)
    self.assertIsNone(spec.train_op)
    self.assertItemsEqual(
        ('', signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY),
        spec.export_outputs.keys())
    _assert_no_hooks(self, spec)
    # Assert predictions.
    with self.test_session():
      _initialize_variables(self, spec.scaffold)
      self.assertAllClose(logits, spec.predictions[prediction_key].eval())
  def test_eval(self):
    """EVAL mode computes squared-error loss and the LOSS_MEAN metric."""
    head = head_lib._regression_head_with_mean_squared_error_loss()
    self.assertEqual(1, head.logits_dimension)
    # Create estimator spec.
    logits = np.array(((45,), (41,),), dtype=np.float32)
    spec = head.create_estimator_spec(
        features={'x': np.array(((42,),), dtype=np.float32)},
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=np.array(((43,), (44,),), dtype=np.int32))
    # Assert spec contains expected tensors; EVAL has no train_op or exports.
    prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
    self.assertItemsEqual((prediction_key,), spec.predictions.keys())
    self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
    self.assertEqual(dtypes.float32, spec.loss.dtype)
    self.assertItemsEqual(
        (metric_keys.MetricKeys.LOSS_MEAN,), spec.eval_metric_ops.keys())
    self.assertIsNone(spec.train_op)
    self.assertIsNone(spec.export_outputs)
    _assert_no_hooks(self, spec)
    # Assert predictions, loss, and metrics.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNone(spec.scaffold.summary_op)
      # The metric is a (value_op, update_op) pair.
      loss_mean_value_op, loss_mean_update_op = spec.eval_metric_ops[
          metric_keys.MetricKeys.LOSS_MEAN]
      predictions, loss, loss_mean = sess.run((
          spec.predictions[prediction_key], spec.loss, loss_mean_update_op))
      self.assertAllClose(logits, predictions)
      # loss = (43-45)^2 + (44-41)^2 = 4+9 = 13
      self.assertAllClose(13., loss)
      # loss_mean = loss/2 = 13/2 = 6.5
      expected_loss_mean = 6.5
      # Check results of both update (in `loss_mean`) and value ops.
      self.assertAllClose(expected_loss_mean, loss_mean)
      self.assertAllClose(expected_loss_mean, loss_mean_value_op.eval())
  def test_train(self):
    """TRAIN mode: squared-error loss matches; train_op_fn result is used."""
    head = head_lib._regression_head_with_mean_squared_error_loss()
    self.assertEqual(1, head.logits_dimension)
    # Create estimator spec.
    logits = np.array(((45,), (41,),), dtype=np.float32)
    expected_train_result = b'my_train_op'
    # loss = (43-45)^2 + (44-41)^2 = 4 + 9 = 13
    expected_loss = 13
    def _train_op_fn(loss):
      # Verifies the loss passed to train_op_fn before returning the marker.
      with ops.control_dependencies((check_ops.assert_equal(
          math_ops.to_float(expected_loss), math_ops.to_float(loss),
          name='assert_loss'),)):
        return constant_op.constant(expected_train_result)
    spec = head.create_estimator_spec(
        features={'x': np.array(((42.,),), dtype=np.float32)},
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=np.array(((43.,), (44.,),), dtype=np.float64),
        train_op_fn=_train_op_fn)
    # Assert spec contains expected tensors; TRAIN has no metrics or exports.
    prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
    self.assertItemsEqual((prediction_key,), spec.predictions.keys())
    self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
    self.assertEqual(dtypes.float32, spec.loss.dtype)
    self.assertEqual({}, spec.eval_metric_ops)
    self.assertIsNotNone(spec.train_op)
    self.assertIsNone(spec.export_outputs)
    _assert_no_hooks(self, spec)
    # Assert predictions, loss, train_op, and summaries.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNotNone(spec.scaffold.summary_op)
      predictions, loss, train_result, summary_str = sess.run((
          spec.predictions[prediction_key], spec.loss, spec.train_op,
          spec.scaffold.summary_op))
      self.assertAllClose(logits, predictions)
      self.assertAllClose(expected_loss, loss)
      self.assertEqual(expected_train_result, train_result)
      _assert_simple_summaries(self, {
          metric_keys.MetricKeys.LOSS: expected_loss,
          # loss_mean = loss/2 = 13/2 = 6.5
          metric_keys.MetricKeys.LOSS_MEAN: 6.5,
      }, summary_str)
  def test_weighted_multi_example_eval(self):
    """Weighted squared-error loss in EVAL; 1d label, 3 examples, 1 batch."""
    head = head_lib._regression_head_with_mean_squared_error_loss(
        weight_column='label_weights')
    self.assertEqual(1, head.logits_dimension)
    # Create estimator spec.
    logits = np.array(((45,), (41,), (44,)), dtype=np.int32)
    spec = head.create_estimator_spec(
        features={
            'x': np.array(((42,), (43,), (44,)), dtype=np.int32),
            'label_weights': np.array(((1.,), (.1,), (1.5,)), dtype=np.float32),
        },
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=np.array(((35,), (42,), (45,)), dtype=np.int32))
    # Assert spec contains expected tensors.
    prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
    self.assertItemsEqual((prediction_key,), spec.predictions.keys())
    self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
    self.assertEqual(dtypes.float32, spec.loss.dtype)
    self.assertItemsEqual(
        (metric_keys.MetricKeys.LOSS_MEAN,), spec.eval_metric_ops.keys())
    self.assertIsNone(spec.train_op)
    self.assertIsNone(spec.export_outputs)
    _assert_no_hooks(self, spec)
    # Assert predictions, loss, and metrics.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNone(spec.scaffold.summary_op)
      # The metric is a (value_op, update_op) pair.
      loss_mean_value_op, loss_mean_update_op = spec.eval_metric_ops[
          metric_keys.MetricKeys.LOSS_MEAN]
      predictions, loss, loss_mean = sess.run((
          spec.predictions[prediction_key], spec.loss, loss_mean_update_op))
      self.assertAllClose(logits, predictions)
      # loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6
      self.assertAllClose(101.6, loss)
      # loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.0769231
      expected_loss_mean = 39.0769231
      # Check results of both update (in `loss_mean`) and value ops.
      self.assertAllClose(expected_loss_mean, loss_mean)
      self.assertAllClose(expected_loss_mean, loss_mean_value_op.eval())
def test_weight_with_numeric_column(self):
"""1d label, 3 examples, 1 batch."""
head = head_lib._regression_head_with_mean_squared_error_loss(
weight_column=feature_column_lib.numeric_column(
'label_weights', normalizer_fn=lambda x: x + 1.))
# Create estimator spec.
logits = np.array(((45,), (41,), (44,)), dtype=np.int32)
spec = head.create_estimator_spec(
features={
'x':
np.array(((42,), (43,), (44,)), dtype=np.int32),
'label_weights':
np.array(((0.,), (-0.9,), (0.5,)), dtype=np.float32),
},
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=np.array(((35,), (42,), (45,)), dtype=np.int32))
# Assert loss.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
loss = sess.run(spec.loss)
# loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6
self.assertAllClose(101.6, loss)
  def test_weighted_multi_example_train(self):
    """Weighted squared-error loss in TRAIN; 1d label, 3 examples, 1 batch."""
    head = head_lib._regression_head_with_mean_squared_error_loss(
        weight_column='label_weights')
    self.assertEqual(1, head.logits_dimension)
    # Create estimator spec.
    logits = np.array(((45,), (41,), (44,)), dtype=np.float32)
    expected_train_result = b'my_train_op'
    # loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6
    expected_loss = 101.6
    def _train_op_fn(loss):
      # Verifies the loss passed to train_op_fn before returning the marker.
      with ops.control_dependencies((check_ops.assert_equal(
          math_ops.to_float(expected_loss), math_ops.to_float(loss),
          name='assert_loss'),)):
        return constant_op.constant(expected_train_result)
    spec = head.create_estimator_spec(
        features={
            'x': np.array(((42,), (43,), (44,)), dtype=np.float32),
            'label_weights': np.array(((1.,), (.1,), (1.5,)), dtype=np.float64),
        },
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=np.array(((35.,), (42.,), (45.,)), dtype=np.float32),
        train_op_fn=_train_op_fn)
    # Assert spec contains expected tensors; TRAIN has no metrics or exports.
    prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
    self.assertItemsEqual((prediction_key,), spec.predictions.keys())
    self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
    self.assertEqual(dtypes.float32, spec.loss.dtype)
    self.assertEqual({}, spec.eval_metric_ops)
    self.assertIsNotNone(spec.train_op)
    self.assertIsNone(spec.export_outputs)
    _assert_no_hooks(self, spec)
    # Assert predictions, loss, train_op, and summaries.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNotNone(spec.scaffold.summary_op)
      predictions, loss, train_result, summary_str = sess.run((
          spec.predictions[prediction_key], spec.loss, spec.train_op,
          spec.scaffold.summary_op))
      self.assertAllClose(logits, predictions)
      self.assertAllClose(expected_loss, loss)
      self.assertEqual(expected_train_result, train_result)
      _assert_simple_summaries(self, {
          metric_keys.MetricKeys.LOSS: expected_loss,
          # loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.0769231
          metric_keys.MetricKeys.LOSS_MEAN: 39.0769231,
      }, summary_str)
  def test_with_one_dim_label_and_weight(self):
    """Rank-1 features/labels/weights are accepted; 3 examples, 1 batch."""
    head = head_lib._regression_head_with_mean_squared_error_loss(
        weight_column='label_weights')
    self.assertEqual(1, head.logits_dimension)
    # Create estimator spec.
    logits = np.array(((45,), (41,), (44,)), dtype=np.float32)
    expected_train_result = b'my_train_op'
    # loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6
    expected_loss = 101.6
    def _train_op_fn(loss):
      # Verifies the loss passed to train_op_fn before returning the marker.
      with ops.control_dependencies((check_ops.assert_equal(
          math_ops.to_float(expected_loss), math_ops.to_float(loss),
          name='assert_loss'),)):
        return constant_op.constant(expected_train_result)
    x_feature_rank_1 = np.array((42., 43., 44.,), dtype=np.float32)
    weight_rank_1 = np.array((1., .1, 1.5,), dtype=np.float64)
    labels_rank_1 = np.array((35., 42., 45.,))
    self.assertEqual((3,), x_feature_rank_1.shape)
    self.assertEqual((3,), weight_rank_1.shape)
    self.assertEqual((3,), labels_rank_1.shape)
    spec = head.create_estimator_spec(
        features={
            'x': x_feature_rank_1,
            'label_weights': weight_rank_1,
        },
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels_rank_1,
        train_op_fn=_train_op_fn)
    # Assert spec contains expected tensors; TRAIN has no metrics or exports.
    prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
    self.assertItemsEqual((prediction_key,), spec.predictions.keys())
    self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
    self.assertEqual(dtypes.float32, spec.loss.dtype)
    self.assertEqual({}, spec.eval_metric_ops)
    self.assertIsNotNone(spec.train_op)
    self.assertIsNone(spec.export_outputs)
    _assert_no_hooks(self, spec)
    # Assert predictions, loss, train_op, and summaries.
    with self.test_session() as sess:
      _initialize_variables(self, spec.scaffold)
      self.assertIsNotNone(spec.scaffold.summary_op)
      predictions, loss, train_result, summary_str = sess.run((
          spec.predictions[prediction_key], spec.loss, spec.train_op,
          spec.scaffold.summary_op))
      self.assertAllClose(logits, predictions)
      self.assertAllClose(expected_loss, loss)
      self.assertEqual(expected_train_result, train_result)
      _assert_simple_summaries(self, {
          metric_keys.MetricKeys.LOSS: expected_loss,
          # loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.0769231
          metric_keys.MetricKeys.LOSS_MEAN: 39.0769231,
      }, summary_str)
def test_weighted_multi_value_eval(self):
  """3d label, 1 example, 1 batch."""
  head = head_lib._regression_head_with_mean_squared_error_loss(
      weight_column='label_weights', label_dimension=3)
  self.assertEqual(3, head.logits_dimension)

  # Build an EVAL spec for a single example with a 3-dimensional label.
  logits_value = np.array(((45., 41., 44.),))
  features = {
      'x': np.array(((42., 43., 44.),)),
      'label_weights': np.array(((1., .1, 1.5),)),
  }
  spec = head.create_estimator_spec(
      features=features,
      mode=model_fn.ModeKeys.EVAL,
      logits=logits_value,
      labels=np.array(((35., 42., 45.),)))

  # In EVAL mode the spec must expose predictions, a float32 loss, and the
  # mean-loss metric -- but no train op, export outputs, or hooks.
  prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
  self.assertItemsEqual((prediction_key,), spec.predictions.keys())
  self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
  self.assertEqual(dtypes.float32, spec.loss.dtype)
  self.assertItemsEqual(
      (metric_keys.MetricKeys.LOSS_MEAN,), spec.eval_metric_ops.keys())
  self.assertIsNone(spec.train_op)
  self.assertIsNone(spec.export_outputs)
  _assert_no_hooks(self, spec)

  # Evaluate predictions, loss, and the streaming mean-loss metric.
  with self.test_session() as sess:
    _initialize_variables(self, spec.scaffold)
    self.assertIsNone(spec.scaffold.summary_op)
    value_op, update_op = spec.eval_metric_ops[
        metric_keys.MetricKeys.LOSS_MEAN]
    predictions, loss, loss_mean = sess.run(
        (spec.predictions[prediction_key], spec.loss, update_op))
    self.assertAllClose(logits_value, predictions)
    # loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6
    self.assertAllClose(101.6, loss)
    # loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.076923
    expected_loss_mean = 39.076923
    # Both the update op result and the value op must agree.
    self.assertAllClose(expected_loss_mean, loss_mean)
    self.assertAllClose(expected_loss_mean, value_op.eval())
def test_weighted_multi_value_train(self):
  """3d label, 1 example, 1 batch."""
  head = head_lib._regression_head_with_mean_squared_error_loss(
      weight_column='label_weights', label_dimension=3)
  self.assertEqual(3, head.logits_dimension)

  logits_value = np.array(((45., 41., 44.),))
  expected_train_result = b'my_train_op'
  # loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6
  expected_loss = 101.6

  def _train_op_fn(loss):
    # Assert in-graph that the loss handed to train_op_fn matches the
    # expected value, then return a recognizable constant as the "train op".
    assert_loss = check_ops.assert_equal(
        math_ops.to_float(expected_loss), math_ops.to_float(loss),
        name='assert_loss')
    with ops.control_dependencies((assert_loss,)):
      return constant_op.constant(expected_train_result)

  spec = head.create_estimator_spec(
      features={
          'x': np.array(((42., 43., 44.),)),
          'label_weights': np.array(((1., .1, 1.5),)),
      },
      mode=model_fn.ModeKeys.TRAIN,
      logits=logits_value,
      labels=np.array(((35., 42., 45.),)),
      train_op_fn=_train_op_fn)

  # TRAIN mode: predictions, loss, and train_op are populated; no eval
  # metrics, export outputs, or hooks.
  prediction_key = prediction_keys.PredictionKeys.PREDICTIONS
  self.assertItemsEqual((prediction_key,), spec.predictions.keys())
  self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)
  self.assertEqual(dtypes.float32, spec.loss.dtype)
  self.assertEqual({}, spec.eval_metric_ops)
  self.assertIsNotNone(spec.train_op)
  self.assertIsNone(spec.export_outputs)
  _assert_no_hooks(self, spec)

  # Run the graph and verify predictions, loss, train result, and summaries.
  with self.test_session() as sess:
    _initialize_variables(self, spec.scaffold)
    self.assertIsNotNone(spec.scaffold.summary_op)
    predictions, loss, train_result, summary_str = sess.run(
        (spec.predictions[prediction_key], spec.loss, spec.train_op,
         spec.scaffold.summary_op))
    self.assertAllClose(logits_value, predictions)
    self.assertAllClose(expected_loss, loss)
    self.assertEqual(expected_train_result, train_result)
    _assert_simple_summaries(self, {
        metric_keys.MetricKeys.LOSS: expected_loss,
        # loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.076923
        metric_keys.MetricKeys.LOSS_MEAN: 39.076923,
    }, summary_str)
def test_weighted_multi_batch_eval(self):
  """1d label, 1 example, 3 batches."""
  head = head_lib._regression_head_with_mean_squared_error_loss(
      weight_column='label_weights')
  self.assertEqual(1, head.logits_dimension)

  # Create estimator spec.
  logits = np.array(((45.,), (41.,), (44.,)))
  # `numpy_input_fn` with batch_size=1 feeds the three examples one at a
  # time, so the streaming metric accumulates across three sess.run calls.
  input_fn = numpy_io.numpy_input_fn(
      x={
          'x': np.array(((42.,), (43.,), (44.,))),
          'label_weights': np.array(((1.,), (.1,), (1.5,))),
          # 'logits' is not a feature, but we use `numpy_input_fn` to make a
          # batched version of it, and pop it off before passing to
          # `create_estimator_spec`.
          'logits': logits,
      },
      y=np.array(((35.,), (42.,), (45.,))),
      batch_size=1,
      num_epochs=1,
      shuffle=False)
  batched_features, batched_labels = input_fn()
  batched_logits = batched_features.pop('logits')
  spec = head.create_estimator_spec(
      features=batched_features,
      mode=model_fn.ModeKeys.EVAL,
      logits=batched_logits,
      labels=batched_labels,
      train_op_fn=None)

  # losses = [1*(35-45)^2, .1*(42-41)^2, 1.5*(45-44)^2] = [100, .1, 1.5]
  # loss = sum(losses) = 100+.1+1.5 = 101.6
  # loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.076923
  expected_metrics = {metric_keys.MetricKeys.LOSS_MEAN: 39.076923}

  # Assert spec contains expected tensors.
  self.assertEqual(dtypes.float32, spec.loss.dtype)
  self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
  self.assertIsNone(spec.train_op)
  _assert_no_hooks(self, spec)

  with self.test_session() as sess:
    # Finalize graph and initialize variables.
    _initialize_variables(self, spec.scaffold)
    self.assertIsNotNone(spec.scaffold.summary_op)
    # Queue runners drive the `numpy_input_fn` feeding pipeline.
    queue_runner_impl.start_queue_runners()

    # Run tensors for `steps` steps (one step per single-example batch).
    steps = len(logits)
    results = tuple([
        sess.run((
            spec.loss,
            # The `[1]` gives us the metric update op.
            {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
        )) for _ in range(steps)
    ])

    # Assert losses and metrics.
    self.assertAllClose((100, .1, 1.5), [r[0] for r in results])
    # For metrics, check results of both update (in `results`) and value ops.
    # Note: we only check the result of the last step for streaming metrics.
    self.assertAllClose(expected_metrics, results[steps - 1][1])
    self.assertAllClose(expected_metrics, {
        k: spec.eval_metric_ops[k][0].eval() for k in spec.eval_metric_ops
    })
def test_weighted_multi_batch_train(self):
  """1d label, 1 example, 3 batches."""
  head = head_lib._regression_head_with_mean_squared_error_loss(
      weight_column='label_weights')
  self.assertEqual(1, head.logits_dimension)

  logits_value = np.array(((45.,), (41.,), (44.,)))
  # `numpy_input_fn` feeds one example per batch. 'logits' is not a real
  # feature; it rides along only so we get a batched version of it, and is
  # popped off before calling `create_estimator_spec`.
  input_fn = numpy_io.numpy_input_fn(
      x={
          'x': np.array(((42.,), (43.,), (44.,))),
          'label_weights': np.array(((1.,), (.1,), (1.5,))),
          'logits': logits_value,
      },
      y=np.array(((35.,), (42.,), (45.,))),
      batch_size=1,
      num_epochs=1,
      shuffle=False)
  features, labels = input_fn()
  logits_batched = features.pop('logits')
  spec = head.create_estimator_spec(
      features=features,
      mode=model_fn.ModeKeys.TRAIN,
      logits=logits_batched,
      labels=labels,
      # Scale the loss by -7 so the train op's output is easy to verify.
      train_op_fn=lambda loss: loss * -7.)

  # Assert spec contains expected tensors.
  self.assertEqual(dtypes.float32, spec.loss.dtype)
  self.assertIsNotNone(spec.train_op)

  with self.test_session() as sess:
    # Finalize graph and initialize variables.
    _initialize_variables(self, spec.scaffold)
    self.assertIsNotNone(spec.scaffold.summary_op)
    queue_runner_impl.start_queue_runners()
    results = tuple(
        sess.run((spec.loss, spec.train_op))
        for _ in range(len(logits_value)))

    # losses = [1*(35-45)^2, .1*(42-41)^2, 1.5*(45-44)^2] = [100, .1, 1.5]
    expected_losses = np.array((100, .1, 1.5))
    self.assertAllClose(expected_losses, [r[0] for r in results])
    self.assertAllClose(expected_losses * -7., [r[1] for r in results])
# Run all tests in this module when executed as a script.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.