repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
|---|---|---|---|---|---|
citrix-openstack-build/swift3
|
swift3/test/unit/test_swift3.py
|
2
|
32421
|
# Copyright (c) 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datetime import datetime
import cgi
import hashlib
import base64
import xml.dom.minidom
import simplejson
from swift.common.swob import Request, Response, HTTPUnauthorized, \
HTTPCreated,HTTPNoContent, HTTPAccepted, HTTPBadRequest, HTTPNotFound, \
HTTPConflict, HTTPForbidden, HTTPRequestEntityTooLarge
from swift3 import middleware as swift3
class FakeApp(object):
    """Minimal WSGI stand-in: always answers with the literal body 'FAKE APP'.

    It also records every argument handed to do_start_response so tests can
    inspect the status line and headers that the middleware produced.
    """

    def __init__(self):
        # Point .app back at ourselves so middleware wrappers can chain
        # through to the "backend".
        self.app = self
        self.response_args = []

    def __call__(self, env, start_response):
        # The body is returned directly; callers treat it as the response.
        return "FAKE APP"

    def do_start_response(self, *args):
        # Accumulate each positional argument for later inspection.
        for arg in args:
            self.response_args.append(arg)
class FakeAppService(FakeApp):
    # Fake account-level backend: a successful GET returns a Swift-style
    # JSON bucket listing built from self.buckets; any other configured
    # status produces the matching canned error response.
    def __init__(self, status=200):
        FakeApp.__init__(self)
        # Status code this fake backend should simulate.
        self.status = status
        # (name, object count, total bytes) per bucket.
        self.buckets = (('apple', 1, 200), ('orange', 3, 430))
    def __call__(self, env, start_response):
        if self.status == 200:
            start_response(Response().status, [('Content-Type', 'text/xml')])
            # Hand-build the JSON account listing, one dict per bucket,
            # mimicking what the Swift proxy would return.
            json_pattern = ['"name":%s', '"count":%s', '"bytes":%s']
            json_pattern = '{' + ','.join(json_pattern) + '}'
            json_out = []
            for b in self.buckets:
                name = simplejson.dumps(b[0])
                json_out.append(json_pattern %
                                (name, b[1], b[2]))
            account_list = '[' + ','.join(json_out) + ']'
            return account_list
        elif self.status == 401:
            start_response(HTTPUnauthorized().status, [])
        elif self.status == 403:
            start_response(HTTPForbidden().status, [])
        else:
            # Any unrecognized status falls back to a generic 400.
            start_response(HTTPBadRequest().status, [])
        return []
class FakeAppBucket(FakeApp):
    # Fake container-level backend: GET serves a fixed object listing,
    # while PUT/DELETE answer with the canned status configured at
    # construction time.
    def __init__(self, status=200):
        FakeApp.__init__(self)
        # Status code this fake backend should simulate.
        self.status = status
        # (name, last_modified, hash placeholder, bytes) per object.
        self.objects = (('rose', '2011-01-05T02:19:14.275290', 0, 303),
                        ('viola', '2011-01-05T02:19:14.275290', 0, 3909),
                        ('lily', '2011-01-05T02:19:14.275290', 0, 3909))
    def __call__(self, env, start_response):
        if env['REQUEST_METHOD'] == 'GET':
            if self.status == 200:
                start_response(Response().status,
                               [('Content-Type', 'text/xml')])
                # Hand-build the JSON container listing, one dict per
                # object, mimicking the Swift proxy's response body.
                json_pattern = ['"name":%s', '"last_modified":%s', '"hash":%s',
                                '"bytes":%s']
                json_pattern = '{' + ','.join(json_pattern) + '}'
                json_out = []
                for b in self.objects:
                    name = simplejson.dumps(b[0])
                    time = simplejson.dumps(b[1])
                    json_out.append(json_pattern %
                                    (name, time, b[2], b[3]))
                account_list = '[' + ','.join(json_out) + ']'
                return account_list
            elif self.status == 401:
                start_response(HTTPUnauthorized().status, [])
            elif self.status == 403:
                start_response(HTTPForbidden().status, [])
            elif self.status == 404:
                start_response(HTTPNotFound().status, [])
            else:
                start_response(HTTPBadRequest().status, [])
        elif env['REQUEST_METHOD'] == 'PUT':
            if self.status == 201:
                start_response(HTTPCreated().status, [])
            elif self.status == 401:
                start_response(HTTPUnauthorized().status, [])
            elif self.status == 403:
                start_response(HTTPForbidden().status, [])
            elif self.status == 202:
                # 202 Accepted: Swift's "container already exists" answer.
                start_response(HTTPAccepted().status, [])
            else:
                start_response(HTTPBadRequest().status, [])
        elif env['REQUEST_METHOD'] == 'DELETE':
            if self.status == 204:
                start_response(HTTPNoContent().status, [])
            elif self.status == 401:
                start_response(HTTPUnauthorized().status, [])
            elif self.status == 403:
                start_response(HTTPForbidden().status, [])
            elif self.status == 404:
                start_response(HTTPNotFound().status, [])
            elif self.status == 409:
                # 409 Conflict: container not empty.
                start_response(HTTPConflict().status, [])
            else:
                start_response(HTTPBadRequest().status, [])
        return []
class FakeAppObject(FakeApp):
    # Fake object-level backend: GET/HEAD serve a small fixed body plus
    # canned headers; PUT/DELETE answer with the configured status.
    def __init__(self, status=200):
        FakeApp.__init__(self)
        # Status code this fake backend should simulate.
        self.status = status
        self.object_body = 'hello'
        # Headers returned on a successful GET/HEAD; tests assert the
        # middleware translates these into their S3 equivalents.
        self.response_headers = {'Content-Type': 'text/html',
                                 'Content-Length': len(self.object_body),
                                 'x-object-meta-test': 'swift',
                                 'etag': '1b2cf535f27731c974343645a3985328',
                                 'last-modified': '2011-01-05T02:19:14.275290'}
    def __call__(self, env, start_response):
        req = Request(env)
        if env['REQUEST_METHOD'] == 'GET' or env['REQUEST_METHOD'] == 'HEAD':
            if self.status == 200:
                if 'HTTP_RANGE' in env:
                    # Delegate Range handling to swob's conditional-response
                    # machinery so 206 / Content-Range come out correctly.
                    resp = Response(request=req, body=self.object_body,
                                    conditional_response=True)
                    return resp(env, start_response)
                start_response(Response(request=req).status,
                               self.response_headers.items())
                # HEAD gets the headers only; GET also returns the body.
                if env['REQUEST_METHOD'] == 'GET':
                    return self.object_body
            elif self.status == 401:
                start_response(HTTPUnauthorized(request=req).status, [])
            elif self.status == 403:
                start_response(HTTPForbidden(request=req).status, [])
            elif self.status == 404:
                start_response(HTTPNotFound(request=req).status, [])
            else:
                start_response(HTTPBadRequest(request=req).status, [])
        elif env['REQUEST_METHOD'] == 'PUT':
            if self.status == 201:
                # Successful PUT echoes the stored object's etag.
                start_response(HTTPCreated(request=req).status,
                               [('etag', self.response_headers['etag'])])
            elif self.status == 401:
                start_response(HTTPUnauthorized(request=req).status, [])
            elif self.status == 403:
                start_response(HTTPForbidden(request=req).status, [])
            elif self.status == 404:
                start_response(HTTPNotFound(request=req).status, [])
            elif self.status == 413:
                start_response(HTTPRequestEntityTooLarge(request=req).status,
                               [])
            else:
                start_response(HTTPBadRequest(request=req).status, [])
        elif env['REQUEST_METHOD'] == 'DELETE':
            if self.status == 204:
                start_response(HTTPNoContent(request=req).status, [])
            elif self.status == 401:
                start_response(HTTPUnauthorized(request=req).status, [])
            elif self.status == 403:
                start_response(HTTPForbidden(request=req).status, [])
            elif self.status == 404:
                start_response(HTTPNotFound(request=req).status, [])
            else:
                start_response(HTTPBadRequest(request=req).status, [])
        return []
def start_response(*args):
    """No-op WSGI start_response for tests that ignore status and headers."""
    return None
class TestSwift3(unittest.TestCase):
def setUp(self):
self.app = swift3.filter_factory({})(FakeApp())
def test_non_s3_request_passthrough(self):
req = Request.blank('/something')
resp = self.app(req.environ, start_response)
self.assertEquals(resp, 'FAKE APP')
def test_bad_format_authorization(self):
req = Request.blank('/something',
headers={'Authorization': 'hoge'})
resp = self.app(req.environ, start_response)
dom = xml.dom.minidom.parseString("".join(resp))
self.assertEquals(dom.firstChild.nodeName, 'Error')
code = dom.getElementsByTagName('Code')[0].childNodes[0].nodeValue
self.assertEquals(code, 'AccessDenied')
def test_bad_method(self):
req = Request.blank('/',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS test:tester:hmac'})
resp = self.app(req.environ, start_response)
dom = xml.dom.minidom.parseString("".join(resp))
self.assertEquals(dom.firstChild.nodeName, 'Error')
code = dom.getElementsByTagName('Code')[0].childNodes[0].nodeValue
self.assertEquals(code, 'InvalidURI')
def _test_method_error(self, cl, method, path, status, headers={}):
local_app = swift3.filter_factory({})(cl(status))
headers.update({'Authorization': 'AWS test:tester:hmac'})
req = Request.blank(path, environ={'REQUEST_METHOD': method},
headers=headers)
resp = local_app(req.environ, start_response)
dom = xml.dom.minidom.parseString("".join(resp))
self.assertEquals(dom.firstChild.nodeName, 'Error')
return dom.getElementsByTagName('Code')[0].childNodes[0].nodeValue
def test_service_GET_error(self):
code = self._test_method_error(FakeAppService, 'GET', '/', 401)
self.assertEquals(code, 'AccessDenied')
code = self._test_method_error(FakeAppService, 'GET', '/', 403)
self.assertEquals(code, 'AccessDenied')
code = self._test_method_error(FakeAppService, 'GET', '/', 0)
self.assertEquals(code, 'InvalidURI')
def test_service_GET(self):
local_app = swift3.filter_factory({})(FakeAppService())
req = Request.blank('/',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
resp = local_app(req.environ, local_app.app.do_start_response)
self.assertEquals(local_app.app.response_args[0].split()[0], '200')
dom = xml.dom.minidom.parseString("".join(resp))
self.assertEquals(dom.firstChild.nodeName, 'ListAllMyBucketsResult')
buckets = [n for n in dom.getElementsByTagName('Bucket')]
listing = [n for n in buckets[0].childNodes if n.nodeName != '#text']
self.assertEquals(len(listing), 2)
names = []
for b in buckets:
if b.childNodes[0].nodeName == 'Name':
names.append(b.childNodes[0].childNodes[0].nodeValue)
self.assertEquals(len(names), len(FakeAppService().buckets))
for i in FakeAppService().buckets:
self.assertTrue(i[0] in names)
def test_bucket_GET_error(self):
code = self._test_method_error(FakeAppBucket, 'GET', '/bucket', 401)
self.assertEquals(code, 'AccessDenied')
code = self._test_method_error(FakeAppBucket, 'GET', '/bucket', 403)
self.assertEquals(code, 'AccessDenied')
code = self._test_method_error(FakeAppBucket, 'GET', '/bucket', 404)
self.assertEquals(code, 'NoSuchBucket')
code = self._test_method_error(FakeAppBucket, 'GET', '/bucket', 0)
self.assertEquals(code, 'InvalidURI')
def test_bucket_GET(self):
local_app = swift3.filter_factory({})(FakeAppBucket())
bucket_name = 'junk'
req = Request.blank('/%s' % bucket_name,
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
resp = local_app(req.environ, local_app.app.do_start_response)
self.assertEquals(local_app.app.response_args[0].split()[0], '200')
dom = xml.dom.minidom.parseString("".join(resp))
self.assertEquals(dom.firstChild.nodeName, 'ListBucketResult')
name = dom.getElementsByTagName('Name')[0].childNodes[0].nodeValue
self.assertEquals(name, bucket_name)
objects = [n for n in dom.getElementsByTagName('Contents')]
names = []
for o in objects:
if o.childNodes[0].nodeName == 'Key':
names.append(o.childNodes[0].childNodes[0].nodeValue)
if o.childNodes[1].nodeName == 'LastModified':
self.assertTrue(
o.childNodes[1].childNodes[0].nodeValue.endswith('Z'))
self.assertEquals(len(names), len(FakeAppBucket().objects))
for i in FakeAppBucket().objects:
self.assertTrue(i[0] in names)
def test_bucket_GET_is_truncated(self):
local_app = swift3.filter_factory({})(FakeAppBucket())
bucket_name = 'junk'
req = Request.blank('/%s' % bucket_name,
environ={'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'max-keys=3'},
headers={'Authorization': 'AWS test:tester:hmac'})
resp = local_app(req.environ, local_app.app.do_start_response)
dom = xml.dom.minidom.parseString("".join(resp))
self.assertEquals(dom.getElementsByTagName('IsTruncated')[0].
childNodes[0].nodeValue, 'false')
req = Request.blank('/%s' % bucket_name,
environ={'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'max-keys=2'},
headers={'Authorization': 'AWS test:tester:hmac'})
resp = local_app(req.environ, local_app.app.do_start_response)
dom = xml.dom.minidom.parseString("".join(resp))
self.assertEquals(dom.getElementsByTagName('IsTruncated')[0].
childNodes[0].nodeValue, 'true')
def test_bucket_GET_max_keys(self):
class FakeApp(object):
def __call__(self, env, start_response):
self.query_string = env['QUERY_STRING']
start_response('200 OK', [])
return '[]'
fake_app = FakeApp()
local_app = swift3.filter_factory({})(fake_app)
bucket_name = 'junk'
req = Request.blank('/%s' % bucket_name,
environ={'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'max-keys=5'},
headers={'Authorization': 'AWS test:tester:hmac'})
resp = local_app(req.environ, lambda *args: None)
dom = xml.dom.minidom.parseString("".join(resp))
self.assertEquals(dom.getElementsByTagName('MaxKeys')[0].
childNodes[0].nodeValue, '5')
args = dict(cgi.parse_qsl(fake_app.query_string))
self.assert_(args['limit'] == '6')
req = Request.blank('/%s' % bucket_name,
environ={'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'max-keys=5000'},
headers={'Authorization': 'AWS test:tester:hmac'})
resp = local_app(req.environ, lambda *args: None)
dom = xml.dom.minidom.parseString("".join(resp))
self.assertEquals(dom.getElementsByTagName('MaxKeys')[0].
childNodes[0].nodeValue, '1000')
args = dict(cgi.parse_qsl(fake_app.query_string))
self.assertEquals(args['limit'], '1001')
def test_bucket_GET_passthroughs(self):
class FakeApp(object):
def __call__(self, env, start_response):
self.query_string = env['QUERY_STRING']
start_response('200 OK', [])
return '[]'
fake_app = FakeApp()
local_app = swift3.filter_factory({})(fake_app)
bucket_name = 'junk'
req = Request.blank('/%s' % bucket_name,
environ={'REQUEST_METHOD': 'GET', 'QUERY_STRING':
'delimiter=a&marker=b&prefix=c'},
headers={'Authorization': 'AWS test:tester:hmac'})
resp = local_app(req.environ, lambda *args: None)
dom = xml.dom.minidom.parseString("".join(resp))
self.assertEquals(dom.getElementsByTagName('Prefix')[0].
childNodes[0].nodeValue, 'c')
self.assertEquals(dom.getElementsByTagName('Marker')[0].
childNodes[0].nodeValue, 'b')
self.assertEquals(dom.getElementsByTagName('Delimiter')[0].
childNodes[0].nodeValue, 'a')
args = dict(cgi.parse_qsl(fake_app.query_string))
self.assertEquals(args['delimiter'], 'a')
self.assertEquals(args['marker'], 'b')
self.assertEquals(args['prefix'], 'c')
def test_bucket_PUT_error(self):
code = self._test_method_error(FakeAppBucket, 'PUT', '/bucket', 201,
headers={'Content-Length': 'a'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error(FakeAppBucket, 'PUT', '/bucket', 201,
headers={'Content-Length': '-1'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error(FakeAppBucket, 'PUT', '/bucket', 401)
self.assertEquals(code, 'AccessDenied')
code = self._test_method_error(FakeAppBucket, 'PUT', '/bucket', 403)
self.assertEquals(code, 'AccessDenied')
code = self._test_method_error(FakeAppBucket, 'PUT', '/bucket', 202)
self.assertEquals(code, 'BucketAlreadyExists')
code = self._test_method_error(FakeAppBucket, 'PUT', '/bucket', 0)
self.assertEquals(code, 'InvalidURI')
def test_bucket_PUT(self):
local_app = swift3.filter_factory({})(FakeAppBucket(201))
req = Request.blank('/bucket',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS test:tester:hmac'})
resp = local_app(req.environ, local_app.app.do_start_response)
self.assertEquals(local_app.app.response_args[0].split()[0], '200')
def test_bucket_DELETE_error(self):
code = self._test_method_error(FakeAppBucket, 'DELETE', '/bucket', 401)
self.assertEquals(code, 'AccessDenied')
code = self._test_method_error(FakeAppBucket, 'DELETE', '/bucket', 403)
self.assertEquals(code, 'AccessDenied')
code = self._test_method_error(FakeAppBucket, 'DELETE', '/bucket', 404)
self.assertEquals(code, 'NoSuchBucket')
code = self._test_method_error(FakeAppBucket, 'DELETE', '/bucket', 409)
self.assertEquals(code, 'BucketNotEmpty')
code = self._test_method_error(FakeAppBucket, 'DELETE', '/bucket', 0)
self.assertEquals(code, 'InvalidURI')
def test_bucket_DELETE(self):
local_app = swift3.filter_factory({})(FakeAppBucket(204))
req = Request.blank('/bucket',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Authorization': 'AWS test:tester:hmac'})
resp = local_app(req.environ, local_app.app.do_start_response)
self.assertEquals(local_app.app.response_args[0].split()[0], '204')
def _check_acl(self, owner, resp):
dom = xml.dom.minidom.parseString("".join(resp))
self.assertEquals(dom.firstChild.nodeName, 'AccessControlPolicy')
name = dom.getElementsByTagName('Permission')[0].childNodes[0].nodeValue
self.assertEquals(name, 'FULL_CONTROL')
name = dom.getElementsByTagName('ID')[0].childNodes[0].nodeValue
self.assertEquals(name, owner)
def test_bucket_acl_GET(self):
local_app = swift3.filter_factory({})(FakeAppBucket())
bucket_name = 'junk'
req = Request.blank('/%s?acl' % bucket_name,
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
resp = local_app(req.environ, local_app.app.do_start_response)
self._check_acl('test:tester', resp)
def test_bucket_versioning_GET(self):
local_app = swift3.filter_factory({})(FakeAppBucket())
bucket_name = 'junk'
req = Request.blank('/%s?versioning' % bucket_name,
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
resp = local_app(req.environ, local_app.app.do_start_response)
dom = xml.dom.minidom.parseString("".join(resp))
self.assertEquals(dom.firstChild.nodeName, 'VersioningConfiguration')
def _test_object_GETorHEAD(self, method):
local_app = swift3.filter_factory({})(FakeAppObject())
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': method},
headers={'Authorization': 'AWS test:tester:hmac'})
resp = local_app(req.environ, local_app.app.do_start_response)
self.assertEquals(local_app.app.response_args[0].split()[0], '200')
headers = dict((k.lower(), v) for k, v in
local_app.app.response_args[1])
for key, val in local_app.app.response_headers.iteritems():
if key in ('content-length', 'content-type', 'content-encoding',
'etag', 'last-modified'):
self.assertTrue(key in headers)
self.assertEquals(headers[key], val)
elif key.startswith('x-object-meta-'):
self.assertTrue('x-amz-meta-' + key[14:] in headers)
self.assertEquals(headers['x-amz-meta-' + key[14:]], val)
if method == 'GET':
self.assertEquals(''.join(resp), local_app.app.object_body)
def test_object_HEAD(self):
self._test_object_GETorHEAD('HEAD')
def test_object_GET_error(self):
code = self._test_method_error(FakeAppObject, 'GET',
'/bucket/object', 401)
self.assertEquals(code, 'AccessDenied')
code = self._test_method_error(FakeAppObject, 'GET',
'/bucket/object', 403)
self.assertEquals(code, 'AccessDenied')
code = self._test_method_error(FakeAppObject, 'GET',
'/bucket/object', 404)
self.assertEquals(code, 'NoSuchKey')
code = self._test_method_error(FakeAppObject, 'GET',
'/bucket/object', 0)
self.assertEquals(code, 'InvalidURI')
def test_object_GET(self):
self._test_object_GETorHEAD('GET')
def test_object_GET_Range(self):
local_app = swift3.filter_factory({})(FakeAppObject())
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac',
'Range': 'bytes=0-3'})
resp = local_app(req.environ, local_app.app.do_start_response)
self.assertEquals(local_app.app.response_args[0].split()[0], '206')
headers = dict((k.lower(), v) for k, v in
local_app.app.response_args[1])
self.assertTrue('content-range' in headers)
self.assertTrue(headers['content-range'].startswith('bytes 0-3'))
def test_object_PUT_error(self):
code = self._test_method_error(FakeAppObject, 'PUT',
'/bucket/object', 401)
self.assertEquals(code, 'AccessDenied')
code = self._test_method_error(FakeAppObject, 'PUT',
'/bucket/object', 403)
self.assertEquals(code, 'AccessDenied')
code = self._test_method_error(FakeAppObject, 'PUT',
'/bucket/object', 404)
self.assertEquals(code, 'NoSuchBucket')
code = self._test_method_error(FakeAppObject, 'PUT',
'/bucket/object', 413)
self.assertEquals(code, 'EntityTooLarge')
code = self._test_method_error(FakeAppObject, 'PUT',
'/bucket/object', 0)
self.assertEquals(code, 'InvalidURI')
def test_object_PUT(self):
local_app = swift3.filter_factory({})(FakeAppObject(201))
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS test:tester:hmac',
'x-amz-storage-class': 'REDUCED_REDUNDANCY',
'Content-MD5': 'Gyz1NfJ3Mcl0NDZFo5hTKA=='})
req.date = datetime.now()
req.content_type = 'text/plain'
resp = local_app(req.environ, local_app.app.do_start_response)
self.assertEquals(local_app.app.response_args[0].split()[0], '200')
headers = dict((k.lower(), v) for k, v in
local_app.app.response_args[1])
self.assertEquals(headers['etag'],
"\"%s\"" % local_app.app.response_headers['etag'])
def test_object_PUT_headers(self):
class FakeApp(object):
def __call__(self, env, start_response):
self.req = Request(env)
start_response('200 OK', [])
return []
app = FakeApp()
local_app = swift3.filter_factory({})(app)
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS test:tester:hmac',
'X-Amz-Storage-Class': 'REDUCED_REDUNDANCY',
'X-Amz-Meta-Something': 'oh hai',
'X-Amz-Copy-Source': '/some/source',
'Content-MD5': 'ffoHqOWd280dyE1MT4KuoQ=='})
req.date = datetime.now()
req.content_type = 'text/plain'
resp = local_app(req.environ, lambda *args: None)
self.assertEquals(app.req.headers['ETag'],
'7dfa07a8e59ddbcd1dc84d4c4f82aea1')
self.assertEquals(app.req.headers['X-Object-Meta-Something'], 'oh hai')
self.assertEquals(app.req.headers['X-Copy-From'], '/some/source')
def test_object_DELETE_error(self):
code = self._test_method_error(FakeAppObject, 'DELETE',
'/bucket/object', 401)
self.assertEquals(code, 'AccessDenied')
code = self._test_method_error(FakeAppObject, 'DELETE',
'/bucket/object', 403)
self.assertEquals(code, 'AccessDenied')
code = self._test_method_error(FakeAppObject, 'DELETE',
'/bucket/object', 404)
self.assertEquals(code, 'NoSuchKey')
code = self._test_method_error(FakeAppObject, 'DELETE',
'/bucket/object', 0)
self.assertEquals(code, 'InvalidURI')
def test_object_DELETE(self):
local_app = swift3.filter_factory({})(FakeAppObject(204))
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Authorization': 'AWS test:tester:hmac'})
resp = local_app(req.environ, local_app.app.do_start_response)
self.assertEquals(local_app.app.response_args[0].split()[0], '204')
def test_object_multi_DELETE(self):
local_app = swift3.filter_factory({})(FakeAppBucket())
body = '<?xml version="1.0" encoding="UTF-8"?> \
<Delete>\
<Object>\
<Key>Key1</Key>\
</Object>\
<Object>\
<Key>Key2</Key>\
</Object>\
</Delete>'
req = Request.blank('/bucket?delete',
environ={'REQUEST_METHOD': 'POST'},
headers={'Authorization': 'AWS test:tester:hmac'},
body=body)
req.date = datetime.now()
req.content_type = 'text/plain'
resp = local_app(req.environ, local_app.app.do_start_response)
self.assertEquals(local_app.app.response_args[0].split()[0], '200')
def test_object_acl_GET(self):
local_app = swift3.filter_factory({})(FakeAppObject())
req = Request.blank('/bucket/object?acl',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
resp = local_app(req.environ, local_app.app.do_start_response)
self._check_acl('test:tester', resp)
def test_canonical_string(self):
"""
The hashes here were generated by running the same requests against
boto.utils.canonical_string
"""
def verify(hash, path, headers):
req = Request.blank(path, headers=headers)
self.assertEquals(hash,
hashlib.md5(swift3.canonical_string(req)).hexdigest())
verify('6dd08c75e42190a1ce9468d1fd2eb787', '/bucket/object',
{'Content-Type': 'text/plain', 'X-Amz-Something': 'test',
'Date': 'whatever'})
verify('c8447135da232ae7517328f3429df481', '/bucket/object',
{'Content-Type': 'text/plain', 'X-Amz-Something': 'test'})
verify('bf49304103a4de5c325dce6384f2a4a2', '/bucket/object',
{'content-type': 'text/plain'})
verify('be01bd15d8d47f9fe5e2d9248cc6f180', '/bucket/object', {})
verify('8d28cc4b8322211f6cc003256cd9439e', 'bucket/object',
{'Content-MD5': 'somestuff'})
verify('a822deb31213ad09af37b5a7fe59e55e', '/bucket/object?acl', {})
verify('cce5dd1016595cb706c93f28d3eaa18f', '/bucket/object',
{'Content-Type': 'text/plain', 'X-Amz-A': 'test',
'X-Amz-Z': 'whatever', 'X-Amz-B': 'lalala',
'X-Amz-Y': 'lalalalalalala'})
verify('7506d97002c7d2de922cc0ec34af8846', '/bucket/object',
{'Content-Type': None, 'X-Amz-Something': 'test'})
verify('28f76d6162444a193b612cd6cb20e0be', '/bucket/object',
{'Content-Type': None,
'X-Amz-Date': 'Mon, 11 Jul 2011 10:52:57 +0000',
'Date': 'Tue, 12 Jul 2011 10:52:57 +0000'})
verify('ed6971e3eca5af4ee361f05d7c272e49', '/bucket/object',
{'Content-Type': None,
'Date': 'Tue, 12 Jul 2011 10:52:57 +0000'})
req1 = Request.blank('/', headers=
{'Content-Type': None, 'X-Amz-Something': 'test'})
req2 = Request.blank('/', headers=
{'Content-Type': '', 'X-Amz-Something': 'test'})
req3 = Request.blank('/', headers={'X-Amz-Something': 'test'})
self.assertEquals(swift3.canonical_string(req1),
swift3.canonical_string(req2))
self.assertEquals(swift3.canonical_string(req2),
swift3.canonical_string(req3))
def test_signed_urls(self):
class FakeApp(object):
def __call__(self, env, start_response):
self.req = Request(env)
start_response('200 OK', [])
return []
app = FakeApp()
local_app = swift3.filter_factory({})(app)
req = Request.blank('/bucket/object?Signature=X&Expires=Y&'
'AWSAccessKeyId=Z', environ={'REQUEST_METHOD': 'GET'})
req.headers['Date'] = datetime.utcnow()
req.content_type = 'text/plain'
resp = local_app(req.environ, lambda *args: None)
self.assertEquals(req.headers['Authorization'], 'AWS Z:X')
self.assertEquals(req.headers['Date'], 'Y')
def test_token_generation(self):
req = Request.blank('/bucket/object?uploadId=123456789abcdef'
'&partNumber=1',
environ={'REQUEST_METHOD': 'PUT'})
req.headers['Authorization'] = 'AWS X:Y'
resp = self.app(req.environ, start_response)
self.assertEquals(base64.urlsafe_b64decode(
req.headers['X-Auth-Token']),
'PUT\n\n\n/bucket/object?partNumber=1'
'&uploadId=123456789abcdef')
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
nickpandolfi/linereader
|
linereader/__init__.py
|
2
|
1727
|
__author__ = 'Nicholas C Pandolfi'
#The MIT License (MIT)
#
#Copyright (c) 2014 Nicholas C Pandolfi
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
'''
Reads randomly accessed lines from a text file faster than Python\'s built-in linecache,
and creates dynamic data types for the manipulation of massive data sets.
'''
from .c_reader import copen
from .d_reader import dopen
from .lrcompiler import construct, visconstruct, precompile, stackcompile, dircompile
from .lrcompiler import load, build, cachebuild, cnlcount, getline, getonce, clearcache
# Changelog (from commit history):
# - initial commit: add all files to initiate the project
# - polymorphized copen and dopen; removed the Cython libraries
# - multithreaded the precompiler; added extra utility functions
|
mit
|
mbbill/shadowsocks
|
tests/nose_plugin.py
|
1072
|
1164
|
#!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nose
from nose.plugins.base import Plugin
class ExtensionPlugin(Plugin):
    """Nose plugin that widens collection: every directory and module is
    searched, and any file ending in '.py' counts as a candidate test file."""

    name = "ExtensionPlugin"

    def configure(self, options, config):
        Plugin.configure(self, options, config)
        # Activate unconditionally -- no --with-... flag is required.
        self.enabled = True

    def options(self, parser, env):
        # No extra command-line options; defer to the base implementation.
        Plugin.options(self, parser, env)

    def wantFile(self, file):
        # Collect every Python source file.
        return file.endswith('.py')

    def wantModule(self, file):
        # Consider every module.
        return True

    def wantDirectory(self, directory):
        # Descend into every directory.
        return True
if __name__ == '__main__':
nose.main(addplugins=[ExtensionPlugin()])
|
apache-2.0
|
otheng03/nbase-arc
|
redis-3.2.9/tests/nbase-arc/redis.py
|
3
|
3992
|
#
# Copyright 2015 Naver Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from socket import *
from itertools import imap
import re
class RedisClient:
    '''Simple (and slow) redis client that works only in request/response
    mode.  Protocol data is handled as Python 2 `str`; line replies are
    read from the socket one byte at a time.'''

    def __init__(self, ip, port):
        self.ip = ip
        self.port = port
        self.sock = socket(AF_INET, SOCK_STREAM)
        self.sock.connect((ip, port))

    def close(self):
        self.sock.close()

    def io_read(self, length=-1):
        '''Read exactly `length` bytes, or -- when length == -1 -- one
        CRLF-terminated line with the terminator stripped.

        Raises IOError if the peer closes the connection mid-read.
        '''
        s = self.sock
        if length == -1:  # read a line that ends with \r\n
            prev_c = c = None
            ret = ''
            while True:
                prev_c = c
                c = s.recv(1)
                if not c:
                    # BUGFIX: the original raised a plain string, which is
                    # a TypeError on Python >= 2.6 (string exceptions were
                    # removed); raise a real exception instead.
                    raise IOError('socket receive error')
                if prev_c == '\r' and c == '\n':
                    return ret[:-1]
                ret = ret + c
        else:
            ret = ''
            while length > len(ret):
                c = s.recv(length - len(ret))
                if not c:
                    raise IOError('socket receive error')
                ret = ret + c
            return ret

    def read_response(self):
        '''Parse and return one RESP reply: status/error -> str,
        integer -> int, bulk -> str or None, multibulk -> list or None.'''
        payload = self.io_read()
        prefix, data = payload[0], payload[1:].strip()
        if prefix == "+":    # ok
            return data
        elif prefix == "-":  # error (returned as data, not raised)
            return data
        elif prefix == ":":  # integer reply
            return int(data)
        elif prefix == "$":  # bulk reply
            # BUGFIX: renamed the local so it no longer shadows builtin len.
            length = int(data)
            if length == -1:
                return None  # null bulk reply
            nextchunk = self.io_read(length + 2)
            return nextchunk[:-2]  # drop the trailing \r\n
        elif prefix == "*":  # multibulk reply
            count = int(data)
            if count == -1:
                return None  # null multibulk reply
            resp = []
            for i in range(0, count):
                resp.append(self.read_response())
            return resp
        else:
            # BUGFIX: was a string raise; see io_read.
            raise IOError('read_response: Protocol error')

    def raw_write(self, data):
        '''Send raw bytes without reading a reply.'''
        self.sock.sendall(data)

    def do_raw_request(self, data):
        '''Send a pre-formatted RESP request and return the parsed reply.'''
        self.sock.sendall(data)
        return self.read_response()

    def do_inline_request(self, cmd):
        '''Send an inline (plain-text) request.'''
        return self.do_raw_request(cmd + '\r\n')

    def do_generic_request(self, *args):
        '''Send a command as a RESP array of bulk strings.'''
        rqst = ''.join(('*', str(len(args)), '\r\n'))
        for arg in imap(encode, args):
            rqst = ''.join((rqst, '$', str(len(arg)), '\r\n', arg, '\r\n'))
        return self.do_raw_request(rqst)
def encode(value):
    "Return a bytestring representation of the value"
    # NOTE(review): Python 2 only -- relies on the py2-only names `long`,
    # `basestring` and `unicode` (consistent with `itertools.imap` above).
    if isinstance(value, bytes):
        # Already a bytestring; pass through untouched.
        return value
    elif isinstance(value, (int, long)):
        value = str(value)
    elif isinstance(value, float):
        # repr() keeps full float precision, unlike py2's str().
        value = repr(value)
    elif not isinstance(value, basestring):
        # Anything else: fall back to its text representation.
        value = unicode(value)
    if isinstance(value, unicode):
        # Redis protocol payloads are bytes; utf-8 encode any text.
        value = value.encode('utf-8')
    return value
def rr_assert_equal(l1, r2):
    """Assert that the string forms of l1 and r2 match, dumping both
    values to stdout first when they do not."""
    left, right = str(l1), str(r2)
    if left != right:
        print("\n===ASSERT EQUAL===")
        print(left)
        print(right)
        print("==================")
    assert left == right
def rr_assert_substring(subs, r):
    """Assert that the regex pattern `subs` matches somewhere inside `r`,
    dumping both values to stdout first when it does not."""
    match = re.search(subs, r)
    if match is None:
        print("\n==ASSERT_SUBSTRING===")
        print(str(subs))
        print(str(r))
        print("==================")
    assert match is not None
def rr_toint(r):
    """Coerce a redis reply value to an int."""
    return int(r)
|
apache-2.0
|
dellysunnymtech/sakoman-oe
|
contrib/sakoman/build/extras/autobuilder/ipkg-utils/arfile.py
|
2
|
3493
|
"""
arfile - A module to parse GNU ar archives.
Copyright (c) 2006-7 Paul Sokolovsky
This file is released under the terms
of GNU General Public License v2 or later.
"""
import sys
import os
import tarfile
class FileSection:
    """Expose the byte range [offset, offset + size) of an underlying file
    object `f` as a separate read-only file-like object.

    Positions seen by callers are relative to the start of the section.
    """

    def __init__(self, f, offset, size):
        self.f = f
        self.offset = offset
        self.size = size
        # Start positioned at the beginning of the section.
        self.seek(0, 0)

    def seek(self, offset, whence=0):
        """Seek within the section; translates to absolute file positions."""
        if whence == 0:      # from section start
            return self.f.seek(offset + self.offset, whence)
        elif whence == 1:    # relative to current position
            return self.f.seek(offset, whence)
        elif whence == 2:    # from section end
            return self.f.seek(self.offset + self.size + offset, 0)
        else:
            assert False

    def tell(self):
        """Return the current position relative to the section start."""
        return self.f.tell() - self.offset

    def read(self, size=-1):
        """Read up to `size` bytes (all remaining bytes if size < 0).

        BUGFIX: the original forwarded `size` to the underlying file
        unclamped, so reads could run past the end of the section into the
        rest of the archive.  Clamp to the bytes remaining in the section.
        """
        remaining = self.size - self.tell()
        if remaining < 0:
            remaining = 0
        if size < 0 or size > remaining:
            size = remaining
        return self.f.read(size)
class ArFile:
    """Lazy random-access reader for an `ar` archive (Python 2).

    The member directory is scanned only as far as needed; header fields
    for members seen so far are cached in self.directory so a second
    open() of the same member is O(1).
    """
    def __init__(self, f):
        self.f = f
        # Cache: member name -> split header fields + [data offset] (appended
        # by _scan; the offset is the last element, header size is field 5).
        self.directory = {}
        self.directoryRead = False
        # An ar archive starts with the 8-byte magic "!<arch>\n".
        signature = self.f.readline()
        assert signature == "!<arch>\n"
        # Where the next unscanned member header begins.
        self.directoryOffset = self.f.tell()
    def open(self, fname):
        """Return a FileSection over member `fname`; raise IOError(2) if absent."""
        # Fast path: member already seen by a previous scan.
        if self.directory.has_key(fname):
            return FileSection(self.f, self.directory[fname][-1], int(self.directory[fname][5]))
        # Whole archive already scanned without finding it.
        if self.directoryRead:
            raise IOError, (2, "AR member not found: " + fname)
        f = self._scan(fname)
        if f == None:
            raise IOError, (2, "AR member not found: " + fname)
        return f
    def _scan(self, fname):
        """Scan member headers from directoryOffset until `fname` is found
        (returning a FileSection) or the archive ends (returning None)."""
        self.f.seek(self.directoryOffset, 0)
        while True:
            l = self.f.readline()
            if not l:
                # End of archive: everything is now cached in self.directory.
                self.directoryRead = True
                return None
            if l == "\n":
                # Skip the padding newline inserted after odd-sized members.
                l = self.f.readline()
                if not l: break
            descriptor = l.split()
            # print descriptor
            size = int(descriptor[5])
            # NOTE(review): drops the trailing terminator character of the
            # name field -- presumably the GNU ar '/' suffix; confirm.
            memberName = descriptor[0][:-1]
            self.directory[memberName] = descriptor + [self.f.tell()]
            # print "read:", memberName
            if memberName == fname or (memberName.startswith("`") and memberName[1:] == fname):
                # Record directory offset to start from next time
                self.directoryOffset = self.f.tell() + size
                return FileSection(self.f, self.f.tell(), size)
            # Skip data and loop
            if size % 2:
                # Member data is 2-byte aligned.
                size = size + 1
            data = self.f.seek(size, 1)
            # print hex(f.tell())
if __name__ == "__main__":
    # Ad-hoc demo / smoke test for ArFile (Python 2 script).
    if None:
        # Disabled branch: exercises repeated member lookups on one archive
        # given as the first command-line argument.
        f = open(sys.argv[1], "rb")
        ar = ArFile(f)
        tarStream = ar.open("data.tar.gz")
        print "--------"
        tarStream = ar.open("data.tar.gz")
        print "--------"
        tarStream = ar.open("control.tar.gz")
        print "--------"
        tarStream = ar.open("control.tar.gz2")
        sys.exit(0)
    # Default: dump the "control" file of every .ipk package in the given
    # (or current) directory.
    dir = "."
    if len(sys.argv) > 1:
        dir = sys.argv[1]
    for f in os.listdir(dir):
        if not f.endswith(".ipk"): continue
        print "=== %s ===" % f
        f = open(dir + "/" + f, "rb")
        ar = ArFile(f)
        tarStream = ar.open("control.tar.gz")
        # tarfile reads the member through the FileSection wrapper.
        tarf = tarfile.open("control.tar.gz", "r", tarStream)
        #tarf.list()
        f2 = tarf.extractfile("control")
        print f2.read()
|
mit
|
twobob/buildroot-kindle
|
output/build/host-python-2.7.2/Lib/distutils/version.py
|
259
|
11433
|
#
# distutils/version.py
#
# Implements multiple version numbering conventions for the
# Python Module Distribution Utilities.
#
# $Id$
#
"""Provides classes to represent module version numbers (one class for
each style of version numbering). There are currently two such classes
implemented: StrictVersion and LooseVersion.
Every version number class implements the following interface:
* the 'parse' method takes a string and parses it to some internal
representation; if the string is an invalid version number,
'parse' raises a ValueError exception
* the class constructor takes an optional string argument which,
if supplied, is passed to 'parse'
* __str__ reconstructs the string that was passed to 'parse' (or
an equivalent string -- ie. one that will generate an equivalent
version number instance)
* __repr__ generates Python code to recreate the version number instance
* __cmp__ compares the current instance with either another instance
of the same class or a string (which will be parsed to an instance
of the same class, thus must follow the same rules)
"""
import string, re
from types import StringType
class Version:
    """Abstract base for version-numbering classes.

    Supplies only the constructor and the reproducer, which are shared by
    every concrete version class; parsing and comparison are left to the
    subclass.
    """
    def __init__(self, vstring=None):
        # A falsy vstring (None or '') leaves the instance unparsed.
        if not vstring:
            return
        self.parse(vstring)
    def __repr__(self):
        return "{0} ('{1}')".format(self.__class__.__name__, str(self))
# Interface for version-number classes -- must be implemented
# by the following classes (the concrete ones -- Version should
# be treated as an abstract class).
# __init__ (string) - create and take same action as 'parse'
# (string parameter is optional)
# parse (string) - convert a string representation to whatever
# internal representation is appropriate for
# this style of version numbering
# __str__ (self) - convert back to a string; should be very similar
# (if not identical to) the string supplied to parse
# __repr__ (self) - generate Python code to recreate
# the instance
# __cmp__ (self, other) - compare two version numbers ('other' may
# be an unparsed version string, or another
# instance of your version class)
class StrictVersion (Version):
    """Version numbering for anal retentives and software idealists.
    Implements the standard interface for version number classes as
    described above.  A version number consists of two or three
    dot-separated numeric components, with an optional "pre-release" tag
    on the end.  The pre-release tag consists of the letter 'a' or 'b'
    followed by a number.  If the numeric components of two version
    numbers are equal, then one with a pre-release tag will always
    be deemed earlier (lesser) than one without.
    The following are valid version numbers (shown in the order that
    would be obtained by sorting according to the supplied cmp function):
        0.4       0.4.0  (these two are equivalent)
        0.4.1
        0.5a1
        0.5b3
        0.5
        0.9.6
        1.0
        1.0.4a3
        1.0.4b1
        1.0.4
    The following are examples of invalid version numbers:
        1
        2.7.2.2
        1.3.a4
        1.3pl1
        1.3c4
    The rationale for this version numbering system will be explained
    in the distutils documentation.
    """
    # Two or three dot-separated numeric groups, optional 'a'/'b' + number tag.
    version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
                            re.VERBOSE)
    def parse (self, vstring):
        # Populate self.version (3-tuple of ints) and self.prerelease
        # (('a'|'b', int) or None); Python 2 code (string.atoi, raise stmt).
        match = self.version_re.match(vstring)
        if not match:
            raise ValueError, "invalid version number '%s'" % vstring
        (major, minor, patch, prerelease, prerelease_num) = \
            match.group(1, 2, 4, 5, 6)
        if patch:
            self.version = tuple(map(string.atoi, [major, minor, patch]))
        else:
            # Missing patch component is normalized to 0 ("0.4" == "0.4.0").
            self.version = tuple(map(string.atoi, [major, minor]) + [0])
        if prerelease:
            self.prerelease = (prerelease[0], string.atoi(prerelease_num))
        else:
            self.prerelease = None
    def __str__ (self):
        # A trailing .0 patch component is omitted on output.
        if self.version[2] == 0:
            vstring = string.join(map(str, self.version[0:2]), '.')
        else:
            vstring = string.join(map(str, self.version), '.')
        if self.prerelease:
            vstring = vstring + self.prerelease[0] + str(self.prerelease[1])
        return vstring
    def __cmp__ (self, other):
        # Strings are parsed on the fly so "1.0" < StrictVersion("1.1") works.
        if isinstance(other, StringType):
            other = StrictVersion(other)
        compare = cmp(self.version, other.version)
        if (compare == 0):              # have to compare prerelease
            # case 1: neither has prerelease; they're equal
            # case 2: self has prerelease, other doesn't; other is greater
            # case 3: self doesn't have prerelease, other does: self is greater
            # case 4: both have prerelease: must compare them!
            if (not self.prerelease and not other.prerelease):
                return 0
            elif (self.prerelease and not other.prerelease):
                return -1
            elif (not self.prerelease and other.prerelease):
                return 1
            elif (self.prerelease and other.prerelease):
                return cmp(self.prerelease, other.prerelease)
        else:                           # numeric versions don't match --
            return compare              # prerelease stuff doesn't matter
# end class StrictVersion
# The rules according to Greg Stein:
# 1) a version number has 1 or more numbers separated by a period or by
# sequences of letters. If only periods, then these are compared
# left-to-right to determine an ordering.
# 2) sequences of letters are part of the tuple for comparison and are
# compared lexicographically
# 3) recognize the numeric components may have leading zeroes
#
# The LooseVersion class below implements these rules: a version number
# string is split up into a tuple of integer and string components, and
# comparison is a simple tuple comparison. This means that version
# numbers behave in a predictable and obvious way, but a way that might
# not necessarily be how people *want* version numbers to behave. There
# wouldn't be a problem if people could stick to purely numeric version
# numbers: just split on period and compare the numbers as tuples.
# However, people insist on putting letters into their version numbers;
# the most common purpose seems to be:
# - indicating a "pre-release" version
# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
# - indicating a post-release patch ('p', 'pl', 'patch')
# but of course this can't cover all version number schemes, and there's
# no way to know what a programmer means without asking him.
#
# The problem is what to do with letters (and other non-numeric
# characters) in a version number. The current implementation does the
# obvious and predictable thing: keep them as strings and compare
# lexically within a tuple comparison. This has the desired effect if
# an appended letter sequence implies something "post-release":
# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
#
# However, if letters in a version number imply a pre-release version,
# the "obvious" thing isn't correct. Eg. you would expect that
# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
# implemented here, this just isn't so.
#
# Two possible solutions come to mind. The first is to tie the
# comparison algorithm to a particular set of semantic rules, as has
# been done in the StrictVersion class above. This works great as long
# as everyone can go along with bondage and discipline. Hopefully a
# (large) subset of Python module programmers will agree that the
# particular flavour of bondage and discipline provided by StrictVersion
# provides enough benefit to be worth using, and will submit their
# version numbering scheme to its domination. The free-thinking
# anarchists in the lot will never give in, though, and something needs
# to be done to accommodate them.
#
# Perhaps a "moderately strict" version class could be implemented that
# lets almost anything slide (syntactically), and makes some heuristic
# assumptions about non-digits in version number strings. This could
# sink into special-case-hell, though; if I was as talented and
# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
# just as happy dealing with things like "2g6" and "1.13++". I don't
# think I'm smart enough to do it right though.
#
# In any case, I've coded the test suite for this module (see
# ../test/test_version.py) specifically to fail on things like comparing
# "1.2a2" and "1.2". That's not because the *code* is doing anything
# wrong, it's because the simple, obvious design doesn't match my
# complicated, hairy expectations for real-world version numbers. It
# would be a snap to fix the test suite to say, "Yep, LooseVersion does
# the Right Thing" (ie. the code matches the conception). But I'd rather
# have a conception that matches common notions about version numbers.
class LooseVersion (Version):
    """Version numbering for anarchists and software realists.
    Implements the standard interface for version number classes as
    described above.  A version number consists of a series of numbers,
    separated by either periods or strings of letters.  When comparing
    version numbers, the numeric components will be compared
    numerically, and the alphabetic components lexically.  The following
    are all valid version numbers, in no particular order:
        1.5.1
        1.5.2b2
        161
        3.10a
        8.02
        3.4j
        1996.07.12
        3.2.pl0
        3.1.1.6
        2g6
        11g
        0.960923
        2.2beta29
        1.13++
        5.5.kw
        2.0b1pl0
    In fact, there is no such thing as an invalid version number under
    this scheme; the rules for comparison are simple and predictable,
    but may not always give the results you want (for some definition
    of "want").
    """
    # Splits on runs of digits, runs of letters, or a literal dot.
    component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
    def __init__ (self, vstring=None):
        if vstring:
            self.parse(vstring)
    def parse (self, vstring):
        # I've given up on thinking I can reconstruct the version string
        # from the parsed tuple -- so I just store the string here for
        # use by __str__
        self.vstring = vstring
        # NOTE(review): Python 2 filter() returns a list, which the index
        # assignment below relies on; this would break under Python 3.
        components = filter(lambda x: x and x != '.',
                            self.component_re.split(vstring))
        for i in range(len(components)):
            try:
                # Numeric components compare numerically; others stay strings.
                components[i] = int(components[i])
            except ValueError:
                pass
        self.version = components
    def __str__ (self):
        return self.vstring
    def __repr__ (self):
        return "LooseVersion ('%s')" % str(self)
    def __cmp__ (self, other):
        # Comparison is plain tuple/list comparison of the parsed components.
        if isinstance(other, StringType):
            other = LooseVersion(other)
        return cmp(self.version, other.version)
# end class LooseVersion
|
gpl-2.0
|
Wang-OuYang/WarChess
|
cocos2d/plugin/tools/toolsForGame/modifyProject.py
|
255
|
1300
|
import sys, string, os
from xml.etree import ElementTree as ET
from xml.dom import minidom
# Command-line arguments: the Eclipse .project file to modify, and the
# plugin-x locationURI to link into it.
projFile = sys.argv[1]
targetPath = sys.argv[2]
def getLinkElement(path=None):
    """Build a <link> element for an Eclipse .project linkedResources block.

    :param path: locationURI text for the link.  Defaults to the
        module-level ``targetPath`` (the original, global-based behaviour),
        so existing ``getLinkElement()`` callers are unaffected.
    :return: an ElementTree Element of the form
        <link><name>plugin-x</name><type>2</type><locationURI>...</locationURI></link>
    """
    if path is None:
        # Backward compatible fallback to the script-level target path.
        path = targetPath
    ret = ET.Element('link')
    nameEle = ET.Element('name')
    nameEle.text = 'plugin-x'
    typeEle = ET.Element('type')
    # '2' mirrors the original hard-coded link type value.
    typeEle.text = '2'
    locEle = ET.Element('locationURI')
    locEle.text = path
    ret.append(nameEle)
    ret.append(typeEle)
    ret.append(locEle)
    return ret
# Ensure the .project file contains a plugin-x <link> entry pointing at
# targetPath; the file is rewritten only when something changes.
tree = ET.parse(projFile)
root = tree.getroot()
nodeLinkRes = root.find('linkedResources')
if nodeLinkRes != None:
    # linkedResources exists: add the link only if no existing link
    # already points at targetPath.
    linkNodes = nodeLinkRes.findall('link')
    haveTarget = False
    if linkNodes != None and len(linkNodes) > 0:
        for node in linkNodes:
            locNode = node.find('locationURI')
            if locNode == None:
                continue
            # Compare ignoring surrounding whitespace/newlines.
            tempText = locNode.text
            tempText = tempText.strip(' \n\r\t')
            if tempText == targetPath:
                haveTarget = True
                break
    if not haveTarget:
        nodeLinkRes.append(getLinkElement())
        tree.write(projFile, 'UTF-8')
else:
    # No linkedResources section yet: create one with the link inside.
    linkResEle = ET.Element('linkedResources')
    linkResEle.append(getLinkElement())
    root.append(linkResEle)
    tree.write(projFile, 'UTF-8')
|
gpl-2.0
|
insertnamehere1/maraschino
|
lib/sqlalchemy/util/deprecations.py
|
22
|
3906
|
# util/deprecations.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Helpers related to deprecation of functions, methods, classes, other
functionality."""
from sqlalchemy import exc
import warnings
import re
from langhelpers import decorator
def warn_deprecated(msg, stacklevel=3):
    """Emit a SADeprecationWarning carrying `msg`, attributed to the
    caller `stacklevel` frames up."""
    warnings.warn(msg, category=exc.SADeprecationWarning,
                  stacklevel=stacklevel)
def warn_pending_deprecation(msg, stacklevel=3):
    """Emit a SAPendingDeprecationWarning carrying `msg`, attributed to
    the caller `stacklevel` frames up."""
    warnings.warn(msg, category=exc.SAPendingDeprecationWarning,
                  stacklevel=stacklevel)
def deprecated(version, message=None, add_deprecation_to_docstring=True):
    """Decorator factory that makes the wrapped function issue a
    SADeprecationWarning on every call.

    :param version: version at which the function became deprecated
      (used only for the docstring header).
    :param message:
      If provided, issue message in the warning.  A sensible default
      is used if not provided.
    :param add_deprecation_to_docstring:
      Default True.  If False, the wrapped function's __doc__ is left
      as-is.  If True, the 'message' is prepended to the docs if
      provided, or sensible default if message is omitted.
    """
    header = None
    if add_deprecation_to_docstring:
        header = ".. deprecated:: %s %s" % (version, (message or ''))
    if message is None:
        message = "Call to deprecated function %(func)s"
    def decorate(fn):
        return _decorate_with_warning(
            fn, exc.SADeprecationWarning,
            message % dict(func=fn.__name__), header)
    return decorate
def pending_deprecation(version, message=None,
                        add_deprecation_to_docstring=True):
    """Decorator factory that makes the wrapped function issue a
    SAPendingDeprecationWarning on every call.

    :param version:
      An approximate future version at which point the pending deprecation
      will become deprecated.  Not used in messaging.
    :param message:
      If provided, issue message in the warning.  A sensible default
      is used if not provided.
    :param add_deprecation_to_docstring:
      Default True.  If False, the wrapped function's __doc__ is left
      as-is.  If True, the 'message' is prepended to the docs if
      provided, or sensible default if message is omitted.
    """
    header = None
    if add_deprecation_to_docstring:
        header = ".. deprecated:: %s (pending) %s" % (version, (message or ''))
    if message is None:
        message = "Call to deprecated function %(func)s"
    def decorate(fn):
        return _decorate_with_warning(
            fn, exc.SAPendingDeprecationWarning,
            message % dict(func=fn.__name__), header)
    return decorate
def _sanitize_restructured_text(text):
def repl(m):
type_, name = m.group(1, 2)
if type_ in ("func", "meth"):
name += "()"
return name
return re.sub(r'\:(\w+)\:`~?\.?(.+?)`', repl, text)
def _decorate_with_warning(func, wtype, message, docstring_header=None):
    """Wrap a function with a warnings.warn and augmented docstring.

    :param func: the function to wrap.
    :param wtype: warning class to raise (e.g. exc.SADeprecationWarning).
    :param message: warning text; ReST roles are stripped first.
    :param docstring_header: optional ``.. deprecated::`` header to splice
        into the wrapped function's docstring ('%(func)s' is substituted).
    """
    message = _sanitize_restructured_text(message)
    @decorator
    def warned(fn, *args, **kwargs):
        warnings.warn(wtype(message), stacklevel=3)
        return fn(*args, **kwargs)
    # `a and b or c` is the pre-ternary Python idiom for a conditional.
    doc = func.__doc__ is not None and func.__doc__ or ''
    if docstring_header is not None:
        docstring_header %= dict(func=func.__name__)
        docs = doc and doc.expandtabs().split('\n') or []
        # Detect the docstring body's indentation from the first
        # non-empty line after the summary, so the header lines up.
        indent = ''
        for line in docs[1:]:
            text = line.lstrip()
            if text:
                indent = line[0:len(line) - len(text)]
                break
        # Insert after the summary line (or at 0 for an empty docstring).
        point = min(len(docs), 1)
        docs.insert(point, '\n' + indent + docstring_header.rstrip())
        doc = '\n'.join(docs)
    decorated = warned(func)
    decorated.__doc__ = doc
    return decorated
|
mit
|
moonboots/tensorflow
|
tensorflow/python/tools/freeze_graph.py
|
15
|
5585
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts checkpoint variables into Const ops in a standalone GraphDef file.
This script is designed to take a GraphDef proto, a SaverDef proto, and a set of
variable values stored in a checkpoint file, and output a GraphDef with all of
the variable ops converted into const ops containing the values of the
variables.
It's useful to do this when we need to load a single file in C++, especially in
environments like mobile or embedded where we may not have access to the
RestoreTensor ops and file loading calls that they rely on.
An example of command-line usage is:
bazel build tensorflow/python/tools:freeze_graph && \
bazel-bin/tensorflow/python/tools/freeze_graph \
--input_graph=some_graph_def.pb \
--input_checkpoint=model.ckpt-8361242 \
--output_graph=/tmp/frozen_graph.pb --output_node_names=softmax
You can also look at freeze_graph_test.py for an example of how to use it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.client import graph_util
# Command-line flags for the freeze_graph tool; the triple-quoted strings
# are the per-flag help text.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string("input_graph", "",
                           """TensorFlow 'GraphDef' file to load.""")
tf.app.flags.DEFINE_string("input_saver", "",
                           """TensorFlow saver file to load.""")
tf.app.flags.DEFINE_string("input_checkpoint", "",
                           """TensorFlow variables file to load.""")
tf.app.flags.DEFINE_string("output_graph", "",
                           """Output 'GraphDef' file name.""")
tf.app.flags.DEFINE_boolean("input_binary", False,
                            """Whether the input files are in binary format.""")
tf.app.flags.DEFINE_string("output_node_names", "",
                           """The name of the output nodes, comma separated.""")
tf.app.flags.DEFINE_string("restore_op_name", "save/restore_all",
                           """The name of the master restore operator.""")
tf.app.flags.DEFINE_string("filename_tensor_name", "save/Const:0",
                           """The name of the tensor holding the save path.""")
tf.app.flags.DEFINE_boolean("clear_devices", True,
                            """Whether to remove device specifications.""")
tf.app.flags.DEFINE_string("initializer_nodes", "", "comma separated list of "
                           "initializer nodes to run before freezing.")
def freeze_graph(input_graph, input_saver, input_binary, input_checkpoint,
                 output_node_names, restore_op_name, filename_tensor_name,
                 output_graph, clear_devices, initializer_nodes):
  """Converts all variables in a graph and checkpoint into constants.

  Loads the GraphDef (text or binary), restores variable values from the
  checkpoint (via a SaverDef if given, else via the named restore op),
  converts the variables feeding `output_node_names` into Const ops, and
  writes the frozen GraphDef to `output_graph`.  Returns -1 on argument
  errors (print-and-return style, not exceptions).
  """
  # Validate inputs up front; each failure prints a message and bails.
  if not tf.gfile.Exists(input_graph):
    print("Input graph file '" + input_graph + "' does not exist!")
    return -1
  if input_saver and not tf.gfile.Exists(input_saver):
    print("Input saver file '" + input_saver + "' does not exist!")
    return -1
  # Glob, not Exists: checkpoints may be sharded across several files.
  if not tf.gfile.Glob(input_checkpoint):
    print("Input checkpoint '" + input_checkpoint + "' doesn't exist!")
    return -1
  if not output_node_names:
    print("You need to supply the name of a node to --output_node_names.")
    return -1
  input_graph_def = tf.GraphDef()
  mode = "rb" if input_binary else "r"
  with tf.gfile.FastGFile(input_graph, mode) as f:
    if input_binary:
      input_graph_def.ParseFromString(f.read())
    else:
      text_format.Merge(f.read(), input_graph_def)
  # Remove all the explicit device specifications for this node. This helps to
  # make the graph more portable.
  if clear_devices:
    for node in input_graph_def.node:
      node.device = ""
  _ = tf.import_graph_def(input_graph_def, name="")
  with tf.Session() as sess:
    if input_saver:
      # Restore through an explicit SaverDef when one was supplied.
      with tf.gfile.FastGFile(input_saver, mode) as f:
        saver_def = tf.train.SaverDef()
        if input_binary:
          saver_def.ParseFromString(f.read())
        else:
          text_format.Merge(f.read(), saver_def)
        saver = tf.train.Saver(saver_def=saver_def)
        saver.restore(sess, input_checkpoint)
    else:
      # Otherwise run the graph's own restore op, feeding the checkpoint path.
      sess.run([restore_op_name], {filename_tensor_name: input_checkpoint})
    if initializer_nodes:
      sess.run(initializer_nodes)
    output_graph_def = graph_util.convert_variables_to_constants(
        sess, input_graph_def, output_node_names.split(","))
  with tf.gfile.GFile(output_graph, "wb") as f:
    f.write(output_graph_def.SerializeToString())
  print("%d ops in the final graph." % len(output_graph_def.node))
def main(unused_args):
  # Thin CLI wrapper: forward all flag values to freeze_graph().
  freeze_graph(FLAGS.input_graph, FLAGS.input_saver, FLAGS.input_binary,
               FLAGS.input_checkpoint, FLAGS.output_node_names,
               FLAGS.restore_op_name, FLAGS.filename_tensor_name,
               FLAGS.output_graph, FLAGS.clear_devices, FLAGS.initializer_nodes)
if __name__ == "__main__":
  # tf.app.run() parses flags and then invokes main().
  tf.app.run()
|
apache-2.0
|
Cloudino/Cloudino-Arduino-IDE
|
libraries/PubSubClient/tests/testcases/mqtt_publish_in_callback.py
|
42
|
1938
|
import unittest
import settings
import time
import mosquitto
import serial
def on_message(mosq, obj, msg):
    """Mosquitto on_message callback: stash every received message on the
    user-data object's message_queue for later inspection by the tests."""
    queue = obj.message_queue
    queue.append(msg)
class mqtt_publish_in_callback(unittest.TestCase):
    """Integration test: subscribe to 'outTopic' on a live broker and check
    messages the device under test publishes from within its own callback."""
    # Shared queue filled by the on_message callback (class-level on purpose:
    # the mosquitto callbacks receive the class via obj=self in setUpClass).
    message_queue = []
    @classmethod
    def setUpClass(self):
        # NOTE(review): 'self' here is actually the class (cls) -- kept as-is.
        self.client = mosquitto.Mosquitto("pubsubclient_ut", clean_session=True,obj=self)
        self.client.connect(settings.server_ip)
        self.client.on_message = on_message
        self.client.subscribe("outTopic",0)
    @classmethod
    def tearDownClass(self):
        self.client.disconnect()
    def test_connect(self):
        # Poll the network loop for up to ~15s (30 * 0.5s) for one message.
        i=30
        while len(self.message_queue) == 0 and i > 0:
            self.client.loop()
            time.sleep(0.5)
            i -= 1
        self.assertTrue(i>0, "message receive timed-out")
        self.assertEqual(len(self.message_queue), 1, "unexpected number of messages received")
        msg = self.message_queue.pop(0)
        self.assertEqual(msg.mid,0,"message id not 0")
        self.assertEqual(msg.topic,"outTopic","message topic incorrect")
        self.assertEqual(msg.payload,"hello world")
        self.assertEqual(msg.qos,0,"message qos not 0")
        self.assertEqual(msg.retain,False,"message retain flag incorrect")
    def test_publish(self):
        self.assertEqual(len(self.message_queue), 0, "message queue not empty")
        # Publish to 'inTopic'; the device is expected to echo the payload
        # back on 'outTopic' from inside its message callback.
        payload = "abcdefghij"
        self.client.publish("inTopic",payload)
        i=30
        while len(self.message_queue) == 0 and i > 0:
            self.client.loop()
            time.sleep(0.5)
            i -= 1
        self.assertTrue(i>0, "message receive timed-out")
        self.assertEqual(len(self.message_queue), 1, "unexpected number of messages received")
        msg = self.message_queue.pop(0)
        self.assertEqual(msg.mid,0,"message id not 0")
        self.assertEqual(msg.topic,"outTopic","message topic incorrect")
        self.assertEqual(msg.payload,payload)
        self.assertEqual(msg.qos,0,"message qos not 0")
        self.assertEqual(msg.retain,False,"message retain flag incorrect")
|
lgpl-2.1
|
smartforceplus/SmartForceplus
|
addons/email_template/wizard/mail_compose_message.py
|
27
|
11709
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import tools
from openerp.osv import osv, fields
def _reopen(self, res_id, model):
return {'type': 'ir.actions.act_window',
'view_mode': 'form',
'view_type': 'form',
'res_id': res_id,
'res_model': self._name,
'target': 'new',
# save original model in context, because selecting the list of available
# templates requires a model in context
'context': {
'default_model': model,
},
}
class mail_compose_message(osv.TransientModel):
_inherit = 'mail.compose.message'
def default_get(self, cr, uid, fields, context=None):
""" Override to pre-fill the data when having a template in single-email mode
and not going through the view: the on_change is not called in that case. """
if context is None:
context = {}
res = super(mail_compose_message, self).default_get(cr, uid, fields, context=context)
if res.get('composition_mode') != 'mass_mail' and context.get('default_template_id') and res.get('model') and res.get('res_id'):
res.update(
self.onchange_template_id(
cr, uid, [], context['default_template_id'], res.get('composition_mode'),
res.get('model'), res.get('res_id'), context=context
)['value']
)
if fields is not None:
[res.pop(field, None) for field in res.keys() if field not in fields]
return res
_columns = {
'template_id': fields.many2one('email.template', 'Use template', select=True),
}
def send_mail(self, cr, uid, ids, context=None):
""" Override of send_mail to duplicate attachments linked to the email.template.
Indeed, basic mail.compose.message wizard duplicates attachments in mass
mailing mode. But in 'single post' mode, attachments of an email template
also have to be duplicated to avoid changing their ownership. """
if context is None:
context = {}
wizard_context = dict(context)
for wizard in self.browse(cr, uid, ids, context=context):
if wizard.template_id:
wizard_context['mail_notify_user_signature'] = False # template user_signature is added when generating body_html
wizard_context['mail_auto_delete'] = wizard.template_id.auto_delete # mass mailing: use template auto_delete value -> note, for emails mass mailing only
if not wizard.attachment_ids or wizard.composition_mode == 'mass_mail' or not wizard.template_id:
continue
new_attachment_ids = []
for attachment in wizard.attachment_ids:
if attachment in wizard.template_id.attachment_ids:
new_attachment_ids.append(self.pool.get('ir.attachment').copy(cr, uid, attachment.id, {'res_model': 'mail.compose.message', 'res_id': wizard.id}, context=context))
else:
new_attachment_ids.append(attachment.id)
self.write(cr, uid, wizard.id, {'attachment_ids': [(6, 0, new_attachment_ids)]}, context=context)
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=wizard_context)
def onchange_template_id(self, cr, uid, ids, template_id, composition_mode, model, res_id, context=None):
""" - mass_mailing: we cannot render, so return the template values
- normal mode: return rendered values """
if template_id and composition_mode == 'mass_mail':
fields = ['subject', 'body_html', 'email_from', 'reply_to', 'mail_server_id']
template = self.pool['email.template'].browse(cr, uid, template_id, context=context)
values = dict((field, getattr(template, field)) for field in fields if getattr(template, field))
if template.attachment_ids:
values['attachment_ids'] = [att.id for att in template.attachment_ids]
if template.mail_server_id:
values['mail_server_id'] = template.mail_server_id.id
if template.user_signature and 'body_html' in values:
signature = self.pool.get('res.users').browse(cr, uid, uid, context).signature
values['body_html'] = tools.append_content_to_html(values['body_html'], signature, plaintext=False)
elif template_id:
values = self.generate_email_for_composer_batch(cr, uid, template_id, [res_id], context=context)[res_id]
# transform attachments into attachment_ids; not attached to the document because this will
# be done further in the posting process, allowing to clean database if email not send
ir_attach_obj = self.pool.get('ir.attachment')
for attach_fname, attach_datas in values.pop('attachments', []):
data_attach = {
'name': attach_fname,
'datas': attach_datas,
'datas_fname': attach_fname,
'res_model': 'mail.compose.message',
'res_id': 0,
'type': 'binary', # override default_type from context, possibly meant for another model!
}
values.setdefault('attachment_ids', list()).append(ir_attach_obj.create(cr, uid, data_attach, context=context))
else:
default_context = dict(context, default_composition_mode=composition_mode, default_model=model, default_res_id=res_id)
default_values = self.default_get(cr, uid, ['composition_mode', 'model', 'res_id', 'parent_id', 'partner_ids', 'subject', 'body', 'email_from', 'reply_to', 'attachment_ids', 'mail_server_id'], context=default_context)
values = dict((key, default_values[key]) for key in ['subject', 'body', 'partner_ids', 'email_from', 'reply_to', 'attachment_ids', 'mail_server_id'] if key in default_values)
if values.get('body_html'):
values['body'] = values.pop('body_html')
return {'value': values}
def save_as_template(self, cr, uid, ids, context=None):
""" hit save as template button: current form value will be a new
template attached to the current document. """
email_template = self.pool.get('email.template')
ir_model_pool = self.pool.get('ir.model')
for record in self.browse(cr, uid, ids, context=context):
model_ids = ir_model_pool.search(cr, uid, [('model', '=', record.model or 'mail.message')], context=context)
model_id = model_ids and model_ids[0] or False
model_name = ''
if model_id:
model_name = ir_model_pool.browse(cr, uid, model_id, context=context).name
template_name = "%s: %s" % (model_name, tools.ustr(record.subject))
values = {
'name': template_name,
'subject': record.subject or False,
'body_html': record.body or False,
'model_id': model_id or False,
'attachment_ids': [(6, 0, [att.id for att in record.attachment_ids])],
}
template_id = email_template.create(cr, uid, values, context=context)
# generate the saved template
template_values = record.onchange_template_id(template_id, record.composition_mode, record.model, record.res_id)['value']
template_values['template_id'] = template_id
record.write(template_values)
return _reopen(self, record.id, record.model)
#------------------------------------------------------
# Wizard validation and send
#------------------------------------------------------
def generate_email_for_composer_batch(self, cr, uid, template_id, res_ids, context=None, fields=None):
""" Call email_template.generate_email(), get fields relevant for
mail.compose.message, transform email_cc and email_to into partner_ids """
if context is None:
context = {}
if fields is None:
fields = ['subject', 'body_html', 'email_from', 'email_to', 'partner_to', 'email_cc', 'reply_to', 'attachment_ids', 'mail_server_id']
returned_fields = fields + ['partner_ids', 'attachments']
values = dict.fromkeys(res_ids, False)
ctx = dict(context, tpl_partners_only=True)
template_values = self.pool.get('email.template').generate_email_batch(cr, uid, template_id, res_ids, fields=fields, context=ctx)
for res_id in res_ids:
res_id_values = dict((field, template_values[res_id][field]) for field in returned_fields if template_values[res_id].get(field))
res_id_values['body'] = res_id_values.pop('body_html', '')
values[res_id] = res_id_values
return values
    def render_message_batch(self, cr, uid, wizard, res_ids, context=None):
        """ Override to handle templates.

        Merges the values computed by the composer (super) with the values
        rendered from the wizard's template; composer values win via the
        final ``update``. When the template produced values for a res_id,
        the composer recipient keys are dropped so the template drives the
        recipients.
        """
        # generate composer values
        composer_values = super(mail_compose_message, self).render_message_batch(cr, uid, wizard, res_ids, context)
        # generate template-based values
        if wizard.template_id:
            template_values = self.generate_email_for_composer_batch(
                cr, uid, wizard.template_id.id, res_ids,
                fields=['email_to', 'partner_to', 'email_cc', 'attachment_ids', 'mail_server_id'],
                context=context)
        else:
            template_values = {}
        for res_id in res_ids:
            if template_values.get(res_id):
                # recipients are managed by the template
                # NOTE(review): these pops have no default, so a missing key
                # would raise KeyError — presumably super() always sets them;
                # confirm before changing.
                composer_values[res_id].pop('partner_ids')
                composer_values[res_id].pop('email_to')
                composer_values[res_id].pop('email_cc')
                # remove attachments from template values as they should not be rendered
                template_values[res_id].pop('attachment_ids', None)
            else:
                template_values[res_id] = dict()
            # update template values by composer values
            template_values[res_id].update(composer_values[res_id])
        return template_values
def render_template_batch(self, cr, uid, template, model, res_ids, context=None, post_process=False):
return self.pool.get('email.template').render_template_batch(cr, uid, template, model, res_ids, context=context, post_process=post_process)
# Compatibility methods
def generate_email_for_composer(self, cr, uid, template_id, res_id, context=None):
return self.generate_email_for_composer_batch(cr, uid, template_id, [res_id], context)[res_id]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
erickt/hue
|
desktop/core/ext-py/Django-1.6.10/django/db/models/fields/subclassing.py
|
227
|
1815
|
"""
Convenience routines for creating non-trivial Field subclasses, as well as
backwards compatibility utilities.
Add SubfieldBase as the metaclass for your Field subclass, implement
to_python() and the other necessary methods and everything will work
seamlessly.
"""
class SubfieldBase(type):
    """
    A metaclass for custom Field subclasses. This ensures the model's attribute
    has the descriptor protocol attached to it.
    """
    def __new__(cls, name, bases, attrs):
        # Grab any contribute_to_class the subclass defined itself, then let
        # type build the class as usual.
        original_contribute = attrs.get('contribute_to_class')
        new_class = super(SubfieldBase, cls).__new__(cls, name, bases, attrs)
        # Wrap (or supply) contribute_to_class so the Creator descriptor is
        # always installed on the model class.
        new_class.contribute_to_class = make_contrib(new_class, original_contribute)
        return new_class
class Creator(object):
    """
    A placeholder class that provides a way to set the attribute on the model.

    Installed by make_contrib() as a data descriptor so every assignment to
    the model attribute is routed through the field's to_python().
    """
    def __init__(self, field):
        self.field = field

    def __get__(self, instance, owner=None):
        # Class-level access (no instance) is not supported.
        if instance is None:
            raise AttributeError('Can only be accessed via an instance.')
        return instance.__dict__[self.field.name]

    def __set__(self, instance, raw_value):
        # Funnel every assignment through the field's conversion hook.
        instance.__dict__[self.field.name] = self.field.to_python(raw_value)
def make_contrib(superclass, func=None):
    """
    Returns a suitable contribute_to_class() method for the Field subclass.

    If 'func' is passed in, it is the existing contribute_to_class() method on
    the subclass and it is called before anything else. It is assumed in this
    case that the existing contribute_to_class() calls all the necessary
    superclass methods.
    """
    def contribute_to_class(self, cls, name):
        # Prefer the subclass-provided hook; otherwise call straight through
        # to the superclass implementation.
        if func:
            func(self, cls, name)
        else:
            super(superclass, self).contribute_to_class(cls, name)
        # Install the descriptor so attribute access goes through to_python().
        setattr(cls, self.name, Creator(self))
    return contribute_to_class
|
apache-2.0
|
chouseknecht/ansible
|
test/units/modules/cloud/amazon/test_redshift_cross_region_snapshots.py
|
42
|
1356
|
from ansible.modules.cloud.amazon import redshift_cross_region_snapshots as rcrs
# Snapshot-copy status as the module reads it when cross-region copy is
# enabled (grant name, destination region, retention in days).
mock_status_enabled = {
    'SnapshotCopyGrantName': 'snapshot-us-east-1-to-us-west-2',
    'DestinationRegion': 'us-west-2',
    'RetentionPeriod': 1,
}
# Status when cross-region snapshot copy is disabled: no keys at all.
mock_status_disabled = {}
# Request that changes the copy grant — an unsupported modification.
mock_request_illegal = {
    'snapshot_copy_grant': 'changed',
    'destination_region': 'us-west-2',
    'snapshot_retention_period': 1
}
# Request that only changes the retention period — a legal update.
mock_request_update = {
    'snapshot_copy_grant': 'snapshot-us-east-1-to-us-west-2',
    'destination_region': 'us-west-2',
    'snapshot_retention_period': 3
}
# Request identical to the enabled status — nothing to do.
mock_request_no_update = {
    'snapshot_copy_grant': 'snapshot-us-east-1-to-us-west-2',
    'destination_region': 'us-west-2',
    'snapshot_retention_period': 1
}
def test_fail_at_unsupported_operations():
    # Changing the copy grant of an enabled configuration is unsupported
    # and must be reported as such.
    is_unsupported = rcrs.requesting_unsupported_modifications(
        mock_status_enabled, mock_request_illegal)
    assert is_unsupported is True
def test_needs_update_true():
    # A different retention period must be flagged as needing an update.
    assert rcrs.needs_update(mock_status_enabled, mock_request_update) is True
def test_no_change():
    # A request identical to the current status is neither unsupported
    # nor in need of an update.
    unsupported = rcrs.requesting_unsupported_modifications(
        mock_status_enabled, mock_request_no_update)
    update_needed = rcrs.needs_update(mock_status_enabled, mock_request_no_update)
    assert unsupported is False
    assert update_needed is False
|
gpl-3.0
|
tinkerinestudio/Tinkerine-Suite
|
TinkerineSuite/python/Lib/OpenGL/GL/NV/framebuffer_multisample_coverage.py
|
4
|
1564
|
'''OpenGL extension NV.framebuffer_multisample_coverage
This module customises the behaviour of the
OpenGL.raw.GL.NV.framebuffer_multisample_coverage to provide a more
Python-friendly API
Overview (from the spec)
This extension extends the EXT_framebuffer_multisample
specification by providing a new function,
RenderBufferStorageMultisampleCoverageNV, that distinguishes
between color samples and coverage samples.
EXT_framebuffer_multisample introduced the function
RenderbufferStorageMultisampleEXT as a method of defining the
storage parameters for a multisample render buffer. This function
takes a <samples> parameter. Using rules provided by the
specification, the <samples> parameter is resolved to an actual
number of samples that is supported by the underlying hardware.
EXT_framebuffer_multisample does not specify whether <samples>
refers to coverage samples or color samples.
This extension adds the function
RenderbufferStorageMultisamplCoverageNV, which takes a
<coverageSamples> parameter as well as a <colorSamples> parameter.
These two parameters give developers more fine grained control over
the quality of multisampled images.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/framebuffer_multisample_coverage.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.NV.framebuffer_multisample_coverage import *
### END AUTOGENERATED SECTION
|
agpl-3.0
|
norrs/debian-tomahawk
|
thirdparty/breakpad/third_party/protobuf/protobuf/gtest/test/gtest_env_var_test.py
|
233
|
3509
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Platform flags: some GTEST_* flags only exist on a given OS.
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
# Path to the C++ binary whose flag values the tests below probe.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
# Work on a copy so the parent test runner's environment is never mutated.
environ = os.environ.copy()
def AssertEq(expected, actual):
if expected != actual:
print 'Expected: %s' % (expected,)
print ' Actual: %s' % (actual,)
raise AssertionError
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # Removing an absent variable is a no-op.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def GetFlag(flag):
  """Runs gtest_env_var_test_ and returns its output."""
  # 'flag' is an optional extra command-line argument for the binary.
  args = [COMMAND] if flag is None else [COMMAND, flag]
  return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
  """Verifies that the given flag is affected by the corresponding env var."""
  env_var = 'GTEST_' + flag.upper()
  # Setting the variable must surface test_val; unsetting restores default_val.
  for value, expected in ((test_val, test_val), (None, default_val)):
    SetEnvVar(env_var, value)
    AssertEq(expected, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
  def testEnvVarAffectsFlag(self):
    """Tests that environment variable should affect the corresponding flag."""
    # Each call is (flag name, value set via GTEST_<FLAG>, built-in default).
    TestFlag('break_on_failure', '1', '0')
    TestFlag('color', 'yes', 'auto')
    TestFlag('filter', 'FooTest.Bar', '*')
    TestFlag('output', 'xml:tmp/foo.xml', '')
    TestFlag('print_time', '0', '1')
    TestFlag('repeat', '999', '1')
    TestFlag('throw_on_failure', '1', '0')
    TestFlag('death_test_style', 'threadsafe', 'fast')
    # Platform-specific flags are only compiled into certain builds.
    if IS_WINDOWS:
      TestFlag('catch_exceptions', '1', '0')
    if IS_LINUX:
      TestFlag('death_test_use_fork', '1', '0')
      TestFlag('stack_trace_depth', '0', '100')
# Entry point: delegate to gtest_test_utils' test runner.
if __name__ == '__main__':
  gtest_test_utils.Main()
|
gpl-3.0
|
Frontier314/loftq-linux
|
tools/perf/scripts/python/sctop.py
|
11180
|
1924
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Command-line contract: sctop.py [comm] [interval]
usage = "perf script -s sctop.py [comm] [interval]\n";
# Restrict the per-syscall counts to one process name (None = system-wide).
for_comm = None
# Refresh period of the on-screen totals, in seconds.
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
    sys.exit(usage)
if len(sys.argv) > 2:
    # Both [comm] and [interval] were given.
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    # A single argument is an interval if numeric, otherwise a comm name.
    try:
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval
# syscall id -> hit count (autodict comes from the Perf Core helpers).
syscalls = autodict()
def trace_begin():
    # Perf hook, called once before events arrive: spawn the periodic
    # display loop while syscall counting continues on the main thread.
    thread.start_new_thread(print_syscall_totals, (interval,))
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    # Perf callback: fired once per raw_syscalls:sys_enter event.
    # Skip events from other processes when a comm filter is active.
    if for_comm is not None and common_comm != for_comm:
        return
    # autodict entries start out as nested dicts; the TypeError on the
    # first += is how a counter gets initialized.
    try:
        syscalls[id] += 1
    except TypeError:
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Runs forever on a worker thread: clear the terminal and redraw the
    # per-syscall totals every 'interval' seconds, highest count first.
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
            "----------"),
        # Sort by (count, id) descending so the busiest syscalls print first.
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
            reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                # NOTE(review): presumably guards ids syscall_name cannot
                # format — confirm against the Util helpers.
                pass
        # Start each refresh from a clean slate (interval-local counts).
        syscalls.clear()
        time.sleep(interval)
|
gpl-2.0
|
lulandco/SickRage
|
lib/stevedore/driver.py
|
21
|
5496
|
from .exception import NoMatches, MultipleMatches
from .named import NamedExtensionManager
class DriverManager(NamedExtensionManager):
    """Load a single plugin with a given name from the namespace.

    :param namespace: The namespace for the entry points.
    :type namespace: str
    :param name: The name of the driver to load.
    :type name: str
    :param invoke_on_load: Boolean controlling whether to invoke the
        object returned by the entry point after the driver is loaded.
    :type invoke_on_load: bool
    :param invoke_args: Positional arguments to pass when invoking
        the object returned by the entry point. Only used if invoke_on_load
        is True.
    :type invoke_args: tuple
    :param invoke_kwds: Named arguments to pass when invoking
        the object returned by the entry point. Only used if invoke_on_load
        is True.
    :type invoke_kwds: dict
    :param on_load_failure_callback: Callback function that will be called when
        a entrypoint can not be loaded. The arguments that will be provided
        when this is called (when an entrypoint fails to load) are
        (manager, entrypoint, exception)
    :type on_load_failure_callback: function
    :param verify_requirements: Use setuptools to enforce the
        dependencies of the plugin(s) being loaded. Defaults to False.
    :type verify_requirements: bool
    """
    def __init__(self, namespace, name,
                 invoke_on_load=False, invoke_args=(), invoke_kwds={},
                 on_load_failure_callback=None,
                 verify_requirements=False):
        # NOTE(review): invoke_kwds={} is a mutable default argument; it is
        # only passed through here, never mutated — verify downstream before
        # relying on that.
        # Default to raising on load failure so a broken driver surfaces
        # immediately instead of being silently skipped.
        on_load_failure_callback = on_load_failure_callback \
            or self._default_on_load_failure
        super(DriverManager, self).__init__(
            namespace=namespace,
            names=[name],
            invoke_on_load=invoke_on_load,
            invoke_args=invoke_args,
            invoke_kwds=invoke_kwds,
            on_load_failure_callback=on_load_failure_callback,
            verify_requirements=verify_requirements,
        )
    @staticmethod
    def _default_on_load_failure(drivermanager, ep, err):
        # Re-raise the original exception from the failed entry-point load.
        raise
    @classmethod
    def make_test_instance(cls, extension, namespace='TESTING',
                           propagate_map_exceptions=False,
                           on_load_failure_callback=None,
                           verify_requirements=False):
        """Construct a test DriverManager

        Test instances are passed a list of extensions to work from rather
        than loading them from entry points.

        :param extension: Pre-configured Extension instance
        :type extension: :class:`~stevedore.extension.Extension`
        :param namespace: The namespace for the manager; used only for
            identification since the extensions are passed in.
        :type namespace: str
        :param propagate_map_exceptions: Boolean controlling whether exceptions
            are propagated up through the map call or whether they are logged
            and then ignored
        :type propagate_map_exceptions: bool
        :param on_load_failure_callback: Callback function that will
            be called when a entrypoint can not be loaded. The
            arguments that will be provided when this is called (when
            an entrypoint fails to load) are (manager, entrypoint,
            exception)
        :type on_load_failure_callback: function
        :param verify_requirements: Use setuptools to enforce the
            dependencies of the plugin(s) being loaded. Defaults to False.
        :type verify_requirements: bool
        :return: The manager instance, initialized for testing
        """
        # Wrap the single extension in a list to match the parent API.
        o = super(DriverManager, cls).make_test_instance(
            [extension], namespace=namespace,
            propagate_map_exceptions=propagate_map_exceptions,
            on_load_failure_callback=on_load_failure_callback,
            verify_requirements=verify_requirements)
        return o
    def _init_plugins(self, extensions):
        # Enforce the exactly-one-driver invariant after the parent loads
        # whatever matched.
        super(DriverManager, self)._init_plugins(extensions)
        if not self.extensions:
            name = self._names[0]
            raise NoMatches('No %r driver found, looking for %r' %
                            (self.namespace, name))
        if len(self.extensions) > 1:
            discovered_drivers = ','.join(e.entry_point_target
                                          for e in self.extensions)
            raise MultipleMatches('Multiple %r drivers found: %s' %
                                  (self.namespace, discovered_drivers))
    def __call__(self, func, *args, **kwds):
        """Invokes func() for the single loaded extension.

        The signature for func() should be::

            def func(ext, *args, **kwds):
                pass

        The first argument to func(), 'ext', is the
        :class:`~stevedore.extension.Extension` instance.

        Exceptions raised from within func() are logged and ignored.

        :param func: Callable to invoke for each extension.
        :param args: Variable arguments to pass to func()
        :param kwds: Keyword arguments to pass to func()
        :returns: List of values returned from func()
        """
        # map() runs func over all extensions; there is exactly one here,
        # so return its single result (or None if func's errors were eaten).
        results = self.map(func, *args, **kwds)
        if results:
            return results[0]
    @property
    def driver(self):
        """Returns the driver being used by this manager.
        """
        ext = self.extensions[0]
        # Prefer the invoked object; fall back to the plugin class itself.
        return ext.obj if ext.obj else ext.plugin
|
gpl-3.0
|
Lilykos/invenio
|
invenio/modules/previewer/bundles.py
|
11
|
1881
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Previewer bundles."""
from __future__ import unicode_literals
from invenio.ext.assets import Bundle, CleanCSSFilter, RequireJSFilter
# PDF.js-based previewer: ships the pdf.js build plus local viewer glue.
pdfjs = Bundle(
    "vendors/pdfjs-build/generic/web/compatibility.js",
    "vendors/pdfjs-build/generic/web/l10n.js",
    "vendors/pdfjs-build/generic/build/pdf.js",
    "js/previewer/pdfjs/viewer.js",
    "js/previewer/pdfjs/fullscreen.js",
    filters="uglifyjs",
    output="previewer/pdfjs.js",
    weight=20,
    bower={
        "pdfjs-build": "latest"
    }
)
# Fallback pdftk-based viewer script.
pdftk = Bundle(
    "js/previewer/pdf_viewer.js",
    filters="uglifyjs",
    output="previewer/pdftk.js",
    weight=20
)
# Stylesheet for the PDF.js viewer.
pdfjscss = Bundle(
    "css/previewer/pdfjs/viewer.css",
    filters=CleanCSSFilter(),
    output="previewer/pdfjs.css",
    weight=20
)
# CSV previewer; rendered with d3, packaged through RequireJS.
csv_previewer = Bundle(
    "js/previewer/csv_previewer/init.js",
    filters=RequireJSFilter(),
    output="previewer/csv_previewer.js",
    weight=20,
    bower={
        "d3": "latest"
    }
)
# Stylesheet for the pdftk viewer.
pdftkcss = Bundle(
    "css/previewer/pdf_viewer.css",
    filters=CleanCSSFilter(),
    output="previewer/pdftk.css",
    weight=20
)
|
gpl-2.0
|
samirasnoun/django_cms_gallery_image
|
cms/south_migrations/0064_staticplaceholder_site_change.py
|
63
|
17484
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    # Data migration: detach every StaticPlaceholder from its site.

    def forwards(self, orm):
        # Clear the site FK on all static placeholders; with the
        # ('code', 'site') unique constraint this makes them global.
        for sp in orm['cms.StaticPlaceholder'].objects.all():
            sp.site = None
            sp.save()

    def backwards(self, orm):
        # Irreversible: the original site assignments are not recorded.
        pass

    # Frozen ORM snapshot generated by South — do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.globalpagepermission': {
            'Meta': {'object_name': 'GlobalPagePermission'},
            'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'cms.page': {
            'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'), ('reverse_id', 'site', 'publisher_is_draft'))", 'object_name': 'Page'},
            'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
            'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
            'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
            'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
            'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'xframe_options': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'cms.pagepermission': {
            'Meta': {'object_name': 'PagePermission'},
            'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'cms.pageuser': {
            'Meta': {'object_name': 'PageUser', '_ormbases': [u'auth.User']},
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': u"orm['auth.User']"}),
            u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
        },
        'cms.pageusergroup': {
            'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': u"orm['auth.User']"}),
            u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'cms.placeholderreference': {
            'Meta': {'object_name': 'PlaceholderReference', '_ormbases': ['cms.CMSPlugin']},
            u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'placeholder_ref': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'})
        },
        'cms.staticplaceholder': {
            'Meta': {'unique_together': "(('code', 'site'),)", 'object_name': 'StaticPlaceholder'},
            'code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'creation_method': ('django.db.models.fields.CharField', [], {'default': "'code'", 'max_length': '20', 'blank': 'True'}),
            'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'draft': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_draft'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
            'public': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_public'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'})
        },
        'cms.title': {
            'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
            'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Title']"}),
            'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'cms.usersettings': {
            'Meta': {'object_name': 'UserSettings'},
            'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_usersettings'", 'to': u"orm['auth.User']"})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }
    complete_apps = ['cms']
    symmetrical = True
|
bsd-3-clause
|
hakonsbm/nest-simulator
|
pynest/nest/tests/test_connect_symmetric_pairwise_bernoulli.py
|
2
|
5160
|
# -*- coding: utf-8 -*-
#
# test_connect_symmetric_pairwise_bernoulli.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import collections
import numpy as np
import unittest
import scipy.stats
from . import test_connect_helpers as hf
from .test_connect_parameters import TestParams
class TestSymmetricPairwiseBernoulli(TestParams):
    """Tests for the 'symmetric_pairwise_bernoulli' connection rule."""

    # Sizes of source and target populations for the statistical test.
    N_s = 60
    N_t = 60
    # Connection pattern and rule-specific parameters.
    rule = 'symmetric_pairwise_bernoulli'
    p = 0.5
    conn_dict = {'rule': rule, 'p': p, 'multapses': True,
                 'autapses': False, 'make_symmetric': True}
    # Critical values and number of iterations of the two-level test.
    stat_dict = {'alpha2': 0.05, 'n_runs': 20}

    def testStatistics(self):
        # Two-level test: a chi-squared check of the degree distribution per
        # run, then a KS test that the per-run p-values are uniform.
        for fan in ['in', 'out']:
            expected = hf.get_expected_degrees_bernoulli(
                self.p, fan, self.N_s, self.N_t)
            pvalues = []
            for i in range(self.stat_dict['n_runs']):
                hf.reset_seed(i, self.nr_threads)
                self.setUpNetwork(conn_dict=self.conn_dict,
                                  N1=self.N_s, N2=self.N_t)
                degrees = hf.get_degrees(fan, self.pop1, self.pop2)
                degrees = hf.gather_data(degrees)
                # degrees = self.comm.gather(degrees, root=0)
                # if self.rank == 0:
                # NOTE(review): gather_data presumably returns None on
                # non-root MPI ranks, so only the root accumulates p-values.
                if degrees is not None:
                    chi, p = hf.chi_squared_check(degrees, expected, self.rule)
                    pvalues.append(p)
                hf.mpi_barrier()
            if degrees is not None:
                ks, p = scipy.stats.kstest(pvalues, 'uniform')
                self.assertTrue(p > self.stat_dict['alpha2'])

    def testAutapsesTrue(self):
        conn_params = self.conn_dict.copy()
        conn_params['autapses'] = True
        N = 10
        # test that autapses are not permitted
        pop = hf.nest.Create('iaf_psc_alpha', N)
        with self.assertRaises(hf.nest.kernel.NESTError):
            hf.nest.Connect(pop, pop, conn_params)

    def testAutapsesFalse(self):
        conn_params = self.conn_dict.copy()
        N = 10
        # test that autapses were excluded
        conn_params['p'] = 1. - 1. / N
        conn_params['autapses'] = False
        pop = hf.nest.Create('iaf_psc_alpha', N)
        hf.nest.Connect(pop, pop, conn_params)
        M = hf.get_connectivity_matrix(pop, pop)
        # A zero diagonal means no neuron is connected to itself.
        hf.mpi_assert(np.diag(M), np.zeros(N), self)

    def testMultapses(self):
        conn_params = self.conn_dict.copy()
        conn_params['multapses'] = False
        N = 10
        # test that multapses must be permitted
        hf.nest.ResetKernel()
        pop = hf.nest.Create('iaf_psc_alpha', N)
        with self.assertRaises(hf.nest.kernel.NESTError):
            hf.nest.Connect(pop, pop, conn_params)
        # test that multapses can only arise from symmetric connectivity
        conn_params['p'] = 1. - 1. / N
        conn_params['multapses'] = True
        hf.nest.ResetKernel()
        pop = hf.nest.Create('iaf_psc_alpha', N)
        hf.nest.Connect(pop, pop, conn_params)
        conn_dict = collections.defaultdict(int)
        for conn in hf.nest.GetConnections():
            key = tuple(conn[:2])
            conn_dict[key] += 1
            # At most two connections per source/target pair: the drawn one
            # plus its symmetric counterpart.
            self.assertTrue(conn_dict[key] <= 2)

    def testMakeSymmetric(self):
        conn_params = self.conn_dict.copy()
        N = 100
        # test that make_symmetric must be enabled
        conn_params['make_symmetric'] = False
        hf.nest.ResetKernel()
        pop = hf.nest.Create('iaf_psc_alpha', N)
        with self.assertRaises(hf.nest.kernel.NESTError):
            hf.nest.Connect(pop, pop, conn_params)
        # test that all connections are symmetric
        conn_params['make_symmetric'] = True
        hf.nest.ResetKernel()
        pop = hf.nest.Create('iaf_psc_alpha', N)
        hf.nest.Connect(pop, pop, conn_params)
        conns = set()
        for conn in hf.nest.GetConnections():
            key = tuple(conn[:2])
            conns.add(key)
        # Every (source, target) must have a matching (target, source).
        for conn in hf.nest.GetConnections():
            key = tuple(conn[:2])
            self.assertTrue(key[::-1] in conns)
def suite():
    """Assemble and return the test suite for this module."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(TestSymmetricPairwiseBernoulli)
def run():
    """Run this module's test suite with verbose output."""
    unittest.TextTestRunner(verbosity=2).run(suite())


if __name__ == '__main__':
    run()
|
gpl-2.0
|
Edraak/circleci-edx-platform
|
lms/djangoapps/instructor/views/gradebook_api.py
|
28
|
4309
|
"""
Grade book view for instructor and pagination work (for grade book)
which is currently use by ccx and instructor apps.
"""
import math
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import transaction
from django.views.decorators.cache import cache_control
from opaque_keys.edx.keys import CourseKey
from edxmako.shortcuts import render_to_response
from courseware.courses import get_course_with_access
from instructor.offline_gradecalc import student_grades
from instructor.views.api import require_level
# Grade book: max students per page
MAX_STUDENTS_PER_PAGE_GRADE_BOOK = 20


def calculate_page_info(offset, total_students):
    """
    Sanitize the requested offset and derive pagination metadata.

    :param offset: requested offset into the student list; an int or a digit
        string (anything else, or an out-of-range value, falls back to 0)
    :param total_students: total number of enrolled students
    :return: dict with ``previous_offset``, ``next_offset`` (``None`` when
        there is no previous/next page), 1-based ``page_num``, the sanitized
        ``offset`` and ``total_pages`` (at least 1)
    """
    # Fall back to the first page for anything that is not a valid,
    # in-range, non-negative integer (e.g. garbage query params).
    # NOTE: the isdigit() check must run before int() to avoid ValueError.
    if not (isinstance(offset, int) or offset.isdigit()) or \
            int(offset) < 0 or int(offset) >= total_students:
        offset = 0
    else:
        offset = int(offset)

    # calculate offsets for next and previous pages.
    next_offset = offset + MAX_STUDENTS_PER_PAGE_GRADE_BOOK
    previous_offset = offset - MAX_STUDENTS_PER_PAGE_GRADE_BOOK

    # Floor division keeps page_num an int on Python 3 as well; the old
    # ``/`` produced a float there (e.g. page_num == 1.0).
    page_num = (offset // MAX_STUDENTS_PER_PAGE_GRADE_BOOK) + 1

    # calculate total number of pages (an empty course still has one page).
    total_pages = int(math.ceil(float(total_students) / MAX_STUDENTS_PER_PAGE_GRADE_BOOK)) or 1

    if previous_offset < 0 or offset == 0:
        # We are at the first page, so there's no previous page.
        previous_offset = None
    if next_offset >= total_students:
        # We've reached the last page, so there's no next page.
        next_offset = None

    return {
        "previous_offset": previous_offset,
        "next_offset": next_offset,
        "page_num": page_num,
        "offset": offset,
        "total_pages": total_pages,
    }
def get_grade_book_page(request, course, course_key):
    """
    Build one page of gradebook data.

    Returns a tuple ``(student_info, page)``: ``student_info`` is a list of
    per-student dicts (username, id, email, grade summary, real name) and
    ``page`` is the pagination metadata from ``calculate_page_info``.
    """
    # Offset comes straight from the query string; calculate_page_info
    # sanitizes it.
    raw_offset = request.GET.get('offset', 0)

    enrolled_students = User.objects.filter(
        courseenrollment__course_id=course_key,
        courseenrollment__is_active=1
    ).order_by('username').select_related("profile")

    page = calculate_page_info(raw_offset, enrolled_students.count())

    if page["total_pages"] > 1:
        # Slice the queryset only when there is more than one page, i.e.
        # when enrollment exceeds MAX_STUDENTS_PER_PAGE_GRADE_BOOK.
        start = page["offset"]
        enrolled_students = enrolled_students[start:start + MAX_STUDENTS_PER_PAGE_GRADE_BOOK]

    student_info = [
        {
            'username': student.username,
            'id': student.id,
            'email': student.email,
            'grade_summary': student_grades(student, request, course),
            'realname': student.profile.name,
        }
        for student in enrolled_students
    ]
    return student_info, page
@transaction.non_atomic_requests
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
def spoc_gradebook(request, course_id):
    """
    Show the gradebook for this course:
    - Only shown for courses with enrollment < settings.FEATURES.get("MAX_ENROLLMENT_INSTR_BUTTONS")
    - Only displayed to course staff
    """
    course_key = CourseKey.from_string(course_id)
    course = get_course_with_access(request.user, 'staff', course_key, depth=None)
    students, page_info = get_grade_book_page(request, course, course_key)

    context = {
        'page': page_info,
        'page_url': reverse('spoc_gradebook', kwargs={'course_id': unicode(course_key)}),
        'students': students,
        'course': course,
        'course_id': course_key,
        # Staff access already enforced by require_level above.
        'staff_access': True,
        # Highest cutoff first so the legend reads A, B, C, ...
        'ordered_grades': sorted(course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True),
    }
    return render_to_response('courseware/gradebook.html', context)
|
agpl-3.0
|
shiminasai/ciat_plataforma
|
guias_cacao/models.py
|
2
|
153154
|
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
from mapeo.models import Persona
from sorl.thumbnail import ImageField
from multiselectfield import MultiSelectField
# Create your models here.
class FichaSombra(models.Model):
    """Shade survey form: one record per technician visit to a producer's plot."""
    productor = models.ForeignKey(
        Persona,
        verbose_name='Nombre de productor o productora',
        related_name='persona_productor')
    tecnico = models.ForeignKey(
        Persona,
        verbose_name='Nombre de técnico',
        related_name='persona_tecnico')
    fecha_visita = models.DateField()

    def __unicode__(self):
        return self.productor.nombre

    class Meta:
        verbose_name = "Ficha sombra"
        verbose_name_plural = "Ficha sombra"
class Foto1(models.Model):
    """Photo taken at sampling point 1 of a shade survey."""
    foto = ImageField(upload_to='foto1Sombra')
    ficha = models.ForeignKey(FichaSombra)


# Leaf habit of a shade tree (evergreen / deciduous).
CHOICE_TIPO_PUNTO = (
    (1, 'Perennifolia'),
    (2, 'Caducifolia'),
)

# Main uses of a shade tree.
CHOICE_TIPO_USO_PUNTO = (
    (1, 'Leña'),
    (2, 'Fruta'),
    (3, 'Madera'),
    (4, 'Sombra'),
    (5, 'Nutrientes'),
)
class Especies(models.Model):
    """Catalogue of shade tree species with typical sizes per size class."""
    nombre = models.CharField('Nombre de la especie', max_length=250)
    nombre_cientifico = models.CharField('Nombre cientifico de la especie', max_length=250, blank=True, null=True)
    tipo = models.IntegerField(choices=CHOICE_TIPO_PUNTO, blank=True, null=True)
    tipo_uso = MultiSelectField(choices=CHOICE_TIPO_USO_PUNTO, verbose_name='Tipo de uso', blank=True, null=True)
    foto = ImageField(upload_to='fotoEspecies', blank=True, null=True)
    # Small size class: typical height, diameter and crown width.
    p_altura = models.FloatField('Altura en (mt)', blank=True, null=True)
    p_diametro = models.FloatField('Diametro en (cm)', blank=True, null=True)
    p_ancho = models.FloatField('Ancho copa en (mt)s', blank=True, null=True)
    # Medium size class.
    m_altura = models.FloatField('Altura en (mt)', blank=True, null=True)
    m_diametro = models.FloatField('Diametro en (cm)', blank=True, null=True)
    m_ancho = models.FloatField('Ancho copa en (mt)s', blank=True, null=True)
    # Large size class.
    g_altura = models.FloatField('Altura en (mt)', blank=True, null=True)
    g_diametro = models.FloatField('Diametro en (cm)', blank=True, null=True)
    g_ancho = models.FloatField('Ancho copa en (mt)s', blank=True, null=True)

    def __unicode__(self):
        return self.nombre

    class Meta:
        verbose_name = "Especie"
        verbose_name_plural = "Especies"
# Crown shape of a shade tree.
CHOICE_TIPO_COPA_PUNTO = (
    (1, 'Copa ancha'),
    (2, 'Copa angosta'),
    (3, 'Copa mediana'),
)


class Punto1(models.Model):
    """Shade tree tallies by size class observed at sampling point 1."""
    especie = models.ForeignKey(Especies)
    pequena = models.FloatField(verbose_name='Pequeña')
    mediana = models.FloatField(verbose_name='Mediana')
    grande = models.FloatField(verbose_name='Grande')
    tipo = models.IntegerField(choices=CHOICE_TIPO_PUNTO)
    tipo_de_copa = models.IntegerField(choices=CHOICE_TIPO_COPA_PUNTO)
    uso = models.IntegerField(choices=CHOICE_TIPO_USO_PUNTO)
    ficha = models.ForeignKey(FichaSombra)

    class Meta:
        verbose_name_plural = "Punto1"


class Cobertura1(models.Model):
    """Measured shade cover (%) at sampling point 1."""
    cobertura = models.FloatField('% de cobertura de sombra')
    ficha = models.ForeignKey(FichaSombra)
# ------------------------------ end of point 1 -------------------------------
class Foto2(models.Model):
    """Photo taken at sampling point 2 of a shade survey."""
    foto = ImageField(upload_to='foto2Sombra')
    ficha = models.ForeignKey(FichaSombra)


class Punto2(models.Model):
    """Shade tree tallies by size class observed at sampling point 2."""
    especie = models.ForeignKey(Especies)
    pequena = models.FloatField(verbose_name='Pequeña')
    mediana = models.FloatField(verbose_name='Mediana')
    grande = models.FloatField(verbose_name='Grande')
    tipo = models.IntegerField(choices=CHOICE_TIPO_PUNTO)
    tipo_de_copa = models.IntegerField(choices=CHOICE_TIPO_COPA_PUNTO)
    uso = models.IntegerField(choices=CHOICE_TIPO_USO_PUNTO)
    ficha = models.ForeignKey(FichaSombra)

    class Meta:
        verbose_name_plural = "Punto2"


class Cobertura2(models.Model):
    """Measured shade cover (%) at sampling point 2."""
    cobertura = models.FloatField('% de cobertura de sombra')
    ficha = models.ForeignKey(FichaSombra)
# ------------------------------ end of point 2 -------------------------------
class Foto3(models.Model):
    """Photo taken at sampling point 3 of a shade survey."""
    foto = ImageField(upload_to='foto3Sombra')
    ficha = models.ForeignKey(FichaSombra)


class Punto3(models.Model):
    """Shade tree tallies by size class observed at sampling point 3."""
    especie = models.ForeignKey(Especies)
    pequena = models.FloatField(verbose_name='Pequeña')
    mediana = models.FloatField(verbose_name='Mediana')
    grande = models.FloatField(verbose_name='Grande')
    tipo = models.IntegerField(choices=CHOICE_TIPO_PUNTO)
    tipo_de_copa = models.IntegerField(choices=CHOICE_TIPO_COPA_PUNTO)
    uso = models.IntegerField(choices=CHOICE_TIPO_USO_PUNTO)
    ficha = models.ForeignKey(FichaSombra)

    class Meta:
        verbose_name_plural = "Punto3"


class Cobertura3(models.Model):
    """Measured shade cover (%) at sampling point 3."""
    cobertura = models.FloatField('% de cobertura de sombra')
    ficha = models.ForeignKey(FichaSombra)
# ------------------------------ end of point 3 -------------------------------
class AnalisisSombra(models.Model):
    """Technician's assessment of shade and shade trees for one survey."""
    densidad = models.IntegerField(
        choices=((1, 'Alta'), (2, 'Adecuada'), (3, 'Baja'),),
        verbose_name='Densidad de árboles de sombra')
    forma_copa = models.IntegerField(
        choices=((1, 'Ancha'), (2, 'Adecuada'), (3, 'Angosta'),),
        verbose_name='Forma de copa de árboles de sombra')
    arreglo = models.IntegerField(choices=((1, 'Uniforme'), (2, 'Desuniforme'),),
                                  verbose_name='Arreglo de árboles')
    hojarasca = models.IntegerField(
        choices=((1, 'Suficiente'), (2, 'No Suficiente'),),
        verbose_name='Cantidad de hojarasca ')
    calidad_hojarasca = models.IntegerField(
        choices=((1, 'Rico en nutrientes'), (2, 'Pobre en nutriente'),),
        verbose_name='Calidad de hojarasca ')
    competencia = models.IntegerField(
        choices=((1, 'Fuerte'), (2, 'Mediana'), (3, 'Leve'),),
        verbose_name='Competencia de árboles con cacao')
    # NOTE(review): capitalized field name kept as-is — renaming would require
    # a schema migration.
    Problema = models.IntegerField(
        choices=((1, 'Cobertura'), (2, 'Mal arreglo'), (3, 'Competencia'),
                 (4, 'Densidad Tipo de árboles'), (5, 'Ninguno')),
        verbose_name='Problema de sombra')
    ficha = models.ForeignKey(FichaSombra)

    class Meta:
        verbose_name_plural = "Análisis sobre sombra y árboles de sombra"
# Action to take on shade cover.
CHOICE_ACCIONES_SOMBRA = (
    (1, 'Reducir la sombra'),
    (2, 'Aumentar la sombra'),
    (3, 'Ninguna'),
)

# Generic yes/no answer.
CHOICE_PODA = (
    (1, 'Si'),
    (2, 'No'),
)

# Scope of an action: whole plot vs. only part of it.
CHOICE_TODO = (
    (1, 'En todo la parcela '),
    (2, 'Solo en una parte de la parcela'),
)
class AccionesSombra(models.Model):
    """Chosen shade action (reduce / increase / none) for a survey."""
    accion = models.IntegerField(
        choices=CHOICE_ACCIONES_SOMBRA,
        verbose_name="Que acciones hay que realizar ")
    ficha = models.ForeignKey(FichaSombra)


class ReducirSombra(models.Model):
    """Details filled in when the chosen action is to reduce shade."""
    poda = models.IntegerField(
        choices=CHOICE_PODA,
        verbose_name="Podando árboles")
    poda_cuales = models.CharField(max_length=350)
    eliminando = models.IntegerField(
        choices=CHOICE_PODA,
        verbose_name="Cambiando árboles")
    eliminando_cuales = models.CharField(max_length=350)
    todo = models.IntegerField(
        choices=CHOICE_TODO,
        verbose_name="En todo la parcela o Solo en una parte de la parcela")
    que_parte = models.CharField(max_length=250)
    ficha = models.ForeignKey(FichaSombra)

    class Meta:
        verbose_name_plural = "Si marca reducir la sombra"


class AumentarSombra(models.Model):
    """Details filled in when the chosen action is to increase shade."""
    sembrando = models.IntegerField(
        choices=CHOICE_PODA,
        verbose_name="Sembrando árboles")
    sembrando_cuales = models.CharField(max_length=350)
    cambiando = models.IntegerField(
        choices=CHOICE_PODA,
        verbose_name="Cambiando árboles")
    cambiando_cuales = models.CharField(max_length=350)
    todo = models.IntegerField(
        choices=CHOICE_TODO,
        verbose_name="En todo la parcela o Solo en una parte de la parcela")
    que_parte = models.CharField(max_length=250)
    ficha = models.ForeignKey(FichaSombra)

    class Meta:
        verbose_name_plural = "Si marca aumentar la sombra"


class ManejoSombra(models.Model):
    """Whether the producer has tools and training for shade management."""
    herramientas = models.IntegerField(
        choices=CHOICE_PODA,
        verbose_name="Tiene herramienta para manejo de sombra? ")
    formacion = models.IntegerField(
        choices=CHOICE_PODA,
        verbose_name="Tiene formación para manejo de sombra? ")
    ficha = models.ForeignKey(FichaSombra)

    class Meta:
        verbose_name = "Herramienta y formación de sombras"
# --------------------------- end of shade survey -----------------------------
class FichaPoda(models.Model):
    """Pruning survey form: one record per technician visit."""
    productor = models.ForeignKey(
        Persona,
        verbose_name='Nombre de productor o productora',
        related_name='setproductor')
    tecnico = models.ForeignKey(
        Persona,
        verbose_name='Nombre de técnico',
        related_name='settecnico')
    fecha_visita = models.DateField()

    def __unicode__(self):
        return self.productor.nombre

    class Meta:
        verbose_name = "Ficha poda"
        verbose_name_plural = "Ficha poda"


# Generic yes/no answer.
CHOICE_SI_NO = (
    (1, 'Si'),
    (2, 'No'),
)

# Production level.
CHOICE_PRODUCCION = (
    (1, 'Alta'),
    (2, 'Media'),
    (3, 'Baja'),
)

# Per-plant numeric measurements recorded in Punto*A rows.
CHOICE_PLANTAS1 = (
    (1, 'Altura en mt'),
    (2, 'Ancho de copa mt'),
)

# Per-plant yes/no structural observations recorded in Punto*B rows.
CHOICE_PLANTAS2 = (
    (1, 'Formación de horqueta'),
    (2, 'Ramas en contacto '),
    (3, 'Ramas entrecruzadas'),
    (4, 'Ramas cercanas al suelo'),
    (5, 'Chupones'),
    (6, 'Penetración de Luz'),
)

# Per-plant production level recorded in Punto*C rows.
CHOICE_PLANTAS3 = (
    (1, 'Nivel de producción'),
)
class Punto1A(models.Model):
    """Point 1: one measurement type (height/crown width) for ten plants."""
    plantas = models.IntegerField(choices=CHOICE_PLANTAS1)
    uno = models.FloatField(verbose_name='1')
    dos = models.FloatField(verbose_name='2')
    tres = models.FloatField(verbose_name='3')
    cuatro = models.FloatField(verbose_name='4')
    cinco = models.FloatField(verbose_name='5')
    seis = models.FloatField(verbose_name='6')
    siete = models.FloatField(verbose_name='7')
    ocho = models.FloatField(verbose_name='8')
    nueve = models.FloatField(verbose_name='9')
    diez = models.FloatField(null=True, blank=True, verbose_name='10')
    ficha = models.ForeignKey(FichaPoda)

    def __unicode__(self):
        return self.get_plantas_display()


class Punto1B(models.Model):
    """Point 1: one yes/no structural observation for ten plants."""
    plantas = models.IntegerField(choices=CHOICE_PLANTAS2)
    uno = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='1')
    dos = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='2')
    tres = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='3')
    cuatro = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='4')
    cinco = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='5')
    seis = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='6')
    siete = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='7')
    ocho = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='8')
    nueve = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='9')
    diez = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='10', null=True, blank=True)
    ficha = models.ForeignKey(FichaPoda)

    def __unicode__(self):
        return self.get_plantas_display()


class Punto1C(models.Model):
    """Point 1: production level for ten plants."""
    plantas = models.IntegerField(choices=CHOICE_PLANTAS3)
    uno = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='1')
    dos = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='2')
    tres = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='3')
    cuatro = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='4')
    cinco = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='5')
    seis = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='6')
    siete = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='7')
    ocho = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='8')
    nueve = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='9')
    diez = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='10', null=True, blank=True)
    ficha = models.ForeignKey(FichaPoda)

    def __unicode__(self):
        return self.get_plantas_display()
# ------------------------------ end of point 1 -------------------------------
class Punto2A(models.Model):
    """Point 2: one measurement type (height/crown width) for ten plants."""
    plantas = models.IntegerField(choices=CHOICE_PLANTAS1)
    uno = models.FloatField(verbose_name='1')
    dos = models.FloatField(verbose_name='2')
    tres = models.FloatField(verbose_name='3')
    cuatro = models.FloatField(verbose_name='4')
    cinco = models.FloatField(verbose_name='5')
    seis = models.FloatField(verbose_name='6')
    siete = models.FloatField(verbose_name='7')
    ocho = models.FloatField(verbose_name='8')
    nueve = models.FloatField(verbose_name='9')
    diez = models.FloatField(null=True, blank=True, verbose_name='10')
    ficha = models.ForeignKey(FichaPoda)

    def __unicode__(self):
        return self.get_plantas_display()


class Punto2B(models.Model):
    """Point 2: one yes/no structural observation for ten plants."""
    plantas = models.IntegerField(choices=CHOICE_PLANTAS2)
    uno = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='1')
    dos = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='2')
    tres = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='3')
    cuatro = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='4')
    cinco = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='5')
    seis = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='6')
    siete = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='7')
    ocho = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='8')
    nueve = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='9')
    diez = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='10', null=True, blank=True)
    ficha = models.ForeignKey(FichaPoda)

    def __unicode__(self):
        return self.get_plantas_display()


class Punto2C(models.Model):
    """Point 2: production level for ten plants."""
    plantas = models.IntegerField(choices=CHOICE_PLANTAS3)
    uno = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='1')
    dos = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='2')
    tres = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='3')
    cuatro = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='4')
    cinco = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='5')
    seis = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='6')
    siete = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='7')
    ocho = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='8')
    nueve = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='9')
    diez = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='10', null=True, blank=True)
    ficha = models.ForeignKey(FichaPoda)

    def __unicode__(self):
        return self.get_plantas_display()
# ------------------------------ end of point 2 -------------------------------
class Punto3A(models.Model):
    """Point 3: one measurement type (height/crown width) for ten plants."""
    plantas = models.IntegerField(choices=CHOICE_PLANTAS1)
    uno = models.FloatField(verbose_name='1')
    dos = models.FloatField(verbose_name='2')
    tres = models.FloatField(verbose_name='3')
    cuatro = models.FloatField(verbose_name='4')
    cinco = models.FloatField(verbose_name='5')
    seis = models.FloatField(verbose_name='6')
    siete = models.FloatField(verbose_name='7')
    ocho = models.FloatField(verbose_name='8')
    nueve = models.FloatField(verbose_name='9')
    diez = models.FloatField(null=True, blank=True, verbose_name='10')
    ficha = models.ForeignKey(FichaPoda)

    def __unicode__(self):
        return self.get_plantas_display()


class Punto3B(models.Model):
    """Point 3: one yes/no structural observation for ten plants."""
    plantas = models.IntegerField(choices=CHOICE_PLANTAS2)
    uno = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='1')
    dos = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='2')
    tres = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='3')
    cuatro = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='4')
    cinco = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='5')
    seis = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='6')
    siete = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='7')
    ocho = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='8')
    nueve = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='9')
    diez = models.IntegerField(choices=CHOICE_SI_NO, verbose_name='10', null=True, blank=True)
    ficha = models.ForeignKey(FichaPoda)

    def __unicode__(self):
        return self.get_plantas_display()


class Punto3C(models.Model):
    """Point 3: production level for ten plants."""
    plantas = models.IntegerField(choices=CHOICE_PLANTAS3)
    uno = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='1')
    dos = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='2')
    tres = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='3')
    cuatro = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='4')
    cinco = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='5')
    seis = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='6')
    siete = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='7')
    ocho = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='8')
    nueve = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='9')
    diez = models.IntegerField(choices=CHOICE_PRODUCCION, verbose_name='10', null=True, blank=True)
    ficha = models.ForeignKey(FichaPoda)

    def __unicode__(self):
        return self.get_plantas_display()
# ------------------------------ end of point 3 -------------------------------
# Main structural problems found in plants (multi-select).
CHOICES_PROBLEMA_PLANTA = (('A', 'Altura'),
                           ('B', 'Ancho'),
                           ('C', 'Ramas'),
                           ('D', 'Horqueta'),
                           ('E', 'Chupones'),
                           ('F', 'Poca entrada de Luz'),
                           ('G', 'Baja productividad'),
                           ('H', 'Ninguna'),
                           )

# Types of pruning that can be applied (multi-select).
CHOICES_TIPO_PODA = (('A', 'Poda de copa'),
                     ('B', 'Poda de ramas'),
                     ('C', 'Ramas'),
                     ('D', 'Formar horquetas'),
                     ('E', 'Deschuponar'),
                     ('F', 'Ninguna'),
                     )

# Where the pruning will be carried out.
CHOICE_REALIZA_PODA = (
    (1, 'En toda la parcela'),
    (2, 'En Varios partes'),
    (3, 'En algunas partes'), )

# How many plants have enough vigor to be pruned.
CHOICE_VIGOR = (
    (1, 'Todas'),
    (2, 'Algunas'),
    (3, 'Ninguna'), )

# How to improve light penetration.
CHOICE_ENTRADA_LUZ = (
    (1, 'Poda de copa'),
    (2, 'Quitar ramas entrecruzadas'),
    (3, 'Arreglar la sombra'),
)

# Months of the year (multi-select).
CHOICES_FECHA_PODA = (('A', 'Enero'),
                      ('B', 'Febrero'),
                      ('C', 'Marzo'),
                      ('D', 'Abril'),
                      ('E', 'Mayo'),
                      ('F', 'Junio'),
                      ('G', 'Julio'),
                      ('H', 'Agosto'),
                      ('I', 'Septiembre'),
                      ('J', 'Octubre'),
                      ('K', 'Noviembre'),
                      ('L', 'Diciembre'),
                      )
class AnalisisPoda(models.Model):
    """Pruning analysis and planned actions for one pruning survey."""
    campo1 = MultiSelectField(choices=CHOICES_PROBLEMA_PLANTA, verbose_name='¿Cuáles son los problemas principales en cuanto a las estructuras de las plantas?')
    campo2 = MultiSelectField(choices=CHOICES_TIPO_PODA, verbose_name='¿Qué tipo de poda podemos aplicar para mejorar la estructura de las plantas?')
    campo3 = models.IntegerField(choices=CHOICE_REALIZA_PODA, verbose_name='¿Dónde se va a realizar la poda para mejorar la estructura de las plantas?')
    campo4 = models.IntegerField(choices=CHOICE_VIGOR, verbose_name='Las plantas tienen suficiente vigor, hojas y ramas para ser podadas?')
    campo5 = models.IntegerField(choices=CHOICE_ENTRADA_LUZ, verbose_name='¿Cómo podemos mejorar la entrada de luz en las plantas con la poda?')
    campo6 = MultiSelectField(choices=CHOICES_FECHA_PODA, verbose_name='¿Cuándo se van a realizar las podas?')
    ficha = models.ForeignKey(FichaPoda)

    def __unicode__(self):
        return 'Analisis'

    class Meta:
        verbose_name_plural = 'Análisis de poda y acciones'


class ManejoPoda(models.Model):
    """Whether the producer has tools and training for pruning."""
    herramientas = models.IntegerField(
        choices=CHOICE_PODA,
        verbose_name="Tiene herramienta para manejo de poda? ")
    formacion = models.IntegerField(
        choices=CHOICE_PODA,
        verbose_name="Tiene formación para manejo de poda? ")
    ficha = models.ForeignKey(FichaPoda)

    class Meta:
        verbose_name = "Herramienta y formación de poda"
# --------------------------- end of pruning survey ---------------------------
class FichaPlaga(models.Model):
    """Pest-and-disease survey form: one record per technician visit."""
    productor = models.ForeignKey(Persona,
                                  verbose_name='Nombre de productor o productora',
                                  related_name='persona_productor_plaga')
    tecnico = models.ForeignKey(Persona,
                                verbose_name='Nombre de técnico',
                                related_name='persona_tecnico_plaga')
    fecha_visita = models.DateField()

    def __unicode__(self):
        return self.productor.nombre

    class Meta:
        verbose_name = "Ficha plaga"
        verbose_name_plural = "Ficha plaga"


# Pests and diseases found in cacao plots.
CHOICE_ENFERMEDADES_CACAOTALES = (
    (1, 'Monilia'),
    (2, 'Mazorca negra'),
    (3, 'Mal de machete'),
    (4, 'Mal de talluelo en el vivero'),
    (5, 'Barrenadores de tallo'),
    (6, 'Zompopos'),
    (7, 'Chupadores o áfidos'),
    (8, 'Escarabajos'),
    (9, 'Comején'),
    (10, 'Ardillas'),
    (11, 'Otros'),
)
class PlagasEnfermedad(models.Model):
    """Per-pest incidence and damage level reported by the producer."""
    plagas = models.IntegerField(choices=CHOICE_ENFERMEDADES_CACAOTALES,
                                 blank=True, null=True, verbose_name="Plagas y enfermedades")
    visto = models.IntegerField(choices=CHOICE_SI_NO,
                                blank=True, null=True, verbose_name="He visto en mi cacaotal")
    dano = models.IntegerField(choices=CHOICE_SI_NO,
                               blank=True, null=True, verbose_name="Hace daño año con año")
    promedio = models.FloatField("¿Promedio nivel de daño en %?")
    ficha = models.ForeignKey(FichaPlaga)

    def __unicode__(self):
        return u"PlagasEnfermedad"


# Pest/disease management actions.
CHOICE_ACCIONES_ENFERMEDADES = (
    (1, 'Recuento de plagas'),
    (2, 'Cortar las mazorcas enfermas'),
    (3, 'Abonar las plantas'),
    (4, 'Aplicar Caldos'),
    (5, 'Aplicar Fungicidas'),
    (6, 'Manejo de sombra'),
    (7, 'Podar las plantas de cacao'),
    (8, 'Aplicar venenos para Zompopo'),
    (9, 'Control de Comején'),
    (10, 'Ahuyar Ardillas'),
    (11, 'Otras'),
)
class AccionesEnfermedad(models.Model):
    """Management actions the producer performs, with frequency and months."""
    plagas_acciones = models.IntegerField(choices=CHOICE_ACCIONES_ENFERMEDADES,
                                          blank=True, null=True, verbose_name="Manejo de Plagas y enfermedadess")
    realiza_manejo = models.IntegerField(choices=CHOICE_SI_NO,
                                         blank=True, null=True, verbose_name="Realiza en manejo")
    cuantas_veces = models.IntegerField(blank=True, null=True,
                                        verbose_name="Cuantas veces realizan el manejo")
    meses = MultiSelectField(choices=CHOICES_FECHA_PODA,
                             verbose_name='En qué meses realizan el manejo')
    ficha = models.ForeignKey(FichaPlaga)

    def __unicode__(self):
        return u"AccionesEnfermedad"

    class Meta:
        verbose_name = "ACCIONES MANEJO DE PLAGAS Y ENFERMEDADE"


# Sources of guidance for pest/disease management (multi-select).
CHOICE_ORIENTACION = (
    ("A", 'Técnico'),
    ("B", 'Casa comercial'),
    ("C", 'Cooperativa'),
    ("D", 'Otros productores'),
    ("E", 'Experiencia propia/costumbres'),
    ("F", 'Otros medio de comunicación'),
)


class Orientacion(models.Model):
    """Where the producer gets pest-management guidance."""
    fuentes = MultiSelectField(choices=CHOICE_ORIENTACION,
                               verbose_name='3. Las fuentes de orientación para manejo de las plagas y enfermedades')
    ficha = models.ForeignKey(FichaPlaga)

    def __unicode__(self):
        return u"Orientacion"
# Pests, diseases and damage types observable at a sampling point.
CHOICE_OBSERVACION_PUNTO1 = (
    (1, 'Monilia'),
    (2, 'Mazorca Negra'),
    (3, 'Mal de machete'),
    (4, 'Daño de ardilla'),
    (5, 'Daño de barrenador'),
    (6, 'Chupadores'),
    (7, 'Daño de zompopo'),
    (8, 'Bejuco'),
    (9, 'Tanda'),
    (10, 'Daño de comején'),
    (11, 'Daño de minador de la hoja'),
    (12, 'Daño por lana'),
    (13, 'Otros'),
)


class ObservacionPunto1(models.Model):
    """
    Point-1 observation: yes/no presence of one pest/disease on each of ten
    plants.  ``contador`` is a denormalized count of plants answered 'Si',
    recomputed automatically on every save.
    """
    planta = models.IntegerField(choices=CHOICE_OBSERVACION_PUNTO1,
                                 blank=True, null=True)
    uno = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    dos = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    tres = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    cuatro = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    cinco = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    seis = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    siete = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    ocho = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    nueve = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    dies = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True, verbose_name='Diez')
    # Cached count of 'Si' answers; maintained by save().
    contador = models.IntegerField(editable=False, null=True, blank=True)
    ficha = models.ForeignKey(FichaPlaga)

    # Names of the ten per-plant answer fields, in plant order.
    PLANT_FIELDS = ('uno', 'dos', 'tres', 'cuatro', 'cinco',
                    'seis', 'siete', 'ocho', 'nueve', 'dies')

    def save(self, *args, **kwargs):
        # Recompute the cached count of plants answered 'Si' (value 1) in a
        # single pass, replacing the old ten-branch if-chain.
        self.contador = sum(
            1 for name in self.PLANT_FIELDS if getattr(self, name) == 1)
        super(ObservacionPunto1, self).save(*args, **kwargs)

    def __unicode__(self):
        return u"Punto1"
class ObservacionPunto1Nivel(models.Model):
    """
    Point-1 production level (alta/media/baja) for each of ten plants.
    ``alta``/``media``/``baja`` are denormalized counters of how many plants
    fall in each level; they are recomputed automatically on every save.
    """
    planta = models.IntegerField(choices=CHOICE_PLANTAS3)
    uno = models.IntegerField(choices=CHOICE_PRODUCCION,
                              blank=True, null=True)
    dos = models.IntegerField(choices=CHOICE_PRODUCCION,
                              blank=True, null=True)
    tres = models.IntegerField(choices=CHOICE_PRODUCCION,
                               blank=True, null=True)
    cuatro = models.IntegerField(choices=CHOICE_PRODUCCION,
                                 blank=True, null=True)
    cinco = models.IntegerField(choices=CHOICE_PRODUCCION,
                                blank=True, null=True)
    seis = models.IntegerField(choices=CHOICE_PRODUCCION,
                               blank=True, null=True)
    siete = models.IntegerField(choices=CHOICE_PRODUCCION,
                                blank=True, null=True)
    ocho = models.IntegerField(choices=CHOICE_PRODUCCION,
                               blank=True, null=True)
    nueve = models.IntegerField(choices=CHOICE_PRODUCCION,
                                blank=True, null=True)
    dies = models.IntegerField(choices=CHOICE_PRODUCCION,
                               blank=True, null=True)
    # Cached per-level counts; maintained by save().
    alta = models.IntegerField(editable=False, null=True, blank=True)
    media = models.IntegerField(editable=False, null=True, blank=True)
    baja = models.IntegerField(editable=False, null=True, blank=True)
    ficha = models.ForeignKey(FichaPlaga)

    # Names of the ten per-plant answer fields, in plant order.
    PLANT_FIELDS = ('uno', 'dos', 'tres', 'cuatro', 'cinco',
                    'seis', 'siete', 'ocho', 'nueve', 'dies')

    def save(self, *args, **kwargs):
        # One pass over the ten answers replaces the old thirty-branch
        # if-chain.  CHOICE_PRODUCCION: 1 = Alta, 2 = Media, 3 = Baja.
        values = [getattr(self, name) for name in self.PLANT_FIELDS]
        self.alta = values.count(1)
        self.media = values.count(2)
        self.baja = values.count(3)
        super(ObservacionPunto1Nivel, self).save(*args, **kwargs)

    def __unicode__(self):
        return u"Punto1 nivel produccion"
class ObservacionPunto2(models.Model):
    """Sampling point 2: ten yes/no observations plus a derived count."""
    planta = models.IntegerField(choices=CHOICE_OBSERVACION_PUNTO1,
                                 blank=True, null=True)
    uno = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    dos = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    tres = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    cuatro = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    cinco = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    seis = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    siete = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    ocho = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    nueve = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    # "dies" kept (DB column name) even though "diez" is the correct spelling.
    dies = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    # Derived count of fields equal to 1; recomputed in save(), hidden from forms.
    contador = models.IntegerField(editable=False, null=True, blank=True)
    ficha = models.ForeignKey(FichaPlaga)

    # Names of the ten observation fields counted by save().
    _CAMPOS = ('uno', 'dos', 'tres', 'cuatro', 'cinco',
               'seis', 'siete', 'ocho', 'nueve', 'dies')

    def save(self, *args, **kwargs):
        """Recompute ``contador`` (fields equal to 1, presumably "sí") before saving."""
        self.contador = sum(
            1 for nombre in self._CAMPOS if getattr(self, nombre) == 1)
        super(ObservacionPunto2, self).save(*args, **kwargs)

    def __unicode__(self):
        return u"Punto2"
class ObservacionPunto2Nivel(models.Model):
    """Sampling point 2, per-production-level observations.

    Mirrors :class:`ObservacionPunto1Nivel`: each of the ten plant fields
    records a CHOICE_PRODUCCION code (presumably 1=alta, 2=media, 3=baja --
    confirm against the choice definition) and the totals are recomputed
    on every :meth:`save`.
    """
    planta = models.IntegerField(choices=CHOICE_PLANTAS3)
    uno = models.IntegerField(choices=CHOICE_PRODUCCION,
                              blank=True, null=True)
    dos = models.IntegerField(choices=CHOICE_PRODUCCION,
                              blank=True, null=True)
    tres = models.IntegerField(choices=CHOICE_PRODUCCION,
                               blank=True, null=True)
    cuatro = models.IntegerField(choices=CHOICE_PRODUCCION,
                                 blank=True, null=True)
    cinco = models.IntegerField(choices=CHOICE_PRODUCCION,
                                blank=True, null=True)
    seis = models.IntegerField(choices=CHOICE_PRODUCCION,
                               blank=True, null=True)
    siete = models.IntegerField(choices=CHOICE_PRODUCCION,
                                blank=True, null=True)
    ocho = models.IntegerField(choices=CHOICE_PRODUCCION,
                               blank=True, null=True)
    nueve = models.IntegerField(choices=CHOICE_PRODUCCION,
                                blank=True, null=True)
    # "dies" kept (DB column name) even though "diez" is the correct spelling.
    dies = models.IntegerField(choices=CHOICE_PRODUCCION,
                               blank=True, null=True)
    # Derived counters, filled in by save(); hidden from forms.
    alta = models.IntegerField(editable=False, null=True, blank=True)
    media = models.IntegerField(editable=False, null=True, blank=True)
    baja = models.IntegerField(editable=False, null=True, blank=True)
    ficha = models.ForeignKey(FichaPlaga)

    # Names of the ten per-plant observation fields counted by save().
    _CAMPOS = ('uno', 'dos', 'tres', 'cuatro', 'cinco',
               'seis', 'siete', 'ocho', 'nueve', 'dies')

    def _contar(self, nivel):
        # How many of the ten observation fields equal the given level code.
        return sum(1 for nombre in self._CAMPOS
                   if getattr(self, nombre) == nivel)

    def save(self, *args, **kwargs):
        """Recompute the alta/media/baja counters before persisting."""
        self.alta = self._contar(1)
        self.media = self._contar(2)
        self.baja = self._contar(3)
        super(ObservacionPunto2Nivel, self).save(*args, **kwargs)

    def __unicode__(self):
        return u"Punto2 nivel produccion"
class ObservacionPunto3(models.Model):
    """Sampling point 3: ten yes/no observations plus a derived count."""
    planta = models.IntegerField(choices=CHOICE_OBSERVACION_PUNTO1,
                                 blank=True, null=True)
    uno = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    dos = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    tres = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    cuatro = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    cinco = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    seis = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    siete = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    ocho = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    nueve = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    # "dies" kept (DB column name) even though "diez" is the correct spelling.
    dies = models.IntegerField(choices=CHOICE_SI_NO, blank=True, null=True)
    # Derived count of fields equal to 1; recomputed in save(), hidden from forms.
    contador = models.IntegerField(editable=False, null=True, blank=True)
    ficha = models.ForeignKey(FichaPlaga)

    # Names of the ten observation fields counted by save().
    _CAMPOS = ('uno', 'dos', 'tres', 'cuatro', 'cinco',
               'seis', 'siete', 'ocho', 'nueve', 'dies')

    def save(self, *args, **kwargs):
        """Recompute ``contador`` (fields equal to 1, presumably "sí") before saving."""
        self.contador = sum(
            1 for nombre in self._CAMPOS if getattr(self, nombre) == 1)
        super(ObservacionPunto3, self).save(*args, **kwargs)

    def __unicode__(self):
        return u"Punto3"
class ObservacionPunto3Nivel(models.Model):
    """Sampling point 3, per-production-level observations.

    Mirrors :class:`ObservacionPunto1Nivel`: ten CHOICE_PRODUCCION fields
    (presumably 1=alta, 2=media, 3=baja -- confirm against the choice
    definition) whose per-level totals are recomputed on every save.
    """
    planta = models.IntegerField(choices=CHOICE_PLANTAS3)
    uno = models.IntegerField(choices=CHOICE_PRODUCCION,
                              blank=True, null=True)
    dos = models.IntegerField(choices=CHOICE_PRODUCCION,
                              blank=True, null=True)
    tres = models.IntegerField(choices=CHOICE_PRODUCCION,
                               blank=True, null=True)
    cuatro = models.IntegerField(choices=CHOICE_PRODUCCION,
                                 blank=True, null=True)
    cinco = models.IntegerField(choices=CHOICE_PRODUCCION,
                                blank=True, null=True)
    seis = models.IntegerField(choices=CHOICE_PRODUCCION,
                               blank=True, null=True)
    siete = models.IntegerField(choices=CHOICE_PRODUCCION,
                                blank=True, null=True)
    ocho = models.IntegerField(choices=CHOICE_PRODUCCION,
                               blank=True, null=True)
    nueve = models.IntegerField(choices=CHOICE_PRODUCCION,
                                blank=True, null=True)
    # "dies" kept (DB column name) even though "diez" is the correct spelling.
    dies = models.IntegerField(choices=CHOICE_PRODUCCION,
                               blank=True, null=True)
    # Derived counters, filled in by save(); hidden from forms.
    alta = models.IntegerField(editable=False, null=True, blank=True)
    media = models.IntegerField(editable=False, null=True, blank=True)
    baja = models.IntegerField(editable=False, null=True, blank=True)
    ficha = models.ForeignKey(FichaPlaga)

    # Names of the ten per-plant observation fields counted by save().
    _CAMPOS = ('uno', 'dos', 'tres', 'cuatro', 'cinco',
               'seis', 'siete', 'ocho', 'nueve', 'dies')

    def _contar(self, nivel):
        # How many of the ten observation fields equal the given level code.
        return sum(1 for nombre in self._CAMPOS
                   if getattr(self, nombre) == nivel)

    def save(self, *args, **kwargs):
        """Recompute the alta/media/baja counters before persisting."""
        self.alta = self._contar(1)
        self.media = self._contar(2)
        self.baja = self._contar(3)
        super(ObservacionPunto3Nivel, self).save(*args, **kwargs)

    def __unicode__(self):
        return u"Punto3 nivel produccion"
# Pests and diseases observed in the plot; the letter code is what is stored.
CHOICE_ENFERMEDADES = (
    ("A", 'Monilia'),
    ("B", 'Mazorca negra'),
    ("C", 'Mal de machete'),
    ("D", 'Mal de talluelo en el vivero'),
    ("E", 'Barrenadores de tallo'),
    ("F", 'Zompopos'),
    ("G", 'Chupadores o áfidos'),
    ("H", 'Escarabajos'),
    ("J", 'Comején'),
    ("K", 'Minador de la hoja'),
    ("L", 'Lana'),
    ("M", 'Ardilla'),  # display-label typo fixed (was 'Ardillaa'); stored code unchanged
    ("N", 'Bejuco'),
    ("O", 'Tanda'),
)
# Overall pest pressure across the sampling points; the int code is stored.
CHOICE_SITUACION_PLAGAS = (
    (1, 'Varias plagas en todos los puntos'),
    (2, 'Varias plagas en algunos puntos'),
    (3, 'Pocas plagas en todos los puntos'),
    (4, 'Pocas plagas en algunos puntos'),
    (5, 'Una plaga en todos los puntos'),
    (6, 'Una plaga en algunos puntos'),
)
class ProblemasPrincipales(models.Model):
    """Main pest/disease problems identified for the plot."""
    observadas = MultiSelectField(
        verbose_name='Las plagas y enfermedades observadas en la parcela',
        choices=CHOICE_ENFERMEDADES)
    situacion = models.IntegerField(
        choices=CHOICE_SITUACION_PLAGAS, blank=True, null=True)
    principales = MultiSelectField(
        verbose_name='Las plagas y enfermedades principales en la parcela',
        choices=CHOICE_ENFERMEDADES)
    ficha = models.ForeignKey(FichaPlaga)

    def __unicode__(self):
        return u"problemas principales"
# Soil-related observations (letter code stored).
CHOICE_ENFERMEDADES_PUNTO6_1 = (
    ("A", 'Suelo erosionado'),
    ("B", 'Suelo poco fértil'),
    ("C", 'Mucha competencia'),
    ("D", 'Mal drenaje'),
    ("E", 'Falta obras de conservación'),
    ("F", 'Suelo compacto'),
    ("G", 'Suelo con poca MO'),
    ("H", 'No usa abono o fertilizante'),
)
# Shade-related observations (int code stored; single choice).
CHOICE_ENFERMEDADES_PUNTO6_2 = (
    (1, 'Sombra muy densa'),
    (2, 'Sombra muy rala'),
    (3, 'Sombra mal distribuida'),
    (4, 'Arboles de sombra no adecuada'),
    (5, 'Mucha auto-sombra'),
    (6, 'Mucho banano'),
)
# Management-related observations (letter code stored).
CHOICE_ENFERMEDADES_PUNTO6_3 = (
    ("A", 'Poda no adecuada'),
    ("B", 'Piso no manejado'),
    ("C", 'No eliminan mazorcas enfermas'),
    ("D", 'No hay manejo de plagas'),
    ("E", 'Plantas desnutridas'),
    ("F", 'Plantación vieja'),
    ("G", 'Variedades susceptibles'),
    ("H", 'Variedades no productivas'),
)
class Punto6Plagas(models.Model):
    """Point 6: soil, shade and management observations for the pest record."""
    observaciones = MultiSelectField(
        verbose_name='Observaciones de suelo ',
        choices=CHOICE_ENFERMEDADES_PUNTO6_1)
    sombra = models.IntegerField(
        verbose_name="Observaciones de sombra",
        choices=CHOICE_ENFERMEDADES_PUNTO6_2, blank=True, null=True)
    manejo = MultiSelectField(
        verbose_name='Observaciones de manejo ',
        choices=CHOICE_ENFERMEDADES_PUNTO6_3)
    ficha = models.ForeignKey(FichaPlaga)

    def __unicode__(self):
        return u"punto 6"
# Pest/disease management actions (int code stored).
CHOICE_ACCIONES_PUNTO7_1 = (
    (1, 'Recuento de plagas'),
    (2, 'Cortar las mazorcas enfermas'),
    (3, 'Abonar las plantas'),
    (4, 'Aplicar Caldos'),
    (5, 'Aplicar Fungicidas'),
    (6, 'Manejo de sombra'),
    (7, 'Podar las plantas de cacao'),
    (8, 'Aplicar venenos para Zompopo'),
    (9, 'Control de Comején'),
)
# Where the action is applied (whole plot vs. part of it).
CHOICE_ACCIONES_PUNTO7_2 = (
    (1, 'Toda la parcela'),
    (2, 'Alguna parte de la parcela'),
)
class Punto7Plagas(models.Model):
    """Point 7: planned pest/disease management actions and their timing."""
    manejo = models.IntegerField(
        verbose_name="Manejo de plagas y enfermedades",
        choices=CHOICE_ACCIONES_PUNTO7_1, blank=True, null=True)
    parte = models.IntegerField(
        verbose_name="En que parte",
        choices=CHOICE_ACCIONES_PUNTO7_2, blank=True, null=True)
    meses = MultiSelectField(
        verbose_name='En qué meses vamos a realizar el manejo',
        choices=CHOICES_FECHA_PODA)
    ficha = models.ForeignKey(FichaPlaga)

    def __unicode__(self):
        return u"punto 7"
# Tools/equipment available for pest management (letter code stored).
CHOICE_ENFERMEDADES_PUNTO8 = (
    ("A", 'Media Luna'),  # display-label typo fixed (was 'Medial Luna'); stored code unchanged
    ("B", 'Tijera'),
    ("C", 'Serrucho'),
    ("D", 'Bomba de mochila'),
    ("E", 'Barril'),
    ("F", 'Cutacha'),
    ("G", 'No tiene'),
    ("H", 'Coba'),
)
class Punto8y9Plagas(models.Model):
    """Points 8 and 9: available equipment and training for pest management."""
    equipos = MultiSelectField(
        verbose_name='8.¿Tenemos los equipos necesarios para realizar manejo de plagas y enfermedades?',
        choices=CHOICE_ENFERMEDADES_PUNTO8)
    opcion = models.IntegerField(
        verbose_name="9.¿Tenemos la formación para realizar el manejo de plagas y enfermedades?",
        choices=CHOICE_SI_NO, blank=True, null=True)
    ficha = models.ForeignKey(FichaPlaga)

    def __unicode__(self):
        return u"punto 8 y 9"
#------------------------------ end of pest record (ficha de plagas) -------------------------------
class FichaPiso(models.Model):
    """Header record for a ground-cover (piso) field visit."""
    productor = models.ForeignKey(
        Persona, related_name='persona_productor_piso',
        verbose_name='Nombre de productor o productora')
    tecnico = models.ForeignKey(
        Persona, related_name='persona_tecnico_piso',
        verbose_name='Nombre de técnico')
    fecha_visita = models.DateField()

    def __unicode__(self):
        return self.productor.nombre

    class Meta:
        verbose_name = "Ficha piso"
        verbose_name_plural = "Fichas piso"
# Weed/ground-cover types (letter code stored).
CHOICE_PISO1 = (
    ("A", 'Zacates o matas de hoja angosta'),
    ("B", 'Arbustos o plantas de hoja ancha'),
    ("C", 'Coyol o Coyolillo'),
    ("D", 'Bejucos'),
    ("E", 'Tanda'),
    ("F", 'Cobertura de hoja ancha'),
    ("G", 'Cobertura de hoja angosta'),
)
class PisoPunto1(models.Model):
    """Points 1-2: ground-cover weeds observed and those considered harmful."""
    punto1 = MultiSelectField(
        verbose_name='1.¿Cuáles son las hierbas qué cubren el piso y sube sobre las planta de cacao? ',
        choices=CHOICE_PISO1)
    punto2 = MultiSelectField(
        verbose_name='2.¿Cuáles son las hierbas qué usted considera dañino? ',
        choices=CHOICE_PISO1)
    ficha = models.ForeignKey(FichaPiso)

    def __unicode__(self):
        return u"piso 1 y 2"
# Ground-cover management actions (int code stored).
CHOICE_PISO3 = (
    (1, 'Recuento de malezas'),
    (2, 'Chapoda tendida'),
    (3, 'Chapoda selectiva'),
    (4, 'Aplicar herbicidas total'),
    (5, 'Aplicar herbicidas en parches'),
    (6, 'Manejo de bejuco'),
    (7, 'Manejo de tanda'),
    (8, 'Regulación de sombra'),
)
class PisoPunto3(models.Model):
    """Point 3: ground-cover management practices and their schedule."""
    manejo = models.IntegerField(choices=CHOICE_PISO3,
        verbose_name="Manejo de piso",
        blank=True, null=True)
    realiza = models.IntegerField(choices=CHOICE_SI_NO,
        verbose_name="Realiza en manejo",
        blank=True, null=True)
    veces = models.FloatField("Cuantas veces realizan el manejo")
    # Typo fixed in the label below: "realiza" -> "realizar"
    # (matches the sibling fields in Punto7Plagas and PisoPunto8).
    meses = MultiSelectField(choices=CHOICES_FECHA_PODA,
        verbose_name='En qué meses vamos a realizar el manejo')
    ficha = models.ForeignKey(FichaPiso)
    def __unicode__(self):
        return u"punto 3"
# Sources of guidance on weed management (letter code stored).
CHOICE_PISO4 = (
    ("A", 'Técnico'),
    ("B", 'Casa comercial'),
    ("C", 'Cooperativa'),
    ("D", 'Otros productores'),
    ("E", 'Experiencia propia/costumbres'),
    ("F", 'Otros medio de comunicación'),
)
class PisoPunto4(models.Model):
    """Point 4: where the producer's weed-management guidance comes from."""
    manejo = MultiSelectField(
        verbose_name='4.¿De dónde viene su orientación de manejo de malas hierbas?',
        choices=CHOICE_PISO4)
    ficha = models.ForeignKey(FichaPiso)

    def __unicode__(self):
        return u"punto 4"
# Ground-cover states counted during the survey (int code stored).
CHOICE_PISO5 = (
    (1, 'Zacate anual'),
    (2, 'Zacate perene'),
    (3, 'Hoja ancha anual'),
    (4, 'Hoja ancha perenne'),
    (5, 'Ciperácea o Coyolillo'),
    (6, 'Bejucos en suelo'),
    (7, 'Cobertura hoja ancha'),
    (8, 'Cobertura hoja angosta'),
    (9, 'Hojarasca'),
    (10, 'Mulch de maleza'),
    (11, 'Suelo desnudo')
)
class PisoPunto5(models.Model):
    """Point 5: one count per observed ground-cover state."""
    estado = models.IntegerField(
        verbose_name="Estado de Piso",
        choices=CHOICE_PISO5, blank=True, null=True)
    conteo = models.FloatField('Conteo (números)')
    ficha = models.ForeignKey(FichaPiso)

    def __unicode__(self):
        return u"punto 5"
# Weed-vs-cacao competition level (letter code stored).
CHOICE_PISO6_1 = (
    ("A", 'Sin competencia'),
    ("B", 'Media competencia'),
    ("C", 'Alta competencia'),
)
# Overall state of the plot floor (int code stored; single choice).
CHOICE_PISO6_2 = (
    (1, 'Piso cubierto pero compite'),
    (2, 'Piso medio cubierto y compite'),
    (3, 'Piso no cubierto'),
    (4, 'Piso con mucho bejuco'),
    (5, 'Plantas con bejuco'),
    (6, 'Plantas con tanda'),
)
# Types of competing weeds (letter code stored).
CHOICE_PISO6_3 = (
    ("A", 'Zacate anual'),
    ("B", 'Zacate perene'),
    ("C", 'Hoja ancha anual'),
    ("D", 'Hoja ancha perenne'),
    ("E", 'Ciperácea o Coyolillo'),
    ("F", 'Bejucos'),
)
class PisoPunto6(models.Model):
    """Point 6: competition level, floor cover state and competing weed types."""
    manejo = MultiSelectField(
        verbose_name='La competencia entre malas hierbas y las plantas de cacao?',
        choices=CHOICE_PISO6_1)
    estado = models.IntegerField(
        verbose_name="La cobertura del piso de cacaotal",
        choices=CHOICE_PISO6_2, blank=True, null=True)
    maleza = MultiSelectField(
        verbose_name='Tipo de malezas que compiten',
        choices=CHOICE_PISO6_3)
    ficha = models.ForeignKey(FichaPiso)

    def __unicode__(self):
        return u"punto 6"
# Soil observations (letter code stored).
CHOICE_PISO7_1 = (
    ("A", 'Suelo erosionado'),
    ("B", 'Suelo poco fértil'),
    ("C", 'Mal drenaje'),
    ("D", 'Suelo compacto'),
    ("E", 'Suelo con poca MO'),
    ("F", 'No usa abono o fertilizante'),
)
# Shade observations (letter code stored).
CHOICE_PISO7_2 = (
    ("A", 'Sombra muy rala'),
    ("B", 'Sombra mal distribuida'),
    ("C", 'Arboles de sombra no adecuada'),
    ("D", 'Poco banano'),
)
# Management observations (letter code stored).
CHOICE_PISO7_3 = (
    ("A", 'Chapoda no adecuada'),
    ("B", 'Chapoda tardía'),
    ("C", 'No hay manejo selectivo'),
    ("D", 'Plantas desnutridas'),
    ("E", 'Plantación vieja'),
    ("F", 'Mala selección de herbicidas'),
)
class PisoPunto7(models.Model):
    """Point 7: soil, shade and management observations for the floor record."""
    suelo = MultiSelectField(
        verbose_name='Observaciones de suelo ',
        choices=CHOICE_PISO7_1)
    sombra = MultiSelectField(
        verbose_name='Observaciones de sombra',
        choices=CHOICE_PISO7_2)
    manejo = MultiSelectField(
        verbose_name='Observaciones de manejo',
        choices=CHOICE_PISO7_3)
    ficha = models.ForeignKey(FichaPiso)

    def __unicode__(self):
        return u"punto 7"
# Planned floor-management actions (same labels as CHOICE_PISO3; int code stored).
CHOICE_PISO8 = (
    (1, 'Recuento de malezas'),
    (2, 'Chapoda tendida'),
    (3, 'Chapoda selectiva'),
    (4, 'Aplicar herbicidas total'),
    (5, 'Aplicar herbicidas en parches'),
    (6, 'Manejo de bejuco'),
    (7, 'Manejo de tanda'),
    (8, 'Regulación de sombra'),
)
class PisoPunto8(models.Model):
    """Point 8: chosen floor-management action, extent and months."""
    piso = models.IntegerField(
        verbose_name="Manejo de piso",
        choices=CHOICE_PISO8, blank=True, null=True)
    parte = models.IntegerField(
        verbose_name="En que parte",
        choices=CHOICE_ACCIONES_PUNTO7_2, blank=True, null=True)
    meses = MultiSelectField(
        verbose_name='En qué meses vamos a realizar el manejo',
        choices=CHOICES_FECHA_PODA)
    ficha = models.ForeignKey(FichaPiso)

    def __unicode__(self):
        return u"punto 8"
# Equipment available for floor management (letter code stored).
CHOICE_PISO10 = (
    ("A", 'Machete'),
    ("B", 'Pico'),
    ("C", 'Pala'),
    ("D", 'Bomba de mochila'),
    ("E", 'Barril'),
    ("F", 'Cutacha'),
    ("G", 'No tiene'),
    ("H", 'Coba'),
)
class PisoPunto10(models.Model):
    """Points 10-11: equipment and training for floor management."""
    equipo = MultiSelectField(
        verbose_name='10.¿Tenemos los equipos necesarios para realizar manejo de piso?',
        choices=CHOICE_PISO10)
    formacion = models.IntegerField(
        verbose_name="11.¿Tenemos la formación para realizar el manejo de piso?",
        choices=CHOICE_SI_NO, blank=True, null=True)
    ficha = models.ForeignKey(FichaPiso)

    def __unicode__(self):
        return u"punto 10 y 11"
#-------------------------- soil (suelo) record entries ----------------------------------
class FichaSuelo(models.Model):
    """Header record for a soil (suelo) field visit."""
    productor = models.ForeignKey(
        Persona, related_name='persona_productor_suelo',
        verbose_name='Nombre de productor o productora')
    tecnico = models.ForeignKey(
        Persona, related_name='persona_tecnico_suelo',
        verbose_name='Nombre de técnico')
    fecha_visita = models.DateField()

    def __unicode__(self):
        return self.productor.nombre

    class Meta:
        verbose_name = "Ficha suelo"
        verbose_name_plural = "Ficha suelo"
# Previous land use of the plot (int code stored).
CHOICE_SUELO_USO_PARCELA = (
    (1, 'Bosque'),
    (2, 'Potrero'),
    (3, 'Granos básicos'),
    (4, 'Tacotal'),
    (5, 'Cacaotal viejo'),
)
# Productive limitations of the plot's soil (letter code stored).
CHOICE_SUELO_LIMITANTES = (
    ('A', 'Acidez / pH del suelo '),
    ('B', 'Encharcamiento / Mal Drenaje'),
    ('C', 'Enfermedades de raíces '),
    ('D', 'Deficiencia de nutrientes'),
    ('E', 'Baja materia orgánica'),
    ('F', 'Baja actividad biológica y presencia de lombrices'),
    ('G', 'Erosión'),
    ('H', 'Compactación e infiltración de agua'),
)
# Sources of soil-fertility guidance (letter code stored).
CHOICE_SUELO_ORIENTACION = (
    ('A', 'Técnico'),
    ('B', 'Casa comercial'),
    ('C', 'Cooperativa'),
    ('D', 'Otros productores'),
    ('E', 'Experiencia propia/costumbres'),
    ('F', 'Otros medio de comunicación'),
    ('G', 'Análisis de suelo '),
    ('H', 'Otros '),
)
# Where fertilizers/amendments come from (letter code stored).
CHOICE_SUELO_ABONOS = (
    ('A', 'Hecho en finca (compost, estiércol)'),
    ('B', 'Regalados de otra finca (compost, estiércol)'),
    ('C', 'Comprados de otra finca (compost, estiércol)'),
    ('D', 'Comprado de casa comercial'),
    ('E', 'Con crédito de la cooperativa'),
    ('F', 'Incentivos/Regalados'),
    ('G', 'No aplica'),
)
class Punto1Suelo(models.Model):
    """Point 1: management history and observed limitations of the plot."""
    uso_parcela = models.IntegerField(
        verbose_name="Cuál era el uso de la parcela antes de establecer el cacao?",
        choices=CHOICE_SUELO_USO_PARCELA)
    limitante = MultiSelectField(
        verbose_name='Cuáles son los limitantes productivos del suelo de la parcela?',
        choices=CHOICE_SUELO_LIMITANTES)
    orientacion = MultiSelectField(
        verbose_name='Quien su orientación de manejo de fertilidad de suelo?',
        choices=CHOICE_SUELO_ORIENTACION)
    abonos = MultiSelectField(
        verbose_name='4. De donde consigue los abonos, fertilizantes y enmiendas de suelo?',
        choices=CHOICE_SUELO_ABONOS)
    ficha = models.ForeignKey(FichaSuelo)

    def __unicode__(self):
        return u"Punto 1"

    class Meta:
        verbose_name = 'Historial de manejo y limitaciones observados'
        verbose_name_plural = 'Historial de manejo y limitaciones observados'
# Erosion indicators assessed in the field (int code stored).
CHOICE_SUELO_EROSION_OPCION = (
    (1, 'Deslizamientos'),
    (2, 'Evidencia de erosión'),
    (3, 'Cárcavas'),
    (4, 'Área de acumulación de sedimentos'),
    (5, 'Pedregosidad'),
    (6, 'Raíces desnudos'),
)
# Severity scale for each erosion indicator.
CHOICE_SUELO_EROSION_RESPUESTA = (
    (1, 'No presente'),
    (2, 'Algo'),
    (3, 'Severo'),
)
class PuntoASuelo(models.Model):
    """One erosion indicator with its observed severity."""
    opcion = models.IntegerField(
        verbose_name="Indicadores",
        choices=CHOICE_SUELO_EROSION_OPCION)
    respuesta = models.IntegerField(
        verbose_name="respuesta",
        choices=CHOICE_SUELO_EROSION_RESPUESTA)
    ficha = models.ForeignKey(FichaSuelo)

    def __unicode__(self):
        return u"Indicadores de erosión"

    class Meta:
        verbose_name = 'Indicadores de erosión'
        verbose_name_plural = 'Indicadores de erosión'
# Soil-conservation works assessed in the field (int code stored).
CHOICE_SUELO_CONSERVACION_OPCION = (
    (1, 'Barrera muertas'),
    (2, 'Barrera Viva'),
    (3, 'Siembra en Curvas a Nivel'),
    (4, 'Terrazas'),
    (5, 'Cobertura de piso'),
)
# Condition scale for each conservation work.
CHOICE_SUELO_CONSERVACION_RESPUESTA = (
    (1, 'No presente'),
    (2, 'En mal estado'),
    (3, 'En buen estado'),
)
class PuntoBSuelo(models.Model):
    """One soil-conservation work with its observed condition."""
    opcion = models.IntegerField(
        verbose_name="Obras",
        choices=CHOICE_SUELO_CONSERVACION_OPCION)
    respuesta = models.IntegerField(
        verbose_name="respuesta",
        choices=CHOICE_SUELO_CONSERVACION_RESPUESTA)
    ficha = models.ForeignKey(FichaSuelo)

    def __unicode__(self):
        return u"Obras de conservación de suelo"

    class Meta:
        verbose_name = 'Obras de conservación de suelo'
        verbose_name_plural = 'Obras de conservación de suelo'
# Drainage-problem indicators (int code stored).
CHOICE_SUELO_DRENAJE_OPCION = (
    (1, 'Encharcamientos'),
    (2, 'Amarillamiento/mal crecimiento'),
    (3, 'Enfermedades (phytophthora)'),
)
class Punto2ASuelo(models.Model):
    """One drainage indicator with its severity (reuses the erosion scale)."""
    opcion = models.IntegerField(
        verbose_name="Indicadores",
        choices=CHOICE_SUELO_DRENAJE_OPCION)
    respuesta = models.IntegerField(
        verbose_name="respuesta",
        choices=CHOICE_SUELO_EROSION_RESPUESTA)
    ficha = models.ForeignKey(FichaSuelo)

    def __unicode__(self):
        return u"Indicadores de drenaje"

    class Meta:
        verbose_name = 'Indicadores de drenaje'
        verbose_name_plural = 'Indicadores de drenaje'
# Drainage works assessed in the field (int code stored).
CHOICE_SUELO_DRENAJE_OPCION2 = (
    (1, 'Acequias'),
    (2, 'Canales de drenaje a lo largo y ancho de la parcela'),
    (3, 'Canales de drenaje alrededor de las plantas'),
    (4, 'Canales a lado de la parcela'),
    (5, 'Cobertura de piso'),
)
class Punto2BSuelo(models.Model):
    """One drainage work with its condition (reuses the conservation scale)."""
    opcion = models.IntegerField(
        verbose_name="Indicadores",
        choices=CHOICE_SUELO_DRENAJE_OPCION2)
    respuesta = models.IntegerField(
        verbose_name="respuesta",
        choices=CHOICE_SUELO_CONSERVACION_RESPUESTA)
    ficha = models.ForeignKey(FichaSuelo)

    def __unicode__(self):
        return u"Obras de drenaje"

    class Meta:
        verbose_name = 'Obras de drenaje'
        verbose_name_plural = 'Obras de drenaje'
# Root-damage indicators shared by the three root-health sampling points.
CHOICE_SUELO_OPCION_PUNTOS = (
    (1, 'Severidad de daño de nematodos'),
    (2, 'Severidad de daño de hongos'),
)
# Damage-severity scale for the root-health indicators.
CHOICE_SUELO_RESPUESTA_PUNTOS = (
    (1, 'No Afectado'),
    (2, 'Afectado'),
    (3, 'Muy Afectados'),
    (4, 'Severamente afectados'),
)
class Punto3SueloPunto1(models.Model):
    """Root-health assessment at sampling point 1."""
    opcion = models.IntegerField(
        verbose_name="Indicadores",
        choices=CHOICE_SUELO_OPCION_PUNTOS)
    respuesta = models.IntegerField(
        verbose_name="respuesta",
        choices=CHOICE_SUELO_RESPUESTA_PUNTOS)
    ficha = models.ForeignKey(FichaSuelo)

    def __unicode__(self):
        return u"Punto 1"

    class Meta:
        verbose_name = 'Salud de Raíces punto 1'
        verbose_name_plural = 'Salud de Raíces punto 1'
class Punto3SueloPunto2(models.Model):
    """Root-health assessment at sampling point 2."""
    opcion = models.IntegerField(
        verbose_name="Indicadores",
        choices=CHOICE_SUELO_OPCION_PUNTOS)
    respuesta = models.IntegerField(
        verbose_name="respuesta",
        choices=CHOICE_SUELO_RESPUESTA_PUNTOS)
    ficha = models.ForeignKey(FichaSuelo)

    def __unicode__(self):
        return u"Punto 2"

    class Meta:
        verbose_name = 'Salud de Raíces punto 2'
        verbose_name_plural = 'Salud de Raíces punto 2'
class Punto3SueloPunto3(models.Model):
    """Root-health assessment at sampling point 3."""
    opcion = models.IntegerField(
        verbose_name="Indicadores",
        choices=CHOICE_SUELO_OPCION_PUNTOS)
    respuesta = models.IntegerField(
        verbose_name="respuesta",
        choices=CHOICE_SUELO_RESPUESTA_PUNTOS)
    ficha = models.ForeignKey(FichaSuelo)

    def __unicode__(self):
        return u"Punto 3"

    class Meta:
        verbose_name = 'Salud de Raíces punto 3'
        verbose_name_plural = 'Salud de Raíces punto 3'
class Punto4Suelo(models.Model):
    """Point 4: plot area and planting density for the nutrient balance."""
    area = models.FloatField(
        verbose_name='Tamaño de Área de Cacao SAF (en manzanas)')
    densidad = models.FloatField(
        verbose_name='Densidad de Arboles de Cacao en parcela SAF (por manzana)')
    ficha = models.ForeignKey(FichaSuelo)

    def __unicode__(self):
        return u"Balance de nutrientes de parcela Cacao SAF"

    class Meta:
        verbose_name = 'Balance de nutrientes de parcela Cacao SAF'
        verbose_name_plural = 'Balance de nutrientes de parcela Cacao SAF'
# Harvested products and their reporting units (int code stored).
CHOICE_SUELO_PRODUCTO_COSECHA = (
    (1, 'Cacao Grano Seco - (qq/mz/año)'),
    (2, 'Leña - (cargas de 125lb /mz/año)'),
    (3, 'Cabezas de Banano - (cabezas/mz/año)'),
)
class Punto4SueloCosecha(models.Model):
    """One harvested product and its yearly quantity for the SAF plot."""
    producto = models.IntegerField(choices=CHOICE_SUELO_PRODUCTO_COSECHA)
    cantidad = models.FloatField()
    ficha = models.ForeignKey(FichaSuelo)

    def __unicode__(self):
        return u"Cosechas del Productos SAF"

    class Meta:
        verbose_name = 'Cosechas del Productos SAF'
        verbose_name_plural = 'Cosechas del Productos SAF'
class Punto4SueloSI(models.Model):
    """Yes/no: are cacao husks returned to the plot as fertilizer?"""
    opcion = models.IntegerField(choices=CHOICE_SI_NO)
    ficha = models.ForeignKey(FichaSuelo)

    def __unicode__(self):
        return u"Se regresa la cascara a la parcela como abono?"

    class Meta:
        verbose_name = 'Se regresa la cascara a la parcela como abono?'
        verbose_name_plural = 'Se regresa la cascara a la parcela como abono?'
class TipoFertilizantes(models.Model):
    """Catalog of fertilizer types referenced by the application records."""
    nombre = models.CharField(max_length=250)

    def __unicode__(self):
        return u'%s' % (self.nombre)
CHOICE_UNIDAD_MEDIDA_ABONO = ((1,'lb/mz'),(2,'lb/planta '),(3,'oz/planta'),(4,'L/mz'),(5, 'qq/mz'))
class Punto5SueloAbonos(models.Model):
    """Point 5: fertilizers/amendments currently applied to the SAF plot."""
    tipo = models.ForeignKey(TipoFertilizantes)
    cantidad = models.FloatField('Cantidad(Valor)')
    unidad = models.IntegerField(choices=CHOICE_UNIDAD_MEDIDA_ABONO)
    humedad = models.FloatField('Humedad (%)')
    frecuencia = models.FloatField('Frecuencia (por año)')
    meses = MultiSelectField(
        verbose_name='Meses de aplicación', choices=CHOICES_FECHA_PODA)
    ficha = models.ForeignKey(FichaSuelo)

    def __unicode__(self):
        return u"Abonos, fertilizantes y Enmiendas aplicadas en la parcela cacao SAF"

    class Meta:
        verbose_name = 'Abonos, fertilizantes y Enmiendas aplicadas en la parcela cacao SAF'
        verbose_name_plural = 'Abonos, fertilizantes y Enmiendas aplicadas en la parcela cacao SAF'
class DatosAnalisis(models.Model):
    """Catalog of soil-analysis variables with their unit and critical value."""
    variable = models.CharField(max_length=250)
    unidad = models.CharField(max_length=250)
    valor_critico = models.FloatField()

    def __unicode__(self):
        return self.variable
class Punto6AnalisisSuelo(models.Model):
    """Point 6: one measured value per soil-analysis variable."""
    variable = models.ForeignKey(DatosAnalisis)
    valor = models.FloatField()
    ficha = models.ForeignKey(FichaSuelo)

    def __unicode__(self):
        return u"Datos de análisis de suelo"

    class Meta:
        verbose_name = 'Datos de análisis de suelo'
        verbose_name_plural = 'Datos de análisis de suelo'
class Punto7TipoSuelo(models.Model):
    """Point 7: soil classification of the plot."""
    # Inline choices; the int code is what gets stored.
    opcion = models.IntegerField(choices=(
        (1,'Ultisol (rojo)'),
        (2, 'Andisol (volcánico)'),
        (3, 'Vertisol'),))
    ficha = models.ForeignKey(FichaSuelo)

    def __unicode__(self):
        return u"Tipo de suelo"

    class Meta:
        verbose_name = 'Tipo de suelo'
        verbose_name_plural = 'Tipo de suelo'
class Punto8SueloPropuesta(models.Model):
    """Point 8: the newly proposed fertilization plan for the plot."""
    tipo = models.ForeignKey(TipoFertilizantes)
    cantidad = models.FloatField('Cantidad(Valor)')
    unidad = models.IntegerField(choices=CHOICE_UNIDAD_MEDIDA_ABONO)
    frecuencia = models.FloatField('Frecuencia (por año)')
    meses = MultiSelectField(
        verbose_name='Meses de aplicación', choices=CHOICES_FECHA_PODA)
    ficha = models.ForeignKey(FichaSuelo)

    def __unicode__(self):
        return u"Nueva Propuesta de Fertilización Generada"

    class Meta:
        verbose_name = 'Nueva Propuesta de Fertilización Generada'
        verbose_name_plural = 'Nueva Propuesta de Fertilización Generada'
# Point 9 decision tables: each limitation has a single-entry choice tuple
# plus a tuple of potential actions (letter codes stored).
CHOICE_PUNTO9_LIMITACION_1 = (
    (1, 'Erosión de Suelo'),
)
CHOICE_PUNTO9_LIMITACION_1_ACCION = (
    ('A', 'Barrera viva'),
    ('B', 'Cobertura de suelo'),
    ('C', 'Barrera Muerta'),
    ('D', 'Siembra en Curvas a Nivel'),
    ('E', 'Terrazas'),
)
CHOICE_PUNTO9_LIMITACION_2 = (
    # Display-label typo fixed (was 'encharamientos'); stored code unchanged.
    (1, 'Mal drenaje y encharcamientos'),
)
CHOICE_PUNTO9_LIMITACION_2_ACCION = (
    ('A', 'Acequias'),
    ('B', 'Canales de drenaje de larga'),
    ('C', 'Canales de drenaje alrededor de la parcela'),
)
CHOICE_PUNTO9_LIMITACION_3 = (
    (1, 'Deficiencia de Nutrientes'),
)
CHOICE_PUNTO9_LIMITACION_3_ACCION = (
    ('A', 'Aplicar abonos orgánicos'),
    ('B', 'Aplicar abonos minerales'),
)
CHOICE_PUNTO9_LIMITACION_4 = (
    (1, 'Exceso de nutrientes'),
)
CHOICE_PUNTO9_LIMITACION_4_ACCION = (
    ('A', 'Bajar nivel de fertilización'),
)
CHOICE_PUNTO9_LIMITACION_5 = (
    (1, 'Desbalance de nutrientes'),
)
CHOICE_PUNTO9_LIMITACION_5_ACCION = (
    ('A', 'Ajustar programa de fertilización '),
)
CHOICE_PUNTO9_LIMITACION_6 = (
    (1, 'Enfermedades y plagas de raíces'),
)
CHOICE_PUNTO9_LIMITACION_6_ACCION = (
    ('A', 'Abonos orgánicos'),
    ('B', 'Obras de drenaje'),
    ('C', 'Aplicación de ceniza'),
)
# Extent of application shared by all Punto9 models.
CHOICE_PUNTO9_DONDE = (
    (1, 'En todo parcela'),
    (2, 'En algunas partes'),
)
class Punto9Erosion(models.Model):
    """Point 9 decision record for soil erosion."""
    limitaciones = models.IntegerField(choices=CHOICE_PUNTO9_LIMITACION_1)
    acciones = MultiSelectField(choices=CHOICE_PUNTO9_LIMITACION_1_ACCION, verbose_name='Acciones potenciales')
    donde = models.IntegerField(choices=CHOICE_PUNTO9_DONDE)
    ficha = models.ForeignKey(FichaSuelo)
    def __unicode__(self):
        # Typo fixed: "eroción" -> "erosión" (consistent with the Meta names).
        return u"Toma de decisión con base en las observaciones de erosión"
    class Meta:
        verbose_name = 'Erosión de Suelo'
        verbose_name_plural = 'Erosión de Suelo'
class Punto9Drenaje(models.Model):
    """Point 9 decision record for poor drainage / waterlogging."""
    limitaciones = models.IntegerField(choices=CHOICE_PUNTO9_LIMITACION_2)
    acciones = MultiSelectField(choices=CHOICE_PUNTO9_LIMITACION_2_ACCION, verbose_name='Acciones potenciales')
    donde = models.IntegerField(choices=CHOICE_PUNTO9_DONDE)
    ficha = models.ForeignKey(FichaSuelo)
    def __unicode__(self):
        return u"Toma de decisión con base en las observaciones de mal drenaje"
    class Meta:
        # Typo fixed: "encharamientos" -> "encharcamientos" (cf. CHOICE_SUELO_LIMITANTES).
        verbose_name = 'Mal drenaje y encharcamientos'
        verbose_name_plural = 'Mal drenaje y encharcamientos'
class Punto9Nutrientes(models.Model):
    """Point 9 decision record for nutrient deficiency."""
    limitaciones = models.IntegerField(choices=CHOICE_PUNTO9_LIMITACION_3)
    acciones = MultiSelectField(
        verbose_name='Acciones potenciales',
        choices=CHOICE_PUNTO9_LIMITACION_3_ACCION)
    donde = models.IntegerField(choices=CHOICE_PUNTO9_DONDE)
    ficha = models.ForeignKey(FichaSuelo)

    def __unicode__(self):
        return u"Toma de decisión con base en las observaciones de deficiencia nutrientes"

    class Meta:
        verbose_name = 'Deficiencia de Nutrientes'
        verbose_name_plural = 'Deficiencia de Nutrientes'
class Punto9Exceso(models.Model):
    """Point 9 decision record for nutrient excess."""
    limitaciones = models.IntegerField(choices=CHOICE_PUNTO9_LIMITACION_4)
    acciones = MultiSelectField(
        verbose_name='Acciones potenciales',
        choices=CHOICE_PUNTO9_LIMITACION_4_ACCION)
    donde = models.IntegerField(choices=CHOICE_PUNTO9_DONDE)
    ficha = models.ForeignKey(FichaSuelo)

    def __unicode__(self):
        return u"Toma de decisión con base en las observaciones de exceso de nutrientes"

    class Meta:
        verbose_name = 'Exceso de nutrientes'
        verbose_name_plural = 'Exceso de nutrientes'
class Punto9Desbalance(models.Model):
    """Section 9 decision record based on nutrient-imbalance observations, linked to a soil form (FichaSuelo)."""
    limitaciones = models.IntegerField(choices=CHOICE_PUNTO9_LIMITACION_5)
    acciones = MultiSelectField(choices=CHOICE_PUNTO9_LIMITACION_5_ACCION, verbose_name='Acciones potenciales')
    donde = models.IntegerField(choices=CHOICE_PUNTO9_DONDE)
    ficha = models.ForeignKey(FichaSuelo)
    def __unicode__(self):
        return u"Toma de decisión con base en las observaciones de desbalance de nutrientes"
    class Meta:
        verbose_name = 'Desbalance de nutrientes'
        verbose_name_plural = 'Desbalance de nutrientes'
class Punto9Enfermedades(models.Model):
    """Section 9 decision record based on root pest/disease observations, linked to a soil form (FichaSuelo)."""
    limitaciones = models.IntegerField(choices=CHOICE_PUNTO9_LIMITACION_6)
    acciones = MultiSelectField(choices=CHOICE_PUNTO9_LIMITACION_6_ACCION, verbose_name='Acciones potenciales')
    donde = models.IntegerField(choices=CHOICE_PUNTO9_DONDE)
    ficha = models.ForeignKey(FichaSuelo)
    def __unicode__(self):
        return u"Toma de decisión con base en las observaciones de enfermedades y plagas"
    class Meta:
        verbose_name = 'Enfermedades y plagas de raíces'
        verbose_name_plural = 'Enfermedades y plagas de raíces'
#------------ fin ficha suelo ---------------------------------
#-------------------- comienza ficha viviero ------------------
class FichaVivero(models.Model):
    """Header record for a nursery (vivero) visit form: producer, technician and visit date."""
    productor = models.ForeignKey(Persona,
        verbose_name='Nombre de productor o productora',
        related_name='persona_productor_vivero')
    tecnico = models.ForeignKey(Persona,
        verbose_name='Nombre de técnico',
        related_name='persona_tecnico_vivero')
    fecha_visita = models.DateField()
    def __unicode__(self):
        return self.productor.nombre
    class Meta:
        verbose_name = "Ficha vivero"
        verbose_name_plural = "Ficha vivero"
# Choice sets for part I of the nursery form ("conversation with the producer").
CHOICE_VIVERO_CONVERSACION_1 = (
    ('A', 'Enero'),
    ('B', 'Febrero'),
    ('C', 'Marzo'),
    ('D', 'Abril'),
    ('E', 'Mayo'),
    ('F', 'Junio'),
    ('G', 'Julio'),
    ('H', 'Agosto'),
    ('I', 'Septiembre'),
    ('J', 'Octubre'),
    ('K', 'Noviembre'),
    ('L', 'Diciembre'),
)
CHOICE_VIVERO_CONVERSACION_2 = (
    ('A', 'En este momento hay buena semilla'),
    ('B', 'En este momento hay suficiente agua'),
    ('C', 'En este momento hay menos plagas'),
    ('D', 'Nos permite para tener plantas listas para sembrar en el invierno'),
)
CHOICE_VIVERO_CONVERSACION_3 = (
    ('A', 'Buena altura'),
    ('B', 'Tallo fuerte'),
    ('C', 'Buena formación horqueta'),
    ('D', 'Ramas principales robustas'),
    ('E', 'Buena producción de frutos (más de 40 frutos por planta)'),
    ('F', 'Alta tolerancia a plagas y enfermedades'),
    ('G', 'Más de 40 almendras dentro de la mazorca'),
)
CHOICE_VIVERO_CONVERSACION_4 = (
    ('A', 'Corte de mazorca madura'),
    ('B', 'Extracción de almendras'),
    ('C', 'Selección de almendras de mayor tamaño'),
    ('D', 'Remoción de mucilago o baba'),
    ('E', 'Empaque en bolsas plásticas con aserrín semi-húmedo'),
    ('F', 'Toma en cuenta fases de la luna'),
)
CHOICE_VIVERO_CONVERSACION_5 = (
    ('A', 'Soleando la tierra'),
    ('B', 'Aplicando agua caliente'),
    ('C', 'Aplicando cal o ceniza'),
    ('D', 'Aplicando venenos'),
    ('E', 'No desinfecta'),
)
CHOICE_VIVERO_CONVERSACION_6 = (
    (1, 'Sola tierra'),
    (2, 'Tierra + Arena'),
    (3, 'Tierra + Abono orgánico (compost)'),
    (4, 'Tierra + abono orgánico + Cal o ceniza'),
    (5, 'Tierra + Arena + Cal o Ceniza + Abono orgánico'),
)
CHOICE_VIVERO_CONVERSACION_7 = (
    (1, 'Bolsa de 6 X 8 pulgadas '),
    (2, 'Bolsa de 8 X 10 pulgadas'),
    (3, 'Bolsa de 10 X 12 pulgadas'),
)
CHOICE_VIVERO_CONVERSACION_8 = (
    (1, 'Acostado u horizontal'),
    (2, 'Parado o Vertical'),
    (3, 'De cualquier manera'),
)
CHOICE_VIVERO_CONVERSACION_9 = (
    ('A', 'Cerca de fuentes de agua'),
    ('B', 'Cercado protegido de animales'),
    ('C', 'Terreno plano'),
    ('D', 'Con buena orientación de los bancos (Este-Oeste)'),
    ('E', 'Con sombra natural'),
    ('F', 'Con ramada'),
)
CHOICE_VIVERO_CONVERSACION_10 = (
    (1, 'Injerto de yema'),
    (2, 'Injerto de cogollo'),
    (3, 'Ninguno'),
)
CHOICE_VIVERO_CONVERSACION_12 = (
    (1, 'De la misma finca'),
    (2, 'De finca vecina'),
    (3, 'De Jardín Clonal'),
)
class VivieroConversacion(models.Model):
    """Producer interview, questions 1-6 of the nursery form (planting months, seed prep, substrate)."""
    conversacion1 = MultiSelectField(choices=CHOICE_VIVERO_CONVERSACION_1,
        verbose_name='1.¿En qué meses del año planifica o construye viveros para producción de plantas de cacao?')
    conversacion2 = MultiSelectField(choices=CHOICE_VIVERO_CONVERSACION_2,
        verbose_name='2.¿Por qué hace vivero en estos meses?')
    conversacion3 = MultiSelectField(choices=CHOICE_VIVERO_CONVERSACION_3,
        verbose_name='3.¿Cuáles son características más deseables para una planta productiva?')
    conversacion4 = MultiSelectField(choices=CHOICE_VIVERO_CONVERSACION_4,
        verbose_name='4.¿Qué pasos realiza para la preparación de semillas de cacao?')
    conversacion5 = MultiSelectField(choices=CHOICE_VIVERO_CONVERSACION_5,
        verbose_name='5.¿Con qué desinfecta el suelo para el vivero?')
    conversacion6 = models.IntegerField(choices=CHOICE_VIVERO_CONVERSACION_6,
        verbose_name='¿Cómo prepara el sustrato para la producción de plantas de cacao en vivero?')
    ficha = models.ForeignKey(FichaVivero)
    def __unicode__(self):
        return u"Conversación con el Productor o productora"
    # class Meta:
    #     verbose_name = 'I.Conversación con el Productor o productora'
    #     verbose_name_plural = 'I.Conversación con el Productor o productora'
# Seed-origin options for question 14 (added after the original form revision).
CHOICE_VIVERO_NUEVO_CONVERSACION2 = ((1,'Misma finca'),(2,'Del jardin clonal'),(3, 'Otras fuentes'))
class ViveroConversacion2(models.Model):
    """Producer interview, questions 7-14 of the nursery form (bags, grafting, seed origin)."""
    conversacion7 = models.IntegerField(choices=CHOICE_VIVERO_CONVERSACION_7,
        verbose_name='¿Qué tamaño de bolsa de polietileno utiliza para la producción de plantas en vivero?')
    conversacion8 = models.IntegerField(choices=CHOICE_VIVERO_CONVERSACION_8,
        verbose_name='¿Cómo coloca la semilla en el sustrato en la bolsa de polietileno?')
    conversacion9 = MultiSelectField(choices=CHOICE_VIVERO_CONVERSACION_9,
        verbose_name='¿Cómo es el sitio del vivero?')
    conversacion10 = MultiSelectField(choices=CHOICE_VIVERO_CONVERSACION_10,
        verbose_name=' ¿Qué tipo de injerto ha realizado?')
    conversacion11 = models.FloatField('¿Cuál ha sido el porcentaje de prendimiento?', null=True)
    conversacion12 = MultiSelectField(choices=CHOICE_VIVERO_CONVERSACION_12,
        verbose_name='¿De dónde obtiene las varetas para realizar los injertos?')
    conversacion13 = models.FloatField('¿Cuanto meses se mantiene la plata en el vivero?', null=True, blank=True)
    conversacion14 = models.IntegerField(choices=CHOICE_VIVERO_NUEVO_CONVERSACION2,
        verbose_name='¿De donde obtiene las semillas?', null=True, blank=True)
    ficha = models.ForeignKey(FichaVivero)
    def __unicode__(self):
        return u"Conversación con el Productor o productora 2"
#observaciones
# Seed-source options for the first nursery observation.
CHOICER_VIVIERO_FUENTE_SEMILLA = ((1,'De la misma finca'),(2,'De finca vecina'),(3,'De Jardín Clonal'))
class VivieroObservacion1(models.Model):
    """Nursery observation 1: plant count, plant age (months) and seed source."""
    observacion1 = models.FloatField('Cantidad de las plantas')
    observacion2 = models.FloatField('Edad de las plantas en meses')
    observacion3 = models.IntegerField(choices=CHOICER_VIVIERO_FUENTE_SEMILLA,
        verbose_name='Fuente de semilla')
    ficha = models.ForeignKey(FichaVivero)
    def __unicode__(self):
        return u"Observación del vivero 1"
    # NOTE(review): blank verbose names suppress the admin section title — confirm intentional.
    class Meta:
        verbose_name = ''
        verbose_name_plural = ''
# Pest/disease catalogue and yes/no options for the per-plant nursery observation.
CHOICE_VIVERO_PLAGAS_ENFERMEDADES = (
    (1, 'Zompopo'),
    (2, 'Barrenador'),
    (3, 'Minador'),
    (4, 'Tizón'),
    (5, 'Antracnosis'),
    (6, 'Mal de talluelo'),
    (7, 'Áfidos'),
    (8, 'Gusanos'),
    (9, 'Deficiencia nutricional'),
)
CHOICE_VIVERO_SI_NO = (
    (1, 'Si'),
    (2, 'No'),
)
class VivieroObservacion2(models.Model):
    """Nursery observation 2: per-plant presence (Si/No) of one pest or disease
    across 10 sampled plants.

    ``total_si`` is a denormalized count of "Si" (== 1) answers, recomputed on
    every save so reports never read a stale value.
    """
    observacion3 = models.IntegerField(choices=CHOICE_VIVERO_PLAGAS_ENFERMEDADES,
                                       verbose_name='Plaga o enfermedad')
    planta_1 = models.IntegerField(choices=CHOICE_VIVERO_SI_NO)
    planta_2 = models.IntegerField(choices=CHOICE_VIVERO_SI_NO)
    planta_3 = models.IntegerField(choices=CHOICE_VIVERO_SI_NO)
    planta_4 = models.IntegerField(choices=CHOICE_VIVERO_SI_NO)
    planta_5 = models.IntegerField(choices=CHOICE_VIVERO_SI_NO)
    planta_6 = models.IntegerField(choices=CHOICE_VIVERO_SI_NO)
    planta_7 = models.IntegerField(choices=CHOICE_VIVERO_SI_NO)
    planta_8 = models.IntegerField(choices=CHOICE_VIVERO_SI_NO)
    planta_9 = models.IntegerField(choices=CHOICE_VIVERO_SI_NO)
    planta_10 = models.IntegerField(choices=CHOICE_VIVERO_SI_NO)
    ficha = models.ForeignKey(FichaVivero)
    # Derived field; never edited by hand (see save()).
    total_si = models.IntegerField(editable=False, null=True, blank=True)

    def __unicode__(self):
        return u"Observación del vivero 2"

    def save(self, *args, **kwargs):
        """Recompute total_si (number of planta_N fields equal to 1) then persist."""
        # Replaces ten copy-pasted `if self.planta_N == 1` blocks with one loop;
        # the resulting count is identical.
        self.total_si = sum(
            1 for n in range(1, 11)
            if getattr(self, 'planta_%d' % n) == 1
        )
        super(VivieroObservacion2, self).save(*args, **kwargs)
    # class Meta:
    #     verbose_name = 'Presencia de plagas y enfermedades'
    #     verbose_name_plural = 'Presencia de plagas y enfermedades'
class ProductosVivero(models.Model):
    """Catalogue of products applied in the nursery (referenced by VivieroObservacionProductos)."""
    nombre = models.CharField(max_length=250)
    def __unicode__(self):
        return self.nombre
    # class Meta:
    #     verbose_name = 'Productos para el vivero'
    #     verbose_name_plural = 'Productos para el vivero'
# Dosage-unit options for products applied in the nursery.
CHOICE_VIVERO_UNIDAD_PRODUCTOS = ((1,'Onz/planta'),(2,'Lt/bombada'),(3,'onz/bomba'),)
class VivieroObservacionProductos(models.Model):
    """Nursery observation 3: product applied, dose, unit and application frequency."""
    producto = models.ForeignKey(ProductosVivero, verbose_name='Nombre')
    cantidad = models.FloatField()
    unidad = models.IntegerField(choices=CHOICE_VIVERO_UNIDAD_PRODUCTOS)
    frecuencia = models.FloatField()
    ficha = models.ForeignKey(FichaVivero)
    def __unicode__(self):
        return u"Observación del vivero 3"
# Choice sets for the nursery situation-analysis section.
CHOICE_VIVERO_ANALISIS_1 = (
    ('A', 'Ningún problema'),
    ('B', 'Proveniente de plantas con baja productividad'),
    ('C', 'Posiblemente con alta incompatibilidad'),
)
CHOICE_VIVERO_ANALISIS_2 = (
    ('A', 'Ningún problema'),
    ('B', 'Planta desuniforme'),
    ('C', 'Plantas con poco vigor'),
    ('D', 'Plantas con deficiencia nutricionales'),
    ('E', 'Mal manejo de riego'),
    ('F', 'Mal manejo de sombra'),
)
CHOICE_VIVERO_ANALISIS_3 = (
    ('A', 'Zompopo'),
    ('B', 'Barrenador'),
    ('C', 'Minador'),
    ('D', 'Tizón'),
    ('E', 'Antracnosis'),
    ('F', 'Mal de talluelo'),
    ('G', 'Áfidos'),
    ('H', 'Gusanos'),
)
CHOICE_VIVERO_ANALISIS_4 = (
    ('A', 'Mejorar la fuente de semilla'),
    ('B', 'Mezclar las 9 fuentes de semilla'),
    ('C', 'Mejorar el sustrato en las bolsas'),
    ('D', 'Mejorar el manejo de plagas'),
    ('E', 'Mejorar el manejo de nutrición'),
    ('F', 'Mejorar el riego y sombra'),
)
class VivieroAnalisisSituacion(models.Model):
    """Situation analysis and planned improvement actions for the nursery."""
    analisis1 = MultiSelectField(choices=CHOICE_VIVERO_ANALISIS_1,
        verbose_name='¿Cuáles son los problemas de la semilla?')
    analisis2 = MultiSelectField(choices=CHOICE_VIVERO_ANALISIS_2,
        verbose_name='¿Cuáles son los problemas las plantas?')
    analisis3 = MultiSelectField(choices=CHOICE_VIVERO_ANALISIS_3,
        verbose_name='¿Cuáles son los problemas de plagas y enfermedades?')
    analisis4 = MultiSelectField(choices=CHOICE_VIVERO_ANALISIS_4,
        verbose_name='¿Qué acciones vamos a realizar para mejorar el vivero?')
    ficha = models.ForeignKey(FichaVivero)
    def __unicode__(self):
        return u"Análisis de la situación y acciones en el vivero"
#-------- fin de ficha vivero -----------------------
#--------- inicia ficha cosecha ----------------------
class FichaCosecha(models.Model):
    """Header record for a harvest (cosecha) visit form: producer, technician and visit date."""
    productor = models.ForeignKey(
        Persona,
        verbose_name='Nombre de productor o productora',
        related_name='persona_productor_cosecha')
    tecnico = models.ForeignKey(
        Persona,
        verbose_name='Nombre de técnico',
        related_name='persona_tecnico_cosecha')
    fecha_visita = models.DateField()
    def __unicode__(self):
        return self.productor.nombre
    class Meta:
        verbose_name = "Ficha cosecha"
        verbose_name_plural = "Ficha cosecha"
# Choice sets for questions 1.1-1.4 of the harvest form.
CHOICE_COSECHA_CONVERSACION_1 = (
    ('A', 'Por el color'),
    ('B', 'Por el tamaño'),
    ('C', 'Por la textura'),
    ('D', 'Por la fecha'),
)
CHOICE_COSECHA_CONVERSACION_2 = (
    ('A', 'Media Luna'),
    ('B', 'Cutacha'),
    ('C', 'Machete'),
    ('D', 'Tijera'),
)
CHOICE_COSECHA_CONVERSACION_3 = (
    ('A', 'Rechazar mazorcas enfermas'),
    ('B', 'Rechazar mazorcas dañadas'),
    ('C', 'Rechazar mazorcas sobremaduras'),
    ('D', 'Rechazar mazorcas inmaduras'),
    ('E', 'Rechazar mazorcas pequeñas'),
    ('F', 'Seleccionar mazorcas maduras'),
    ('G', 'Seleccionar mazorcas de buena calidad'),
)
CHOICE_COSECHA_CONVERSACION_4 = (
    ('A', 'Media Luna'),
    ('B', 'Cutacha'),
    ('C', 'Machete'),
    ('D', 'Maso'),
)
class CosechaConversacion1(models.Model):
    """Producer interview, questions 1.1-1.4 of the harvest form (ripeness, tools, selection)."""
    conversacion1 = MultiSelectField(choices=CHOICE_COSECHA_CONVERSACION_1,
        verbose_name='1.1-¿Cómo se determina qué la mazorca está madura para cosecha? ')
    conversacion2 = MultiSelectField(choices=CHOICE_COSECHA_CONVERSACION_2,
        verbose_name='1.2-¿Qué herramientas utiliza para el corte de las mazorcas maduras? ')
    conversacion3 = MultiSelectField(choices=CHOICE_COSECHA_CONVERSACION_3,
        verbose_name='1.3-¿Qué criterios toma en cuenta para la selección de mazorcas antes del quiebre? ')
    conversacion4 = MultiSelectField(choices=CHOICE_COSECHA_CONVERSACION_4,
        verbose_name='1.4-¿Qué herramientas utiliza para el quiebre de las mazorcas seleccionadas? ')
    ficha = models.ForeignKey(FichaCosecha)
    def __unicode__(self):
        return u"Conversación con la productora o el productor 1"
# Choice sets for questions 1.5-1.8 of the harvest form.
CHOICE_COSECHA_CONVERSACION_5 = (
    ('A', 'Bolsa plástica'),
    ('B', 'Bidón o Balde'),
    ('C', 'Saco Macen'),
    ('D', 'Saco de yute'),
    ('E', 'Cajón de madera'),
)
CHOICE_COSECHA_CONVERSACION_7 = (
    ('A', 'Entierra las mazorcas'),
    ('B', 'Botan las mazorcas sin enterrar'),
    ('C', 'Queman las mazorcas'),
)
CHOICE_COSECHA_CONVERSACION_8 = (
    (1, 'Cada mes'),
    (2, 'Cada quince días'),
    (3, 'Depende de la maduración'),
)
class CosechaConversacion2(models.Model):
    """Producer interview, questions 1.5-1.8 of the harvest form (storage, transport, diseased pods)."""
    conversacion5 = MultiSelectField(choices=CHOICE_COSECHA_CONVERSACION_5,
        verbose_name='1.5-¿Qué tipo de almacenamiento emplea después del quiebre de las mazorcas de cacao? ')
    conversacion6 = models.FloatField('1.6-¿Cuánto tiempo tarda en llevar el cacao en baba al centro de acopio?')
    conversacion7 = MultiSelectField(choices=CHOICE_COSECHA_CONVERSACION_7,
        verbose_name='1.7-¿Qué manejo realiza con las mazorcas de cacao enfermas? ')
    conversacion8 = models.IntegerField(choices=CHOICE_COSECHA_CONVERSACION_8,
        verbose_name='1.8-¿Cada cuánto realizan los cortes? ')
    ficha = models.ForeignKey(FichaCosecha)
    def __unicode__(self):
        return u"Conversación con la productora o el productor 2"
# Month choices (reused by harvest and SAF forms) and flowering-intensity levels.
CHOICE_COSECHA_9_MESES = (
    (1, 'Enero'),
    (2, 'Febrero'),
    (3, 'Marzo'),
    (4, 'Abril'),
    (5, 'Mayo'),
    (6, 'Junio'),
    (7, 'Julio'),
    (8, 'Agosto'),
    (9, 'Septiembre'),
    (10, 'Octubre'),
    (11, 'Noviembre'),
    (12, 'Diciembre'),
)
CHOICE_COSECHA_9_FLORACION = (
    (1, 'No hay flores'),
    (2, 'Poca flores'),
    (3, 'Algo de flores'),
    (4, 'Mucha flores'),
)
class CosechaMesesFloracion(models.Model):
    """Per-month flowering intensity record for the harvest form (question 1.9)."""
    mes = models.IntegerField(choices=CHOICE_COSECHA_9_MESES,
        verbose_name='Meses')
    floracion = models.IntegerField(choices=CHOICE_COSECHA_9_FLORACION,
        verbose_name='Floración')
    ficha = models.ForeignKey(FichaCosecha)
    def __unicode__(self):
        return u"¿Cuáles son las meses de mayor floración? "
# Harvest-intensity levels for the per-month harvest record.
CHOICE_COSECHA_10_COSECHA = (
    (1, 'No hay Cosecha'),
    (2, 'Poca cosecha'),
    (3, 'Algo de cosecha'),
    (4, 'Mucha cosecha'),
)
class CosechaMesesCosecha(models.Model):
    """Per-month harvest intensity record for the harvest form (question 1.10).

    Mirrors CosechaMesesFloracion but uses the harvest-level choice set.
    NOTE(review): the intensity field is still named ``floracion`` — renaming it
    would require a schema migration, so only the display text is corrected here.
    """
    mes = models.IntegerField(choices=CHOICE_COSECHA_9_MESES,
                              verbose_name='Meses')
    floracion = models.IntegerField(choices=CHOICE_COSECHA_10_COSECHA,
                                    verbose_name='Cosecha')
    ficha = models.ForeignKey(FichaCosecha)

    def __unicode__(self):
        # Fix copy-paste from CosechaMesesFloracion: this model records harvest
        # months, not flowering months.
        return u"¿Cuáles son los meses de mayor cosecha? "
# Pod categories counted at each sampling point (sections 2.1-2.3).
CHOICE_COSECHA_ESTIMADO_PUNTOS = (
    (1, 'Número de mazorcas sanas'),
    (2, 'Número de mazorcas enfermas'),
    (3, 'Número de mazorcas dañadas'),
)
class CosechaPunto1(models.Model):
    """Sampling point 1 (section 2.1): per-plant pod counts for one pod category
    across 10 plants.

    ``total_platas`` (sum of the ten counts) and ``contador`` (number of
    non-negative counts) are denormalized and recomputed on every save.
    """
    mazorcas = models.IntegerField(choices=CHOICE_COSECHA_ESTIMADO_PUNTOS,
                                   verbose_name='Mazorcas')
    planta_1 = models.FloatField()
    planta_2 = models.FloatField()
    planta_3 = models.FloatField()
    planta_4 = models.FloatField()
    planta_5 = models.FloatField()
    planta_6 = models.FloatField()
    planta_7 = models.FloatField()
    planta_8 = models.FloatField()
    planta_9 = models.FloatField()
    planta_10 = models.FloatField()
    # Derived fields; never edited by hand (see save()).
    total_platas = models.FloatField(editable=False, null=True, blank=True)
    contador = models.IntegerField(editable=False, default=0, null=True, blank=True)
    ficha = models.ForeignKey(FichaCosecha)

    def save(self, *args, **kwargs):
        """Recompute total_platas and contador from the ten planta_N fields, then persist."""
        # Gather the ten per-plant counts once; replaces ten copy-pasted
        # if-blocks with a single loop (same arithmetic, same result).
        valores = [getattr(self, 'planta_%d' % n) for n in range(1, 11)]
        self.total_platas = sum(valores)
        # Original rule preserved: a plant is tallied when its count is >= 0
        # (with required FloatFields this is normally all 10 plants).
        self.contador = sum(1 for v in valores if v >= 0)
        super(CosechaPunto1, self).save(*args, **kwargs)

    def __unicode__(self):
        return u"2.1 Punto 1"
class CosechaPunto2(models.Model):
    """Sampling point 2 (section 2.2): per-plant pod counts for one pod category
    across 10 plants.

    ``total_platas`` (sum of the ten counts) and ``contador`` (number of
    non-negative counts) are denormalized and recomputed on every save.
    """
    mazorcas = models.IntegerField(choices=CHOICE_COSECHA_ESTIMADO_PUNTOS,
                                   verbose_name='Mazorcas')
    planta_1 = models.FloatField()
    planta_2 = models.FloatField()
    planta_3 = models.FloatField()
    planta_4 = models.FloatField()
    planta_5 = models.FloatField()
    planta_6 = models.FloatField()
    planta_7 = models.FloatField()
    planta_8 = models.FloatField()
    planta_9 = models.FloatField()
    planta_10 = models.FloatField()
    # Derived fields; never edited by hand (see save()).
    total_platas = models.FloatField(editable=False, null=True, blank=True)
    contador = models.IntegerField(editable=False, default=0, null=True, blank=True)
    ficha = models.ForeignKey(FichaCosecha)

    def save(self, *args, **kwargs):
        """Recompute total_platas and contador from the ten planta_N fields, then persist."""
        # Gather the ten per-plant counts once; replaces ten copy-pasted
        # if-blocks with a single loop (same arithmetic, same result).
        valores = [getattr(self, 'planta_%d' % n) for n in range(1, 11)]
        self.total_platas = sum(valores)
        # Original rule preserved: a plant is tallied when its count is >= 0.
        self.contador = sum(1 for v in valores if v >= 0)
        super(CosechaPunto2, self).save(*args, **kwargs)

    def __unicode__(self):
        return u"2.2 Punto 2"
class CosechaPunto3(models.Model):
    """Sampling point 3 (section 2.3): per-plant pod counts for one pod category
    across 10 plants.

    ``total_platas`` (sum of the ten counts) and ``contador`` (number of
    non-negative counts) are denormalized and recomputed on every save.
    """
    mazorcas = models.IntegerField(choices=CHOICE_COSECHA_ESTIMADO_PUNTOS,
                                   verbose_name='Mazorcas')
    planta_1 = models.FloatField()
    planta_2 = models.FloatField()
    planta_3 = models.FloatField()
    planta_4 = models.FloatField()
    planta_5 = models.FloatField()
    planta_6 = models.FloatField()
    planta_7 = models.FloatField()
    planta_8 = models.FloatField()
    planta_9 = models.FloatField()
    planta_10 = models.FloatField()
    # Derived fields; never edited by hand (see save()).
    total_platas = models.FloatField(editable=False, null=True, blank=True)
    contador = models.IntegerField(editable=False, default=0, null=True, blank=True)
    ficha = models.ForeignKey(FichaCosecha)

    def save(self, *args, **kwargs):
        """Recompute total_platas and contador from the ten planta_N fields, then persist."""
        # Gather the ten per-plant counts once; replaces ten copy-pasted
        # if-blocks with a single loop (same arithmetic, same result).
        valores = [getattr(self, 'planta_%d' % n) for n in range(1, 11)]
        self.total_platas = sum(valores)
        # Original rule preserved: a plant is tallied when its count is >= 0.
        self.contador = sum(1 for v in valores if v >= 0)
        super(CosechaPunto3, self).save(*args, **kwargs)

    def __unicode__(self):
        return u"2.3 Punto 3"
class CosechaAreaPlantas(models.Model):
    """Plot area (manzanas) and plant density for the harvest estimate."""
    area = models.FloatField('Área de la parcela (en mz)')
    plantas = models.FloatField('Número de plantas por mz')
    ficha = models.ForeignKey(FichaCosecha)
    def __unicode__(self):
        return u"Area y número de platas"
# Choice sets for section 3 (harvest analysis and actions).
CHOICE_COSECHA_ANALISIS_1 = (
    ('A', 'Pocas plantas productivas'),
    ('B', 'Muchas mazorcas enfermas'),
    ('C', 'Muchas mazorcas dañadas'),
    ('D', 'Muchas mazorcas pequeñas'),
    ('E', 'Muchas mazorcas con pocos granos'),
    ('F', 'Muchos granos pequeños'),
)
CHOICE_COSECHA_ANALISIS_2 = (
    ('A', 'Mazorcas enfermas'),
    ('B', 'Mazorcas dañadas'),
    ('C', 'Mazorcas pequeñas'),
)
CHOICE_COSECHA_ANALISIS_3 = (
    ('A', 'Remover las mazorcas enfermas a tiempo'),
    ('B', 'Establecer control de las ardillas'),
    ('C', 'Mejorar la nutrición de las plantas'),
    ('D', 'Realizar poda de las plantas de cacao'),
    ('E', 'Regular la sombra'),
    ('F', 'Cosechar a tiempo'),
    ('G', 'Reponer las plantas no productivas con plantas productivas'),
)
class CosechaAnalisis(models.Model):
    """Harvest analysis, questions 3.1-3.3 (main problem, cause, improvement practices)."""
    analisis1 = MultiSelectField(choices=CHOICE_COSECHA_ANALISIS_1,
        verbose_name='3.1-¿Cuál es el problema principal que afecta el rendimiento productivo de la parcela de cacao?')
    analisis2 = MultiSelectField(choices=CHOICE_COSECHA_ANALISIS_2,
        verbose_name='3.2-¿Cuál es la causa de la pérdida de producción en la parcela de cacao? ')
    analisis3 = MultiSelectField(choices=CHOICE_COSECHA_ANALISIS_3,
        verbose_name='3.3-¿Qué prácticas se pueden realizar en la parcela de cacao para mejorar la cosecha? ')
    ficha = models.ForeignKey(FichaCosecha)
    def __unicode__(self):
        return u"Análisis sobre la cosecha y acciones"
# ---------------- fin ficha cosecha ---------------------------------
# ---------------- inicia ficha saf ----------------------------------
class FichaSaf(models.Model):
    """Header record for an agroforestry-system (SAF) visit form: producer, technician and visit date."""
    productor = models.ForeignKey(
        Persona,
        verbose_name='Nombre de productor o productora',
        related_name='persona_productor_saf')
    tecnico = models.ForeignKey(
        Persona,
        verbose_name='Nombre de técnico',
        related_name='persona_tecnico_saf')
    fecha_visita = models.DateField()
    def __unicode__(self):
        return self.productor.nombre
    class Meta:
        verbose_name = "Ficha saf"
        verbose_name_plural = "Ficha saf"
# Choice sets for SAF questions 1.1-1.2.
CHOICE_SAF_1_1 = (
    ('A', 'Producción convencional con uso intensivo de químicos'),
    ('B', 'Producción orgánica con insumos naturales'),
    ('C', 'Producción agroecológica y diversificada'),
    ('D', 'Producción especializada según el tipo de mercado'),
)
CHOICE_SAF_1_2 = (
    ('A', 'Producción de cacao'),
    ('B', 'Producción de frutas'),
    ('C', 'Producción de madera'),
    ('D', 'Conservación de suelo y agua'),
    ('E', 'Aumento de ingresos'),
    ('F', 'Generar empleo'),
    ('G', 'Diversidad natural'),
    ('H', 'Otros beneficios'),
)
class SafConversacion1(models.Model):
    """SAF interview, questions 1.1-1.2 (objective and expected benefits of the agroforestry system)."""
    conversacion1 = MultiSelectField(choices=CHOICE_SAF_1_1,
        verbose_name='1.1¿Cuál fue su objetivo de establecer el cultivo de cacao en sistema agroforestales?')
    conversacion2 = MultiSelectField(choices=CHOICE_SAF_1_2,
        verbose_name='1.2¿Qué beneficios esperaban del sistema agroforestal en su parcela de cacao?')
    ficha = models.ForeignKey(FichaSaf)
    def __unicode__(self):
        return u"Conversación 1"
# Rainfall-intensity levels for question 1.3.
CHOICE_SAF_1_3 = (
    (1, 'Nada de lluvia'),
    (2, 'Poca lluvia'),
    (3, 'Algo de lluvia'),
    (4, 'Mucha lluvia'),
)
class SafConversacion2(models.Model):
    """SAF question 1.3: rainfall intensity per month (one row per month)."""
    conversacion3 = models.IntegerField(choices=CHOICE_COSECHA_9_MESES,
        verbose_name='Meses')
    conversacion4 = models.IntegerField(choices=CHOICE_SAF_1_3,
        verbose_name='Opciones')
    ficha = models.ForeignKey(FichaSaf)
    def __unicode__(self):
        return u"1.3¿Cuáles son meses más lluviosos en su finca?"
# Wind-intensity levels for question 1.4.
CHOICE_SAF_1_4 = (
    (1, 'Nada de viento'),
    (2, 'Poco viento'),
    (3, 'Algo de viento'),
    (4, 'Mucho viento'),
)
class SafConversacion3(models.Model):
    """SAF question 1.4: wind intensity per month (one row per month)."""
    # NOTE(review): field names conversacion3/conversacion4 repeat those of
    # SafConversacion2 — different tables, so no conflict, but confirm intentional.
    conversacion3 = models.IntegerField(choices=CHOICE_COSECHA_9_MESES,
        verbose_name='Meses')
    conversacion4 = models.IntegerField(choices=CHOICE_SAF_1_4,
        verbose_name='Opciones')
    ficha = models.ForeignKey(FichaSaf)
    def __unicode__(self):
        return u"1.4¿Cuáles son meses más ventosos en su finca?"
# Field-labour options for question 1.5 (when rain/wind drive task timing).
CHOICE_SAF_1_5 = (
    (1, 'Establecer el vivero'),
    (2, 'Limpieza de terreno'),
    (3, 'Siembra de cacao'),
    (4, 'Establecer la sombra'),
    (5, 'Poda de cacao'),
    (6, 'Manejo de sombra'),
    (7, 'Deshierba'),
    (8, 'Abonar'),
    (9, 'Foliar'),
    (10, 'Deschuponar'),
    (11, 'Cortar mazorcas enfermas'),
    (12, 'Cosecha y Quiebre'),
)
class SafConversacion4(models.Model):
    """SAF question 1.5: labour type and the months it is scheduled (uses shared CHOICES_FECHA_PODA)."""
    conversacion5 = models.IntegerField(choices=CHOICE_SAF_1_5,
        verbose_name='Opcion')
    conversacion6 = MultiSelectField(choices=CHOICES_FECHA_PODA,
        verbose_name='Opciones')
    ficha = models.ForeignKey(FichaSaf)
    def __unicode__(self):
        return u"1.5¿Cómo toma en cuenta lluvia y viento para decidir los momentos de las labores de sistema agroforestal?"
# Topography and fertility options for the soil-characteristics question.
CHOICE_SAF_1_5_TOPOGRAFIA = (
    (1, 'Terreno plano'),
    (2, 'Terreno con poco pendiente'),
    (3, 'Terreno con mucho pendiente'),
)
CHOICE_SAF_1_5_FERTILIDAD = (
    (1, 'Suelo fértil'),
    (2, 'Suelo poco fértil'),
    (3, 'Suelo degradado y compacto'),
)
class SafConversacion5(models.Model):
    """SAF question 1.5 (soil): plot topography and perceived fertility."""
    conversacion7 = models.IntegerField(choices=CHOICE_SAF_1_5_TOPOGRAFIA,
        verbose_name='Topografía')
    conversacion8 = models.IntegerField(choices=CHOICE_SAF_1_5_FERTILIDAD,
        verbose_name='Fertilidad')
    ficha = models.ForeignKey(FichaSaf)
    def __unicode__(self):
        return u"1.5¿Cómo son las características del suelo y su fertilidad?"
# Criteria choice sets for selecting shade trees (timber, fruit, service).
CHOICE_SAF_1_6_MADERABLE = (
    ('A', 'Que tenga buena altura'),
    ('B', 'Que no tenga hojas en el verano'),
    ('C', 'Que tenga hojas en el verano '),
    ('D', 'Que tenga crecimiento rápido '),
    ('E', 'Que tenga una sombre no muy densa '),
    ('F', 'Que tenga valor comercial '),
    ('G', 'Que es fácil para podar '),
)
CHOICE_SAF_1_6_FRUTALES = (
    ('A', 'Que produce buenos elementos '),
    ('B', 'Que ayuda a manejar el daño de pájaros y ardillas'),
    ('C', 'Que tenga resistencia a plagas '),
    ('D', 'Que tenga una sombre no muy densa'),
    ('E', 'Que tenga valor comercial'),
    ('F', 'Que es fácil para manejar'),
)
CHOICE_SAF_1_6_SERVICIOS = (
    ('A', 'Que produce más y mejor hojarasca '),
    ('B', 'Que las hojas dan nutrientes'),
    ('C', 'Que no compiten con cacao'),
    ('D', 'Que dan buena sombra'),
    ('E', 'Que tienen hojas todo el tiempo'),
    ('F', 'Que producen leña'),
    ('G', 'Que tenga uso medicinal'),
    ('H', 'Que adapte bien en la zona '),
)
class SafConversacion6(models.Model):
    """SAF question: criteria for choosing companion trees (timber, fruit, service)."""
    conversacion9 = MultiSelectField(choices=CHOICE_SAF_1_6_MADERABLE,
        verbose_name='Para escoger a los árboles maderable ')
    conversacion10 = MultiSelectField(choices=CHOICE_SAF_1_6_FRUTALES,
        verbose_name='Para escoger a los árboles frutales')
    conversacion11 = MultiSelectField(choices=CHOICE_SAF_1_6_SERVICIOS,
        verbose_name='Para escoger a los árboles que proveen servicios')
    ficha = models.ForeignKey(FichaSaf)
    def __unicode__(self):
        return u"¿Cuáles son sus criterio para escoger los árboles para acompañar el cacao?"
# Cacao growth stages and desired shade levels for question 1.6.
CHOICE_SAF_1_6_ETAPA = (
    (1, 'Crecimiento vegetativo'),
    (2, 'Floración'),
    (3, 'Cuajado y maduración'),
    (4, 'Cosecha'),
)
CHOICE_SAF_1_6_NIVEL_SOMBRA = (
    (1, 'Sin sombra'),
    (2, 'Poca Sombra'),
    (3, 'Media sombra'),
    (4, 'Mucha sombra'),
)
class SafConversacion7(models.Model):
    """SAF question 1.6: desired shade level per cacao growth stage and its months."""
    conversacion12 = models.IntegerField(choices=CHOICE_SAF_1_6_ETAPA,
        verbose_name='Etapas')
    conversacion13 = MultiSelectField(choices=CHOICES_FECHA_PODA,
        verbose_name='Meses que ocurren')
    conversacion14 = models.IntegerField(choices=CHOICE_SAF_1_6_NIVEL_SOMBRA,
        verbose_name='Nivel de sombra')
    ficha = models.ForeignKey(FichaSaf)
    def __unicode__(self):
        return u"1.6¿Cómo quisiera tener la sombra en diferentes momentos de vida de cacao?"
# Shade-related problems and their perceived cause for question 1.7.
CHOICE_SAF_1_7_PROBLEMAS = (
    (1, 'Poca floración'),
    (2, 'Presencia de malezas'),
    (3, 'Presencia de Monilia'),
    (4, 'Presencia de mazorca negra'),
    (5, 'Baja producción'),
    (6, 'Daño de ardillas'),
)
CHOICE_SAF_1_7_CAUSA_PROBLEMAS = (
    (1, 'Poca Sombra'),
    (2, 'Mucha Sombra'),
)
class SafConversacion8(models.Model):
    """SAF question 1.7: shade-related problem and whether too little/too much shade causes it."""
    conversacion15 = models.IntegerField(choices=CHOICE_SAF_1_7_PROBLEMAS,
        verbose_name='Problemas')
    conversacion16 = models.IntegerField(choices=CHOICE_SAF_1_7_CAUSA_PROBLEMAS,
        verbose_name='Que causa el problema')
    ficha = models.ForeignKey(FichaSaf)
    def __unicode__(self):
        return u"1.7¿Cuál es la percepción de los problemas en relación a la sombra?"
# Choice sets for SAF questions 1.8-1.11 (seed origin and cacao varieties).
CHOICE_SAF_1_8 = (
    (1, 'De propia finca'),
    (2, 'De propia finca árboles élites'),
    (3, 'De finca vecina'),
    (4, 'De jardines clónales'),
    (5, 'De afuera del territorio '),
)
CHOICE_SAF_1_9 = (
    ('A', 'Cacao criollo'),
    ('B', 'Cacao forastero'),
    ('C', 'Cacao Trinitario'),
    ('D', 'Cacao híbrido'),
    ('E', 'Clones de cacao'),
    ('F', 'No sabe'),
)
CHOICE_SAF_1_10 = (
    ('A', 'Cacao criollo'),
    ('B', 'Cacao forastero'),
    ('C', 'Cacao Trinitario'),
    ('D', 'Cacao híbrido'),
    ('E', 'Clones de cacao'),
    ('F', 'Cacao rojo'),
    ('G', 'No sabe'),
)
CHOICE_SAF_1_11 = (
    ('A', 'Cacao criollo'),
    ('B', 'Cacao forastero'),
    ('C', 'Cacao Trinitario'),
    ('D', 'Cacao híbrido'),
    ('E', 'Clones de cacao'),
    ('F', 'Cacao rojo'),
    ('G', 'No sabe'),
)
class SafConversacion9(models.Model):
    """SAF interview, questions 1.8-1.11 (seed origin and plantation variety history)."""
    conversacion17 = models.IntegerField(choices=CHOICE_SAF_1_8,
        verbose_name='1.8¿De dónde obtuvo la semilla para establecer la plantación de cacao? ')
    conversacion18 = MultiSelectField(choices=CHOICE_SAF_1_9,
        verbose_name='1.9¿Con que tipo de cacao se estableció la plantación de cacao? ')
    conversacion19 = MultiSelectField(choices=CHOICE_SAF_1_10,
        verbose_name='1.10¿Cuáles son las variedades de cacao tolerantes a las enfermedades? ')
    conversacion20 = MultiSelectField(choices=CHOICE_SAF_1_11,
        verbose_name='1.11¿Qué tipo de variedades le han recomendado para resiembra y en nuevas plantaciones de cacao? ')
    ficha = models.ForeignKey(FichaSaf)
    def __unicode__(self):
        return u"18,19,20"
# Soil-life observation topics (earthworm count, peroxide effervescence) and result levels.
CHOICE_SAF_2_TEMA1 = (
    (1, 'Cantidad de lombrices/250 cm2'),
)
CHOICE_SAF_2_TEMA2 = (
    (1, 'Grado de efervescencia con prueba de Agua Oxigenada'),
)
CHOICE_SAF_2_OPCIONES = (
    (1, 'Baja'),
    (2, 'Media'),
    (3, 'Alta'),
)
class SafObservaciones(models.Model):
    """Soil-life observation 1: earthworm counts at four sampling points."""
    observacion1 = models.IntegerField(choices=CHOICE_SAF_2_TEMA1,
        verbose_name='Tema')
    observacion2 = models.FloatField('Punto 1')
    observacion3 = models.FloatField('Punto 2')
    observacion4 = models.FloatField('Punto 3')
    observacion5 = models.FloatField('Punto 4')
    ficha = models.ForeignKey(FichaSaf)
    def __unicode__(self):
        return u"Calidad de vida de suelo 1"
class SafObservaciones2(models.Model):
    """Soil-life observation 2: effervescence level (low/medium/high) at four sampling points."""
    observacion1 = models.IntegerField(choices=CHOICE_SAF_2_TEMA2,
        verbose_name='Tema')
    observacion2 = models.IntegerField(choices=CHOICE_SAF_2_OPCIONES,
        verbose_name='Punto 1')
    observacion3 = models.IntegerField(choices=CHOICE_SAF_2_OPCIONES,
        verbose_name='Punto 2')
    observacion4 = models.IntegerField(choices=CHOICE_SAF_2_OPCIONES,
        verbose_name='Punto 3')
    observacion5 = models.IntegerField(choices=CHOICE_SAF_2_OPCIONES,
        verbose_name='Punto 4')
    ficha = models.ForeignKey(FichaSaf)
    def __unicode__(self):
        return u"Calidad de vida de suelo 2"
# Soil-status verdicts and improvement practices for questions 2.2-2.3.
CHOICE_SAF_OBSERVACION_2_2 = (
    (1, 'Bueno y apto para cacao'),
    (2, 'Regular necesita enmienda para mejorar'),
    (3, 'Malo y no apto para Cacao'),
    (4,'Degradado y compacto no apto para cacao')
)
CHOICE_SAF_OBSERVACION_2_3 = (
    ('A', 'Promover o sembrar cobertura'),
    ('B', 'Sembrar árboles que provee buena hojarasca'),
    ('C', 'Utilizar materiales de poda de sombra y cacao'),
    ('D', 'Utilizar materiales de banano'),
    ('E', 'Utilizar abono verde'),
    ('F', 'Utilizar abono orgánico'),
)
class SafObservaciones3(models.Model):
    """SAF questions 2.2-2.3: soil-status verdict and practices to improve soil life."""
    observacion6 = models.IntegerField(choices=CHOICE_SAF_OBSERVACION_2_2,
        verbose_name='2.2Según lo observado en las pruebas de suelo cómo valora es estado de suelo')
    observacion7 = MultiSelectField(choices=CHOICE_SAF_OBSERVACION_2_3,
        verbose_name='2.3¿Qué prácticas se pueden hacer en el suelo de su parcela de aprendizaje para mejorar el la vida de suelo?')
    ficha = models.ForeignKey(FichaSaf)
    def __unicode__(self):
        return u"Observacion 2.2 y 2.3"
class SafObservacionPunto1(models.Model):
especies = models.ForeignKey(Especies)
cantidad = models.FloatField()
lena = models.FloatField('Para leña')
nutrientes = models.FloatField('Para nutrientes')
frutas = models.FloatField('Para Frutas')
madera = models.FloatField('Para Madera')
sombra = models.FloatField('Para sombra')
ficha = models.ForeignKey(FichaSaf)
def __unicode__(self):
return u"Punto 1"
class SafObservacionPunto2(models.Model):
    """Species observed at sampling point 2 (same schema as point 1)."""
    especies = models.ForeignKey(Especies)
    cantidad = models.FloatField()
    lena = models.FloatField('Para leña')
    nutrientes = models.FloatField('Para nutrientes')
    frutas = models.FloatField('Para Frutas')
    madera = models.FloatField('Para Madera')
    sombra = models.FloatField('Para sombra')
    ficha = models.ForeignKey(FichaSaf)
    def __unicode__(self):
        return u"Punto 2"
class SafObservacionPunto3(models.Model):
    """Species observed at sampling point 3 (same schema as point 1)."""
    especies = models.ForeignKey(Especies)
    cantidad = models.FloatField()
    lena = models.FloatField('Para leña')
    nutrientes = models.FloatField('Para nutrientes')
    frutas = models.FloatField('Para Frutas')
    madera = models.FloatField('Para Madera')
    sombra = models.FloatField('Para sombra')
    ficha = models.ForeignKey(FichaSaf)
    def __unicode__(self):
        return u"Punto 3"
# Choices for questions 2.5 - 2.8 of the SAF sheet (plantation layout/design).
CHOICE_SAF_OBSERVACION_2_5 = (
    (1, 'Cuadrado'),
    (2, 'Rectangular'),
    (3, 'Tres bolillos'),
    (4, 'Sin arreglo')
)
CHOICE_SAF_OBSERVACION_2_6 = (
    (1, 'Demasiado árboles y mucha sombra'),
    (2, 'Muy poca árboles y poca sombra'),
    (3, 'Plantas de cacao y otros árboles compiten'),
    (4, 'No hay problema y arreglo esta bien')
)
CHOICE_SAF_OBSERVACION_2_7 = (
    (1, 'Cacao + maderable + musáceas + pejibaye'),
    (2, 'Cacao + musáceas + cultivos anuales'),
    (3, 'Cacao + maderables + musáceas'),
    (4, 'Cacao + musáceas + leguminosa + maderables'),
    (5, 'Cacao + musáceas + leguminosa + maderables+ frutales'),
)
CHOICE_SAF_OBSERVACION_2_8 = (
    ('A', 'Mejorar la producción de cacao'),
    ('B', 'Diversificar la producción e ingreso'),
    ('C', 'Producir más alimento'),
    ('D', 'Producir leña'),
    ('E', 'Producir madera'),
    ('F', 'Mejorar la conservación de Recursos naturales'),
)
class SafObservaciones4(models.Model):
    """Answers to questions 2.5 - 2.8 of the SAF sheet (design decisions)."""
    observacion8 = models.IntegerField(choices=CHOICE_SAF_OBSERVACION_2_5,
        verbose_name='2.5 ¿Cómo es el arreglo de la plantación?')
    observacion9 = models.IntegerField(choices=CHOICE_SAF_OBSERVACION_2_6,
        verbose_name='2.6 ¿Qué dificultades le ha generado su diseño actual de plantación de cacao?')
    observacion10 = models.IntegerField(choices=CHOICE_SAF_OBSERVACION_2_7,
        verbose_name='2.7 ¿Cuál sería el diseño para mejorar el sistema agroforestal cacao? ')
    observacion11 = MultiSelectField(choices=CHOICE_SAF_OBSERVACION_2_8,
        verbose_name='2.8 ¿Por qué toma la decisión de establecer el diseño seleccionado?')
    ficha = models.ForeignKey(FichaSaf)
    def __unicode__(self):
        return u"Observacion 2.5 al 2.8"
#--------------------- fin ficha saf -----------
#---------------------- Ficha Cierre -----------
class FichaCierre(models.Model):
    """Header record for the closing ("cierre") visit sheet.

    All Cierre* detail models below hang off this via their `ficha` FK.
    """
    productor = models.ForeignKey(
        Persona,
        verbose_name='Nombre de productor o productora',
        related_name='persona_productor_cierre')
    tecnico = models.ForeignKey(
        Persona,
        verbose_name='Nombre de técnico',
        related_name='persona_tecnico_cierre')
    fecha_visita = models.DateField()
    def __unicode__(self):
        return self.productor.nombre
    class Meta:
        verbose_name = "Ficha cierre"
        verbose_name_plural = "Ficha cierre"
# Section 1.1 (shade management): observation / planned / done / results choices.
CHOICE_CIERRE_1_1_IMPACTO = (
    ('A', 'Tipo de árboles y cantidad'),
    ('B', 'Mucha sombra de los árboles'),
    ('C', 'Poca sombra de los árboles'),
    ('D', 'Efecto de sombra sobre las plagas y enfermedades'),
    ('E', 'Efecto de sombra sobre la producción'),
    ('F', 'Ninguna'),
)
CHOICE_CIERRE_1_1_PLANIFICADA = (
    ('A', 'Regulación de sombra'),
    ('B', 'Eliminación de árboles'),
    ('C', 'Sembrar árboles'),
    ('D', 'Eliminar musaceas'),
    ('E', 'Sembrar musaceas y sombra temporal'),
    ('F', 'Ninguna'),
    ('G', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_1_REALIZADA = (
    ('A', 'Regulación de sombra'),
    ('B', 'Eliminación de árboles'),
    ('C', 'Sembrar árboles'),
    ('D', 'Eliminar musaceas'),
    ('E', 'Sembrar musaceas y sombra temporal'),
    ('F', 'Ninguna'),
    ('G', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_1_RESULTADOS = (
    ('A', 'Aumento de producción'),
    ('B', 'Mejor control de malas hierbas'),
    ('C', 'Reducción de enfermedades'),
    ('D', 'Eliminar musaceas'),
    ('E', 'Ninguna'),
)
class CierreManejo1(models.Model):
    """Closing sheet section 1.1 - shade ("sombra") management."""
    campo1 = MultiSelectField(choices=CHOICE_CIERRE_1_1_IMPACTO,
        verbose_name='Observación que impacto')
    campo2 = MultiSelectField(choices=CHOICE_CIERRE_1_1_PLANIFICADA,
        verbose_name='Acciones planificadas')
    campo3 = MultiSelectField(choices=CHOICE_CIERRE_1_1_REALIZADA,
        verbose_name='Acciones realizadas')
    campo4 = MultiSelectField(choices=CHOICE_CIERRE_1_1_RESULTADOS,
        verbose_name='Resultados obtenidos', null=True, blank=True)
    ficha = models.ForeignKey(FichaCierre)
    def __unicode__(self):
        return u"1.1"
    class Meta:
        verbose_name='1.1 Sombra'
        verbose_name_plural='1.1 Sombra'
# Section 1.2 (pruning): observation / planned / done / results choices.
CHOICE_CIERRE_1_2_IMPACTO = (
    ('A', 'Altura y ancho de plantas de cacao'),
    ('B', 'Falta de horquetas'),
    ('C', 'Muchas ramas bajeras y entrecruzadas'),
    ('D', 'Poca penetración de luz'),
    ('E', 'Relación entre poda y productividad'),
    ('F', 'Ninguna'),
)
CHOICE_CIERRE_1_2_PLANIFICADA = (
    ('A', 'Descope de las plantas'),
    ('B', 'Poda de las ramas entrecruzadas'),
    ('C', 'Eliminar los chupones'),
    ('D', 'Formar horquetas'),
    ('E', 'Eliminar ramas bajeras'),
    ('F', 'Ninguna'),
    ('G', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_2_REALIZADA = (
    ('A', 'Descope de las plantas'),
    ('B', 'Poda de las ramas entrecruzadas'),
    ('C', 'Eliminar los chupones'),
    ('D', 'Formar horquetas'),
    ('E', 'Eliminar ramas bajeras'),
    ('F', 'Ninguna'),
    ('G', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_2_RESULTADOS = (
    ('A', 'Aumento de producción'),
    ('B', 'Mejor entrada de luz'),
    ('C', 'Reducción de enfermedades'),
    ('D', 'Ninguna'),
)
class CierreManejo2(models.Model):
    """Closing sheet section 1.2 - pruning ("poda") management."""
    campo1 = MultiSelectField(choices=CHOICE_CIERRE_1_2_IMPACTO,
        verbose_name='Observación que impacto')
    campo2 = MultiSelectField(choices=CHOICE_CIERRE_1_2_PLANIFICADA,
        verbose_name='Acciones planificadas')
    campo3 = MultiSelectField(choices=CHOICE_CIERRE_1_2_REALIZADA,
        verbose_name='Acciones realizadas')
    campo4 = MultiSelectField(choices=CHOICE_CIERRE_1_2_RESULTADOS,
        verbose_name='Resultados obtenidos', null=True, blank=True)
    ficha = models.ForeignKey(FichaCierre)
    def __unicode__(self):
        return u"1.2"
    class Meta:
        verbose_name='1.2 Poda'
        verbose_name_plural='1.2 Poda'
# Section 1.3 (soil): observation / planned / done / results choices.
CHOICE_CIERRE_1_3_IMPACTO = (
    ('A', 'Falta de obra de conservación'),
    ('B', 'Falta de obra de drenaje'),
    ('C', 'Deficiencia o desbalance de nutrientes'),
    ('D', 'Estado de fertilidad de suelo'),
    ('E', 'Relación entre suelo, fertilidad y la productividad'),
    ('F', 'Ninguna'),
)
CHOICE_CIERRE_1_3_PLANIFICADA = (
    ('A', 'Aplicar abono orgánicos'),
    ('B', 'Aplicar abono mineral'),
    ('C', 'Aplicar Cal o Ceniza'),
    ('D', 'Abonar según datos de análisis'),
    ('E', 'Sembrar abono verde y cobertura'),
    ('F', 'Ninguna'),
    ('G', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_3_REALIZADA = (
    ('A', 'Aplicar abono orgánicos'),
    ('B', 'Aplicar abono mineral'),
    ('C', 'Aplicar Cal o Ceniza'),
    ('D', 'Abonar según datos de análisis'),
    ('E', 'Sembrar abono verde y cobertura'),
    ('F', 'Ninguna'),
    ('G', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_3_RESULTADOS = (
    ('A', 'Aumento de producción'),
    ('B', 'Aumento de la floración'),
    ('C', 'Reducción de enfermedades'),
    ('D', 'Abonar según datos de análisis'),
    ('E', 'Ninguna'),
)
class CierreManejo3(models.Model):
    """Closing sheet section 1.3 - soil ("suelo") management."""
    campo1 = MultiSelectField(choices=CHOICE_CIERRE_1_3_IMPACTO,
        verbose_name='Observación que impacto')
    campo2 = MultiSelectField(choices=CHOICE_CIERRE_1_3_PLANIFICADA,
        verbose_name='Acciones planificadas')
    campo3 = MultiSelectField(choices=CHOICE_CIERRE_1_3_REALIZADA,
        verbose_name='Acciones realizadas')
    campo4 = MultiSelectField(choices=CHOICE_CIERRE_1_3_RESULTADOS,
        verbose_name='Resultados obtenidos', null=True, blank=True)
    ficha = models.ForeignKey(FichaCierre)
    def __unicode__(self):
        return u"1.3"
    class Meta:
        verbose_name='1.3 Suelo'
        verbose_name_plural='1.3 Suelo'
# Section 1.4 (pests): observation / planned / done / results choices.
CHOICE_CIERRE_1_4_IMPACTO = (
    ('A', 'Variedad de plagas y enfermedades'),
    ('B', 'Nivel de daño de plagas y enfermedades'),
    ('C', 'Relación entre poda , plagas y enfermedades'),
    ('D', 'Relación entre sombra y plagas y enfermedades'),
    ('E', 'Impacto de plagas y enfermedades sobre producción'),
    ('F', 'Ninguna'),
)
CHOICE_CIERRE_1_4_PLANIFICADA = (
    ('A', 'Realizar recuentos'),
    ('B', 'Mejorar la sombra'),
    ('C', 'Mejorar la poda'),
    ('D', 'Eliminar mazorcas enfermas'),
    ('E', 'Aplicar caldo sulfo-calcico'),
    ('F', 'Aplicar bio-fermentados'),
    ('G', 'Ninguna'),
    ('H', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_4_REALIZADA = (
    ('A', 'Realizar recuentos'),
    ('B', 'Mejorar la sombra'),
    ('C', 'Mejorar la poda'),
    ('D', 'Eliminar mazorcas enfermas'),
    ('E', 'Aplicar caldo sulfo-calcico'),
    ('F', 'Aplicar bio-fermentados'),
    ('G', 'Ninguna'),
    ('H', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_4_RESULTADOS = (
    ('A', 'Aumento de producción'),
    ('B', 'Reducción de daño de plagas'),
    ('C', 'Reducción de enfermedades'),
    ('D', 'Ninguna'),
)
class CierreManejo4(models.Model):
    """Closing sheet section 1.4 - pest ("plaga") management."""
    campo1 = MultiSelectField(choices=CHOICE_CIERRE_1_4_IMPACTO,
        verbose_name='Observación que impacto')
    campo2 = MultiSelectField(choices=CHOICE_CIERRE_1_4_PLANIFICADA,
        verbose_name='Acciones planificadas')
    campo3 = MultiSelectField(choices=CHOICE_CIERRE_1_4_REALIZADA,
        verbose_name='Acciones realizadas')
    campo4 = MultiSelectField(choices=CHOICE_CIERRE_1_4_RESULTADOS,
        verbose_name='Resultados obtenidos', null=True, blank=True)
    ficha = models.ForeignKey(FichaCierre)
    def __unicode__(self):
        return u"1.4"
    class Meta:
        verbose_name='1.4 Plaga'
        verbose_name_plural='1.4 Plaga'
# Section 1.5 (ground cover / weeds): observation / planned / done / results.
CHOICE_CIERRE_1_5_IMPACTO = (
    ('A', 'Variedad de mala hierbas'),
    ('B', 'Nivel de daño de mala hierbas'),
    ('C', 'Relación entre chapoda y composición del piso'),
    ('D', 'Relación entre herbicidas y composición del piso'),
    ('E', 'Cantidad de bejucos en el piso y plantas'),
    ('F', 'Ninguna'),
    ('G', 'Falta de materia organica'),
)
CHOICE_CIERRE_1_5_PLANIFICADA = (
    ('A', 'Realizar conteo'),
    ('B', 'Mejorar la sombra'),
    ('C', 'Eliminar bejucos'),
    ('D', 'Eliminar tanda'),
    ('E', 'Realizar manejo selectivo'),
    ('F', 'Ninguna'),
    ('G', 'Ninguna por falta de recursos'),
    ('H', 'Repartir hojarasca'),
)
CHOICE_CIERRE_1_5_REALIZADA = (
    ('A', 'Realizar conteo'),
    ('B', 'Mejorar la sombra'),
    ('C', 'Eliminar bejucos'),
    ('D', 'Eliminar tanda'),
    ('E', 'Realizar manejo selectivo'),
    ('F', 'Ninguna'),
    ('G', 'Ninguna por falta de recursos'),
    ('H', 'Repartir hojarasca'),
)
CHOICE_CIERRE_1_5_RESULTADOS = (
    ('A', 'Aumento de producción'),
    ('B', 'Reducción de malas hierbas dañinas'),
    ('C', 'Aumento de cobertura'),
    ('D', 'Eliminar tanda'),
    ('E', 'Ninguna'),
)
class CierreManejo5(models.Model):
    """Closing sheet section 1.5 - ground cover ("piso") management."""
    campo1 = MultiSelectField(choices=CHOICE_CIERRE_1_5_IMPACTO,
        verbose_name='Observación que impacto')
    campo2 = MultiSelectField(choices=CHOICE_CIERRE_1_5_PLANIFICADA,
        verbose_name='Acciones planificadas')
    campo3 = MultiSelectField(choices=CHOICE_CIERRE_1_5_REALIZADA,
        verbose_name='Acciones realizadas')
    campo4 = MultiSelectField(choices=CHOICE_CIERRE_1_5_RESULTADOS,
        verbose_name='Resultados obtenidos', null=True, blank=True)
    ficha = models.ForeignKey(FichaCierre)
    def __unicode__(self):
        return u"1.5"
    class Meta:
        verbose_name='1.5 Piso'
        verbose_name_plural='1.5 Piso'
# Section 1.6 (nursery): observation / planned / done / results choices.
CHOICE_CIERRE_1_6_IMPACTO = (
    ('A', 'Tipo de cacao que estamos sembrando'),
    ('B', 'Auto-incompatibilidad de las semillas'),
    ('C', 'La calidad de semillas'),
    ('D', 'Incidencia de plagas y enfermedades en vivero'),
    ('E', 'Calidad de plantas'),
    ('F', 'Ninguna'),
)
CHOICE_CIERRE_1_6_PLANIFICADA = (
    ('A', 'Seleccionar mazorcas y mezclar para conseguir semilla'),
    ('B', 'Utilizar mejor calidad de semillas'),
    ('C', 'Mejorar el sustrato'),
    ('D', 'Mejorar el tamaño de bolsa'),
    ('E', 'Mejorar manejo de enfermedades y plagas'),
    ('F', 'Ninguna'),
    ('G', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_6_REALIZADA = (
    ('A', 'Seleccionar mazorcas y mezclar para conseguir semilla'),
    ('B', 'Utilizar mejor calidad de semillas'),
    ('C', 'Mejorar el sustrato'),
    ('D', 'Mejorar el tamaño de bolsa'),
    ('E', 'Mejorar manejo de enfermedades y plagas'),
    ('F', 'Ninguna'),
    ('G', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_6_RESULTADOS = (
    ('A', 'Mejor vigor de las plantas'),
    ('B', 'Menos daño de plagas'),
    ('C', 'Menos daño de enfermedades'),
    ('D', 'Ninguna'),
)
class CierreManejo6(models.Model):
    """Closing sheet section 1.6 - nursery ("vivero") management."""
    campo1 = MultiSelectField(choices=CHOICE_CIERRE_1_6_IMPACTO,
        verbose_name='Observación que impacto')
    campo2 = MultiSelectField(choices=CHOICE_CIERRE_1_6_PLANIFICADA,
        verbose_name='Acciones planificadas')
    campo3 = MultiSelectField(choices=CHOICE_CIERRE_1_6_REALIZADA,
        verbose_name='Acciones realizadas')
    campo4 = MultiSelectField(choices=CHOICE_CIERRE_1_6_RESULTADOS,
        verbose_name='Resultados obtenidos', null=True, blank=True)
    ficha = models.ForeignKey(FichaCierre)
    def __unicode__(self):
        return u"1.6"
    class Meta:
        verbose_name='1.6 Vivero'
        verbose_name_plural='1.6 Vivero'
# Section 1.7 (harvest): observation / planned / done / results choices.
# NOTE(review): the results constant is singular (RESULTADO), unlike the
# plural naming of sections 1.1-1.6; renaming would touch migrations/forms.
CHOICE_CIERRE_1_7_IMPACTO = (
    ('A', 'Cantidad de planta productiva'),
    ('B', 'Numero de mazorcas sanas'),
    ('C', 'Numero de mazorcas dañadas'),
    ('D', 'Nivel de cosecha de la parcela'),
    ('E', 'Ninguna'),
    ('F', 'Efecto de sombra sobre la producción'),
    ('G', 'Efecto de poda sobre la producción'),
)
CHOICE_CIERRE_1_7_PLANIFICADA = (
    ('A', 'Mejorar la poda y sombra'),
    ('B', 'Mejorar la fertilización'),
    ('C', 'Mejorar manejo de plagas'),
    ('D', 'Eliminar planta poca productivas'),
    ('E', 'Sembrar plantas más productivas'),
    ('F', 'Ninguna'),
    ('G', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_7_REALIZADA = (
    ('A', 'Mejorar la poda y sombra'),
    ('B', 'Mejorar la fertilización'),
    ('C', 'Mejorar manejo de plagas'),
    ('D', 'Eliminar planta poca productivas'),
    ('E', 'Sembrar plantas más productivas'),
    ('F', 'Ninguna'),
    ('G', 'Ninguna por falta de recursos'),
)
CHOICE_CIERRE_1_7_RESULTADO = (
    ('A', 'Aumento de la cosecha'),
    ('B', 'Aumento de plantas productivas'),
    ('C', 'Mejor calidad de mazorcas'),
    ('D', 'Mejor calidad de granos'),
    ('E', 'Ninguna'),
)
class CierreManejo7(models.Model):
    """Closing sheet section 1.7 - harvest ("cosecha") management."""
    campo1 = MultiSelectField(choices=CHOICE_CIERRE_1_7_IMPACTO,
        verbose_name='Observación que impacto')
    campo2 = MultiSelectField(choices=CHOICE_CIERRE_1_7_PLANIFICADA,
        verbose_name='Acciones planificadas')
    campo3 = MultiSelectField(choices=CHOICE_CIERRE_1_7_REALIZADA,
        verbose_name='Acciones realizadas')
    campo4 = MultiSelectField(choices=CHOICE_CIERRE_1_7_RESULTADO,
        verbose_name='Resultados obtenidos', null=True, blank=True)
    ficha = models.ForeignKey(FichaCierre)
    def __unicode__(self):
        return u"1.7"
    class Meta:
        verbose_name='1.7 Cosecha'
        verbose_name_plural='1.7 Cosecha'
# Cacao types selectable in the cost section of the closing sheet.
CHOICE_CIERRE_COSTO_1 = (
    ('A', 'Cacao Criollo'),
    ('B', 'Cacao Trinitario'),
    ('C', 'Cacao Forastero'),
    ('D', 'Cacao híbrido'),
    ('E', 'Clones de cacao'),
)
class CierreCosto1(models.Model):
    """Closing sheet section 2 - labour cost, plot area and cacao types."""
    costo = models.FloatField('Costo de mano de obra C$/día')
    area = models.FloatField('Área de parcela de cacao en mz')
    tipo = MultiSelectField(choices=CHOICE_CIERRE_COSTO_1,
        verbose_name='Tipo de Cacao ')
    ficha = models.ForeignKey(FichaCierre)
    def __unicode__(self):
        return u"2"
class ActividadesCierre(models.Model):
    """Catalog of activity names referenced by CierreActividad."""
    nombre = models.CharField(max_length=250)
    def __unicode__(self):
        return self.nombre
    class Meta:
        verbose_name_plural='Actividades de cierre'
class CierreActividad(models.Model):
    """Closing sheet section 2.1 - labour and input cost per activity."""
    actividad = models.ForeignKey(ActividadesCierre)
    meses = MultiSelectField(choices=CHOICES_FECHA_PODA,
        verbose_name='En qué meses realizan')
    familiar = models.FloatField('Uso de DP familiar')
    contratada = models.FloatField('Uso de DP contratada')
    insumo = models.CharField('Uso Insumo', max_length=250)
    costo = models.FloatField('Costo de insumo en C$')
    ficha = models.ForeignKey(FichaCierre)
    def __unicode__(self):
        return u"2.1"
class CierreBabaRoja(models.Model):
    """Closing sheet section 2.2 - harvest/sale volumes and prices (qq)."""
    campo1 = models.FloatField('Cosecha anual qq baba', null=True, blank=True)
    campo2 = models.FloatField('Venta qq baba', null=True, blank=True)
    campo3 = models.FloatField('Precio de venta qq baba', null=True, blank=True)
    campo4 = models.FloatField('Cosecha anual qq grano rojo', null=True, blank=True)
    campo5 = models.FloatField('Venta qq grano rojo', null=True, blank=True)
    campo6 = models.FloatField('Precio de venta qq grano rojo', null=True, blank=True)
    campo7 = models.FloatField('Consumo anual qq grano rojo', null=True, blank=True)
    ficha = models.ForeignKey(FichaCierre)
    def __unicode__(self):
        return u"2.2"
    class Meta:
        verbose_name='Datos'
        verbose_name_plural='Datos'
class ManejosCierre(models.Model):
    """Catalog of management-practice names referenced by CierreManejo."""
    nombre = models.CharField(max_length=250)
    def __unicode__(self):
        return self.nombre
    class Meta:
        verbose_name_plural='Manejos de cierre'
class CierreManejo(models.Model):
    """Closing sheet section 3 - yes/no per practice and crop stage."""
    manejo = models.ForeignKey(ManejosCierre)
    reposo = models.IntegerField(choices=((1,'Si'),(2,'No'),) )
    crecimiento = models.IntegerField(choices=((1,'Si'),(2,'No'),) )
    floracion = models.IntegerField(choices=((1,'Si'),(2,'No'),) )
    cosecha = models.IntegerField(choices=((1,'Si'),(2,'No'),) )
    ficha = models.ForeignKey(FichaCierre)
    def __unicode__(self):
        return u"3"
# Knowledge section 4, topic 1: which varieties the producer has on farm.
CHOICE_CIERRE_CONOCIMIENTO_TEMA1 = ((1, 'Variedad más común en mi finca'),)
class CierreConocimiento1(models.Model):
    """Closing sheet section 4 topic 1 - yes/no per cacao variety."""
    tema = models.IntegerField(choices=CHOICE_CIERRE_CONOCIMIENTO_TEMA1)
    criollas = models.IntegerField(choices=((1,'Si'),(2,'No'),) )
    forastero = models.IntegerField(choices=((1,'Si'),(2,'No'),) )
    trinitaria = models.IntegerField(choices=((1,'Si'),(2,'No'),) )
    hibridos = models.IntegerField(choices=((1,'Si'),(2,'No'),) )
    clones = models.IntegerField(choices=((1,'Si'),(2,'No'),) )
    ficha = models.ForeignKey(FichaCierre)
    def __unicode__(self):
        return u"4 tema 1"
# Knowledge section 4, topic 2: perceived advantages per variety.
CHOICE_CIERRE_CONOCIMIENTO_TEMA2 = ((1, 'Ventajas de variedades'),)
CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS = (
    ('A', 'Produce más'),
    ('B', 'Granos grandes'),
    ('C', 'Tolerante a plagas y enfermedades'),
    ('D', 'Tiene buena estructura'),
    ('E', 'No necesita mucho abono'),
    ('F', 'No aplica'),
)
class CierreConocimiento2(models.Model):
    """Closing sheet section 4 topic 2 - advantages selected per variety."""
    tema = models.IntegerField(choices=CHOICE_CIERRE_CONOCIMIENTO_TEMA2)
    criollas = MultiSelectField(choices=CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS)
    forastero = MultiSelectField(choices=CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS)
    trinitaria = MultiSelectField(choices=CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS)
    hibridos = MultiSelectField(choices=CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS)
    clones = MultiSelectField(choices=CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS)
    ficha = models.ForeignKey(FichaCierre)
    def __unicode__(self):
        return u"4 tema 2"
# Knowledge section 4, topic 3: perceived disadvantages per variety.
CHOICE_CIERRE_CONOCIMIENTO_TEMA3 = ((1, 'Desventajas de variedades'),)
CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS3 = (
    ('A', 'Produce poco'),
    ('B', 'Granos menudos'),
    ('C', 'Susceptible a plagas y enfermedades'),
    ('D', 'No tiene buena estructura'),
    ('E', 'Necesita mucho abono'),
    ('F', 'No aplica'),
)
class CierreConocimiento3(models.Model):
    """Closing sheet section 4 topic 3 - disadvantages selected per variety."""
    tema = models.IntegerField(choices=CHOICE_CIERRE_CONOCIMIENTO_TEMA3)
    criollas = MultiSelectField(choices=CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS3)
    forastero = MultiSelectField(choices=CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS3)
    trinitaria = MultiSelectField(choices=CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS3)
    hibridos = MultiSelectField(choices=CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS3)
    clones = MultiSelectField(choices=CHOICE_CIERRE_CONOCIMIENTO_RESPUESTAS3)
    ficha = models.ForeignKey(FichaCierre)
    def __unicode__(self):
        return u"4 tema 3"
# Soil-knowledge section 4.2, topic 1: nutrients contributed per practice.
CHOICE_CIERRE_SUELO_TEMA1 = ((1, 'Que elementos aportan'),)
CHOICE_CIERRE_SUELO_RESPUESTAS1 = (
    ('A', 'Nitrógeno'),
    ('B', 'Fósforo'),
    ('C', 'Potasio'),
    ('D', 'Calcio'),
    ('E', 'Magnesio'),
    ('F', 'No aplica'),
)
class CierreSuelo1(models.Model):
    """Closing sheet section 4.2 topic 1 - nutrients per soil practice."""
    tema = models.IntegerField(choices=CHOICE_CIERRE_SUELO_TEMA1)
    abono = MultiSelectField(choices=CHOICE_CIERRE_SUELO_RESPUESTAS1,
        verbose_name='Abono verde y coberturas')
    hojarasca = MultiSelectField(choices=CHOICE_CIERRE_SUELO_RESPUESTAS1,
        verbose_name='Hojarasca de los árboles')
    organico = MultiSelectField(choices=CHOICE_CIERRE_SUELO_RESPUESTAS1,
        verbose_name='Abono orgánico')
    ficha = models.ForeignKey(FichaCierre)
    def __unicode__(self):
        return u"4 .2 tema 1"
# Soil-knowledge section 4.2, topic 2: advantages per practice.
CHOICE_CIERRE_SUELO_TEMA2 = ((1, 'Ventajas de esta práctica'),)
CHOICE_CIERRE_SUELO_RESPUESTAS2 = (
    ('A', 'Fácil de implementar'),
    ('B', 'De bajo costo'),
    ('C', 'No necesita mucha inversión'),
    ('D', 'No necesita mucha mano de obra'),
    ('E', 'Aporta al desarrollo de las plantas'),
    ('F', 'No aplica'),
)
class CierreSuelo2(models.Model):
    """Closing sheet section 4.2 topic 2 - advantages per soil practice."""
    tema = models.IntegerField(choices=CHOICE_CIERRE_SUELO_TEMA2)
    abono = MultiSelectField(choices=CHOICE_CIERRE_SUELO_RESPUESTAS2,
        verbose_name='Abono verde y coberturas')
    hojarasca = MultiSelectField(choices=CHOICE_CIERRE_SUELO_RESPUESTAS2,
        verbose_name='Hojarasca de los árboles')
    organico = MultiSelectField(choices=CHOICE_CIERRE_SUELO_RESPUESTAS2,
        verbose_name='Abono orgánico')
    ficha = models.ForeignKey(FichaCierre)
    def __unicode__(self):
        return u"4 .2 tema 2"
# Soil-knowledge section 4.2, topic 3: disadvantages per practice.
# NOTE(review): label says 'Desventajas de variedades' -- likely copied from
# the varieties section; confirm wording with the form owners before changing.
CHOICE_CIERRE_SUELO_TEMA3 = ((1, 'Desventajas de variedades'),)
CHOICE_CIERRE_SUELO_RESPUESTAS3 = (
    ('A', 'Difícil de implementar'),
    ('B', 'Alto costo'),
    ('C', 'Necesita mucha inversión'),
    ('D', 'Necesita mucha mano de obra'),
    ('E', 'No aporta al desarrollo de las plantas'),
    ('F', 'No aplica'),
)
class CierreSuelo3(models.Model):
    """Closing sheet section 4.2 topic 3 - disadvantages per soil practice."""
    tema = models.IntegerField(choices=CHOICE_CIERRE_SUELO_TEMA3)
    abono = MultiSelectField(choices=CHOICE_CIERRE_SUELO_RESPUESTAS3,
        verbose_name='Abono verde y coberturas')
    hojarasca = MultiSelectField(choices=CHOICE_CIERRE_SUELO_RESPUESTAS3,
        verbose_name='Hojarasca de los árboles')
    organico = MultiSelectField(choices=CHOICE_CIERRE_SUELO_RESPUESTAS3,
        verbose_name='Abono orgánico')
    ficha = models.ForeignKey(FichaCierre)
    def __unicode__(self):
        return u"4 .2 tema 3"
# Pest-knowledge section 4.3, topic 1: damage level per pest/disease.
CHOICE_CIERRE_PLAGA_TEMA1 = ((1, 'Nivel de daño en la parcela'),
    (2, 'Nivel de daño en las fincas vecinas'),)
class CierrePlaga1(models.Model):
    """Closing sheet section 4.3 topic 1 - numeric damage estimates."""
    tema = models.IntegerField(choices=CHOICE_CIERRE_PLAGA_TEMA1)
    monilla = models.FloatField()
    mazorca = models.FloatField('Mazorca Negra')
    zompopos = models.FloatField()
    ficha = models.ForeignKey(FichaCierre)
    def __unicode__(self):
        return u"4 .3 tema 1"
# Pest-knowledge section 4.3, topic 2: prevention practices per pest.
CHOICE_CIERRE_PLAGA_TEMA2 = ((1, 'Prácticas para prevenir'),)
CHOICE_CIERRE_PLAGA_RESPUESTAS2 = (
    ('A', 'Eliminar mazorcas enfermas'),
    ('B', 'Realizar poda'),
    ('C', 'Manejo de sombra'),
    ('D', 'Abonar las plantas'),
    ('E', 'Buen manejo de piso'),
    ('F', 'No aplica'),
)
CHOICE_CIERRE_PLAGA_RESPUESTAS_ZOMPOPO = (
    ('A', 'Eliminar zompoperas'),
    ('B', 'Realizar caseo'),
    ('C', 'Sembrar plantas repelentes'),
    ('D', 'Utilizar cal o ceniza'),
    ('E', 'Buen manejo de piso'),
    ('F', 'No aplica'),
)
class CierrePlaga2(models.Model):
    """Closing sheet section 4.3 topic 2 - prevention practices selected."""
    tema = models.IntegerField(choices=CHOICE_CIERRE_PLAGA_TEMA2)
    monilla = MultiSelectField(choices=CHOICE_CIERRE_PLAGA_RESPUESTAS2,
        verbose_name='Monilla')
    mazorca = MultiSelectField(choices=CHOICE_CIERRE_PLAGA_RESPUESTAS2,
        verbose_name='Mazorca Negra')
    zompopos = MultiSelectField(choices=CHOICE_CIERRE_PLAGA_RESPUESTAS_ZOMPOPO,
        verbose_name='Zompopos')
    ficha = models.ForeignKey(FichaCierre)
    def __unicode__(self):
        return u"4 .3 tema 2"
# Pest-knowledge section 4.3, topic 3: control practices per pest.
CHOICE_CIERRE_PLAGA_TEMA3 = ((1, 'Prácticas para controlar'),)
CHOICE_CIERRE_PLAGA_RESPUESTAS3 = (
    ('A', 'Aplicar caldo sulfo-calcico'),
    ('B', 'Aplicar fungicidas'),
    ('C', 'No aplica'),
)
CHOICE_CIERRE_PLAGA_RESPUESTAS_ZOMPOPO3 = (
    ('A', 'Aplicar venenos en las zompoperas'),
    ('B', 'Proteger las plantas con plástico'),
    ('C', 'No aplica'),
)
class CierrePlaga3(models.Model):
    """Closing sheet section 4.3 topic 3 - control practices selected."""
    tema = models.IntegerField(choices=CHOICE_CIERRE_PLAGA_TEMA3)
    monilla = MultiSelectField(choices=CHOICE_CIERRE_PLAGA_RESPUESTAS3,
        verbose_name='Monilla')
    mazorca = MultiSelectField(choices=CHOICE_CIERRE_PLAGA_RESPUESTAS3,
        verbose_name='Mazorca Negra')
    zompopos = MultiSelectField(choices=CHOICE_CIERRE_PLAGA_RESPUESTAS_ZOMPOPO3,
        verbose_name='Zompopos')
    ficha = models.ForeignKey(FichaCierre)
    def __unicode__(self):
        return u"4 .3 tema 3"
# Section 5 (work-cycle evaluation): answer scales for the feedback questions.
CHOICE_CIERRE_CICLO_TRABAJO1_RESPUESTA = (
    (1, 'Mucho'),
    (2, 'Algo'),
    (3, 'Poco'),
    (4, 'Nada '),
)
CHOICE_CIERRE_CICLO_TRABAJO2_RESPUESTA = (
    (1, 'Todas'),
    (2, 'Algunas'),
    (3, 'Pocas'),
    (4, 'Ninguna'),
)
CHOICE_CIERRE_CICLO_TRABAJO3_RESPUESTA = (
    (1, 'Demasiada visitas'),
    (2, 'Adecuadas visitas'),
    (3, 'Pocas visitas'),
)
CHOICE_CIERRE_CICLO_TRABAJO4_RESPUESTA = (
    (1, 'Demasiada larga'),
    (2, 'Adecuado tiempo '),
    (3, 'Muy corta'),
)
CHOICE_CIERRE_CICLO_TRABAJO5_RESPUESTA = (
    (1, 'Si y con mucho ánimo'),
    (2, 'Si pero con poco ánimo'),
    (3, 'Si porque siento obligado'),
    (4, 'No quiero seguir'),
)
class CierreCicloTrabajo(models.Model):
    """Closing sheet section 5 - producer feedback on the visit cycle."""
    pregunta1 = models.IntegerField(choices=CHOICE_CIERRE_CICLO_TRABAJO1_RESPUESTA,
        verbose_name='¿Las visitas que hemos realizados han servido para aprender nuevas cosas? ')
    pregunta2 = models.IntegerField(choices=CHOICE_CIERRE_CICLO_TRABAJO1_RESPUESTA,
        verbose_name='¿Las visitas que hemos realizados han servido para observar sobre diferentes aspectos de la parcela de cacao? ')
    pregunta3 = models.IntegerField(choices=CHOICE_CIERRE_CICLO_TRABAJO1_RESPUESTA,
        verbose_name='¿Las observaciones y discusiones han servido para mejorar el manejo de las parcela de cacao?')
    pregunta4 = models.IntegerField(choices=CHOICE_CIERRE_CICLO_TRABAJO2_RESPUESTA,
        verbose_name='¿Han podido implementar las acciones que se acordaron a partir de las visitas?')
    pregunta5 = models.IntegerField(choices=CHOICE_CIERRE_CICLO_TRABAJO3_RESPUESTA,
        verbose_name='¿Qué piensa sobre la frecuencia de las visitas?')
    pregunta6 = models.IntegerField(choices=CHOICE_CIERRE_CICLO_TRABAJO4_RESPUESTA,
        verbose_name='¿Qué piensa sobre el tiempo que dura cada visita?')
    pregunta7 = models.IntegerField(choices=CHOICE_CIERRE_CICLO_TRABAJO5_RESPUESTA,
        verbose_name='¿Quiere seguir trabajando con las visitas para el segundo ciclo?')
    pregunta8 = models.IntegerField(choices=((1,'Si'),(2,'No'),),
        verbose_name='Estaría usted interesado organizar un día de campo en su finca para que otras y otros productores vengan a visitar la parcela?')
    pregunta9 = models.TextField('¿Qué sugiere para mejorar el trabajo de este ciclo?')
    ficha = models.ForeignKey(FichaCierre)
    def __unicode__(self):
        return u"5 ciclo de trabajo"
|
mit
|
mute/elasticsearch
|
dev-tools/build_release.py
|
27
|
34065
|
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
import tempfile
import shutil
import os
import datetime
import json
import time
import sys
import argparse
import hmac
import urllib
import fnmatch
import socket
import urllib.request
import subprocess
from functools import partial
from http.client import HTTPConnection
from http.client import HTTPSConnection
"""
This tool builds a release from a given elasticsearch branch.
In order to execute it go in the top level directory and run:
$ python3 dev_tools/build_release.py --branch 0.90 --publish --remote origin
By default this script runs in 'dry' mode which essentially simulates a release. If the
'--publish' option is set the actual release is done. The script takes over almost all
steps necessary for a release from a high level point of view it does the following things:
 - run prerequisite checks, i.e. check for Java 1.7 being present or S3 credentials available as env variables
- detect the version to release from the specified branch (--branch) or the current branch
- creates a release branch & updates pom.xml and Version.java to point to a release version rather than a snapshot
- builds the artifacts and runs smoke-tests on the build zip & tar.gz files
- commits the new version and merges the release branch into the source branch
- creates a tag and pushes the commit to the specified origin (--remote)
- publishes the releases to Sonatype and S3
Once it's done it will print all the remaining steps.
Prerequisites:
- Python 3k for script execution
- Boto for S3 Upload ($ apt-get install python-boto)
- RPM for RPM building ($ apt-get install rpm)
- S3 keys exported via ENV variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
- GPG data exported via ENV variables (GPG_KEY_ID, GPG_PASSPHRASE, optionally GPG_KEYRING)
- S3 target repository via ENV variables (S3_BUCKET_SYNC_TO, optionally S3_BUCKET_SYNC_FROM)
"""
# Process environment; credentials and paths are read from here.
env = os.environ
# (plugin name, source) pairs -- presumably installed/checked later in the
# release process; usage is not visible in this chunk.
PLUGINS = [('license', 'elasticsearch/license/latest'),
           ('bigdesk', 'lukas-vlcek/bigdesk'),
           ('paramedic', 'karmi/elasticsearch-paramedic'),
           ('segmentspy', 'polyfractal/elasticsearch-segmentspy'),
           ('inquisitor', 'polyfractal/elasticsearch-inquisitor'),
           ('head', 'mobz/elasticsearch-head')]
# Release log path; override with the ES_RELEASE_LOG environment variable.
LOG = env.get('ES_RELEASE_LOG', '/tmp/elasticsearch_release.log')
# console colors (ANSI escape sequences)
COLOR_OK = '\033[92m'
COLOR_END = '\033[0m'
COLOR_FAIL = '\033[91m'
def log(msg):
    """Append *msg* to the release log on a fresh line."""
    log_plain('\n{0}'.format(msg))
def log_plain(msg):
    """Append *msg* verbatim (UTF-8 encoded) to the release log file.

    Fix: use a context manager so the file handle is closed even if the
    write raises, instead of a bare open()/close() pair.
    """
    with open(LOG, mode='ab') as f:
        f.write(msg.encode('utf-8'))
def run(command, quiet=False):
    """Run *command* through the shell, appending its output to LOG.

    Raises RuntimeError on a non-zero exit status; the failure message is
    also printed unless *quiet* is set.
    NOTE(review): the command string reaches the shell unescaped -- fine
    for this script's internally-built commands, never for untrusted input.
    """
    log('%s: RUN: %s\n' % (datetime.datetime.now(), command))
    if os.system('%s >> %s 2>&1' % (command, LOG)):
        msg = ' FAILED: %s [see log %s]' % (command, LOG)
        if not quiet:
            print(msg)
        raise RuntimeError(msg)
# Resolve the JDK to build with: JAVA_HOME is required; JAVA7_HOME, when
# set, takes precedence over it.
try:
    JAVA_HOME = env['JAVA_HOME']
except KeyError:
    raise RuntimeError("""
Please set JAVA_HOME in the env before running release tool
On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.7*'`""")
try:
    JAVA_HOME = env['JAVA7_HOME']
except KeyError:
    pass #no JAVA7_HOME -- fall back to plain JAVA_HOME
try:
    # make sure mvn3 is used if mvn3 is available
    # some systems use maven 2 as default
    subprocess.check_output('mvn3 --version', shell=True, stderr=subprocess.STDOUT)
    MVN = 'mvn3'
except subprocess.CalledProcessError:
    MVN = 'mvn'
def java_exe():
    """Return a shell prefix exporting JAVA_HOME, PATH and JAVACMD."""
    home = JAVA_HOME
    return ('export JAVA_HOME="{0}" PATH="{0}/bin:$PATH" '
            'JAVACMD="{0}/bin/java"').format(home)
def verify_java_version(version):
    """Raise RuntimeError unless `java -version` reports *version*."""
    output = os.popen('%s; java -version 2>&1' % java_exe()).read()
    needle = ' version "%s.' % version
    if needle not in output:
        raise RuntimeError('got wrong version for java %s:\n%s' % (version, output))
# Verifies the java version. We guarantee that we run with Java 1.7
# If 1.7 is not available fail the build!
def verify_mvn_java_version(version, mvn):
    """Raise RuntimeError unless *mvn* runs on a JVM of *version*."""
    output = os.popen('%s; %s --version 2>&1' % (java_exe(), mvn)).read()
    needle = 'Java version: %s' % version
    if needle not in output:
        raise RuntimeError('got wrong java version for %s %s:\n%s' % (mvn, version, output))
# Commit hash of the current git HEAD.
def get_head_hash():
    """Return the SHA of HEAD in the current checkout."""
    output = os.popen(' git rev-parse --verify HEAD 2>&1').read()
    return output.strip()
# Commit hash a given tag points to.
def get_tag_hash(tag):
    """Return the SHA referenced by *tag* (empty string if unknown)."""
    output = os.popen('git show-ref --tags %s --hash 2>&1' % (tag)).read()
    return output.strip()
# Name of the currently checked-out branch.
def get_current_branch():
    """Return the abbreviated ref name of the current branch."""
    output = os.popen('git rev-parse --abbrev-ref HEAD 2>&1').read()
    return output.strip()
# Canonical release-branch name for a version.
def release_branch(version):
    """Return the branch name used while preparing *version*."""
    return 'release_branch_{0}'.format(version)
# git-fetch from the given remote.
def fetch(remote):
    """Fetch refs from *remote*, logging output to LOG."""
    run('git fetch {0}'.format(remote))
# Creates the release branch off the (freshly rebased) source branch.
# Fails if the source branch does not exist on the given remote.
def create_release_branch(remote, src_branch, release):
    """Check out *src_branch*, rebase from *remote*, branch off for *release*."""
    commands = ('git checkout %s' % src_branch,
                'git pull --rebase %s %s' % (remote, src_branch),
                'git checkout -b %s' % (release_branch(release)))
    for command in commands:
        run(command)
# Reads the given file and applies the callback to every line; if any line
# changed, the file is replaced with the modified copy.
def process_file(file_path, line_callback):
    """Apply *line_callback* to each line of *file_path*.

    Writes the transformed lines to a temporary file.  If any line changed,
    the original file is replaced by the copy and True is returned;
    otherwise the copy is discarded and False is returned.

    Fixes: the mkstemp file descriptor is closed immediately (the file is
    re-opened by name), and the temporary file is removed if the callback
    or the I/O raises, instead of being leaked.
    """
    fh, abs_path = tempfile.mkstemp()
    os.close(fh)  # re-opened by name below; don't hold a dangling fd
    modified = False
    try:
        with open(abs_path, 'w', encoding='utf-8') as new_file:
            with open(file_path, encoding='utf-8') as old_file:
                for line in old_file:
                    new_line = line_callback(line)
                    modified = modified or (new_line != line)
                    new_file.write(new_line)
    except Exception:
        os.remove(abs_path)  # don't leak the temp copy on failure
        raise
    if modified:
        # Replace original file with the modified copy
        os.remove(file_path)
        shutil.move(abs_path, file_path)
        return True
    # nothing to do - just remove the tmp file
    os.remove(abs_path)
    return False
# Walks *path* (default 'docs') rewriting 'coming[$version]' tags to
# 'added[$version]' in all asciidoc files; returns the files it touched.
def update_reference_docs(release_version, path='docs'):
    """Flip coming[...] doc tags to added[...] for *release_version*."""
    pattern = 'coming[%s' % (release_version)
    replacement = 'added[%s' % (release_version)
    def callback(line):
        return line.replace(pattern, replacement)
    changed_files = []
    for root, _, file_names in os.walk(path):
        for file_name in fnmatch.filter(file_names, '*.asciidoc'):
            candidate = os.path.join(root, file_name)
            if process_file(candidate, callback):
                changed_files.append(candidate)
    return changed_files
# Moves the pom.xml file from a snapshot to a release
def remove_maven_snapshot(pom, release):
    """Strip the -SNAPSHOT suffix from *release*'s version tag in *pom*."""
    snapshot_tag = '<version>%s-SNAPSHOT</version>' % (release)
    release_tag = '<version>%s</version>' % (release)
    process_file(pom, lambda line: line.replace(snapshot_tag, release_tag))
# Moves the Version.java file from a snapshot to a release
def remove_version_snapshot(version_file, release):
    """Flip the snapshot boolean of *release*'s constant in Version.java."""
    # 1.0.0.Beta1 -> 1_0_0_Beta1
    constant = release.replace('.', '_')
    snapshot_decl = 'new Version(V_%s_ID, true' % (constant)
    release_decl = 'new Version(V_%s_ID, false' % (constant)
    process_file(version_file, lambda line: line.replace(snapshot_decl, release_decl))
# Stages the given files for the next git commit
def add_pending_files(*files):
    """git-add each given path so it becomes part of the next commit."""
    for path in files:
        run('git add %s' % (path))
# Executes a git commit with 'release [version]' as the commit message
def commit_release(release):
    """Commit the staged release-version changes."""
    message = 'release [%s]' % release
    run('git commit -m "%s"' % message)
def commit_feature_flags(release):
    """Commit the staged documentation feature-flag changes."""
    message = 'Update Documentation Feature Flags [%s]' % release
    run('git commit -m "%s"' % message)
def tag_release(release):
    """Create an annotated v<release> tag at HEAD."""
    run('git tag -a v%s -m "Tag release version %s"' % (release, release))
def run_mvn(*cmd):
    """Run each given maven sub-command, prefixed with the JAVA_HOME
    export produced by java_exe()."""
    for part in cmd:
        run('%s; %s %s' % (java_exe(), MVN, part))
def build_release(release_version, run_tests=False, dry_run=True, cpus=1, bwc_version=None):
    """Run the maven release build.

    :param release_version: version string being released (used to locate
                            the signed RPM artifact)
    :param run_tests: when True, run the local and network test suites
                      first (plus bwc tests if ``bwc_version`` is given)
    :param dry_run: when True only 'package' the artifacts instead of
                    'deploy'-ing them to Sonatype/S3
    :param cpus: number of test JVMs to run in parallel
    :param bwc_version: optional older version to run backwards
                        compatibility tests against
    """
    # 'deploy' publishes; a dry run only packages locally
    target = 'deploy'
    if dry_run:
        target = 'package'
    if run_tests:
        run_mvn('clean',
                'test -Dtests.jvms=%s -Des.node.mode=local' % (cpus),
                'test -Dtests.jvms=%s -Des.node.mode=network' % (cpus))
    if bwc_version:
        print('Running Backwards compatibility tests against version [%s]' % (bwc_version))
        run_mvn('clean', 'test -Dtests.filter=@backwards -Dtests.bwc.version=%s -Dtests.bwc=true -Dtests.jvms=1' % bwc_version)
    run_mvn('clean test-compile -Dforbidden.test.signatures="org.apache.lucene.util.LuceneTestCase\$AwaitsFix @ Please fix all bugs before release"')
    # dont sign the RPM, so older distros will be able to use the uploaded RPM package
    gpg_args = '-Dgpg.key="%s" -Dgpg.passphrase="%s" -Ddeb.sign=true -Drpm.sign=false' % (env.get('GPG_KEY_ID'), env.get('GPG_PASSPHRASE'))
    if env.get('GPG_KEYRING'):
        gpg_args += ' -Dgpg.keyring="%s"' % env.get('GPG_KEYRING')
    run_mvn('clean %s -DskipTests %s' % (target, gpg_args))
    success = False
    try:
        # create additional signed RPM for the repositories
        run_mvn('-f distribution/rpm/pom.xml package -DskipTests -Dsign.rpm=true -Drpm.outputDirectory=target/releases/signed/ %s' % (gpg_args))
        rpm = os.path.join('target/releases/signed', 'elasticsearch-%s.rpm' % release_version)
        if os.path.isfile(rpm):
            log('Signed RPM [%s] contains: ' % rpm)
            run('rpm -pqli %s' % rpm)
        success = True
    finally:
        if not success:
            print("""
    RPM Bulding failed make sure "rpm" tools are installed.
    Use on of the following commands to install:
    $ brew install rpm # on OSX
    $ apt-get install rpm # on Ubuntu et.al
    """)
# Uses the github API to fetch open tickets for the given release version
# if it finds any tickets open for that version it will throw an exception
def ensure_no_open_tickets(version):
    """Fail the release if Github still lists open issues labelled
    v<version>.

    :param version: release version string (without the leading 'v')
    :raises RuntimeError: if open issues are found, or the issue list
                          could not be fetched (HTTP status != 200)
    """
    version = "v%s" % version
    conn = HTTPSConnection('api.github.com')
    try:
        log('Checking for open tickets on Github for version %s' % version)
        log('Check if node is available')
        conn.request('GET', '/repos/elastic/elasticsearch/issues?state=open&labels=%s' % version, headers= {'User-Agent' : 'Elasticsearch version checker'})
        res = conn.getresponse()
        if res.status == 200:
            issues = json.loads(res.read().decode("utf-8"))
            if issues:
                urls = []
                for issue in issues:
                    urls.append(issue['html_url'])
                raise RuntimeError('Found open issues for release version %s:\n%s' % (version, '\n'.join(urls)))
            else:
                log("No open issues found for version %s" % version)
        else:
            raise RuntimeError('Failed to fetch issue list from Github for release version %s' % version)
    except socket.error as e:
        # Fix: the original format string contained a stray "' % version"
        # fragment inside the message text itself.
        log("Failed to fetch issue list from Github for release version %s - Exception: [%s]" % (version, e))
        # that is ok it might not be there yet
    finally:
        conn.close()
def wait_for_node_startup(host='127.0.0.1', port=9200, timeout=15):
    """Poll http://host:port once per second until it answers HTTP 200.

    :param timeout: number of one-second attempts (also reused as the
                    per-connection timeout, as in the original)
    :return: True once the node responded, False after *timeout* tries
    """
    for _ in range(timeout):
        conn = HTTPConnection(host, port, timeout)
        try:
            log('Waiting until node becomes available for 1 second')
            time.sleep(1)
            log('Check if node is available')
            conn.request('GET', '')
            if conn.getresponse().status == 200:
                return True
        except socket.error as e:
            log("Failed while waiting for node - Exception: [%s]" % e)
            # that is ok it might not be there yet
        finally:
            conn.close()
    return False
# Ensures we are using a true Lucene release, not a snapshot build:
def verify_lucene_version():
    """Verify pom.xml references a released Lucene build.

    :raises RuntimeError: if the snapshot repository is still referenced,
        either version tag is missing, or lucene.version differs from
        lucene.maven.version (which indicates a snapshot).
    """
    # Fix: use a context manager; the original open() leaked the handle.
    with open('pom.xml', encoding='utf-8') as pom:
        s = pom.read()
    if 'download.elastic.co/lucenesnapshots' in s:
        raise RuntimeError('pom.xml contains download.elastic.co/lucenesnapshots repository: remove that before releasing')
    m = re.search(r'<lucene.version>(.*?)</lucene.version>', s)
    if m is None:
        raise RuntimeError('unable to locate lucene.version in pom.xml')
    lucene_version = m.group(1)
    m = re.search(r'<lucene.maven.version>(.*?)</lucene.maven.version>', s)
    if m is None:
        raise RuntimeError('unable to locate lucene.maven.version in pom.xml')
    lucene_maven_version = m.group(1)
    if lucene_version != lucene_maven_version:
        raise RuntimeError('pom.xml is still using a snapshot release of lucene (%s): cutover to a real lucene release before releasing' % lucene_maven_version)
# Checks the pom.xml for the release version.
# This method fails if the pom file has no SNAPSHOT version set ie.
# if the version is already on a release version we fail.
# Returns the next version string ie. 0.90.7
def find_release_version(src_branch):
    """Check out *src_branch* and return the version found in pom.xml's
    <version>X-SNAPSHOT</version> tag; fail if no snapshot version is set."""
    run('git checkout %s' % src_branch)
    snapshot_re = re.compile(r'<version>(.+)-SNAPSHOT</version>')
    with open('pom.xml', encoding='utf-8') as pom:
        for line in pom:
            hit = snapshot_re.search(line)
            if hit:
                return hit.group(1)
    raise RuntimeError('Could not find release version in branch %s' % src_branch)
def artifact_names(release):
    """Return the relative paths of the four distribution artifacts
    (zip, tar.gz, deb, rpm) produced for *release*."""
    layout = (
        ('distribution/zip/target/releases', 'elasticsearch-%s.zip'),
        ('distribution/tar/target/releases', 'elasticsearch-%s.tar.gz'),
        ('distribution/deb/target/releases', 'elasticsearch-%s.deb'),
        ('distribution/rpm/target/releases', 'elasticsearch-%s.rpm'),
    )
    return [os.path.join(directory, pattern % (release))
            for directory, pattern in layout]
def get_artifacts(release):
    """Return the artifact paths for *release*, raising if any of the
    expected files is missing on disk."""
    common_artifacts = artifact_names(release)
    for candidate in common_artifacts:
        if os.path.isfile(candidate):
            continue
        raise RuntimeError('Could not find required artifact at %s' % candidate)
    return common_artifacts
# Sample URL:
# http://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/elasticsearch-rpm/2.0.0-beta1-SNAPSHOT/elasticsearch-rpm-2.0.0-beta1-SNAPSHOT.rpm
def download_and_verify(release, files, plugins=None, base_url='https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution'):
    """Download each artifact in *files* from *base_url*, verify its
    sha1 checksum, and smoke test the downloaded set.

    :param release: release version being verified
    :param files: artifact paths (only the basenames are used)
    :param plugins: plugins forwarded to smoke_test_release
    :param base_url: root of the release download repository
    :raises RuntimeError: if an artifact has an unrecognized extension
    """
    print('Downloading and verifying release %s from %s' % (release, base_url))
    tmp_dir = tempfile.mkdtemp()
    try:
        downloaded_files = []
        for file in files:
            name = os.path.basename(file)
            if name.endswith('tar.gz'):
                url = '%s/tar/elasticsearch/%s/%s' % (base_url, release, name)
            elif name.endswith('zip'):
                url = '%s/zip/elasticsearch/%s/%s' % (base_url, release, name)
            elif name.endswith('rpm'):
                url = '%s/rpm/elasticsearch/%s/%s' % (base_url, release, name)
            elif name.endswith('deb'):
                url = '%s/deb/elasticsearch/%s/%s' % (base_url, release, name)
            else:
                # Fix: an unknown extension previously fell through and
                # silently reused the previous iteration's URL (or raised
                # a NameError on the first iteration).
                raise RuntimeError('Unknown artifact type for file %s' % name)
            abs_file_path = os.path.join(tmp_dir, name)
            print('  Downloading %s' % (url))
            downloaded_files.append(abs_file_path)
            urllib.request.urlretrieve(url, abs_file_path)
            # the .sha1 companion file is fetched for checksum validation
            url = ''.join([url, '.sha1'])
            checksum_file = os.path.join(tmp_dir, ''.join([abs_file_path, '.sha1']))
            urllib.request.urlretrieve(url, checksum_file)
            print('  Verifying checksum %s' % (checksum_file))
            run('cd %s && sha1sum -c %s' % (tmp_dir, os.path.basename(checksum_file)))
        smoke_test_release(release, downloaded_files, get_tag_hash('v%s' % release), plugins)
        print(' SUCCESS')
    finally:
        shutil.rmtree(tmp_dir)
def smoke_test_release(release, files, expected_hash, plugins):
    """Smoke test the given release artifacts.

    Unpacks each tar.gz/zip artifact, installs *plugins*, starts the
    node, and checks: reported version, snapshot flag, build hash, REST
    spec tests, and that exactly the installed plugins are listed in
    ``_nodes``.

    :param release: expected version number string
    :param files: artifact paths to test (non-archives are skipped)
    :param expected_hash: git commit hash the build must report
    :param plugins: iterable of (name, install-spec) pairs
    :raises RuntimeError: on any mismatch or non-200 response
    """
    for release_file in files:
        if not os.path.isfile(release_file):
            raise RuntimeError('Smoketest failed missing file %s' % (release_file))
        tmp_dir = tempfile.mkdtemp()
        if release_file.endswith('tar.gz'):
            run('tar -xzf %s -C %s' % (release_file, tmp_dir))
        elif release_file.endswith('zip'):
            run('unzip %s -d %s' % (release_file, tmp_dir))
        else:
            log('Skip SmokeTest for [%s]' % release_file)
            continue # nothing to do here
        es_run_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'bin/elasticsearch')
        print(' Smoke testing package [%s]' % release_file)
        es_plugin_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release),'bin/plugin')
        # plugin_names tracks what we installed so the _nodes listing can
        # be checked for both missing and unexpected plugins below
        plugin_names = {}
        for name, plugin in plugins:
            print(' Install plugin [%s] from [%s]' % (name, plugin))
            run('%s; %s install %s' % (java_exe(), es_plugin_path, plugin))
            plugin_names[name] = True
        background = '-d'
        print(' Starting elasticsearch deamon from [%s]' % os.path.join(tmp_dir, 'elasticsearch-%s' % release))
        run('%s; %s -Des.node.name=smoke_tester -Des.cluster.name=prepare_release -Des.discovery.zen.ping.multicast.enabled=false -Des.script.inline=on -Des.script.indexed=on %s'
            % (java_exe(), es_run_path, background))
        conn = HTTPConnection('127.0.0.1', 9200, 20);
        wait_for_node_startup()
        try:
            try:
                conn.request('GET', '')
                res = conn.getresponse()
                if res.status == 200:
                    version = json.loads(res.read().decode("utf-8"))['version']
                    if release != version['number']:
                        raise RuntimeError('Expected version [%s] but was [%s]' % (release, version['number']))
                    if version['build_snapshot']:
                        raise RuntimeError('Expected non snapshot version')
                    if version['build_hash'].strip() != expected_hash:
                        raise RuntimeError('HEAD hash does not match expected [%s] but got [%s]' % (expected_hash, version['build_hash']))
                    print(' Running REST Spec tests against package [%s]' % release_file)
                    run_mvn('test -Dtests.cluster=%s -Dtests.jvms=1 -Dtests.class=*.*RestTests' % ("127.0.0.1:9300"))
                    print(' Verify if plugins are listed in _nodes')
                    conn.request('GET', '/_nodes?plugin=true&pretty=true')
                    res = conn.getresponse()
                    if res.status == 200:
                        nodes = json.loads(res.read().decode("utf-8"))['nodes']
                        for _, node in nodes.items():
                            node_plugins = node['plugins']
                            for node_plugin in node_plugins:
                                if not plugin_names.get(node_plugin['name'], False):
                                    raise RuntimeError('Unexpeced plugin %s' % node_plugin['name'])
                                del plugin_names[node_plugin['name']]
                        if plugin_names:
                            raise RuntimeError('Plugins not loaded %s' % list(plugin_names.keys()))
                    else:
                        raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
                else:
                    raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
            finally:
                # always try to stop the node we started, even on failure
                conn.request('POST', '/_cluster/nodes/_local/_shutdown')
                time.sleep(1) # give the node some time to shut down
                if conn.getresponse().status != 200:
                    raise RuntimeError('Expected HTTP 200 but got %s on node shutdown' % res.status)
        finally:
            conn.close()
        shutil.rmtree(tmp_dir)
def merge_tag_push(remote, src_branch, release_version, dry_run):
    """Merge the release branch back into *src_branch*, tag it, and
    (unless *dry_run*) push both the commit and the tag to *remote*."""
    run('git checkout %s' % src_branch)
    run('git merge %s' % release_branch(release_version))
    run('git tag v%s' % release_version)
    if dry_run:
        print('  dryrun [True] -- skipping push to remote %s' % remote)
        return
    run('git push %s %s' % (remote, src_branch)) # push the commit
    run('git push %s v%s' % (remote, release_version)) # push the tag
def publish_repositories(version, dry_run=True):
    """Trigger the deb/rpm package repository update for a release.

    :param version: the source branch identifier (e.g. '1.5', '2.0')
                    forwarded to dev-tools/build_repositories.sh; the
                    caller passes the release source branch here
    :param dry_run: when True (default) nothing is executed
    """
    if dry_run:
        print('Skipping package repository update')
    else:
        # Fix: use the ``version`` parameter. The original referenced the
        # module-global ``src_branch``, which only worked because the sole
        # caller happens to pass src_branch as ``version`` and breaks with
        # a NameError if this function is used standalone.
        print('Triggering repository update for version %s - calling dev-tools/build_repositories.sh %s' % (version, version))
        # version is a branch-like string (1.5/1.6/2.0/etc.) so we can use it
        run('dev-tools/build_repositories.sh %s' % version)
def print_sonatype_notice():
    """Print a credentials reminder unless ~/.m2/settings.xml already
    configures the sonatype snapshot server."""
    settings = os.path.join(os.path.expanduser('~'), '.m2/settings.xml')
    if os.path.isfile(settings):
        with open(settings, encoding='utf-8') as settings_file:
            configured = any(
                line.strip() == '<id>sonatype-nexus-snapshots</id>'
                for line in settings_file)
        if configured:
            # we found the indicator no need to print the warning
            return
    print("""
    NOTE: No sonatype settings detected, make sure you have configured
    your sonatype credentials in '~/.m2/settings.xml':
    <settings>
    ...
    <servers>
    <server>
    <id>sonatype-nexus-snapshots</id>
    <username>your-jira-id</username>
    <password>your-jira-pwd</password>
    </server>
    <server>
    <id>sonatype-nexus-staging</id>
    <username>your-jira-id</username>
    <password>your-jira-pwd</password>
    </server>
    </servers>
    ...
    </settings>
    """)
def check_command_exists(name, cmd):
    """Probe for *name* by running *cmd* in a shell; raise RuntimeError
    with an install hint when the command exits non-zero."""
    try:
        subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        raise RuntimeError('Could not run command %s - please make sure it is installed' % (name))
# Files rewritten during a release: the Java class that declares the
# version constants, and the maven project descriptor.
VERSION_FILE = 'src/main/java/org/elasticsearch/Version.java'
POM_FILE = 'pom.xml'
# finds the highest available bwc version to test against
def find_bwc_version(release_version, bwc_dir='backwards'):
    """Return the highest version strictly below *release_version* that
    has an elasticsearch-<version> checkout under *bwc_dir*, or None."""
    log(' Lookup bwc version in directory [%s]' % bwc_dir)
    bwc_version = None
    if os.path.exists(bwc_dir) and os.path.isdir(bwc_dir):
        ceiling = [int(x) for x in release_version.split('.')]
        for entry in os.listdir(bwc_dir):
            if not entry.startswith('elasticsearch-'):
                continue
            if not os.path.isdir(os.path.join(bwc_dir, entry)):
                continue
            candidate = [int(x) for x in entry[len('elasticsearch-'):].split('.')]
            # bwc tests only run against strictly smaller versions
            if candidate < ceiling:
                current_best = ([int(x) for x in bwc_version.split('.')]
                                if bwc_version else None)
                if current_best is None or candidate > current_best:
                    bwc_version = entry[len('elasticsearch-'):]
        log(' Using bwc version [%s]' % bwc_version)
    else:
        log(' bwc directory [%s] does not exists or is not a directory - skipping' % bwc_dir)
    return bwc_version
def ensure_checkout_is_clean(branchName):
    """Verify the working copy is pristine: no local modifications, no
    untracked files, on *branchName*, and fully synced with origin.

    NOTE: the branch check is a bit weak since the script defaults to
    the current branch anyway.
    """
    # Make sure no local mods:
    diff_stat = subprocess.check_output('git diff --shortstat', shell=True)
    if diff_stat:
        raise RuntimeError('git diff --shortstat is non-empty: got:\n%s' % diff_stat)
    status = subprocess.check_output('git status', shell=True).decode('utf-8', errors='replace')
    # Make sure no untracked files:
    if 'Untracked files:' in status:
        raise RuntimeError('git status shows untracked files: got:\n%s' % status)
    # Make sure we are on the right branch:
    if 'On branch %s' % branchName not in status:
        raise RuntimeError('git status does not show branch %s: got:\n%s' % (branchName, status))
    # Make sure we have all changes from origin:
    if 'is behind' in status:
        raise RuntimeError('git status shows not all changes pulled from origin; try running "git pull origin %s": got:\n%s' % (branchName, status))
    # Make sure we have no local unpushed changes (this is supposed to be a clean area):
    if 'is ahead' in status:
        raise RuntimeError('git status shows local commits; try running "git fetch origin", "git checkout %s", "git reset --hard origin/%s": got:\n%s' % (branchName, branchName, status))
# Checks all source files for //NORELEASE comments
def check_norelease(path='src'):
    """Walk *path* and raise if any Java source still contains a
    'norelease' marker (case-insensitive whole word).

    :param path: root directory to scan (defaults to 'src')
    :raises RuntimeError: naming the file and line of the first marker found
    """
    pattern = re.compile(r'\bnorelease\b', re.IGNORECASE)
    for root, _, file_names in os.walk(path):
        for file_name in fnmatch.filter(file_names, '*.java'):
            full_path = os.path.join(root, file_name)
            with open(full_path, 'r', encoding='utf-8') as current_file:
                # enumerate(..., 1) replaces the original manual counter
                for line_number, line in enumerate(current_file, 1):
                    if pattern.search(line):
                        raise RuntimeError('Found //norelease comment in %s line %s' % (full_path, line_number))
def run_and_print(text, run_function):
    """Print *text*, invoke *run_function*, and report colored OK/NOT OK.

    :return: True when the callable completed without RuntimeError
    """
    print(text, end='')
    try:
        run_function()
    except RuntimeError:
        print(COLOR_FAIL + 'NOT OK' + COLOR_END)
        return False
    print(COLOR_OK + 'OK' + COLOR_END)
    return True
def check_env_var(text, env_var):
    """Print *text* and report colored OK/NOT OK depending on whether
    *env_var* is present in the process environment.

    :return: True when the variable is set
    """
    print(text, end='')
    if env_var in env:
        print(COLOR_OK + 'OK' + COLOR_END)
        return True
    print(COLOR_FAIL + 'NOT OK' + COLOR_END)
    return False
def check_environment_and_commandline_tools(check_only):
    """Verify all env vars and command line tools a release needs.

    Prints one OK/NOT OK line per check. With *check_only* the script
    exits after reporting; otherwise it exits only if a check failed.

    NOTE(review): the first label says AWS_SECRET_ACCESS_KEY_ID but the
    variable actually checked is AWS_SECRET_ACCESS_KEY — confirm which
    name the upload tooling reads.
    NOTE(review): failures exit with status 0, so callers cannot detect
    them from the exit code.
    """
    checks = list()
    checks.append(check_env_var('Checking for AWS env configuration AWS_SECRET_ACCESS_KEY_ID... ', 'AWS_SECRET_ACCESS_KEY'))
    checks.append(check_env_var('Checking for AWS env configuration AWS_ACCESS_KEY_ID... ', 'AWS_ACCESS_KEY_ID'))
    checks.append(check_env_var('Checking for SONATYPE env configuration SONATYPE_USERNAME... ', 'SONATYPE_USERNAME'))
    checks.append(check_env_var('Checking for SONATYPE env configuration SONATYPE_PASSWORD... ', 'SONATYPE_PASSWORD'))
    checks.append(check_env_var('Checking for GPG env configuration GPG_KEY_ID... ', 'GPG_KEY_ID'))
    checks.append(check_env_var('Checking for GPG env configuration GPG_PASSPHRASE... ', 'GPG_PASSPHRASE'))
    checks.append(check_env_var('Checking for S3 repo upload env configuration S3_BUCKET_SYNC_TO... ', 'S3_BUCKET_SYNC_TO'))
    checks.append(check_env_var('Checking for git env configuration GIT_AUTHOR_NAME... ', 'GIT_AUTHOR_NAME'))
    checks.append(check_env_var('Checking for git env configuration GIT_AUTHOR_EMAIL... ', 'GIT_AUTHOR_EMAIL'))
    checks.append(run_and_print('Checking command: rpm... ', partial(check_command_exists, 'rpm', 'rpm --version')))
    checks.append(run_and_print('Checking command: dpkg... ', partial(check_command_exists, 'dpkg', 'dpkg --version')))
    checks.append(run_and_print('Checking command: gpg... ', partial(check_command_exists, 'gpg', 'gpg --version')))
    checks.append(run_and_print('Checking command: expect... ', partial(check_command_exists, 'expect', 'expect -v')))
    checks.append(run_and_print('Checking command: createrepo... ', partial(check_command_exists, 'createrepo', 'createrepo --version')))
    checks.append(run_and_print('Checking command: s3cmd... ', partial(check_command_exists, 's3cmd', 's3cmd --version')))
    checks.append(run_and_print('Checking command: apt-ftparchive... ', partial(check_command_exists, 'apt-ftparchive', 'apt-ftparchive --version')))
    # boto, check error code being returned
    location = os.path.dirname(os.path.realpath(__file__))
    command = 'python %s/upload-s3.py -h' % (location)
    checks.append(run_and_print('Testing boto python dependency... ', partial(check_command_exists, 'python-boto', command)))
    checks.append(run_and_print('Checking java version... ', partial(verify_java_version, '1.7')))
    checks.append(run_and_print('Checking java mvn version... ', partial(verify_mvn_java_version, '1.7', MVN)))
    if check_only:
        sys.exit(0)
    if False in checks:
        print("Exiting due to failing checks")
        sys.exit(0)
if __name__ == '__main__':
    # --- command-line parsing -------------------------------------------
    parser = argparse.ArgumentParser(description='Builds and publishes a Elasticsearch Release')
    parser.add_argument('--branch', '-b', metavar='RELEASE_BRANCH', default=get_current_branch(),
                        help='The branch to release from. Defaults to the current branch.')
    parser.add_argument('--cpus', '-c', metavar='1', default=1,
                        help='The number of cpus to use for running the test. Default is [1]')
    parser.add_argument('--skiptests', '-t', dest='tests', action='store_false',
                        help='Skips tests before release. Tests are run by default.')
    parser.set_defaults(tests=True)
    parser.add_argument('--remote', '-r', metavar='origin', default='origin',
                        help='The remote to push the release commit and tag to. Default is [origin]')
    parser.add_argument('--publish', '-d', dest='dryrun', action='store_false',
                        help='Publishes the release. Disable by default.')
    parser.add_argument('--smoke', '-s', dest='smoke', default='',
                        help='Smoke tests the given release')
    parser.add_argument('--bwc', '-w', dest='bwc', metavar='backwards', default='backwards',
                        help='Backwards compatibility version path to use to run compatibility tests against')
    parser.add_argument('--check-only', dest='check_only', action='store_true',
                        help='Checks and reports for all requirements and then exits')
    parser.set_defaults(dryrun=True)
    parser.set_defaults(smoke=None)
    parser.set_defaults(check_only=False)
    args = parser.parse_args()
    bwc_path = args.bwc
    src_branch = args.branch
    remote = args.remote
    run_tests = args.tests
    dry_run = args.dryrun
    cpus = args.cpus
    # --smoke switches to smoketest-only mode; otherwise a full build runs
    build = not args.smoke
    smoke_test_version = args.smoke
    # --- environment sanity checks --------------------------------------
    check_environment_and_commandline_tools(args.check_only)
    # we print a notice if we can not find the relevant infos in the ~/.m2/settings.xml
    print_sonatype_notice()
    # we require to build with 1.7
    verify_java_version('1.7')
    verify_mvn_java_version('1.7', MVN)
    if os.path.exists(LOG):
        raise RuntimeError('please remove old release log %s first' % LOG)
    if not dry_run:
        print('WARNING: dryrun is set to "false" - this will push and publish the release')
        input('Press Enter to continue...')
    print(''.join(['-' for _ in range(80)]))
    print('Preparing Release from branch [%s] running tests: [%s] dryrun: [%s]' % (src_branch, run_tests, dry_run))
    print(' JAVA_HOME is [%s]' % JAVA_HOME)
    print(' Running with maven command: [%s] ' % (MVN))
    if build:
        # --- full release build ------------------------------------------
        check_norelease(path='src')
        ensure_checkout_is_clean(src_branch)
        verify_lucene_version()
        release_version = find_release_version(src_branch)
        ensure_no_open_tickets(release_version)
        if not dry_run:
            smoke_test_version = release_version
        head_hash = get_head_hash()
        run_mvn('clean') # clean the env!
        print(' Release version: [%s]' % release_version)
        create_release_branch(remote, src_branch, release_version)
        print(' Created release branch [%s]' % (release_branch(release_version)))
        success = False
        try:
            pending_files = [POM_FILE, VERSION_FILE]
            remove_maven_snapshot(POM_FILE, release_version)
            remove_version_snapshot(VERSION_FILE, release_version)
            print(' Done removing snapshot version')
            add_pending_files(*pending_files) # expects var args use * to expand
            commit_release(release_version)
            pending_files = update_reference_docs(release_version)
            version_head_hash = None
            # split commits for docs and version to enable easy cherry-picking
            if pending_files:
                add_pending_files(*pending_files) # expects var args use * to expand
                commit_feature_flags(release_version)
                version_head_hash = get_head_hash()
            print(' Committed release version [%s]' % release_version)
            print(''.join(['-' for _ in range(80)]))
            print('Building Release candidate')
            input('Press Enter to continue...')
            if not dry_run:
                print(' Running maven builds now and publish to Sonatype and S3 - run-tests [%s]' % run_tests)
            else:
                print(' Running maven builds now run-tests [%s]' % run_tests)
            build_release(release_version, run_tests=run_tests, dry_run=dry_run, cpus=cpus, bwc_version=find_bwc_version(release_version, bwc_path))
            artifacts = get_artifacts(release_version)
            smoke_test_release(release_version, artifacts, get_head_hash(), PLUGINS)
            print(''.join(['-' for _ in range(80)]))
            print('Finish Release -- dry_run: %s' % dry_run)
            input('Press Enter to continue...')
            print(' merge release branch, tag and push to %s %s -- dry_run: %s' % (remote, src_branch, dry_run))
            merge_tag_push(remote, src_branch, release_version, dry_run)
            print(' Updating package repositories -- dry_run: %s' % dry_run)
            publish_repositories(src_branch, dry_run=dry_run)
            cherry_pick_command = '.'
            if version_head_hash:
                cherry_pick_command = ' and cherry-pick the documentation changes: \'git cherry-pick %s\' to the development branch' % (version_head_hash)
            pending_msg = """
    Release successful pending steps:
    * create a new vX.Y.Z label on github for the next release, with label color #dddddd (https://github.com/elastic/elasticsearch/labels)
    * publish the maven artifacts on Sonatype: https://oss.sonatype.org/index.html
    - here is a guide: http://central.sonatype.org/pages/releasing-the-deployment.html
    * check if the release is there https://oss.sonatype.org/content/repositories/releases/org/elasticsearch/elasticsearch/%(version)s
    * announce the release on the website / blog post
    * tweet about the release
    * announce the release in the google group/mailinglist
    * Move to a Snapshot version to the current branch for the next point release%(cherry_pick)s
    """
            print(pending_msg % { 'version' : release_version, 'cherry_pick' : cherry_pick_command} )
            success = True
        finally:
            # roll back the working copy on failure; on a successful dry
            # run drop the local tag/commit that will never be pushed
            if not success:
                run('git reset --hard HEAD')
                run('git checkout %s' % src_branch)
            elif dry_run:
                run('git reset --hard %s' % head_hash)
                run('git tag -d v%s' % release_version)
            # we delete this one anyways
            run('git branch -D %s' % (release_branch(release_version)))
    else:
        print("Skipping build - smoketest only against version %s" % smoke_test_version)
        run_mvn('clean') # clean the env!
    if smoke_test_version:
        fetch(remote)
        download_and_verify(smoke_test_version, artifact_names(smoke_test_version), plugins=PLUGINS)
|
apache-2.0
|
kastriothaliti/techstitution
|
venv/lib/python3.5/site-packages/flask/config.py
|
781
|
6234
|
# -*- coding: utf-8 -*-
"""
flask.config
~~~~~~~~~~~~
Implements the configuration related objects.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import imp
import os
import errno
from werkzeug.utils import import_string
from ._compat import string_types
class ConfigAttribute(object):
    """Descriptor that proxies an attribute to ``obj.config[name]``,
    optionally running the stored value through a converter on read."""

    def __init__(self, name, get_converter=None):
        self.__name__ = name
        self.get_converter = get_converter

    def __get__(self, obj, type=None):
        # class-level access returns the descriptor itself
        if obj is None:
            return self
        value = obj.config[self.__name__]
        if self.get_converter is None:
            return value
        return self.get_converter(value)

    def __set__(self, obj, value):
        obj.config[self.__name__] = value
class Config(dict):
"""Works exactly like a dict but provides ways to fill it from files
or special dictionaries. There are two common patterns to populate the
config.
Either you can fill the config from a config file::
app.config.from_pyfile('yourconfig.cfg')
Or alternatively you can define the configuration options in the
module that calls :meth:`from_object` or provide an import path to
a module that should be loaded. It is also possible to tell it to
use the same module and with that provide the configuration values
just before the call::
DEBUG = True
SECRET_KEY = 'development key'
app.config.from_object(__name__)
In both cases (loading from any Python file or loading from modules),
only uppercase keys are added to the config. This makes it possible to use
lowercase values in the config file for temporary values that are not added
to the config or to define the config keys in the same file that implements
the application.
Probably the most interesting way to load configurations is from an
environment variable pointing to a file::
app.config.from_envvar('YOURAPPLICATION_SETTINGS')
In this case before launching the application you have to set this
environment variable to the file you want to use. On Linux and OS X
use the export statement::
export YOURAPPLICATION_SETTINGS='/path/to/config/file'
On windows use `set` instead.
:param root_path: path to which files are read relative from. When the
config object is created by the application, this is
the application's :attr:`~flask.Flask.root_path`.
:param defaults: an optional dictionary of default values
"""
def __init__(self, root_path, defaults=None):
dict.__init__(self, defaults or {})
self.root_path = root_path
def from_envvar(self, variable_name, silent=False):
"""Loads a configuration from an environment variable pointing to
a configuration file. This is basically just a shortcut with nicer
error messages for this line of code::
app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
:param variable_name: name of the environment variable
:param silent: set to `True` if you want silent failure for missing
files.
:return: bool. `True` if able to load config, `False` otherwise.
"""
rv = os.environ.get(variable_name)
if not rv:
if silent:
return False
raise RuntimeError('The environment variable %r is not set '
'and as such configuration could not be '
'loaded. Set this variable and make it '
'point to a configuration file' %
variable_name)
return self.from_pyfile(rv, silent=silent)
def from_pyfile(self, filename, silent=False):
"""Updates the values in the config from a Python file. This function
behaves as if the file was imported as module with the
:meth:`from_object` function.
:param filename: the filename of the config. This can either be an
absolute filename or a filename relative to the
root path.
:param silent: set to `True` if you want silent failure for missing
files.
.. versionadded:: 0.7
`silent` parameter.
"""
filename = os.path.join(self.root_path, filename)
d = imp.new_module('config')
d.__file__ = filename
try:
with open(filename) as config_file:
exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
except IOError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
self.from_object(d)
return True
def from_object(self, obj):
"""Updates the values from the given object. An object can be of one
of the following two types:
- a string: in this case the object with that name will be imported
- an actual object reference: that object is used directly
Objects are usually either modules or classes.
Just the uppercase variables in that object are stored in the config.
Example usage::
app.config.from_object('yourapplication.default_config')
from yourapplication import default_config
app.config.from_object(default_config)
You should not use this function to load the actual configuration but
rather configuration defaults. The actual config should be loaded
with :meth:`from_pyfile` and ideally from a location not within the
package because the package might be installed system wide.
:param obj: an import name or object
"""
if isinstance(obj, string_types):
obj = import_string(obj)
for key in dir(obj):
if key.isupper():
self[key] = getattr(obj, key)
    def __repr__(self):
        # Render as "<Config {...}>" using the plain dict repr of the
        # stored values, so subclass names show up in debugging output.
        return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
|
gpl-3.0
|
isandlaTech/cohorte-demos
|
led/dump/led-demo-yun/cohorte/dist/cohorte-1.0.0-20141216.234517-57-python-distribution/repo/requests/packages/chardet/langthaimodel.py
|
2930
|
11275
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following result for thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences:7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
# Language-model bundle tying the tables above together; shipped as part
# of the chardet package for detecting Thai text in the TIS-620 encoding.
TIS620ThaiModel = {
  'charToOrderMap': TIS620CharToOrderMap,    # byte -> frequency-order table above
  'precedenceMatrix': ThaiLangModel,         # character-pair likelihood matrix above
  'mTypicalPositiveRatio': 0.926386,         # share of the top-512 sequences in the sample
  'keepEnglishLetter': False,                # ASCII letters carry no signal for Thai
  'charsetName': "TIS-620"
}
# flake8: noqa
|
apache-2.0
|
hojel/calibre
|
src/calibre/utils/opensearch/query.py
|
24
|
2570
|
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
__license__ = 'GPL 3'
__copyright__ = '2006, Ed Summers <ehs@pobox.com>'
__docformat__ = 'restructuredtext en'
from urlparse import urlparse, urlunparse, parse_qs
from urllib import urlencode
class Query(object):
    '''
    Represents an opensearch query.  Really this class is just a
    helper for substituting values into the macros in a format.

    format = 'http://beta.indeed.com/opensearch?q={searchTerms}&start={startIndex}&limit={count}'
    q = Query(format)
    q.searchTerms = 'zx81'
    q.startIndex = 1
    q.count = 25
    print q.url()
    '''

    # Macro names defined by the OpenSearch URL template syntax.
    standard_macros = ['searchTerms', 'count', 'startIndex', 'startPage',
                       'language', 'outputEncoding', 'inputEncoding']

    def __init__(self, format):
        '''
        Create a query object by passing it the url format obtained
        from the opensearch Description.
        '''
        self.format = format

        # unpack the url to a tuple
        self.url_parts = urlparse(format)

        # unpack the query string to a dictionary
        self.query_string = parse_qs(self.url_parts[4])

        # look for standard macros and create a mapping of the
        # opensearch names to the service specific ones
        # so q={searchTerms} will result in a mapping between searchTerms and q
        self.macro_map = {}
        for key, values in self.query_string.items():
            # TODO eventually optional/required params should be
            # distinguished somehow (the ones with/without trailing ?)
            macro = values[0].replace('{', '').replace('}', '').replace('?', '')
            if macro in Query.standard_macros:
                self.macro_map[macro] = key

    def url(self):
        '''
        Return the URL with each macro that has a matching attribute on
        this object substituted in, and each unset macro removed.
        '''
        # copy the original query string
        query_string = dict(self.query_string)

        # iterate through macros and set the position in the querystring
        for macro, name in self.macro_map.items():
            if hasattr(self, macro):
                # set the name/value pair
                query_string[name] = [getattr(self, macro)]
            else:
                # remove the name/value pair
                del query_string[name]

        # copy the url parts and substitute in our new query string
        url_parts = list(self.url_parts)
        url_parts[4] = urlencode(query_string, 1)

        # recompose and return url
        return urlunparse(tuple(url_parts))

    def has_macro(self, macro):
        '''
        Return True if the format string declared the given macro.
        '''
        # dict.has_key() was removed in Python 3; the `in` operator is
        # equivalent and works on both Python 2 and 3.
        return macro in self.macro_map
|
gpl-3.0
|
clofresh/xbmc-vhx
|
resources/lib/requests/sessions.py
|
4
|
8911
|
# -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
from .defaults import defaults
from .models import Request
from .hooks import dispatch_hook
from .utils import header_expand
from .packages.urllib3.poolmanager import PoolManager
def merge_kwargs(local_kwarg, default_kwarg):
    """Merge a per-request kwarg with the session-level default.

    If a key in the local dictionary is set to None, it is removed from
    the merged result — this lets callers explicitly cancel a default.
    """

    # Nothing to merge against.
    if default_kwarg is None:
        return local_kwarg

    # A bare string always wins outright.
    if isinstance(local_kwarg, basestring):
        return local_kwarg

    # No local override at all: keep the default.
    if local_kwarg is None:
        return default_kwarg

    # Bypass if not a dictionary (e.g. timeout)
    if not hasattr(default_kwarg, 'items'):
        return local_kwarg

    # Start from the defaults and layer the local values on top.
    merged = default_kwarg.copy()
    merged.update(local_kwarg)

    # A local value of None means "drop this key entirely".
    for key, value in local_kwarg.items():
        if value is None:
            del merged[key]

    return merged
class Session(object):
    """A Requests session.

    Persists settings (headers, cookies, auth, proxies, hooks, params,
    config) across requests and merges them into every request issued
    through :meth:`request` or the verb shortcuts below.
    """

    # Attributes that request() merges between the session and the call.
    __attrs__ = [
        'headers', 'cookies', 'auth', 'timeout', 'proxies', 'hooks',
        'params', 'config']

    def __init__(self,
        headers=None,
        cookies=None,
        auth=None,
        timeout=None,
        proxies=None,
        hooks=None,
        params=None,
        config=None,
        verify=True):

        self.headers = headers or {}
        self.cookies = cookies or {}
        self.auth = auth
        self.timeout = timeout
        self.proxies = proxies or {}
        self.hooks = hooks or {}
        self.params = params or {}
        self.config = config or {}
        self.verify = verify

        # Fill in any config keys the caller did not supply from the
        # module-level defaults.
        for (k, v) in defaults.items():
            self.config.setdefault(k, v)

        # Connection pool shared by all requests made through this session.
        self.poolmanager = PoolManager(
            num_pools=self.config.get('pool_connections'),
            maxsize=self.config.get('pool_maxsize')
        )

        # Set up a CookieJar to be used by default
        # NOTE(review): this overwrites the `self.cookies` assignment made
        # above, so the earlier `cookies or {}` line is effectively dead.
        self.cookies = {}

        # Add passed cookies in.
        if cookies is not None:
            self.cookies.update(cookies)

    def __repr__(self):
        return '<requests-client at 0x%x>' % (id(self))

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # No cleanup performed; the context-manager form is convenience only.
        pass

    def request(self, method, url,
        params=None,
        data=None,
        headers=None,
        cookies=None,
        files=None,
        auth=None,
        timeout=None,
        allow_redirects=False,
        proxies=None,
        hooks=None,
        return_response=True,
        config=None,
        prefetch=False,
        verify=None):
        """Constructs and sends a :class:`Request <Request>`.
        Returns :class:`Response <Response>` object.

        :param method: method for the new :class:`Request` object.
        :param url: URL for the new :class:`Request` object.
        :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
        :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
        :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
        :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
        :param files: (optional) Dictionary of 'filename': file-like-objects for multipart encoding upload.
        :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
        :param timeout: (optional) Float describing the timeout of the request.
        :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
        :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
        :param return_response: (optional) If False, an un-sent Request object will returned.
        :param config: (optional) A configuration dictionary.
        :param prefetch: (optional) if ``True``, the response content will be immediately downloaded.
        :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
        """

        method = str(method).upper()

        # Default empty dicts for dict params.
        cookies = {} if cookies is None else cookies
        data = {} if data is None else data
        files = {} if files is None else files
        headers = {} if headers is None else headers
        params = {} if params is None else params
        hooks = {} if hooks is None else hooks

        if verify is None:
            verify = self.verify

        # use session's hooks as defaults
        for key, cb in self.hooks.iteritems():
            hooks.setdefault(key, cb)

        # Expand header values.
        if headers:
            for k, v in headers.items() or {}:
                headers[k] = header_expand(v)

        # Bundle everything the Request constructor needs; the session-level
        # values are merged in below via __attrs__.
        args = dict(
            method=method,
            url=url,
            data=data,
            params=params,
            headers=headers,
            cookies=cookies,
            files=files,
            auth=auth,
            hooks=hooks,
            timeout=timeout,
            allow_redirects=allow_redirects,
            proxies=proxies,
            config=config,
            verify=verify,
            _poolmanager=self.poolmanager
        )

        # Merge local kwargs with session kwargs.
        for attr in self.__attrs__:
            session_val = getattr(self, attr, None)
            local_val = args.get(attr)
            args[attr] = merge_kwargs(local_val, session_val)

        # Arguments manipulation hook.
        args = dispatch_hook('args', args['hooks'], args)

        # Create the (empty) response.
        r = Request(**args)

        # Give the response some context.
        r.session = self

        # Don't send if asked nicely.
        if not return_response:
            return r

        # Send the HTTP Request.
        r.send(prefetch=prefetch)

        # Send any cookies back up the to the session.
        self.cookies.update(r.response.cookies)

        # Return the response.
        return r.response

    def get(self, url, **kwargs):
        """Sends a GET request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param **kwargs: Optional arguments that ``request`` takes.
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('get', url, **kwargs)

    def options(self, url, **kwargs):
        """Sends a OPTIONS request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param **kwargs: Optional arguments that ``request`` takes.
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('options', url, **kwargs)

    def head(self, url, **kwargs):
        """Sends a HEAD request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param **kwargs: Optional arguments that ``request`` takes.
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('head', url, **kwargs)

    def post(self, url, data=None, **kwargs):
        """Sends a POST request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
        :param **kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('post', url, data=data, **kwargs)

    def put(self, url, data=None, **kwargs):
        """Sends a PUT request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
        :param **kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('put', url, data=data, **kwargs)

    def patch(self, url, data=None, **kwargs):
        """Sends a PATCH request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
        :param **kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('patch', url, data=data, **kwargs)

    def delete(self, url, **kwargs):
        """Sends a DELETE request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param **kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('delete', url, **kwargs)
def session(**kwargs):
    """Returns a :class:`Session` for context-management.

    All keyword arguments are forwarded unchanged to the
    :class:`Session` constructor.
    """
    return Session(**kwargs)
|
bsd-3-clause
|
m0ppers/arangodb
|
3rdParty/V8/V8-5.0.71.39/tools/perf-to-html.py
|
32
|
9164
|
#!/usr/bin/env python
# Copyright 2015 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''
python %prog
Convert a perf trybot JSON file into a pleasing HTML page. It can read
from standard input or via the --filename option. Examples:
cat results.json | %prog --title "ia32 results"
%prog -f results.json -t "ia32 results" -o results.html
'''
import commands
import json
import math
from optparse import OptionParser
import os
import shutil
import sys
import tempfile
# Report thresholds: a delta is coloured green/red once it exceeds
# PERCENT_CONSIDERED_SIGNIFICANT percent, and bolded once its two-sided
# p-value drops below PROBABILITY_CONSIDERED_SIGNIFICANT (see Result).
PERCENT_CONSIDERED_SIGNIFICANT = 0.5
PROBABILITY_CONSIDERED_SIGNIFICANT = 0.02
# NOTE(review): not referenced anywhere below — confirm before removing.
PROBABILITY_CONSIDERED_MEANINGLESS = 0.05
def ComputeZ(baseline_avg, baseline_sigma, mean, n):
  """Return the z-score of `mean` against the baseline distribution.

  A zero baseline sigma would divide by zero, so a huge sentinel z-score
  is returned instead (treated as maximally significant downstream).
  """
  if baseline_sigma == 0:
    return 1000.0
  standard_error = baseline_sigma / math.sqrt(n)
  return abs((mean - baseline_avg) / standard_error)
# Values from http://www.fourmilab.ch/rpkp/experiments/analysis/zCalc.html
def ComputeProbability(z):
  """Map a z-score to an approximate two-sided probability.

  Table-driven equivalent of a threshold ladder; the cutoffs come from
  the z-score calculator referenced in the comment above.
  """
  cutoffs = (
      (2.575829, 0),     # p 0.005: two sided < 0.01
      (2.326348, 0.01),  # p 0.010
      (2.170091, 0.02),  # p 0.015
      (2.053749, 0.03),  # p 0.020
      (1.959964, 0.04),  # p 0.025: two sided < 0.05
      (1.880793, 0.05),  # p 0.030
      (1.811910, 0.06),  # p 0.035
      (1.750686, 0.07),  # p 0.040
      (1.695397, 0.08),  # p 0.045
      (1.644853, 0.09),  # p 0.050: two sided < 0.10
      (1.281551, 0.10),  # p 0.100: two sided < 0.20
  )
  for cutoff, probability in cutoffs:
    if z > cutoff:
      return probability
  return 0.20  # two sided p >= 0.20
class Result:
  """One test's result with and without the patch, plus significance info.

  `test_name` is accepted for interface compatibility but not stored.
  """

  def __init__(self, test_name, count, hasScoreUnits, result, sigma,
               master_result, master_sigma):
    self.result_ = float(result)
    self.sigma_ = float(sigma)
    self.master_result_ = float(master_result)
    self.master_sigma_ = float(master_sigma)
    self.significant_ = False
    self.notable_ = 0
    self.percentage_string_ = ""
    # compute notability and significance.
    # For "score" units bigger is better; otherwise smaller is better,
    # so the ratio is inverted.
    if hasScoreUnits:
      delta_percent = 100 * self.result_ / self.master_result_ - 100
    else:
      delta_percent = 100 * self.master_result_ / self.result_ - 100
    if abs(delta_percent) > 0.1:
      self.percentage_string_ = "%3.1f" % (delta_percent)
    probability = ComputeProbability(
        ComputeZ(self.master_result_, self.master_sigma_, self.result_, count))
    if probability < PROBABILITY_CONSIDERED_SIGNIFICANT:
      self.significant_ = True
    if delta_percent >= PERCENT_CONSIDERED_SIGNIFICANT:
      self.notable_ = 1
    elif delta_percent <= -PERCENT_CONSIDERED_SIGNIFICANT:
      self.notable_ = -1

  def result(self):
    return self.result_

  def sigma(self):
    return self.sigma_

  def master_result(self):
    return self.master_result_

  def master_sigma(self):
    return self.master_sigma_

  def percentage_string(self):
    return self.percentage_string_

  def isSignificant(self):
    return self.significant_

  def isNotablyPositive(self):
    return self.notable_ > 0

  def isNotablyNegative(self):
    return self.notable_ < 0
class Benchmark:
  """All Results of one benchmark suite, keyed by stripped test name."""

  def __init__(self, name, data):
    self.name_ = name
    self.tests_ = {}
    for test in data:
      # strip off "<name>/" prefix
      test_name = test.split("/")[1]
      self.appendResult(test_name, data[test])

  # tests is a dictionary of Results
  def tests(self):
    return self.tests_

  def SortedTestKeys(self):
    """Return test names sorted alphabetically, with "Total" forced last."""
    # BUG FIX: dict.keys() returns a view in Python 3 and has no .sort();
    # sorted() yields an equivalent list on both Python 2 and 3.
    keys = sorted(self.tests_)
    t = "Total"
    if t in keys:
      keys.remove(t)
      keys.append(t)
    return keys

  def name(self):
    return self.name_

  def appendResult(self, test_name, test_data):
    """Parse one test entry of the trybot JSON and store a Result."""
    # NOTE: the trailing space in "result with patch " matches the key
    # actually emitted in the trybot JSON.
    with_string = test_data["result with patch "]
    data = with_string.split()
    master_string = test_data["result without patch"]
    master_data = master_string.split()
    runs = int(test_data["runs"])
    units = test_data["units"]
    hasScoreUnits = units == "score"
    self.tests_[test_name] = Result(test_name,
                                    runs,
                                    hasScoreUnits,
                                    data[0], data[2],
                                    master_data[0], master_data[2])
class BenchmarkRenderer:
  """Accumulates HTML fragments for the report, then writes them out in
  one go (to a file if one was given, otherwise to stdout)."""

  def __init__(self, output_file):
    self.print_output_ = []        # buffered HTML lines
    self.output_file_ = output_file

  def Print(self, str_data):
    # Buffer a line; nothing is emitted until FlushOutput().
    self.print_output_.append(str_data)

  def FlushOutput(self):
    string_data = "\n".join(self.print_output_)
    # NOTE(review): this rebinds a local name, not self.print_output_,
    # so the buffer is never actually cleared here.
    print_output = []
    if self.output_file_:
      # create a file
      with open(self.output_file_, "w") as text_file:
        text_file.write(string_data)
    else:
      print(string_data)

  def RenderOneBenchmark(self, benchmark):
    # Anchor heading plus a 4-column table (test / result / master / %).
    self.Print("<h2>")
    self.Print("<a name=\"" + benchmark.name() + "\">")
    self.Print(benchmark.name() + "</a> <a href=\"#top\">(top)</a>")
    self.Print("</h2>");
    self.Print("<table class=\"benchmark\">")
    self.Print("<thead>")
    self.Print("  <th>Test</th>")
    self.Print("  <th>Result</th>")
    self.Print("  <th>Master</th>")
    self.Print("  <th>%</th>")
    self.Print("</thead>")
    self.Print("<tbody>")
    tests = benchmark.tests()
    for test in benchmark.SortedTestKeys():
      t = tests[test]
      self.Print(" <tr>")
      self.Print("  <td>" + test + "</td>")
      self.Print("  <td>" + str(t.result()) + "</td>")
      self.Print("  <td>" + str(t.master_result()) + "</td>")
      # NOTE(review): redundant re-lookup; t is unchanged since above.
      t = tests[test]
      res = t.percentage_string()
      # Bold for statistical significance, colour for notable direction.
      if t.isSignificant():
        res = self.bold(res)
      if t.isNotablyPositive():
        res = self.green(res)
      elif t.isNotablyNegative():
        res = self.red(res)
      self.Print("  <td>" + res + "</td>")
      self.Print(" </tr>")
    self.Print("</tbody>")
    self.Print("</table>")

  def ProcessJSONData(self, data, title):
    # Page title, a linked index of benchmarks, then one table each.
    self.Print("<h1>" + title + "</h1>")
    self.Print("<ul>")
    for benchmark in data:
      if benchmark != "errors":
        self.Print("<li><a href=\"#" + benchmark + "\">" + benchmark + "</a></li>")
    self.Print("</ul>")
    for benchmark in data:
      if benchmark != "errors":
        benchmark_object = Benchmark(benchmark, data[benchmark])
        self.RenderOneBenchmark(benchmark_object)

  def bold(self, data):
    return "<b>" + data + "</b>"

  def red(self, data):
    return "<font color=\"red\">" + data + "</font>"

  def green(self, data):
    return "<font color=\"green\">" + data + "</font>"

  def PrintHeader(self):
    # Static HTML prologue with inlined CSS.
    data = """<html>
<head>
<title>Output</title>
<style type="text/css">
/*
Style inspired by Andy Ferra's gist at https://gist.github.com/andyferra/2554919
*/
body {
  font-family: Helvetica, arial, sans-serif;
  font-size: 14px;
  line-height: 1.6;
  padding-top: 10px;
  padding-bottom: 10px;
  background-color: white;
  padding: 30px;
}
h1, h2, h3, h4, h5, h6 {
  margin: 20px 0 10px;
  padding: 0;
  font-weight: bold;
  -webkit-font-smoothing: antialiased;
  cursor: text;
  position: relative;
}
h1 {
  font-size: 28px;
  color: black;
}
h2 {
  font-size: 24px;
  border-bottom: 1px solid #cccccc;
  color: black;
}
h3 {
  font-size: 18px;
}
h4 {
  font-size: 16px;
}
h5 {
  font-size: 14px;
}
h6 {
  color: #777777;
  font-size: 14px;
}
p, blockquote, ul, ol, dl, li, table, pre {
  margin: 15px 0;
}
li p.first {
  display: inline-block;
}
ul, ol {
  padding-left: 30px;
}
ul :first-child, ol :first-child {
  margin-top: 0;
}
ul :last-child, ol :last-child {
  margin-bottom: 0;
}
table {
  padding: 0;
}
table tr {
  border-top: 1px solid #cccccc;
  background-color: white;
  margin: 0;
  padding: 0;
}
table tr:nth-child(2n) {
  background-color: #f8f8f8;
}
table tr th {
  font-weight: bold;
  border: 1px solid #cccccc;
  text-align: left;
  margin: 0;
  padding: 6px 13px;
}
table tr td {
  border: 1px solid #cccccc;
  text-align: left;
  margin: 0;
  padding: 6px 13px;
}
table tr th :first-child, table tr td :first-child {
  margin-top: 0;
}
table tr th :last-child, table tr td :last-child {
  margin-bottom: 0;
}
</style>
</head>
<body>
"""
    self.Print(data)

  def PrintFooter(self):
    # Static HTML epilogue.
    data = """</body>
</html>
"""
    self.Print(data)
def Render(opts, args):
  """Load the JSON results, render them as HTML, and emit the output."""
  # Input: explicit file if given, otherwise standard input.
  if opts.filename:
    with open(opts.filename) as json_data:
      data = json.load(json_data)
  else:
    # load data from stdin
    data = json.load(sys.stdin)

  # Title: explicit flag first, then the filename, then a generic fallback.
  title = opts.title or opts.filename or "Benchmark results"

  renderer = BenchmarkRenderer(opts.output)
  renderer.PrintHeader()
  renderer.ProcessJSONData(data, title)
  renderer.PrintFooter()
  renderer.FlushOutput()
if __name__ == '__main__':
  # Command-line entry point: parse the flags, then delegate to Render().
  parser = OptionParser(usage=__doc__)
  parser.add_option("-f", "--filename", dest="filename",
                    help="Specifies the filename for the JSON results "
                    "rather than reading from stdin.")
  parser.add_option("-t", "--title", dest="title",
                    help="Optional title of the web page.")
  parser.add_option("-o", "--output", dest="output",
                    help="Write html output to this file rather than stdout.")
  (opts, args) = parser.parse_args()
  Render(opts, args)
|
apache-2.0
|
savoirfairelinux/OpenUpgrade
|
openerp/addons/base/module/wizard/base_module_upgrade.py
|
40
|
4914
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp.osv import osv, fields
from openerp.tools.translate import _
class base_module_upgrade(osv.osv_memory):
    """ Module Upgrade

    Transient wizard that lists the modules currently scheduled to be
    installed, upgraded or removed, and applies the pending changes.
    """
    _name = "base.module.upgrade"
    _description = "Module Upgrade"

    _columns = {
        # Read-only summary of the pending modules, filled by default_get().
        'module_info': fields.text('Modules to Update',readonly=True),
    }

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        # When no module state change is pending any more, replace the
        # form's arch with a simple "Upgrade Completed" dialog.
        res = super(base_module_upgrade, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
        if view_type != 'form':
            return res

        context = {} if context is None else context
        record_id = context and context.get('active_id', False) or False
        active_model = context.get('active_model')

        if (not record_id) or (not active_model):
            return res

        ids = self.get_module_list(cr, uid, context=context)
        if not ids:
            res['arch'] = '''<form string="Upgrade Completed" version="7.0">
                                <separator string="Upgrade Completed" colspan="4"/>
                                <footer>
                                    <button name="config" string="Start Configuration" type="object" class="oe_highlight"/> or
                                    <button special="cancel" string="Close" class="oe_link"/>
                                </footer>
                             </form>'''
        return res

    def get_module_list(self, cr, uid, context=None):
        # Modules whose state change was scheduled by the user.
        mod_obj = self.pool.get('ir.module.module')
        ids = mod_obj.search(cr, uid, [
            ('state', 'in', ['to upgrade', 'to remove', 'to install'])])
        return ids

    def default_get(self, cr, uid, fields, context=None):
        # Pre-fill the wizard's text area with one "name : state" line
        # per pending module.
        mod_obj = self.pool.get('ir.module.module')
        ids = self.get_module_list(cr, uid, context=context)
        res = mod_obj.read(cr, uid, ids, ['name','state'], context)
        return {'module_info': '\n'.join(map(lambda x: x['name']+' : '+x['state'], res))}

    def upgrade_module(self, cr, uid, ids, context=None):
        ir_module = self.pool.get('ir.module.module')

        # install/upgrade: double-check preconditions
        ids = ir_module.search(cr, uid, [('state', 'in', ['to upgrade', 'to install'])])
        if ids:
            # Reject the upgrade if any dependency is unknown or still
            # uninstalled.
            cr.execute("""SELECT d.name FROM ir_module_module m
                          JOIN ir_module_module_dependency d ON (m.id = d.module_id)
                          LEFT JOIN ir_module_module m2 ON (d.name = m2.name)
                          WHERE m.id in %s and (m2.state IS NULL or m2.state IN %s)""",
                       (tuple(ids), ('uninstalled',)))
            unmet_packages = [x[0] for x in cr.fetchall()]
            if unmet_packages:
                raise osv.except_osv(_('Unmet Dependency!'),
                                     _('Following modules are not installed or unknown: %s') % ('\n\n' + '\n'.join(unmet_packages)))

            ir_module.download(cr, uid, ids, context=context)
            cr.commit() # save before re-creating cursor below

        # Rebuild the registry; this is where the actual module
        # install/upgrade/removal work happens.
        openerp.modules.registry.RegistryManager.new(cr.dbname, update_module=True)

        # Re-open this wizard on its "install" view.
        ir_model_data = self.pool.get('ir.model.data')
        __, res_id = ir_model_data.get_object_reference(cr, uid, 'base', 'view_base_module_upgrade_install')
        return {
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'base.module.upgrade',
            'views': [(res_id, 'form')],
            'view_id': False,
            'type': 'ir.actions.act_window',
            'target': 'new',
        }

    def config(self, cr, uid, ids, context=None):
        # Chain into the next pending configuration wizard, if any.
        return self.pool.get('res.config').next(cr, uid, [], context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
savoirfairelinux/OpenUpgrade
|
openerp/tools/float_utils.py
|
151
|
9267
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import math
def _float_check_precision(precision_digits=None, precision_rounding=None):
assert (precision_digits is not None or precision_rounding is not None) and \
not (precision_digits and precision_rounding),\
"exactly one of precision_digits and precision_rounding must be specified"
if precision_digits is not None:
return 10 ** -precision_digits
return precision_rounding
def float_round(value, precision_digits=None, precision_rounding=None):
    """Return ``value`` rounded to ``precision_digits``
    decimal digits, minimizing IEEE-754 floating point representation
    errors, and applying HALF-UP (away from zero) tie-breaking rule.
    Precision must be given by ``precision_digits`` or ``precision_rounding``,
    not both!

    :param float value: the value to round
    :param int precision_digits: number of fractional digits to round to.
    :param float precision_rounding: decimal number representing the minimum
        non-zero value at the desired precision (for example, 0.01 for a
        2-digit precision).
    :return: rounded float
    """
    rounding_factor = _float_check_precision(precision_digits=precision_digits,
                                             precision_rounding=precision_rounding)
    if rounding_factor == 0 or value == 0:
        return 0.0
    # NORMALIZE - ROUND - DENORMALIZE
    # In order to easily support rounding to arbitrary 'steps' (e.g. coin values),
    # we normalize the value before rounding it as an integer, and de-normalize
    # after rounding: e.g. float_round(1.3, precision_rounding=.5) == 1.5
    normalized_value = value / rounding_factor  # normalize
    # TIE-BREAKING: HALF-UP
    # We want to apply HALF-UP tie-breaking rules, i.e. 0.5 rounds away from 0.
    # Due to IEEE-754 float/double representation limits, the approximation of
    # the real value may be slightly below the tie limit, resulting in an error
    # of 1 unit in the last place (ulp) after rounding.
    # For example 2.675 == 2.6749999999999998.
    # To correct this, we add a very small epsilon value, scaled to the
    # order of magnitude of the value, to tip the tie-break in the right
    # direction.
    # Credit: discussion with OpenERP community members on bug 882036
    epsilon_magnitude = math.log(abs(normalized_value), 2)
    epsilon = 2 ** (epsilon_magnitude - 53)
    # sign(normalized_value) * epsilon; the explicit sign expression replaces
    # the Python-2-only builtin cmp(), keeping behavior identical while
    # remaining valid under Python 3.
    normalized_value += ((normalized_value > 0) - (normalized_value < 0)) * epsilon
    rounded_value = round(normalized_value)  # round to integer
    result = rounded_value * rounding_factor  # de-normalize
    return result
def float_is_zero(value, precision_digits=None, precision_rounding=None):
    """Return True when ``value`` is indistinguishable from zero at the
    requested precision (i.e. smaller than the corresponding *epsilon*).

    The precision (``10**-precision_digits`` or ``precision_rounding``) acts
    as the zero epsilon: anything that rounds below it counts as zero.
    Precision must be given by ``precision_digits`` or ``precision_rounding``,
    not both!

    Warning: ``float_is_zero(value1-value2)`` is not equivalent to
    ``float_compare(value1, value2) == 0``: the former rounds *after*
    computing the difference, the latter rounds each operand first, which
    differs e.g. for 0.006 and 0.002 at 2-digit precision.

    :param int precision_digits: number of fractional digits to round to.
    :param float precision_rounding: decimal number representing the minimum
        non-zero value at the desired precision (for example, 0.01 for a
        2-digit precision).
    :param float value: value to compare with the precision's zero
    :return: True if ``value`` is considered zero
    """
    epsilon = _float_check_precision(precision_digits=precision_digits,
                                     precision_rounding=precision_rounding)
    rounded = float_round(value, precision_rounding=epsilon)
    return abs(rounded) < epsilon
def float_compare(value1, value2, precision_digits=None, precision_rounding=None):
    """Three-way compare of ``value1`` and ``value2`` after rounding each to
    the given precision. Two values are equal iff their *rounded* forms are
    equal — which is not the same as their difference being zero!

    Precision must be given by ``precision_digits`` or ``precision_rounding``,
    not both!

    Example: 1.432 and 1.431 compare equal at 2 digits (both round to 1.43),
    while 0.006 and 0.002 compare different (0.01 vs 0.0) even though their
    difference, 0.004, would itself round to zero at 2 digits.

    Warning: ``float_is_zero(value1-value2)`` is therefore not equivalent to
    ``float_compare(value1, value2) == 0`` (round-after vs round-before).

    :param int precision_digits: number of fractional digits to round to.
    :param float precision_rounding: decimal number representing the minimum
        non-zero value at the desired precision (for example, 0.01 for a
        2-digit precision).
    :param float value1: first value to compare
    :param float value2: second value to compare
    :return: -1, 0 or 1 when ``value1`` is lower than, equal to, or greater
        than ``value2`` at the given precision.
    """
    rounding_factor = _float_check_precision(precision_digits=precision_digits,
                                             precision_rounding=precision_rounding)
    rounded1 = float_round(value1, precision_rounding=rounding_factor)
    rounded2 = float_round(value2, precision_rounding=rounding_factor)
    delta = rounded1 - rounded2
    if float_is_zero(delta, precision_rounding=rounding_factor):
        return 0
    return 1 if delta > 0.0 else -1
def float_repr(value, precision_digits):
    """Format ``value`` as a string with exactly ``precision_digits``
    fractional digits.

    This is purely a presentation helper — rounding is :meth:`~.float_round`'s
    job, not this function's.

    :param int precision_digits: number of fractional digits to
        include in the output
    """
    # str() has an intrinsic rounding to 12 significant digits and would
    # lose precision: str(123456789.1234) == str(123456789.123). Build an
    # explicit printf-style format instead.
    fmt = "%." + str(precision_digits) + "f"
    return fmt % value
if __name__ == "__main__":
    # Self-test / micro-benchmark: exercise float_round over a wide range of
    # magnitudes and signs, comparing the rendered result to the expected
    # string. Python 2 only (print statements, xrange).
    import time
    start = time.time()
    count = 0
    errors = 0

    def try_round(amount, expected, precision_digits=3):
        # Round one value and count a failure when the string form differs.
        global count, errors; count += 1
        result = float_repr(float_round(amount, precision_digits=precision_digits),
                            precision_digits=precision_digits)
        if result != expected:
            errors += 1
            print '###!!! Rounding error: got %s , expected %s' % (result, expected)

    # Extended float range test, inspired by Cloves Almeida's test on bug #882036.
    fractions = [.0, .015, .01499, .675, .67499, .4555, .4555, .45555]
    expecteds = ['.00', '.02', '.01', '.68', '.67', '.46', '.456', '.4556']
    precisions = [2, 2, 2, 2, 2, 2, 3, 4]
    # Test every fraction at 7 orders of magnitude, both signs, stepping the
    # integer part by a prime (97) to cover many distinct mantissas cheaply.
    for magnitude in range(7):
        for i in xrange(len(fractions)):
            frac, exp, prec = fractions[i], expecteds[i], precisions[i]
            for sign in [-1, 1]:
                for x in xrange(0, 10000, 97):
                    n = x * 10**magnitude
                    f = sign * (n + frac)
                    f_exp = ('-' if f != 0 and sign == -1 else '') + str(n) + exp
                    try_round(f, f_exp, precision_digits=prec)

    stop = time.time()

    # Micro-bench results:
    # 47130 round calls in 0.422306060791 secs, with Python 2.6.7 on Core i3 x64
    # with decimal:
    # 47130 round calls in 6.612248100021 secs, with Python 2.6.7 on Core i3 x64
    print count, " round calls, ", errors, "errors, done in ", (stop-start), 'secs'
|
agpl-3.0
|
prutseltje/ansible
|
lib/ansible/modules/cloud/amazon/aws_caller_facts.py
|
12
|
2484
|
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Standard Ansible module boilerplate: support metadata plus the
# DOCUMENTATION / EXAMPLES / RETURN YAML blocks rendered by ansible-doc.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}

DOCUMENTATION = '''
---
module: aws_caller_facts
short_description: Get facts about the user and account being used to make AWS calls.
description:
    - This module returns information about the account and user / role that the AWS access tokens are from.
    - The primary use of this is to get the account id for templating into ARNs or similar to avoid needing to specify this information in inventory.
version_added: "2.6"

author: Ed Costello (@orthanc)

requirements: [ 'botocore', 'boto3' ]

extends_documentation_fragment:
    - aws
    - ec2
'''

EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.

- name: Get the current caller identity facts
  aws_caller_facts:
  register: caller_facts
'''

RETURN = '''
account:
    description: The account id the access credentials are associated with.
    returned: success
    type: string
    sample: "123456789012"
arn:
    description: The arn identifying the user the credentials are associated with.
    returned: success
    type: string
    sample: arn:aws:sts::123456789012:federated-user/my-federated-user-name
user_id:
    description: |
      The user id the access credentials are associated with. Note that this may not correspond to
      anything you can look up in the case of roles or federated identities.
    returned: success
    type: string
    sample: 123456789012:my-federated-user-name
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # caught by imported HAS_BOTO3
def main():
    """Module entry point: expose the STS caller identity as Ansible facts."""
    module = AnsibleAWSModule(argument_spec={}, supports_check_mode=True)
    sts = module.client('sts')
    try:
        identity = sts.get_caller_identity()
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Failed to retrieve caller identity')
    # The HTTP response metadata is transport detail, not a caller fact.
    identity.pop('ResponseMetadata', None)
    facts = camel_dict_to_snake_dict(identity)
    module.exit_json(changed=False, **facts)


if __name__ == '__main__':
    main()
|
gpl-3.0
|
bankonmeOS/cjdns
|
node_build/dependencies/cnacl/crypto_sign/ed25519/ref10/base2.py
|
77
|
1231
|
# Ed25519 reference parameters (matching the ref10 C implementation):
b = 256  # bit length of encodings
q = 2**255 - 19  # prime field modulus
l = 2**252 + 27742317777372353535851937790883648493  # order of the base-point subgroup
def expmod(b, e, m):
    """Return (b ** e) % m by recursive square-and-multiply.

    Floor division (``//``) is used on the exponent: identical to ``/`` on
    ints under Python 2, and it keeps the recursion on integers under
    Python 3 (where ``e / 2`` would yield a float and never terminate).
    """
    if e == 0:
        return 1
    t = expmod(b, e // 2, m) ** 2 % m
    if e & 1:
        t = (t * b) % m
    return t
def inv(x):
    # Modular inverse via Fermat's little theorem:
    # x**(q-2) == x**-1 (mod q) since q is prime.
    return expmod(x,q-2,q)
# Twisted Edwards curve constant d = -121665/121666 (mod q).
d = -121665 * inv(121666)
# I = sqrt(-1) mod q, used by xrecover. (q-1) is divisible by 4, so the
# division is exact; '//' keeps the exponent an int under Python 3 as well.
I = expmod(2, (q-1)//4, q)
def xrecover(y):
    """Recover the even x-coordinate of the curve point with ordinate y.

    Solves x^2 = (y^2 - 1) / (d*y^2 + 1) using the q = 5 (mod 8) square-root
    trick: a candidate root is xx^((q+3)/8), corrected by sqrt(-1) when
    needed, then negated to the even representative.
    """
    xx = (y*y - 1) * inv(d*y*y + 1)
    # (q+3) is divisible by 8 for q = 2**255-19, so '//' is exact and keeps
    # the exponent integral under Python 3 (plain '/' would make a float).
    x = expmod(xx, (q+3)//8, q)
    if (x*x - xx) % q != 0:
        x = (x*I) % q
    if x % 2 != 0:
        x = q - x
    return x
# Standard base point B: y = 4/5 (mod q), x recovered as the even root.
By = 4 * inv(5)
Bx = xrecover(By)
B = [Bx % q, By % q]
def edwards(P, Q):
    """Add two affine curve points P and Q (given as [x, y] lists) with the
    twisted Edwards addition law, returning coordinates reduced mod q."""
    px, py = P[0], P[1]
    qx, qy = Q[0], Q[1]
    # the denominators differ only in the sign of this shared product
    cross = d * px * qx * py * qy
    x3 = (px * qy + qx * py) * inv(1 + cross)
    y3 = (py * qy + px * qx) * inv(1 - cross)
    return [x3 % q, y3 % q]
def radix255(x):
    """Render field element x as ten comma-separated limbs in radix 2^25.5
    (alternating 26/25-bit signed limbs), the layout of ref10's C tables.
    """
    x = x % q
    # choose the centered representative in (-q/2, q/2]
    if x + x > q:
        x -= q
    x = [x, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    bits = [26, 25, 26, 25, 26, 25, 26, 25, 26, 25]
    for i in range(9):
        # Round-to-nearest carry into the next limb. Floor division matches
        # Python 2's integer '/' exactly (including for negative values) and
        # stays integral under Python 3.
        carry = (x[i] + 2**(bits[i]-1)) // 2**bits[i]
        x[i] -= carry * 2**bits[i]
        x[i + 1] += carry
    result = ""
    for i in range(9):
        result = result + str(x[i]) + ","
    result = result + str(x[9])
    return result
# Emit the C initializer for the precomputed-multiples table: for i in 0..7
# this prints the point (2*i+1)*B in the (y+x, y-x, 2*d*x*y) precomputed
# representation, each coordinate as radix-2^25.5 limbs. Python 2 only
# (print statements).
Bi = B
for i in range(8):
    print " {"
    print "  {", radix255(Bi[1]+Bi[0]), "},"
    print "  {", radix255(Bi[1]-Bi[0]), "},"
    print "  {", radix255(2*d*Bi[0]*Bi[1]), "},"
    print " },"
    # advance by 2*B: Bi = Bi + B + B, keeping odd multiples of B
    Bi = edwards(B, edwards(B, Bi))
|
gpl-3.0
|
orcasgit/django-template-field
|
setup.py
|
1
|
2109
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging script for django-template-field.

``python setup.py publish`` uploads an sdist and a wheel to PyPI;
``python setup.py tag`` tags the current version on git.
"""
import os
import sys

import templatefield

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# single source of truth for the version: the package itself
version = templatefield.__version__

if sys.argv[-1] == 'publish':
    try:
        import wheel
    except ImportError:
        print('Wheel library missing. Please run "pip install wheel"')
        sys.exit()
    os.system('python setup.py sdist upload')
    os.system('python setup.py bdist_wheel upload')
    sys.exit()

if sys.argv[-1] == 'tag':
    print("Tagging the version on github:")
    os.system("git tag -a %s -m 'version %s'" % (version, version))
    os.system("git push --tags")
    sys.exit()

readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
python_version = (sys.version_info.major, sys.version_info.minor)

setup(
    name='django-template-field',
    version=version,
    # NOTE: fixed the previously garbled summary text
    # ("fitemplate field twith").
    description="""A Django template field with managers to return the rendered or unrendered template.""",
    long_description=readme + '\n\n' + history,
    author='Jess Johnson',
    author_email='jess@grokcode.com',
    url='https://github.com/orcasgit/django-template-field',
    packages=[
        'templatefield',
    ],
    include_package_data=True,
    # Django 1.9 dropped Python 3.3 support, so pin Django there only.
    install_requires=['django<1.9'] if python_version == (3, 3) else [],
    license="BSD",
    zip_safe=False,
    keywords='django-template-field',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Framework :: Django',
        'Framework :: Django :: 1.7',
        'Framework :: Django :: 1.8',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: PyPy'
    ],
)
|
bsd-3-clause
|
clemenshage/grslra
|
experiments/plot_lpnorms.py
|
1
|
1564
|
import matplotlib
from matplotlib import pyplot as plt
import numpy as np
# Global matplotlib settings for all figures: larger font, LaTeX labels.
matplotlib.rcParams.update({'font.size': 24})
matplotlib.rcParams.update({'text.usetex': True})
def lpnorm_scaled(x, p, mu):
    """Smoothed l_p cost rescaled affinely so it is 0 at x=0 and 1 at |x|=1."""
    floor_value = lpnorm(0, p, mu)
    unit_value = lpnorm(1, p, mu)
    return (lpnorm(x, p, mu) - floor_value) / (unit_value - floor_value)
def lpnorm(x, p, mu):
    """Elementwise smoothed l_p cost: (mu + x^2)^(p/2); mu > 0 removes the
    kink of |x|^p at the origin."""
    smoothed_square = mu + x * x
    return smoothed_square ** (p / 2.0)
# Figure 1: fix the smoothing parameter mu (tiny, so curves approximate the
# exact |x|^p) and sweep the exponent p. Python 2 only (xrange).
pvalues = [2.0, 1.0, 0.7, 0.4, 0.1]
mu = 1e-12
colors = ['k', 'b', 'g', 'r', 'm']
x = np.linspace(-1, 1, 1001)
plt.figure(figsize=(15, 8))
for i in xrange(pvalues.__len__()):
    p = pvalues[i]
    plt.plot(x, lpnorm_scaled(x, p, mu), color=colors[i], label='$p={:1.1f}$'.format(pvalues[i]), linewidth=3)
plt.legend()
axes = plt.gca()
axes.set_ylim([0, 1])
axes.set_xlim([-1, 1])
plt.grid(b=True, which='both', color='0.65', linestyle='-')
plt.tight_layout()
plt.legend()
plt.savefig('lpnorm_fixedmu.pdf', dpi=200)

# Figure 2: fix p = 0.1 and sweep mu; the l2 and l1 curves (drawn with the
# previous mu = 1e-12) serve as references.
muvalues = [0.01, 1e-3, 1e-4]
labels = ["$\\ell_2$", "$\\ell_1$", "$\\mu=0.01$", "$\\mu=0.001$", "$\\mu=10^{-4}$"]
plt.figure(figsize=(15, 8))
plt.plot(x, lpnorm_scaled(x, 2.0, mu), color=colors[0], label=labels[0], linewidth=3)
plt.plot(x, lpnorm_scaled(x, 1.0, mu), color=colors[1], label=labels[1], linewidth=3)
for i in xrange(muvalues.__len__()):
    mu = muvalues[i]
    plt.plot(x, lpnorm_scaled(x, 0.1, mu), color=colors[i+2], label=labels[i+2], linewidth=3)
plt.legend()
axes = plt.gca()
axes.set_ylim([0, 1])
axes.set_xlim([-1, 1])
plt.grid(b=True, which='both', color='0.65', linestyle='-')
plt.tight_layout()
plt.legend(loc="lower left")
plt.savefig('lpnorm_fixedp.pdf', dpi=200)
|
mit
|
simonwangw/googlemock
|
scripts/upload.py
|
2511
|
51024
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import md5
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
try:
import readline
except ImportError:
pass
# The logging verbosity:
#  0: Errors only.
#  1: Status messages.
#  2: Info logs.
#  3: Debug logs.
verbosity = 1

# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
def GetEmail(prompt):
  """Prompts the user for their email address and returns it.

  The last used email address is saved to a file and offered up as a suggestion
  to the user. If the user presses enter without typing in anything the last
  used email address is used. If the user enters a new address, it is saved
  for next time we prompt.
  """
  last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
  last_email = ""
  if os.path.exists(last_email_file_name):
    try:
      last_email_file = open(last_email_file_name, "r")
      last_email = last_email_file.readline().strip("\n")
      last_email_file.close()
      prompt += " [%s]" % last_email
    except IOError, e:
      # Best effort: an unreadable cache file simply drops the suggestion.
      pass
  email = raw_input(prompt + ": ").strip()
  if email:
    try:
      last_email_file = open(last_email_file_name, "w")
      last_email_file.write(email)
      last_email_file.close()
    except IOError, e:
      # Failing to persist the address for next time is non-fatal.
      pass
  else:
    # empty input: reuse the previously saved address
    email = last_email
  return email
def StatusUpdate(msg):
  """Print a status message to stdout.

  If 'verbosity' is greater than 0, print the message.

  Args:
    msg: The string to print.
  """
  if verbosity > 0:
    print msg
def ErrorExit(msg):
  """Print an error message to stderr and exit."""
  print >>sys.stderr, msg
  # non-zero status signals failure to any calling script
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""

  def __init__(self, url, code, msg, headers, args):
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    self.args = args
    # ClientLogin reports the kind of failure under the "Error" key
    # (e.g. "BadAuthentication", "CaptchaRequired").
    self.reason = args["Error"]
class AbstractRpcServer(object):
  """Provides a common interface for a simple RPC server.

  Handles ClientLogin authentication and request retries; subclasses
  supply the concrete urllib2 opener via _GetOpener().
  """

  def __init__(self, host, auth_function, host_override=None, extra_headers={},
               save_cookies=False):
    """Creates a new HttpRpcServer.

    Args:
      host: The host to send requests to.
      auth_function: A function that takes no arguments and returns an
        (email, password) tuple when called. Will be called if authentication
        is required.
      host_override: The host header to send to the server (defaults to host).
      extra_headers: A dict of extra headers to append to every request.
      save_cookies: If True, save the authentication cookies to local disk.
        If False, use an in-memory cookiejar instead. Subclasses must
        implement this functionality. Defaults to False.
    """
    self.host = host
    self.host_override = host_override
    self.auth_function = auth_function
    self.authenticated = False
    self.extra_headers = extra_headers
    self.save_cookies = save_cookies
    self.opener = self._GetOpener()
    if self.host_override:
      logging.info("Server: %s; Host: %s", self.host, self.host_override)
    else:
      logging.info("Server: %s", self.host)

  def _GetOpener(self):
    """Returns an OpenerDirector for making HTTP requests.

    Returns:
      A urllib2.OpenerDirector object.
    """
    raise NotImplementedError()

  def _CreateRequest(self, url, data=None):
    """Creates a new urllib request, applying host override and extra headers."""
    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
    req = urllib2.Request(url, data=data)
    if self.host_override:
      req.add_header("Host", self.host_override)
    for key, value in self.extra_headers.iteritems():
      req.add_header(key, value)
    return req

  def _GetAuthToken(self, email, password):
    """Uses ClientLogin to authenticate the user, returning an auth token.

    Args:
      email: The user's email address
      password: The user's password

    Raises:
      ClientLoginError: If there was an error authenticating with ClientLogin.
      HTTPError: If there was some other form of HTTP error.

    Returns:
      The authentication token returned by ClientLogin.
    """
    account_type = "GOOGLE"
    if self.host.endswith(".google.com"):
      # Needed for use inside Google.
      account_type = "HOSTED"
    req = self._CreateRequest(
        url="https://www.google.com/accounts/ClientLogin",
        data=urllib.urlencode({
            "Email": email,
            "Passwd": password,
            "service": "ah",
            "source": "rietveld-codereview-upload",
            "accountType": account_type,
        }),
    )
    try:
      response = self.opener.open(req)
      response_body = response.read()
      # ClientLogin replies with newline-separated key=value pairs.
      response_dict = dict(x.split("=")
                           for x in response_body.split("\n") if x)
      return response_dict["Auth"]
    except urllib2.HTTPError, e:
      if e.code == 403:
        # 403 carries a structured error body; surface it as ClientLoginError.
        body = e.read()
        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                               e.headers, response_dict)
      else:
        raise

  def _GetAuthCookie(self, auth_token):
    """Fetches authentication cookies for an authentication token.

    Args:
      auth_token: The authentication token returned by ClientLogin.

    Raises:
      HTTPError: If there was an error fetching the authentication cookies.
    """
    # This is a dummy value to allow us to identify when we're successful.
    continue_location = "http://localhost/"
    args = {"continue": continue_location, "auth": auth_token}
    req = self._CreateRequest("http://%s/_ah/login?%s" %
                              (self.host, urllib.urlencode(args)))
    try:
      response = self.opener.open(req)
    except urllib2.HTTPError, e:
      # The expected outcome IS an HTTPError: a 302 redirect (the opener
      # below does not follow redirects), so keep it as the response.
      response = e
    if (response.code != 302 or
        response.info()["location"] != continue_location):
      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                              response.headers, response.fp)
    self.authenticated = True

  def _Authenticate(self):
    """Authenticates the user.

    The authentication process works as follows:
     1) We get a username and password from the user
     2) We use ClientLogin to obtain an AUTH token for the user
        (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
     3) We pass the auth token to /_ah/login on the server to obtain an
        authentication cookie. If login was successful, it tries to redirect
        us to the URL we provided.

    If we attempt to access the upload API without first obtaining an
    authentication cookie, it returns a 401 response and directs us to
    authenticate ourselves with ClientLogin.
    """
    # Up to 3 attempts; only bad credentials are retried, every other
    # ClientLogin failure reports and stops.
    for i in range(3):
      credentials = self.auth_function()
      try:
        auth_token = self._GetAuthToken(credentials[0], credentials[1])
      except ClientLoginError, e:
        if e.reason == "BadAuthentication":
          print >>sys.stderr, "Invalid username or password."
          continue
        if e.reason == "CaptchaRequired":
          print >>sys.stderr, (
              "Please go to\n"
              "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
              "and verify you are a human.  Then try again.")
          break
        if e.reason == "NotVerified":
          print >>sys.stderr, "Account not verified."
          break
        if e.reason == "TermsNotAgreed":
          print >>sys.stderr, "User has not agreed to TOS."
          break
        if e.reason == "AccountDeleted":
          print >>sys.stderr, "The user account has been deleted."
          break
        if e.reason == "AccountDisabled":
          print >>sys.stderr, "The user account has been disabled."
          break
        if e.reason == "ServiceDisabled":
          print >>sys.stderr, ("The user's access to the service has been "
                               "disabled.")
          break
        if e.reason == "ServiceUnavailable":
          print >>sys.stderr, "The service is not available; try again later."
          break
        raise
      self._GetAuthCookie(auth_token)
      return

  def Send(self, request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None,
           **kwargs):
    """Sends an RPC and returns the response.

    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      kwargs: Any keyword arguments are converted into query string parameters.

    Returns:
      The response body, as a string.
    """
    # TODO: Don't require authentication.  Let the server say
    # whether it is necessary.
    if not self.authenticated:
      self._Authenticate()

    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
      tries = 0
      while True:
        tries += 1
        args = dict(kwargs)
        url = "http://%s%s" % (self.host, request_path)
        if args:
          url += "?" + urllib.urlencode(args)
        req = self._CreateRequest(url=url, data=payload)
        req.add_header("Content-Type", content_type)
        try:
          f = self.opener.open(req)
          response = f.read()
          f.close()
          return response
        except urllib2.HTTPError, e:
          if tries > 3:
            raise
          elif e.code == 401:
            # Stale or missing auth cookie: re-authenticate and retry.
            self._Authenticate()
##          elif e.code >= 500 and e.code < 600:
##            # Server Error - try again.
##            continue
          else:
            raise
    finally:
      # restore whatever global socket timeout was in effect before
      socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests.

  Concrete AbstractRpcServer that persists authentication cookies to
  ~/.codereview_upload_cookies when save_cookies is enabled.
  """

  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()

  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.

    Returns:
      A urllib2.OpenerDirector object.
    """
    # Note: deliberately no HTTPRedirectHandler — _GetAuthCookie relies on
    # seeing the raw 302 response.
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
# Command-line definition for upload.py. Anything after a literal "--" is
# passed through to the underlying VCS diff command; the option groups
# mirror the parts of a code-review upload (logging, server, issue, patch).
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default="codereview.appspot.com",
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to 'codereview.appspot.com'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
                 metavar="DESCRIPTION", default=None,
                 help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
                 dest="description_file", metavar="DESCRIPTION_FILE",
                 default=None,
                 help="Optional path of a file that contains "
                      "the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default=None,
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC", default=None,
                 help="Add CC (comma separated email addresses).")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
                 metavar="MESSAGE", default=None,
                 help="A message to identify the patch. "
                      "Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Branch/tree/revision to diff against (used by DVCS).")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
def GetRpcServer(options):
  """Returns an instance of an AbstractRpcServer.

  Returns:
    A new AbstractRpcServer, on which RPC calls can be made.
  """
  rpc_server_class = HttpRpcServer

  def GetUserCredentials():
    """Prompts the user for a username and password."""
    email = options.email
    if email is None:
      email = GetEmail("Email (login for uploading to %s)" % options.server)
    password = getpass.getpass("Password for %s: " % email)
    return (email, password)

  # If this is the dev_appserver, use fake authentication.
  host = (options.host or options.server).lower()
  if host == "localhost" or host.startswith("localhost:"):
    email = options.email
    if email is None:
      email = "test@example.com"
      logging.info("Using debug user %s. Override with --email" % email)
    # dev_appserver accepts a hand-crafted login cookie, no ClientLogin needed
    server = rpc_server_class(
        options.server,
        lambda: (email, "password"),
        host_override=options.host,
        extra_headers={"Cookie":
                       'dev_appserver_login="%s:False"' % email},
        save_cookies=options.save_cookies)
    # Don't try to talk to ClientLogin.
    server.authenticated = True
    return server

  return rpc_server_class(options.server, GetUserCredentials,
                          host_override=options.host,
                          save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
  """Encode form fields for multipart/form-data.

  Args:
    fields: A sequence of (name, value) elements for regular form fields.
    files: A sequence of (name, filename, value) elements for data to be
           uploaded as files.
  Returns:
    (content_type, body) ready for httplib.HTTP instance.

  Source:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  """
  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  CRLF = '\r\n'
  parts = []
  # Plain form fields: boundary, disposition header, blank line, value.
  for field_name, field_value in fields:
    parts.extend([
        '--' + BOUNDARY,
        'Content-Disposition: form-data; name="%s"' % field_name,
        '',
        field_value,
    ])
  # File fields additionally carry a filename and a guessed content type.
  for field_name, file_name, file_value in files:
    parts.extend([
        '--' + BOUNDARY,
        'Content-Disposition: form-data; name="%s"; filename="%s"'
            % (field_name, file_name),
        'Content-Type: %s' % GetContentType(file_name),
        '',
        file_value,
    ])
  # Closing boundary, plus a trailing empty element so the body ends in CRLF.
  parts.append('--' + BOUNDARY + '--')
  parts.append('')
  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
  return content_type, CRLF.join(parts)
def GetContentType(filename):
  """Helper to guess the content-type from the filename."""
  guessed, _ = mimetypes.guess_type(filename)
  if guessed:
    return guessed
  return 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
# (sys.platform starts with "win" on Windows; elsewhere commands are
# executed directly so arguments need no shell quoting.)
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True):
  """Executes a command and returns the output from stdout and the return code.

  Args:
    command: Command to execute.
    print_output: If True, the output is printed to stdout.
                  If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).

  Returns:
    Tuple (output, return code)
  """
  logging.info("Running %s", command)
  p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       shell=use_shell, universal_newlines=universal_newlines)
  if print_output:
    # Echo stdout line by line as it arrives so long commands show progress.
    output_array = []
    while True:
      line = p.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = p.stdout.read()
  p.wait()
  # NOTE(review): stderr is drained only after wait(); a command producing
  # very large stderr output could block the child -- confirm this is
  # acceptable for the short VCS commands used here.
  errout = p.stderr.read()
  if print_output and errout:
    print >>sys.stderr, errout
  p.stdout.close()
  p.stderr.close()
  return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False):
  """Run *command* and return its stdout, aborting via ErrorExit on failure.

  A non-zero exit status always aborts; empty output aborts unless
  silent_ok is set.
  """
  output, status = RunShellWithReturnCode(command, print_output,
                                          universal_newlines)
  if status:
    ErrorExit("Got error status from %s:\n%s" % (command, output))
  if not (silent_ok or output):
    ErrorExit("No output from %s" % command)
  return output
class VersionControlSystem(object):
  """Abstract base class providing an interface to the VCS."""

  def __init__(self, options):
    """Constructor.

    Args:
      options: Command line options.
    """
    self.options = options

  def GenerateDiff(self, args):
    """Return the current diff as a string.

    Args:
      args: Extra arguments to pass to the diff command.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def CheckForUnknownFiles(self):
    """Show an "are you sure?" prompt if there are unknown files."""
    unknown_files = self.GetUnknownFiles()
    if unknown_files:
      print "The following files are not added to version control:"
      for line in unknown_files:
        print line
      prompt = "Are you sure to continue?(y/N) "
      answer = raw_input(prompt).strip()
      if answer != "y":
        ErrorExit("User aborted")

  def GetBaseFile(self, filename):
    """Get the content of the upstream version of a file.

    Returns:
      A tuple (base_content, new_content, is_binary, status)
        base_content: The contents of the base file.
        new_content: For text files, this is empty. For binary files, this is
          the contents of the new file, since the diff output won't contain
          information to reconstruct the current file.
        is_binary: True iff the file is binary.
        status: The status of the file.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def GetBaseFiles(self, diff):
    """Helper that calls GetBase file for each file in the patch.

    Returns:
      A dictionary that maps from filename to GetBaseFile's tuple. Filenames
      are retrieved based on lines that start with "Index:" or
      "Property changes on:".
    """
    files = {}
    for line in diff.splitlines(True):
      if line.startswith('Index:') or line.startswith('Property changes on:'):
        unused, filename = line.split(':', 1)
        # On Windows if a file has property changes its filename uses '\'
        # instead of '/'.
        filename = filename.strip().replace('\\', '/')
        files[filename] = self.GetBaseFile(filename)
    return files

  def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                      files):
    """Uploads the base files (and if necessary, the current ones as well)."""

    def UploadFile(filename, file_id, content, is_binary, status, is_base):
      """Uploads a file to the server."""
      file_too_large = False
      if is_base:
        type = "base"
      else:
        type = "current"
      # Oversized files are replaced with empty content and flagged so the
      # server knows the upload was intentionally skipped.
      if len(content) > MAX_UPLOAD_SIZE:
        print ("Not uploading the %s file for %s because it's too large." %
               (type, filename))
        file_too_large = True
        content = ""
      checksum = md5.new(content).hexdigest()
      if options.verbose > 0 and not file_too_large:
        print "Uploading %s file for %s" % (type, filename)
      url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
      form_fields = [("filename", filename),
                     ("status", status),
                     ("checksum", checksum),
                     ("is_binary", str(is_binary)),
                     ("is_current", str(not is_base)),
                    ]
      if file_too_large:
        form_fields.append(("file_too_large", "1"))
      if options.email:
        form_fields.append(("user", options.email))
      ctype, body = EncodeMultipartFormData(form_fields,
                                            [("data", filename, content)])
      response_body = rpc_server.Send(url, body,
                                      content_type=ctype)
      if not response_body.startswith("OK"):
        StatusUpdate(" --> %s" % response_body)
        sys.exit(1)

    # Build a filename -> patch-id map; setdefault keeps the first id seen
    # for a filename.  (The list comprehension is used for its side effect.)
    patches = dict()
    [patches.setdefault(v, k) for k, v in patch_list]
    for filename in patches.keys():
      base_content, new_content, is_binary, status = files[filename]
      file_id_str = patches.get(filename)
      # A "nobase" marker in the id means the server does not want the base
      # file; strip the marker to recover the numeric id.
      if file_id_str.find("nobase") != -1:
        base_content = None
        file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
      file_id = int(file_id_str)
      if base_content != None:
        UploadFile(filename, file_id, base_content, is_binary, status, True)
      if new_content != None:
        UploadFile(filename, file_id, new_content, is_binary, status, False)

  def IsImage(self, filename):
    """Returns true if the filename has an image extension."""
    mimetype = mimetypes.guess_type(filename)[0]
    if not mimetype:
      return False
    return mimetype.startswith("image/")
class SubversionVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Subversion."""

  def __init__(self, options):
    super(SubversionVCS, self).__init__(options)
    if self.options.revision:
      match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
      if not match:
        ErrorExit("Invalid Subversion revision %s." % self.options.revision)
      self.rev_start = match.group(1)
      self.rev_end = match.group(3)
    else:
      self.rev_start = self.rev_end = None
    # Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
    self.svnls_cache = {}
    # SVN base URL is required to fetch files deleted in an older revision.
    # Result is cached to not guess it over and over again in GetBaseFile().
    required = self.options.download_base or self.options.revision is not None
    self.svn_base = self._GuessBase(required)

  def GuessBase(self, required):
    """Wrapper for _GuessBase."""
    return self.svn_base

  def _GuessBase(self, required):
    """Returns the SVN base URL.

    Args:
      required: If true, exits if the url can't be guessed, otherwise None is
        returned.
    """
    info = RunShell(["svn", "info"])
    for line in info.splitlines():
      words = line.split()
      if len(words) == 2 and words[0] == "URL:":
        url = words[1]
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
        username, netloc = urllib.splituser(netloc)
        if username:
          logging.info("Removed username from base URL")
        # Well-known hosting sites get a hand-crafted checkout URL so the
        # server can fetch base files over plain HTTP.
        if netloc.endswith("svn.python.org"):
          if netloc == "svn.python.org":
            if path.startswith("/projects/"):
              path = path[9:]
          elif netloc != "pythondev@svn.python.org":
            ErrorExit("Unrecognized Python URL: %s" % url)
          base = "http://svn.python.org/view/*checkout*%s/" % path
          logging.info("Guessed Python base = %s", base)
        elif netloc.endswith("svn.collab.net"):
          if path.startswith("/repos/"):
            path = path[6:]
          base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
          logging.info("Guessed CollabNet base = %s", base)
        elif netloc.endswith(".googlecode.com"):
          path = path + "/"
          base = urlparse.urlunparse(("http", netloc, path, params,
                                      query, fragment))
          logging.info("Guessed Google Code base = %s", base)
        else:
          path = path + "/"
          base = urlparse.urlunparse((scheme, netloc, path, params,
                                      query, fragment))
          logging.info("Guessed base = %s", base)
        return base
    if required:
      ErrorExit("Can't find URL in output from svn info")
    return None

  def GenerateDiff(self, args):
    """Return the output of "svn diff", aborting if it contains no patches."""
    cmd = ["svn", "diff"]
    if self.options.revision:
      cmd += ["-r", self.options.revision]
    cmd.extend(args)
    data = RunShell(cmd)
    count = 0
    for line in data.splitlines():
      if line.startswith("Index:") or line.startswith("Property changes on:"):
        count += 1
        logging.info(line)
    if not count:
      ErrorExit("No valid patches found in output from svn diff")
    return data

  def _CollapseKeywords(self, content, keyword_str):
    """Collapses SVN keywords."""
    # svn cat translates keywords but svn diff doesn't. As a result of this
    # behavior patching.PatchChunks() fails with a chunk mismatch error.
    # This part was originally written by the Review Board development team
    # who had the same problem (http://reviews.review-board.org/r/276/).
    # Mapping of keywords to known aliases
    svn_keywords = {
      # Standard keywords
      'Date': ['Date', 'LastChangedDate'],
      'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
      'Author': ['Author', 'LastChangedBy'],
      'HeadURL': ['HeadURL', 'URL'],
      'Id': ['Id'],

      # Aliases
      'LastChangedDate': ['LastChangedDate', 'Date'],
      'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
      'LastChangedBy': ['LastChangedBy', 'Author'],
      'URL': ['URL', 'HeadURL'],
    }

    def repl(m):
      if m.group(2):
        return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
      return "$%s$" % m.group(1)

    keywords = [keyword
                for name in keyword_str.split(" ")
                for keyword in svn_keywords.get(name, [])]
    return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)

  def GetUnknownFiles(self):
    """Return files reported by "svn status" as unversioned ("?")."""
    status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
    unknown_files = []
    for line in status.split("\n"):
      if line and line[0] == "?":
        unknown_files.append(line)
    return unknown_files

  def ReadFile(self, filename):
    """Returns the contents of a file."""
    file = open(filename, 'rb')
    result = ""
    try:
      result = file.read()
    finally:
      file.close()
    return result

  def GetStatus(self, filename):
    """Returns the status of a file.

    The returned string mimics an "svn status" line: the flag characters at
    fixed columns (GetBaseFile() inspects status[0], status[1] and
    status[3]), so synthesized values below are padded to four characters.
    """
    if not self.options.revision:
      status = RunShell(["svn", "status", "--ignore-externals", filename])
      if not status:
        ErrorExit("svn status returned no output for %s" % filename)
      status_lines = status.splitlines()
      # If file is in a cl, the output will begin with
      # "\n--- Changelist 'cl_name':\n". See
      # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
      if (len(status_lines) == 3 and
          not status_lines[0] and
          status_lines[1].startswith("--- Changelist")):
        status = status_lines[2]
      else:
        status = status_lines[0]
    # If we have a revision to diff against we need to run "svn list"
    # for the old and the new revision and compare the results to get
    # the correct status for a file.
    else:
      dirname, relfilename = os.path.split(filename)
      if dirname not in self.svnls_cache:
        cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to get status for %s." % filename)
        old_files = out.splitlines()
        args = ["svn", "list"]
        if self.rev_end:
          args += ["-r", self.rev_end]
        cmd = args + [dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to run command %s" % cmd)
        self.svnls_cache[dirname] = (old_files, out.splitlines())
      old_files, new_files = self.svnls_cache[dirname]
      # BUG FIX: these synthesized statuses were only two characters long
      # ("D ", "M ", "A "), but GetBaseFile() indexes status[3], which
      # raised IndexError whenever a revision range was used.  Pad to four
      # characters like real "svn status" output.
      if relfilename in old_files and relfilename not in new_files:
        status = "D   "
      elif relfilename in old_files and relfilename in new_files:
        status = "M   "
      else:
        status = "A   "
    return status

  def GetBaseFile(self, filename):
    status = self.GetStatus(filename)
    base_content = None
    new_content = None

    # If a file is copied its status will be "A +", which signifies
    # "addition-with-history". See "svn st" for more information. We need to
    # upload the original file or else diff parsing will fail if the file was
    # edited.
    if status[0] == "A" and status[3] != "+":
      # We'll need to upload the new content if we're adding a binary file
      # since diff's output won't contain it.
      mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                          silent_ok=True)
      base_content = ""
      is_binary = mimetype and not mimetype.startswith("text/")
      if is_binary and self.IsImage(filename):
        new_content = self.ReadFile(filename)
    elif (status[0] in ("M", "D", "R") or
          (status[0] == "A" and status[3] == "+") or  # Copied file.
          (status[0] == " " and status[1] == "M")):  # Property change.
      args = []
      if self.options.revision:
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
      else:
        # Don't change filename, it's needed later.
        url = filename
        args += ["-r", "BASE"]
      cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
      mimetype, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        # File does not exist in the requested revision.
        # Reset mimetype, it contains an error message.
        mimetype = ""
      get_base = False
      is_binary = mimetype and not mimetype.startswith("text/")
      if status[0] == " ":
        # Empty base content just to force an upload.
        base_content = ""
      elif is_binary:
        if self.IsImage(filename):
          get_base = True
          if status[0] == "M":
            if not self.rev_end:
              new_content = self.ReadFile(filename)
            else:
              url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
              new_content = RunShell(["svn", "cat", url],
                                     universal_newlines=True, silent_ok=True)
        else:
          base_content = ""
      else:
        get_base = True

      if get_base:
        if is_binary:
          universal_newlines = False
        else:
          universal_newlines = True
        if self.rev_start:
          # "svn cat -r REV delete_file.txt" doesn't work. cat requires
          # the full URL with "@REV" appended instead of using "-r" option.
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          base_content = RunShell(["svn", "cat", url],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        else:
          base_content = RunShell(["svn", "cat", filename],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        if not is_binary:
          args = []
          if self.rev_start:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          else:
            url = filename
            args += ["-r", "BASE"]
          cmd = ["svn"] + args + ["propget", "svn:keywords", url]
          keywords, returncode = RunShellWithReturnCode(cmd)
          if keywords and not returncode:
            base_content = self._CollapseKeywords(base_content, keywords)
    else:
      StatusUpdate("svn status returned unexpected output: %s" % status)
      sys.exit(1)
    return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""

  def __init__(self, options):
    super(GitVCS, self).__init__(options)
    # Maps a filename seen in the diff to the hash of its base revision.
    self.base_hashes = {}

  def GenerateDiff(self, extra_args):
    """Return a git diff rewritten with svn-style "Index:" header lines.

    While walking the diff we also record, per file, the left-hand blob
    hash from the "index <old>..<new>" lines so the base files can be
    uploaded along with the patch.
    """
    args = extra_args
    if self.options.revision:
      args = [self.options.revision] + args
    raw_diff = RunShell(["git", "diff", "--full-index"] + args)
    converted = []
    seen_files = 0
    current_file = None
    for raw_line in raw_diff.splitlines():
      header = re.match(r"diff --git a/(.*) b/.*$", raw_line)
      if header is not None:
        # Replace the git header with the "Index:" line the server expects.
        seen_files += 1
        current_file = header.group(1)
        converted.append("Index: %s\n" % current_file)
        continue
      # The "index" line in a git diff looks like this (long hashes elided):
      #   index 82c0d44..b2cee3f 100755
      # The left hash identifies the base file.
      index_line = re.match(r"index (\w+)\.\.", raw_line)
      if index_line is not None:
        self.base_hashes[current_file] = index_line.group(1)
      converted.append(raw_line + "\n")
    if seen_files == 0:
      ErrorExit("No valid patches found in output from git diff")
    return "".join(converted)

  def GetUnknownFiles(self):
    """Return files git does not track (respecting the ignore rules)."""
    listing = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                       silent_ok=True)
    return listing.splitlines()

  def GetBaseFile(self, filename):
    """Fetch the base revision of *filename* via "git show"."""
    blob_hash = self.base_hashes[filename]
    new_content = None
    is_binary = False
    if blob_hash == "0" * 40:
      # An all-zero hash means there is no base file (the file is new).
      status = "A"
      base_content = ""
    else:
      status = "M"
      base_content, exit_code = RunShellWithReturnCode(["git", "show",
                                                        blob_hash])
      if exit_code:
        ErrorExit("Got error status from 'git show %s'" % blob_hash)
    return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""

  def __init__(self, options, repo_dir):
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # Default to the first parent of the working directory
      # ("hg parent -q" prints "rev:node"; keep the node part).
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()

  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    assert filename.startswith(self.subdir), filename
    return filename[len(self.subdir):].lstrip(r"\/")

  def GenerateDiff(self, extra_args):
    """Return "hg diff --git" output reformatted to look like svn diff."""
    # If no file specified, restrict to the current subdir
    extra_args = extra_args or ["."]
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      m = re.match("diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    args = []
    # "-u" limits the status listing to unknown ("?") files.
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
                      silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files

  def GetBaseFile(self, filename):
    """Return (base_content, new_content, is_binary, status) for *filename*."""
    # "hg status" and "hg cat" both take a path relative to the current subdir
    # rather than to the repo root, but "hg diff" has given us the full path
    # to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    # "hg status -C" returns two lines for moved/copied files, one otherwise
    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
    out = out.splitlines()
    # HACK: strip error message about missing file/directory if it isn't in
    # the working copy
    if out[0].startswith('%s: ' % relpath):
      out = out[1:]
    if len(out) > 1:
      # Moved/copied => considered as modified, use old filename to
      # retrieve base contents
      oldrelpath = out[1].strip()
      status = "M"
    else:
      status, _ = out[0].split(' ', 1)
    if status != "A":
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True)
      is_binary = "\0" in base_content  # Mercurial's heuristic
    if status != "R":
      new_content = open(relpath, "rb").read()
      is_binary = is_binary or "\0" in new_content
    if is_binary and base_content:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True, universal_newlines=False)
    # Only binary images need their new content uploaded; for everything
    # else the diff is sufficient to reconstruct the file.
    if not is_binary or not self.IsImage(relpath):
      new_content = None
    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
    pertaining to filename.
  """
  pieces = []
  current_name = None
  current_lines = []
  for line in data.splitlines(True):
    started_name = None
    if line.startswith('Index:'):
      _, started_name = line.split(':', 1)
      started_name = started_name.strip()
    elif line.startswith('Property changes on:'):
      _, prop_name = line.split(':', 1)
      # When a file is modified, paths use '/' between directories, however
      # when a property is modified '\' is used on Windows. Make them the
      # same otherwise the file shows up twice.
      prop_name = prop_name.strip().replace('\\', '/')
      if prop_name != current_name:
        # Property changes without modifications start a new per-file diff.
        started_name = prop_name
    if started_name:
      # A new file begins: flush the accumulated chunk, then restart.
      if current_name and current_lines:
        pieces.append((current_name, ''.join(current_lines)))
      current_name = started_name
      current_lines = [line]
    else:
      current_lines.append(line)
  if current_name and current_lines:
    pieces.append((current_name, ''.join(current_lines)))
  return pieces
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
  """Uploads a separate patch for each file in the diff output.

  Returns a list of [patch_key, filename] for each file.
  """
  patches = SplitPatch(data)
  rv = []
  for patch in patches:
    # Per-file patches above the size cap are skipped entirely.
    if len(patch[1]) > MAX_UPLOAD_SIZE:
      print ("Not uploading the patch for " + patch[0] +
             " because the file is too large.")
      continue
    form_fields = [("filename", patch[0])]
    if not options.download_base:
      form_fields.append(("content_upload", "1"))
    files = [("data", "data.diff", patch[1])]
    ctype, body = EncodeMultipartFormData(form_fields, files)
    url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
    print "Uploading patch for " + patch[0]
    response_body = rpc_server.Send(url, body, content_type=ctype)
    # On success the server responds "OK" on the first line and the patch
    # key on the second.
    lines = response_body.splitlines()
    if not lines or lines[0] != "OK":
      StatusUpdate(" --> %s" % response_body)
      sys.exit(1)
    rv.append([lines[1], patch[0]])
  return rv
def GuessVCS(options):
  """Helper to guess the version control system.

  This examines the current directory, guesses which VersionControlSystem
  we're using, and returns an instance of the appropriate class. Exit with an
  error if we can't figure it out.

  Returns:
    A VersionControlSystem instance. Exits if the VCS can't be guessed.
  """
  # Mercurial has a command to get the base directory of a repository
  # Try running it, but don't die if we don't have hg installed.
  # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
  try:
    out, returncode = RunShellWithReturnCode(["hg", "root"])
    if returncode == 0:
      return MercurialVCS(options, out.strip())
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have hg installed.
      raise
  # Subversion has a .svn in all working directories.
  if os.path.isdir('.svn'):
    logging.info("Guessed VCS = Subversion")
    return SubversionVCS(options)
  # Git has a command to test if you're in a git tree.
  # Try running it, but don't die if we don't have git installed.
  try:
    out, returncode = RunShellWithReturnCode(["git", "rev-parse",
                                              "--is-inside-work-tree"])
    if returncode == 0:
      return GitVCS(options)
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have git installed.
      raise
  ErrorExit(("Could not guess version control system. "
             "Are you in a working copy directory?"))
def RealMain(argv, data=None):
"""The real main function.
Args:
argv: Command line arguments.
data: Diff contents. If None (default) the diff is generated by
the VersionControlSystem implementation returned by GuessVCS().
Returns:
A 2-tuple (issue id, patchset id).
The patchset id is None if the base files are not uploaded by this
script (applies only to SVN checkouts).
"""
logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
"%(lineno)s %(message)s "))
os.environ['LC_ALL'] = 'C'
options, args = parser.parse_args(argv[1:])
global verbosity
verbosity = options.verbose
if verbosity >= 3:
logging.getLogger().setLevel(logging.DEBUG)
elif verbosity >= 2:
logging.getLogger().setLevel(logging.INFO)
vcs = GuessVCS(options)
if isinstance(vcs, SubversionVCS):
# base field is only allowed for Subversion.
# Note: Fetching base files may become deprecated in future releases.
base = vcs.GuessBase(options.download_base)
else:
base = None
if not base and options.download_base:
options.download_base = True
logging.info("Enabled upload of base file")
if not options.assume_yes:
vcs.CheckForUnknownFiles()
if data is None:
data = vcs.GenerateDiff(args)
files = vcs.GetBaseFiles(data)
if verbosity >= 1:
print "Upload server:", options.server, "(change with -s/--server)"
if options.issue:
prompt = "Message describing this patch set: "
else:
prompt = "New issue subject: "
message = options.message or raw_input(prompt).strip()
if not message:
ErrorExit("A non-empty message is required")
rpc_server = GetRpcServer(options)
form_fields = [("subject", message)]
if base:
form_fields.append(("base", base))
if options.issue:
form_fields.append(("issue", str(options.issue)))
if options.email:
form_fields.append(("user", options.email))
if options.reviewers:
for reviewer in options.reviewers.split(','):
if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
ErrorExit("Invalid email address: %s" % reviewer)
form_fields.append(("reviewers", options.reviewers))
if options.cc:
for cc in options.cc.split(','):
if "@" in cc and not cc.split("@")[1].count(".") == 1:
ErrorExit("Invalid email address: %s" % cc)
form_fields.append(("cc", options.cc))
description = options.description
if options.description_file:
if options.description:
ErrorExit("Can't specify description and description_file")
file = open(options.description_file, 'r')
description = file.read()
file.close()
if description:
form_fields.append(("description", description))
# Send a hash of all the base file so the server can determine if a copy
# already exists in an earlier patchset.
base_hashes = ""
for file, info in files.iteritems():
if not info[0] is None:
checksum = md5.new(info[0]).hexdigest()
if base_hashes:
base_hashes += "|"
base_hashes += checksum + ":" + file
form_fields.append(("base_hashes", base_hashes))
# If we're uploading base files, don't send the email before the uploads, so
# that it contains the file status.
if options.send_mail and options.download_base:
form_fields.append(("send_mail", "1"))
if not options.download_base:
form_fields.append(("content_upload", "1"))
if len(data) > MAX_UPLOAD_SIZE:
print "Patch is large, so uploading file patches separately."
uploaded_diff_file = []
form_fields.append(("separate_patches", "1"))
else:
uploaded_diff_file = [("data", "data.diff", data)]
ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
response_body = rpc_server.Send("/upload", body, content_type=ctype)
patchset = None
if not options.download_base or not uploaded_diff_file:
lines = response_body.splitlines()
if len(lines) >= 2:
msg = lines[0]
patchset = lines[1].strip()
patches = [x.split(" ", 1) for x in lines[2:]]
else:
msg = response_body
else:
msg = response_body
StatusUpdate(msg)
if not response_body.startswith("Issue created.") and \
not response_body.startswith("Issue updated."):
sys.exit(0)
issue = msg[msg.rfind("/")+1:]
if not uploaded_diff_file:
result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
if not options.download_base:
patches = result
if not options.download_base:
vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
if options.send_mail:
rpc_server.Send("/" + issue + "/mail", payload="")
return issue, patchset
def main():
  """Command-line entry point: run RealMain, exiting 1 on Ctrl-C."""
  try:
    RealMain(sys.argv)
  except KeyboardInterrupt:
    print
    StatusUpdate("Interrupted.")
    sys.exit(1)


if __name__ == "__main__":
  main()
|
bsd-3-clause
|
vladpopovici/WSItk
|
WSItk/tools/wsi_bot_apply.py
|
2
|
4768
|
# -*- coding: utf-8 -*-
"""
WSI_BOT_APPLY
Assigns all patches in an image to one of the clusters in the codebook.
@author: vlad
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import *
__author__ = 'Vlad Popovici'
__version__ = 0.01
import argparse as opt
from ConfigParser import SafeConfigParser
import ast
import numpy as np
import skimage.io
from util.storage import ModelPersistence
from util.configs import read_local_descriptors_cfg
from util.explore import sliding_window_on_regions
from util.misc import intg_image
from descriptors.txtgrey import HaarLikeDescriptor
from stain.he import rgb2he2
def main():
    """Entry point: assign every sliding-window patch of an image to a
    codebook cluster.

    Reads the image and a configuration file, computes one local descriptor
    per window, predicts the nearest cluster for each window (optionally
    standardizing the features with the stored shift/scale), and writes
    "row_min row_max col_min col_max label" lines to the output file.
    """
    p = opt.ArgumentParser(description="""
        Assigns the regions of an image to the clusters of a codebook.
        """)
    p.add_argument('image', action='store', help='image file name')
    p.add_argument('config', action='store', help='a configuration file')
    p.add_argument('-r', '--roi', action='store', nargs=4, type=int,
                   help='region of interest from the image as: row_min row_max col_min col_max',
                   default=None)
    args = p.parse_args()

    img_file = args.image
    cfg_file = args.config

    image_orig = skimage.io.imread(img_file)
    if image_orig.ndim == 3:
        # Extract the haematoxylin channel from an RGB H&E image.
        im_h, _, _ = rgb2he2(image_orig)
    else:
        # BUG FIX: im_h was previously assigned only for 3-channel images,
        # so grayscale input crashed with NameError below.  A single-channel
        # image is assumed to already be an intensity map -- TODO confirm.
        im_h = image_orig

    if args.roi is None:
        roi = (0, im_h.shape[0]-1, 0, im_h.shape[1]-1)
    else:
        roi = args.roi

    # Process configuration file:
    parser = SafeConfigParser()
    parser.read(cfg_file)

    if not parser.has_section('data'):
        raise RuntimeError('Section [data] is mandatory')
    wsize = (32, 32)
    if parser.has_option('data', 'window_size'):
        wsize = ast.literal_eval(parser.get('data', 'window_size'))
    if not parser.has_option('data', 'model'):
        raise RuntimeError('model file name is missing in [data] section')
    model_file = parser.get('data', 'model')

    with ModelPersistence(model_file, 'r', format='pickle') as mp:
        codebook = mp['codebook']
        Xm = mp['shift']
        Xs = mp['scale']
        standardize = mp['standardize']

    if parser.has_option('data', 'output'):
        out_file = parser.get('data', 'output')
    else:
        out_file = 'output.dat'

    descriptors = read_local_descriptors_cfg(parser)

    # For the moment, it is assumed that only one type of local descriptors is
    # used - no composite feature vectors. This will change in the future but,
    # for the moment only the first type of descriptor in "descriptors" list
    # is used, and the codebook is assumed to be constructed using the same.
    desc = descriptors[0]

    print(img_file)
    print(wsize)
    print(roi[0], roi[1], roi[2], roi[3])

    w_offset = (0, 0)
    if isinstance(desc, HaarLikeDescriptor):
        # this one works on integral images
        image = intg_image(im_h)
        # the sliding window should also be increased by 1:
        w_offset = (1, 1)
        wsize = (wsize[0] + w_offset[0], wsize[1] + w_offset[1])
    else:
        image = im_h

    itw = sliding_window_on_regions(image.shape, [tuple(roi)], wsize, step=wsize)
    wnd = []
    labels = []
    buff_size = 10000  # every <buff_size> patches we do a classification
    X = np.zeros((buff_size, codebook.cluster_centers_[0].shape[0]))
    k = 0  # index of the next free row in the buffer X

    # The standardize test is hoisted out of the loop on purpose.
    if standardize:
        for r in itw:
            # adjust if needed:
            r2 = (r[0], r[1] - w_offset[1], r[2], r[3] - w_offset[0])
            wnd.append(r2)
            X[k, :] = desc.compute(image[r[0]:r[1], r[2]:r[3]])
            k += 1
            if k == buff_size:
                X = (X - Xm) / Xs
                labels.extend(codebook.predict(X).tolist())
                k = 0  # reset the block
    else:
        for r in itw:
            # adjust if needed:
            r2 = (r[0], r[1] - w_offset[1], r[2], r[3] - w_offset[0])
            wnd.append(r2)
            X[k, :] = desc.compute(image[r[0]:r[1], r[2]:r[3]])
            k += 1
            if k == buff_size:
                labels.extend(codebook.predict(X).tolist())
                k = 0  # reset the block

    if k != 0:
        # Some patches are still buffered in rows 0..k-1 but not classified.
        # BUG FIX: the slice was X[0:k+1,], which also classified one
        # uninitialized row and emitted a spurious extra label.
        if standardize:
            X[0:k, :] = (X[0:k, :] - Xm) / Xs
        labels.extend(codebook.predict(X[0:k, :]).tolist())

    with open(out_file, 'w') as f:
        n = len(wnd)  # total number of descriptors of this type
        for k in range(n):
            s = '\t'.join([str(x_) for x_ in wnd[k]]) + '\t' + str(labels[k]) + '\n'
            f.write(s)
# Script entry point.
if __name__ == '__main__':
    main()
|
mit
|
Juniper/tempest
|
tempest/lib/api_schema/response/compute/v2_1/interfaces.py
|
13
|
2276
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
# JSON-schema fragment describing a single Nova os-interface attachment.
# Shared by the show/create schema and the list schema below.
interface_common_info = {
    'type': 'object',
    'properties': {
        'port_state': {'type': 'string'},
        'fixed_ips': {
            'type': 'array',
            'items': {
                'type': 'object',
                'properties': {
                    'subnet_id': {
                        'type': 'string',
                        'format': 'uuid'
                    },
                    'ip_address': parameter_types.ip_address
                },
                'additionalProperties': False,
                'required': ['subnet_id', 'ip_address']
            }
        },
        'port_id': {'type': 'string', 'format': 'uuid'},
        'net_id': {'type': 'string', 'format': 'uuid'},
        'mac_addr': parameter_types.mac_address
    },
    'additionalProperties': False,
    'required': ['port_state', 'fixed_ips', 'port_id', 'net_id', 'mac_addr']
}

# Response schema for showing or creating a single interface attachment:
# HTTP 200 with one 'interfaceAttachment' object in the body.
get_create_interfaces = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'interfaceAttachment': interface_common_info
        },
        'additionalProperties': False,
        'required': ['interfaceAttachment']
    }
}

# Response schema for listing a server's interface attachments:
# HTTP 200 with an 'interfaceAttachments' array in the body.
list_interfaces = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'interfaceAttachments': {
                'type': 'array',
                'items': interface_common_info
            }
        },
        'additionalProperties': False,
        'required': ['interfaceAttachments']
    }
}

# Detaching an interface returns 202 (Accepted) with no body to validate.
delete_interface = {
    'status_code': [202]
}
|
apache-2.0
|
naojsoft/qplan
|
qplan/plugins/AirMassChart.py
|
1
|
4344
|
#
# AirMassChart.py -- AirMass chart plugin
#
# Eric Jeschke (eric@naoj.org)
#
from datetime import timedelta
#from dateutil import tz
from ginga.gw import Widgets, Plot
from ginga.misc import Bunch
from qplan.plugins import PlBase
from qplan.plots.airmass import AirMassPlot
class AirMassChart(PlBase.Plugin):
    """Queue-planner plugin that renders an airmass (target altitude vs.
    time) chart for the currently selected schedule.

    Plot data is precomputed when the scheduler emits 'schedule-added'
    (see add_schedule) and rendered lazily when the user selects a
    schedule ('schedule-selected').
    """

    def __init__(self, controller):
        super(AirMassChart, self).__init__(controller)

        # Maps schedule -> Bunch(site, num_tgts, target_data), filled by
        # add_schedule() and consumed by show_schedule_cb().
        self.schedules = {}
        # Lazy-init flag: plot.setup() is deferred to the first display.
        self.initialized = False

        # Set preferred timezone for plot
        #self.tz = tz.UTC
        sdlr = self.model.get_scheduler()
        self.tz = sdlr.timezone

        sdlr.add_callback('schedule-cleared', self.clear_schedule_cb)
        sdlr.add_callback('schedule-added', self.new_schedule_cb)
        self.model.add_callback('schedule-selected', self.show_schedule_cb)

    def build_gui(self, container):
        """Create the airmass plot widget and pack it into `container`."""
        self.plot = AirMassPlot(700, 500, logger=self.logger)
        plot_w = Plot.PlotWidget(self.plot, width=700, height=500)

        container.set_margins(2, 2, 2, 2)
        container.set_spacing(4)
        container.add_widget(plot_w, stretch=1)

    def show_schedule_cb(self, qmodel, schedule):
        """Callback for 'schedule-selected': draw the airmass chart for
        `schedule` from the data cached by add_schedule().

        Errors are logged rather than propagated so a plotting failure
        cannot break the GUI event loop; always returns True.
        """
        try:
            info = self.schedules[schedule]

            if not self.initialized:
                self.plot.setup()
                self.initialized = True

            if info.num_tgts == 0:
                self.logger.debug("no targets for plotting airmass")
                self.view.gui_call(self.plot.clear)
            else:
                self.logger.debug("plotting airmass")
                self.view.gui_call(self.plot.clear)
                site = info.site
                target_data = info.target_data

                # Plot a subset of the targets.
                # idx_tgt_plots is a percentage offset into the target list;
                # num_tgt_plots caps how many targets are drawn at once.
                idx = int((self.controller.idx_tgt_plots / 100.0) * len(target_data))
                num_tgts = self.controller.num_tgt_plots
                target_data = target_data[idx:idx+num_tgts]

                # error_wrap runs the call on the GUI thread with error trapping.
                self.view.error_wrap(self.plot.plot_altitude, site, target_data,
                                     self.tz)
                self.view.error_wrap(self.plot.draw)
        ## except KeyError:
        ##     pass
        except Exception as e:
            self.logger.error("Error plotting airmass: %s" % (str(e)))

        return True

    def add_schedule(self, schedule):
        """Precompute airmass plot data for `schedule` and cache it in
        self.schedules for later display."""
        self.logger.debug("adding schedule %s" % (schedule))

        start_time = schedule.start_time
        sdlr = self.model.get_scheduler()

        # calc noon on the day of observation in sdlr time zone
        ndate = start_time.strftime("%Y-%m-%d") + " 12:00:00"
        site = sdlr.site
        noon_time = site.get_date(ndate, timezone=sdlr.timezone)

        # plot period 15 minutes before sunset to 15 minutes after sunrise
        delta = 60*15
        start_time = site.sunset(noon_time) - timedelta(0, delta)
        # NOTE(review): stop_time is computed but never used below — confirm
        # whether the plot range should be bounded by it.
        stop_time = site.sunrise(start_time) + timedelta(0, delta)

        targets = []
        site.set_date(start_time)
        for slot in schedule.slots:
            ob = slot.ob
            if (ob is not None) and (not ob.derived):
                # not an OB generated to serve another OB
                # TODO: make sure targets are unique in pointing
                targets.append(ob.target)

        # make airmass plot
        num_tgts = len(targets)
        target_data = []
        lengths = []
        if num_tgts > 0:
            for tgt in targets:
                ## info_list = site.get_target_info(tgt,
                ##                                  start_time=start_time)
                info_list = site.get_target_info(tgt)
                target_data.append(Bunch.Bunch(history=info_list, target=tgt))
                lengths.append(len(info_list))

        # clip all arrays to same length so targets plot over the same span
        min_len = 0
        if len(lengths) > 0:
            min_len = min(lengths)
        for il in target_data:
            il.history = il.history[:min_len]

        self.schedules[schedule] = Bunch.Bunch(site=site, num_tgts=num_tgts,
                                               target_data=target_data)

    def new_schedule_cb(self, qscheduler, schedule):
        """Callback for 'schedule-added': digest the new schedule."""
        self.add_schedule(schedule)

    def clear_schedule_cb(self, qscheduler):
        """Callback for 'schedule-cleared': wipe the plot."""
        self.view.gui_call(self.plot.clear)
        self.logger.info("cleared plot")
        return True
#END
|
bsd-3-clause
|
skywin/p2pool
|
p2pool/test/bitcoin/test_data.py
|
272
|
4635
|
import unittest
from p2pool.bitcoin import data, networks
from p2pool.util import pack
class Test(unittest.TestCase):
    """Regression tests for bitcoin serialization and hashing helpers,
    verified against known mainnet/litecoin block and transaction data.

    NOTE(review): uses Python 2 idioms ('...'.decode('hex')); not
    Python 3 compatible as written.
    """

    def test_header_hash(self):
        # A real bitcoin block header must double-SHA256 to its known hash.
        assert data.hash256(data.block_header_type.pack(dict(
            version=1,
            previous_block=0x000000000000038a2a86b72387f93c51298298a732079b3b686df3603d2f6282,
            merkle_root=0x37a43a3b812e4eb665975f46393b4360008824aab180f27d642de8c28073bc44,
            timestamp=1323752685,
            bits=data.FloatingInteger(437159528),
            nonce=3658685446,
        ))) == 0x000000000000003aaaf7638f9f9c0d0c60e8b0eb817dcdb55fd2b1964efc5175

    def test_header_hash_litecoin(self):
        # Litecoin's scrypt proof-of-work only needs to be below target here.
        assert networks.nets['litecoin'].POW_FUNC(data.block_header_type.pack(dict(
            version=1,
            previous_block=0xd928d3066613d1c9dd424d5810cdd21bfeef3c698977e81ec1640e1084950073,
            merkle_root=0x03f4b646b58a66594a182b02e425e7b3a93c8a52b600aa468f1bc5549f395f16,
            timestamp=1327807194,
            bits=data.FloatingInteger(0x1d01b56f),
            nonce=20736,
        ))) < 2**256//2**30

    def test_tx_hash(self):
        # A coinbase transaction must serialize and hash to its known txid.
        assert data.hash256(data.tx_type.pack(dict(
            version=1,
            tx_ins=[dict(
                previous_output=None,
                sequence=None,
                script='70736a0468860e1a0452389500522cfabe6d6d2b2f33cf8f6291b184f1b291d24d82229463fcec239afea0ee34b4bfc622f62401000000000000004d696e656420627920425443204775696c6420ac1eeeed88'.decode('hex'),
            )],
            tx_outs=[dict(
                value=5003880250,
                script=data.pubkey_hash_to_script2(pack.IntType(160).unpack('ca975b00a8c203b8692f5a18d92dc5c2d2ebc57b'.decode('hex'))),
            )],
            lock_time=0,
        ))) == 0xb53802b2333e828d6532059f46ecf6b313a42d79f97925e457fbbfda45367e5c

    def test_address_to_pubkey_hash(self):
        # Base58Check address decodes back to its 160-bit pubkey hash.
        assert data.address_to_pubkey_hash('1KUCp7YP5FP8ViRxhfszSUJCTAajK6viGy', networks.nets['bitcoin']) == pack.IntType(160).unpack('ca975b00a8c203b8692f5a18d92dc5c2d2ebc57b'.decode('hex'))

    def test_merkle_hash(self):
        # Merkle root of the txids of a real block must match its header's
        # merkle_root (same value checked in test_header_hash above).
        assert data.merkle_hash([
            0xb53802b2333e828d6532059f46ecf6b313a42d79f97925e457fbbfda45367e5c,
            0x326dfe222def9cf571af37a511ccda282d83bedcc01dabf8aa2340d342398cf0,
            0x5d2e0541c0f735bac85fa84bfd3367100a3907b939a0c13e558d28c6ffd1aea4,
            0x8443faf58aa0079760750afe7f08b759091118046fe42794d3aca2aa0ff69da2,
            0x4d8d1c65ede6c8eab843212e05c7b380acb82914eef7c7376a214a109dc91b9d,
            0x1d750bc0fa276f89db7e6ed16eb1cf26986795121f67c03712210143b0cb0125,
            0x5179349931d714d3102dfc004400f52ef1fed3b116280187ca85d1d638a80176,
            0xa8b3f6d2d566a9239c9ad9ae2ed5178dee4a11560a8dd1d9b608fd6bf8c1e75,
            0xab4d07cd97f9c0c4129cff332873a44efdcd33bdbfc7574fe094df1d379e772f,
            0xf54a7514b1de8b5d9c2a114d95fba1e694b6e3e4a771fda3f0333515477d685b,
            0x894e972d8a2fc6c486da33469b14137a7f89004ae07b95e63923a3032df32089,
            0x86cdde1704f53fce33ab2d4f5bc40c029782011866d0e07316d695c41e32b1a0,
            0xf7cf4eae5e497be8215778204a86f1db790d9c27fe6a5b9f745df5f3862f8a85,
            0x2e72f7ddf157d64f538ec72562a820e90150e8c54afc4d55e0d6e3dbd8ca50a,
            0x9f27471dfbc6ce3cbfcf1c8b25d44b8d1b9d89ea5255e9d6109e0f9fd662f75c,
            0x995f4c9f78c5b75a0c19f0a32387e9fa75adaa3d62fba041790e06e02ae9d86d,
            0xb11ec2ad2049aa32b4760d458ee9effddf7100d73c4752ea497e54e2c58ba727,
            0xa439f288fbc5a3b08e5ffd2c4e2d87c19ac2d5e4dfc19fabfa33c7416819e1ec,
            0x3aa33f886f1357b4bbe81784ec1cf05873b7c5930ab912ee684cc6e4f06e4c34,
            0xcab9a1213037922d94b6dcd9c567aa132f16360e213c202ee59f16dde3642ac7,
            0xa2d7a3d2715eb6b094946c6e3e46a88acfb37068546cabe40dbf6cd01a625640,
            0x3d02764f24816aaa441a8d472f58e0f8314a70d5b44f8a6f88cc8c7af373b24e,
            0xcc5adf077c969ebd78acebc3eb4416474aff61a828368113d27f72ad823214d0,
            0xf2d8049d1971f02575eb37d3a732d46927b6be59a18f1bd0c7f8ed123e8a58a,
            0x94ffe8d46a1accd797351894f1774995ed7df3982c9a5222765f44d9c3151dbb,
            0x82268fa74a878636261815d4b8b1b01298a8bffc87336c0d6f13ef6f0373f1f0,
            0x73f441f8763dd1869fe5c2e9d298b88dc62dc8c75af709fccb3622a4c69e2d55,
            0xeb78fc63d4ebcdd27ed618fd5025dc61de6575f39b2d98e3be3eb482b210c0a0,
            0x13375a426de15631af9afdf00c490e87cc5aab823c327b9856004d0b198d72db,
            0x67d76a64fa9b6c5d39fde87356282ef507b3dec1eead4b54e739c74e02e81db4,
        ]) == 0x37a43a3b812e4eb665975f46393b4360008824aab180f27d642de8c28073bc44
|
gpl-3.0
|
pre-commit/pre-commit
|
tests/languages/ruby_test.py
|
1
|
2645
|
import os.path
import tarfile
from unittest import mock
import pytest
import pre_commit.constants as C
from pre_commit import parse_shebang
from pre_commit.languages import ruby
from pre_commit.prefix import Prefix
from pre_commit.util import cmd_output
from pre_commit.util import resource_bytesio
from testing.util import xfailif_windows
# Unwrap the decorated get_default_version (functools sets __wrapped__) so
# each test computes the value fresh instead of hitting the cached result.
ACTUAL_GET_DEFAULT_VERSION = ruby.get_default_version.__wrapped__
@pytest.fixture
def find_exe_mck():
    """Patch parse_shebang.find_executable and yield the mock, letting tests
    control which executables appear to exist on PATH."""
    with mock.patch.object(parse_shebang, 'find_executable') as mck:
        yield mck
def test_uses_default_version_when_not_available(find_exe_mck):
    # No ruby/gem executables on PATH -> fall back to the managed default.
    find_exe_mck.return_value = None
    assert ACTUAL_GET_DEFAULT_VERSION() == C.DEFAULT
def test_uses_system_if_both_gem_and_ruby_are_available(find_exe_mck):
    # Executables found on PATH -> prefer the system ruby installation.
    find_exe_mck.return_value = '/path/to/exe'
    assert ACTUAL_GET_DEFAULT_VERSION() == 'system'
@pytest.fixture
def fake_gem_prefix(tmpdir):
    """Yield a Prefix containing a minimal valid gemspec, standing in for a
    real ruby hook repository."""
    gemspec = '''\
Gem::Specification.new do |s|
    s.name = 'pre_commit_placeholder_package'
    s.version = '0.0.0'
    s.summary = 'placeholder gem for pre-commit hooks'
    s.authors = ['Anthony Sottile']
end
'''
    tmpdir.join('placeholder_gem.gemspec').write(gemspec)
    yield Prefix(tmpdir)
@xfailif_windows  # pragma: win32 no cover
def test_install_ruby_system(fake_gem_prefix):
    """Installing against the 'system' ruby should build and expose the gem."""
    ruby.install_environment(fake_gem_prefix, 'system', ())

    # Should be able to activate and use rbenv install
    with ruby.in_env(fake_gem_prefix, 'system'):
        _, out, _ = cmd_output('gem', 'list')
        assert 'pre_commit_placeholder_package' in out
@xfailif_windows  # pragma: win32 no cover
def test_install_ruby_default(fake_gem_prefix):
    """Installing with the default version should set up a local rbenv."""
    ruby.install_environment(fake_gem_prefix, C.DEFAULT, ())

    # Should have created rbenv directory
    assert os.path.exists(fake_gem_prefix.path('rbenv-default'))

    # Should be able to activate using our script and access rbenv
    with ruby.in_env(fake_gem_prefix, 'default'):
        cmd_output('rbenv', '--help')
@xfailif_windows  # pragma: win32 no cover
def test_install_ruby_with_version(fake_gem_prefix):
    """Installing a pinned ruby version should produce a usable rbenv env."""
    ruby.install_environment(fake_gem_prefix, '2.7.2', ())

    # Should be able to activate and use rbenv install
    with ruby.in_env(fake_gem_prefix, '2.7.2'):
        cmd_output('rbenv', 'install', '--help')
@pytest.mark.parametrize(
    'filename',
    ('rbenv.tar.gz', 'ruby-build.tar.gz', 'ruby-download.tar.gz'),
)
def test_archive_root_stat(filename):
    # The bundled tarballs' root directory must be mode 0o755 so the
    # extracted tools are traversable/executable regardless of the
    # packaging machine's umask.
    with resource_bytesio(filename) as f:
        with tarfile.open(fileobj=f) as tarf:
            # root dir name == archive name without its extensions
            root, _, _ = filename.partition('.')
            assert oct(tarf.getmember(root).mode) == '0o755'
|
mit
|
minixalpha/spark
|
python/pyspark/mllib/tree.py
|
46
|
24110
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import random
from pyspark import SparkContext, RDD, since
from pyspark.mllib.common import callMLlibFunc, inherit_doc, JavaModelWrapper
from pyspark.mllib.linalg import _convert_to_vector
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.util import JavaLoader, JavaSaveable
__all__ = ['DecisionTreeModel', 'DecisionTree', 'RandomForestModel',
'RandomForest', 'GradientBoostedTreesModel', 'GradientBoostedTrees']
class TreeEnsembleModel(JavaModelWrapper, JavaSaveable):
    """TreeEnsembleModel

    Common behavior for ensembles of decision trees (random forests and
    gradient-boosted trees); all operations delegate to the wrapped Java
    model via JavaModelWrapper.call().

    .. versionadded:: 1.3.0
    """
    @since("1.3.0")
    def predict(self, x):
        """
        Predict values for a single data point or an RDD of points using
        the model trained.

        .. note:: In Python, predict cannot currently be used within an RDD
            transformation or action.
            Call predict directly on the RDD instead.
        """
        if isinstance(x, RDD):
            # Distributed prediction: convert each element to an MLlib vector
            # and predict on the whole RDD JVM-side.
            return self.call("predict", x.map(_convert_to_vector))

        else:
            # Single data point.
            return self.call("predict", _convert_to_vector(x))

    @since("1.3.0")
    def numTrees(self):
        """
        Get number of trees in ensemble.
        """
        return self.call("numTrees")

    @since("1.3.0")
    def totalNumNodes(self):
        """
        Get total number of nodes, summed over all trees in the ensemble.
        """
        return self.call("totalNumNodes")

    def __repr__(self):
        """ Summary of model """
        return self._java_model.toString()

    @since("1.3.0")
    def toDebugString(self):
        """ Full model """
        return self._java_model.toDebugString()
class DecisionTreeModel(JavaModelWrapper, JavaSaveable, JavaLoader):
    """
    A decision tree model for classification or regression.

    .. versionadded:: 1.1.0
    """
    @since("1.1.0")
    def predict(self, x):
        """
        Predict the label of one or more examples.

        .. note:: In Python, predict cannot currently be used within an RDD
            transformation or action.
            Call predict directly on the RDD instead.

        :param x:
          Data point (feature vector), or an RDD of data points (feature
          vectors).
        """
        if isinstance(x, RDD):
            # Distributed prediction over the whole RDD.
            return self.call("predict", x.map(_convert_to_vector))

        else:
            # Single data point.
            return self.call("predict", _convert_to_vector(x))

    @since("1.1.0")
    def numNodes(self):
        """Get number of nodes in tree, including leaf nodes."""
        return self._java_model.numNodes()

    @since("1.1.0")
    def depth(self):
        """
        Get depth of tree (e.g. depth 0 means 1 leaf node, depth 1
        means 1 internal node + 2 leaf nodes).
        """
        return self._java_model.depth()

    def __repr__(self):
        """ summary of model. """
        return self._java_model.toString()

    @since("1.2.0")
    def toDebugString(self):
        """ full model. """
        return self._java_model.toDebugString()

    @classmethod
    def _java_loader_class(cls):
        # Java-side class used by JavaLoader.load() to deserialize the model.
        return "org.apache.spark.mllib.tree.model.DecisionTreeModel"
class DecisionTree(object):
    """
    Learning algorithm for a decision tree model for classification or
    regression.

    .. versionadded:: 1.1.0
    """

    @classmethod
    def _train(cls, data, algo, numClasses, categoricalFeaturesInfo, impurity="gini",
               maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0):
        """Shared worker for trainClassifier/trainRegressor.

        Parameters renamed from the original (`type` -> `algo`,
        `features` -> `categoricalFeaturesInfo`) to avoid shadowing the
        builtin `type` and to match RandomForest._train; call sites in this
        class pass positionally, so behavior is unchanged.

        :param algo: "classification" or "regression".
        """
        first = data.first()
        assert isinstance(first, LabeledPoint), "the data should be RDD of LabeledPoint"
        model = callMLlibFunc("trainDecisionTreeModel", data, algo, numClasses,
                              categoricalFeaturesInfo, impurity, maxDepth, maxBins,
                              minInstancesPerNode, minInfoGain)
        return DecisionTreeModel(model)

    @classmethod
    @since("1.1.0")
    def trainClassifier(cls, data, numClasses, categoricalFeaturesInfo,
                        impurity="gini", maxDepth=5, maxBins=32, minInstancesPerNode=1,
                        minInfoGain=0.0):
        """
        Train a decision tree model for classification.

        :param data:
          Training data: RDD of LabeledPoint. Labels should take values
          {0, 1, ..., numClasses-1}.
        :param numClasses:
          Number of classes for classification.
        :param categoricalFeaturesInfo:
          Map storing arity of categorical features. An entry (n -> k)
          indicates that feature n is categorical with k categories
          indexed from 0: {0, 1, ..., k-1}.
        :param impurity:
          Criterion used for information gain calculation.
          Supported values: "gini" or "entropy".
          (default: "gini")
        :param maxDepth:
          Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
          means 1 internal node + 2 leaf nodes).
          (default: 5)
        :param maxBins:
          Number of bins used for finding splits at each node.
          (default: 32)
        :param minInstancesPerNode:
          Minimum number of instances required at child nodes to create
          the parent split.
          (default: 1)
        :param minInfoGain:
          Minimum info gain required to create a split.
          (default: 0.0)
        :return:
          DecisionTreeModel.

        Example usage:

        >>> from numpy import array
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from pyspark.mllib.tree import DecisionTree
        >>>
        >>> data = [
        ...     LabeledPoint(0.0, [0.0]),
        ...     LabeledPoint(1.0, [1.0]),
        ...     LabeledPoint(1.0, [2.0]),
        ...     LabeledPoint(1.0, [3.0])
        ... ]
        >>> model = DecisionTree.trainClassifier(sc.parallelize(data), 2, {})
        >>> print(model)
        DecisionTreeModel classifier of depth 1 with 3 nodes

        >>> print(model.toDebugString())
        DecisionTreeModel classifier of depth 1 with 3 nodes
          If (feature 0 <= 0.5)
           Predict: 0.0
          Else (feature 0 > 0.5)
           Predict: 1.0
        <BLANKLINE>
        >>> model.predict(array([1.0]))
        1.0
        >>> model.predict(array([0.0]))
        0.0
        >>> rdd = sc.parallelize([[1.0], [0.0]])
        >>> model.predict(rdd).collect()
        [1.0, 0.0]
        """
        return cls._train(data, "classification", numClasses, categoricalFeaturesInfo,
                          impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)

    @classmethod
    @since("1.1.0")
    def trainRegressor(cls, data, categoricalFeaturesInfo,
                       impurity="variance", maxDepth=5, maxBins=32, minInstancesPerNode=1,
                       minInfoGain=0.0):
        """
        Train a decision tree model for regression.

        :param data:
          Training data: RDD of LabeledPoint. Labels are real numbers.
        :param categoricalFeaturesInfo:
          Map storing arity of categorical features. An entry (n -> k)
          indicates that feature n is categorical with k categories
          indexed from 0: {0, 1, ..., k-1}.
        :param impurity:
          Criterion used for information gain calculation.
          The only supported value for regression is "variance".
          (default: "variance")
        :param maxDepth:
          Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
          means 1 internal node + 2 leaf nodes).
          (default: 5)
        :param maxBins:
          Number of bins used for finding splits at each node.
          (default: 32)
        :param minInstancesPerNode:
          Minimum number of instances required at child nodes to create
          the parent split.
          (default: 1)
        :param minInfoGain:
          Minimum info gain required to create a split.
          (default: 0.0)
        :return:
          DecisionTreeModel.

        Example usage:

        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from pyspark.mllib.tree import DecisionTree
        >>> from pyspark.mllib.linalg import SparseVector
        >>>
        >>> sparse_data = [
        ...     LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
        ...     LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
        ...     LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
        ...     LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
        ... ]
        >>>
        >>> model = DecisionTree.trainRegressor(sc.parallelize(sparse_data), {})
        >>> model.predict(SparseVector(2, {1: 1.0}))
        1.0
        >>> model.predict(SparseVector(2, {1: 0.0}))
        0.0
        >>> rdd = sc.parallelize([[0.0, 1.0], [0.0, 0.0]])
        >>> model.predict(rdd).collect()
        [1.0, 0.0]
        """
        return cls._train(data, "regression", 0, categoricalFeaturesInfo,
                          impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
@inherit_doc
class RandomForestModel(TreeEnsembleModel, JavaLoader):
    """
    Represents a random forest model.

    .. versionadded:: 1.2.0
    """

    @classmethod
    def _java_loader_class(cls):
        # Java-side class used by JavaLoader.load() to deserialize the model.
        return "org.apache.spark.mllib.tree.model.RandomForestModel"
class RandomForest(object):
    """
    Learning algorithm for a random forest model for classification or
    regression.

    .. versionadded:: 1.2.0
    """

    # Accepted values for the featureSubsetStrategy parameter below.
    supportedFeatureSubsetStrategies = ("auto", "all", "sqrt", "log2", "onethird")

    @classmethod
    def _train(cls, data, algo, numClasses, categoricalFeaturesInfo, numTrees,
               featureSubsetStrategy, impurity, maxDepth, maxBins, seed):
        """Shared worker for trainClassifier/trainRegressor: validate the
        input, fill in a random seed if none was given, and invoke the JVM
        trainer."""
        first = data.first()
        assert isinstance(first, LabeledPoint), "the data should be RDD of LabeledPoint"
        if featureSubsetStrategy not in cls.supportedFeatureSubsetStrategies:
            raise ValueError("unsupported featureSubsetStrategy: %s" % featureSubsetStrategy)
        if seed is None:
            # Pick the seed driver-side so one seed governs the whole forest.
            seed = random.randint(0, 1 << 30)
        model = callMLlibFunc("trainRandomForestModel", data, algo, numClasses,
                              categoricalFeaturesInfo, numTrees, featureSubsetStrategy, impurity,
                              maxDepth, maxBins, seed)
        return RandomForestModel(model)

    @classmethod
    @since("1.2.0")
    def trainClassifier(cls, data, numClasses, categoricalFeaturesInfo, numTrees,
                        featureSubsetStrategy="auto", impurity="gini", maxDepth=4, maxBins=32,
                        seed=None):
        """
        Train a random forest model for binary or multiclass
        classification.

        :param data:
          Training dataset: RDD of LabeledPoint. Labels should take values
          {0, 1, ..., numClasses-1}.
        :param numClasses:
          Number of classes for classification.
        :param categoricalFeaturesInfo:
          Map storing arity of categorical features. An entry (n -> k)
          indicates that feature n is categorical with k categories
          indexed from 0: {0, 1, ..., k-1}.
        :param numTrees:
          Number of trees in the random forest.
        :param featureSubsetStrategy:
          Number of features to consider for splits at each node.
          Supported values: "auto", "all", "sqrt", "log2", "onethird".
          If "auto" is set, this parameter is set based on numTrees:
          if numTrees == 1, set to "all";
          if numTrees > 1 (forest) set to "sqrt".
          (default: "auto")
        :param impurity:
          Criterion used for information gain calculation.
          Supported values: "gini" or "entropy".
          (default: "gini")
        :param maxDepth:
          Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
          means 1 internal node + 2 leaf nodes).
          (default: 4)
        :param maxBins:
          Maximum number of bins used for splitting features.
          (default: 32)
        :param seed:
          Random seed for bootstrapping and choosing feature subsets.
          Set as None to generate seed based on system time.
          (default: None)
        :return:
          RandomForestModel that can be used for prediction.

        Example usage:

        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from pyspark.mllib.tree import RandomForest
        >>>
        >>> data = [
        ...     LabeledPoint(0.0, [0.0]),
        ...     LabeledPoint(0.0, [1.0]),
        ...     LabeledPoint(1.0, [2.0]),
        ...     LabeledPoint(1.0, [3.0])
        ... ]
        >>> model = RandomForest.trainClassifier(sc.parallelize(data), 2, {}, 3, seed=42)
        >>> model.numTrees()
        3
        >>> model.totalNumNodes()
        7
        >>> print(model)
        TreeEnsembleModel classifier with 3 trees
        <BLANKLINE>
        >>> print(model.toDebugString())
        TreeEnsembleModel classifier with 3 trees
        <BLANKLINE>
          Tree 0:
            Predict: 1.0
          Tree 1:
            If (feature 0 <= 1.5)
             Predict: 0.0
            Else (feature 0 > 1.5)
             Predict: 1.0
          Tree 2:
            If (feature 0 <= 1.5)
             Predict: 0.0
            Else (feature 0 > 1.5)
             Predict: 1.0
        <BLANKLINE>
        >>> model.predict([2.0])
        1.0
        >>> model.predict([0.0])
        0.0
        >>> rdd = sc.parallelize([[3.0], [1.0]])
        >>> model.predict(rdd).collect()
        [1.0, 0.0]
        """
        return cls._train(data, "classification", numClasses,
                          categoricalFeaturesInfo, numTrees, featureSubsetStrategy, impurity,
                          maxDepth, maxBins, seed)

    @classmethod
    @since("1.2.0")
    def trainRegressor(cls, data, categoricalFeaturesInfo, numTrees, featureSubsetStrategy="auto",
                       impurity="variance", maxDepth=4, maxBins=32, seed=None):
        """
        Train a random forest model for regression.

        :param data:
          Training dataset: RDD of LabeledPoint. Labels are real numbers.
        :param categoricalFeaturesInfo:
          Map storing arity of categorical features. An entry (n -> k)
          indicates that feature n is categorical with k categories
          indexed from 0: {0, 1, ..., k-1}.
        :param numTrees:
          Number of trees in the random forest.
        :param featureSubsetStrategy:
          Number of features to consider for splits at each node.
          Supported values: "auto", "all", "sqrt", "log2", "onethird".
          If "auto" is set, this parameter is set based on numTrees:
          if numTrees == 1, set to "all";
          if numTrees > 1 (forest) set to "onethird" for regression.
          (default: "auto")
        :param impurity:
          Criterion used for information gain calculation.
          The only supported value for regression is "variance".
          (default: "variance")
        :param maxDepth:
          Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
          means 1 internal node + 2 leaf nodes).
          (default: 4)
        :param maxBins:
          Maximum number of bins used for splitting features.
          (default: 32)
        :param seed:
          Random seed for bootstrapping and choosing feature subsets.
          Set as None to generate seed based on system time.
          (default: None)
        :return:
          RandomForestModel that can be used for prediction.

        Example usage:

        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from pyspark.mllib.tree import RandomForest
        >>> from pyspark.mllib.linalg import SparseVector
        >>>
        >>> sparse_data = [
        ...     LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
        ...     LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
        ...     LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
        ...     LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
        ... ]
        >>>
        >>> model = RandomForest.trainRegressor(sc.parallelize(sparse_data), {}, 2, seed=42)
        >>> model.numTrees()
        2
        >>> model.totalNumNodes()
        4
        >>> model.predict(SparseVector(2, {1: 1.0}))
        1.0
        >>> model.predict(SparseVector(2, {0: 1.0}))
        0.5
        >>> rdd = sc.parallelize([[0.0, 1.0], [1.0, 0.0]])
        >>> model.predict(rdd).collect()
        [1.0, 0.5]
        """
        return cls._train(data, "regression", 0, categoricalFeaturesInfo, numTrees,
                          featureSubsetStrategy, impurity, maxDepth, maxBins, seed)
@inherit_doc
class GradientBoostedTreesModel(TreeEnsembleModel, JavaLoader):
    """
    Represents a gradient-boosted tree model.

    .. versionadded:: 1.3.0
    """

    @classmethod
    def _java_loader_class(cls):
        # Java-side class used by JavaLoader.load() to deserialize the model.
        return "org.apache.spark.mllib.tree.model.GradientBoostedTreesModel"
class GradientBoostedTrees(object):
    """
    Learning algorithm for a gradient boosted trees model for
    classification or regression.

    .. versionadded:: 1.3.0
    """

    @classmethod
    def _train(cls, data, algo, categoricalFeaturesInfo,
               loss, numIterations, learningRate, maxDepth, maxBins):
        """Shared worker for trainClassifier/trainRegressor: validate the
        input and invoke the JVM trainer."""
        first = data.first()
        assert isinstance(first, LabeledPoint), "the data should be RDD of LabeledPoint"
        model = callMLlibFunc("trainGradientBoostedTreesModel", data, algo, categoricalFeaturesInfo,
                              loss, numIterations, learningRate, maxDepth, maxBins)
        return GradientBoostedTreesModel(model)

    @classmethod
    @since("1.3.0")
    def trainClassifier(cls, data, categoricalFeaturesInfo,
                        loss="logLoss", numIterations=100, learningRate=0.1, maxDepth=3,
                        maxBins=32):
        """
        Train a gradient-boosted trees model for classification.

        :param data:
          Training dataset: RDD of LabeledPoint. Labels should take values
          {0, 1}.
        :param categoricalFeaturesInfo:
          Map storing arity of categorical features. An entry (n -> k)
          indicates that feature n is categorical with k categories
          indexed from 0: {0, 1, ..., k-1}.
        :param loss:
          Loss function used for minimization during gradient boosting.
          Supported values: "logLoss", "leastSquaresError",
          "leastAbsoluteError".
          (default: "logLoss")
        :param numIterations:
          Number of iterations of boosting.
          (default: 100)
        :param learningRate:
          Learning rate for shrinking the contribution of each estimator.
          The learning rate should be between in the interval (0, 1].
          (default: 0.1)
        :param maxDepth:
          Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
          means 1 internal node + 2 leaf nodes).
          (default: 3)
        :param maxBins:
          Maximum number of bins used for splitting features. DecisionTree
          requires maxBins >= max categories.
          (default: 32)
        :return:
          GradientBoostedTreesModel that can be used for prediction.

        Example usage:

        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from pyspark.mllib.tree import GradientBoostedTrees
        >>>
        >>> data = [
        ...     LabeledPoint(0.0, [0.0]),
        ...     LabeledPoint(0.0, [1.0]),
        ...     LabeledPoint(1.0, [2.0]),
        ...     LabeledPoint(1.0, [3.0])
        ... ]
        >>>
        >>> model = GradientBoostedTrees.trainClassifier(sc.parallelize(data), {}, numIterations=10)
        >>> model.numTrees()
        10
        >>> model.totalNumNodes()
        30
        >>> print(model)  # it already has newline
        TreeEnsembleModel classifier with 10 trees
        <BLANKLINE>
        >>> model.predict([2.0])
        1.0
        >>> model.predict([0.0])
        0.0
        >>> rdd = sc.parallelize([[2.0], [0.0]])
        >>> model.predict(rdd).collect()
        [1.0, 0.0]
        """
        return cls._train(data, "classification", categoricalFeaturesInfo,
                          loss, numIterations, learningRate, maxDepth, maxBins)

    @classmethod
    @since("1.3.0")
    def trainRegressor(cls, data, categoricalFeaturesInfo,
                       loss="leastSquaresError", numIterations=100, learningRate=0.1, maxDepth=3,
                       maxBins=32):
        """
        Train a gradient-boosted trees model for regression.

        :param data:
          Training dataset: RDD of LabeledPoint. Labels are real numbers.
        :param categoricalFeaturesInfo:
          Map storing arity of categorical features. An entry (n -> k)
          indicates that feature n is categorical with k categories
          indexed from 0: {0, 1, ..., k-1}.
        :param loss:
          Loss function used for minimization during gradient boosting.
          Supported values: "logLoss", "leastSquaresError",
          "leastAbsoluteError".
          (default: "leastSquaresError")
        :param numIterations:
          Number of iterations of boosting.
          (default: 100)
        :param learningRate:
          Learning rate for shrinking the contribution of each estimator.
          The learning rate should be between in the interval (0, 1].
          (default: 0.1)
        :param maxDepth:
          Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
          means 1 internal node + 2 leaf nodes).
          (default: 3)
        :param maxBins:
          Maximum number of bins used for splitting features. DecisionTree
          requires maxBins >= max categories.
          (default: 32)
        :return:
          GradientBoostedTreesModel that can be used for prediction.

        Example usage:

        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from pyspark.mllib.tree import GradientBoostedTrees
        >>> from pyspark.mllib.linalg import SparseVector
        >>>
        >>> sparse_data = [
        ...     LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
        ...     LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
        ...     LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
        ...     LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
        ... ]
        >>>
        >>> data = sc.parallelize(sparse_data)
        >>> model = GradientBoostedTrees.trainRegressor(data, {}, numIterations=10)
        >>> model.numTrees()
        10
        >>> model.totalNumNodes()
        12
        >>> model.predict(SparseVector(2, {1: 1.0}))
        1.0
        >>> model.predict(SparseVector(2, {0: 1.0}))
        0.0
        >>> rdd = sc.parallelize([[0.0, 1.0], [1.0, 0.0]])
        >>> model.predict(rdd).collect()
        [1.0, 0.0]
        """
        return cls._train(data, "regression", categoricalFeaturesInfo,
                          loss, numIterations, learningRate, maxDepth, maxBins)
def _test():
    """Run this module's doctests against a local 4-core SparkContext.

    Exits the process with a non-zero status if any doctest fails so that
    CI treats doctest failures as build failures.
    """
    import doctest
    import sys
    globs = globals().copy()
    from pyspark.sql import SparkSession
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("mllib.tree tests")\
        .getOrCreate()
    # The doctests reference `sc`; expose the context backing the session.
    globs['sc'] = spark.sparkContext
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if failure_count:
        # Use sys.exit rather than the bare `exit` builtin: `exit` is
        # injected by the `site` module and is absent under `python -S`.
        sys.exit(-1)


if __name__ == "__main__":
    _test()
|
apache-2.0
|
sxpert/ansible-modules-core
|
packaging/os/rhn_register.py
|
122
|
12900
|
#!/usr/bin/python
# (c) James Laska
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rhn_register
short_description: Manage Red Hat Network registration using the C(rhnreg_ks) command
description:
- Manage registration to the Red Hat Network.
version_added: "1.2"
author: James Laska
notes:
- In order to register a system, rhnreg_ks requires either a username and password, or an activationkey.
requirements:
- rhnreg_ks
options:
state:
description:
- whether to register (C(present)), or unregister (C(absent)) a system
required: false
choices: [ "present", "absent" ]
default: "present"
username:
description:
- Red Hat Network username
required: False
default: null
password:
description:
- Red Hat Network password
required: False
default: null
server_url:
description:
- Specify an alternative Red Hat Network server URL
required: False
default: Current value of I(serverURL) from C(/etc/sysconfig/rhn/up2date) is the default
activationkey:
description:
- supply an activation key for use with registration
required: False
default: null
profilename:
description:
- supply an profilename for use with registration
required: False
default: null
version_added: "2.0"
channels:
description:
- Optionally specify a list of comma-separated channels to subscribe to upon successful registration.
required: false
default: []
'''
EXAMPLES = '''
# Unregister system from RHN.
- rhn_register: state=absent username=joe_user password=somepass
# Register as user (joe_user) with password (somepass) and auto-subscribe to available content.
- rhn_register: state=present username=joe_user password=somepass
# Register with activationkey (1-222333444) and enable extended update support.
- rhn_register: state=present activationkey=1-222333444 enable_eus=true
# Register with activationkey (1-222333444) and set a profilename which may differ from the hostname.
- rhn_register: state=present activationkey=1-222333444 profilename=host.example.com.custom
# Register as user (joe_user) with password (somepass) against a satellite
# server specified by (server_url).
- rhn_register: >
state=present
username=joe_user
password=somepass
server_url=https://xmlrpc.my.satellite/XMLRPC
# Register as user (joe_user) with password (somepass) and enable
# channels (rhel-x86_64-server-6-foo-1) and (rhel-x86_64-server-6-bar-1).
- rhn_register: state=present username=joe_user
password=somepass
channels=rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1
'''
import sys
import types
import xmlrpclib
import urlparse
# Attempt to import rhn client tools
sys.path.insert(0, '/usr/share/rhn')
try:
import up2date_client
import up2date_client.config
except ImportError, e:
module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?\n%s" % e)
# INSERT REDHAT SNIPPETS
from ansible.module_utils.redhat import *
# INSERT COMMON SNIPPETS
from ansible.module_utils.basic import *
class Rhn(RegistrationBase):
    '''
    Red Hat Network client wrapper.

    Combines the local up2date configuration
    (/etc/sysconfig/rhn/up2date), the rhnreg_ks / rhn-channel command
    line tools and the RHN XMLRPC API into a single object.  The caller
    is expected to attach an AnsibleModule instance as ``self.module``
    before invoking register()/unregister()/subscribe().
    '''

    def __init__(self, username=None, password=None):
        RegistrationBase.__init__(self, username, password)
        self.config = self.load_config()

    def load_config(self):
        '''
        Read configuration from /etc/sysconfig/rhn/up2date
        '''
        self.config = up2date_client.config.initUp2dateConfig()

        # Add support for specifying a default value w/o having to standup some
        # configuration.  Yeah, I know this should be subclassed ... but, oh
        # well
        def get_option_default(self, key, default=''):
            # ignore pep8 W601 errors for this line
            # setting this to use 'in' does not work in the rhn library
            if self.has_key(key):
                return self[key]
            else:
                return default

        self.config.get_option = types.MethodType(get_option_default, self.config, up2date_client.config.Config)

        return self.config

    @property
    def hostname(self):
        '''
        Return the non-xmlrpc RHN hostname.  This is a convenience method
        used for displaying a more readable RHN hostname.

        Returns: str
        '''
        url = urlparse.urlparse(self.config['serverURL'])
        return url[1].replace('xmlrpc.', '')

    @property
    def systemid(self):
        '''
        Return this host's RHN system id as an int, parsed from the XML
        systemid file.  Tries libxml2 first, then lxml.

        NOTE(review): if the file is missing or neither parser is
        available, ``systemid`` stays None and int(None) raises
        TypeError — callers only use this after registration, when the
        file exists.
        '''
        systemid = None
        xpath_str = "//member[name='system_id']/value/string"

        if os.path.isfile(self.config['systemIdPath']):
            fd = open(self.config['systemIdPath'], 'r')
            xml_data = fd.read()
            fd.close()

            # Ugh, xml parsing time ...
            # First, try parsing with libxml2 ...
            if systemid is None:
                try:
                    import libxml2
                    doc = libxml2.parseDoc(xml_data)
                    ctxt = doc.xpathNewContext()
                    systemid = ctxt.xpathEval(xpath_str)[0].content
                    doc.freeDoc()
                    ctxt.xpathFreeContext()
                except ImportError:
                    pass

            # m-kay, let's try with lxml now ...
            if systemid is None:
                try:
                    from lxml import etree
                    root = etree.fromstring(xml_data)
                    systemid = root.xpath(xpath_str)[0].text
                except ImportError:
                    pass

            # Strip the 'ID-' prefix
            if systemid is not None and systemid.startswith('ID-'):
                systemid = systemid[3:]

        return int(systemid)

    @property
    def is_registered(self):
        '''
        Determine whether the current system is registered.

        Returns: True|False
        '''
        return os.path.isfile(self.config['systemIdPath'])

    def configure(self, server_url):
        '''
        Configure system for registration by persisting the server URL
        into the up2date configuration.
        '''
        self.config.set('serverURL', server_url)
        self.config.save()

    def enable(self):
        '''
        Prepare the system for RHN registration.  This includes ...
          * enabling the rhnplugin yum plugin
          * disabling the subscription-manager yum plugin
        '''
        RegistrationBase.enable(self)
        self.update_plugin_conf('rhnplugin', True)
        self.update_plugin_conf('subscription-manager', False)

    def register(self, enable_eus=False, activationkey=None, profilename=None):
        '''
        Register system to RHN.  If enable_eus=True, extended update
        support will be requested.
        '''
        register_cmd = "/usr/sbin/rhnreg_ks --username='%s' --password='%s' --force" % (self.username, self.password)
        if self.module.params.get('server_url', None):
            register_cmd += " --serverUrl=%s" % self.module.params.get('server_url')
        if enable_eus:
            register_cmd += " --use-eus-channel"
        if activationkey is not None:
            register_cmd += " --activationkey '%s'" % activationkey
        if profilename is not None:
            register_cmd += " --profilename '%s'" % profilename
        # FIXME - support --systemorgid
        # NOTE(review): the command is built by string interpolation and run
        # with use_unsafe_shell=True; credentials containing quotes would
        # break/escape the command line — consider passing an argv list.
        rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True, use_unsafe_shell=True)

    def api(self, method, *args):
        '''
        Convenience RPC wrapper: lazily log in to the XMLRPC endpoint and
        dispatch ``method`` with the session token prepended.
        '''
        if not hasattr(self, 'server') or self.server is None:
            if self.hostname != 'rhn.redhat.com':
                url = "https://%s/rpc/api" % self.hostname
            else:
                url = "https://xmlrpc.%s/rpc/api" % self.hostname
            self.server = xmlrpclib.Server(url, verbose=0)
            self.session = self.server.auth.login(self.username, self.password)

        func = getattr(self.server, method)
        return func(self.session, *args)

    def unregister(self):
        '''
        Unregister a previously registered system
        '''
        # Initiate RPC connection
        self.api('system.deleteSystems', [self.systemid])

        # Remove systemid file
        os.unlink(self.config['systemIdPath'])

    def subscribe(self, channels=None):
        '''
        Subscribe the system to the given channels (in addition to the
        channels it is already subscribed to) via the XMLRPC API.
        '''
        # Avoid a mutable default argument; None means "no channels".
        if not channels:
            return
        current_channels = self.api('channel.software.listSystemChannels', self.systemid)
        merged_channels = [item['channel_label'] for item in current_channels]
        merged_channels.extend(channels)
        return self.api('channel.software.setSystemChannels', self.systemid, merged_channels)

    def _subscribe(self, channels=None):
        '''
        Subscribe to requested yum repositories using 'rhn-channel' command
        '''
        channels = channels or []
        rhn_channel_cmd = "rhn-channel --user='%s' --password='%s'" % (self.username, self.password)
        rc, stdout, stderr = self.module.run_command(rhn_channel_cmd + " --available-channels", check_rc=True)

        # Enable requested repoid's
        for wanted_channel in channels:
            # Each inserted repo regexp will be matched. If no match, no success.
            for available_channel in stdout.rstrip().split('\n'):  # .rstrip() because of \n at the end -> empty string at the end
                # BUG FIX: the original referenced an undefined name
                # 'wanted_repo' here, raising NameError on first use.
                if re.search(wanted_channel, available_channel):
                    rc, stdout, stderr = self.module.run_command(rhn_channel_cmd + " --add --channel=%s" % available_channel, check_rc=True)
def main():
    '''
    Ansible entry point: parse parameters and drive registration
    (state=present) or unregistration (state=absent) of the host.
    '''

    # Read system RHN configuration
    rhn = Rhn()

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent']),
            username=dict(default=None, required=False),
            password=dict(default=None, required=False),
            server_url=dict(default=rhn.config.get_option('serverURL'), required=False),
            activationkey=dict(default=None, required=False),
            profilename=dict(default=None, required=False),
            enable_eus=dict(default=False, type='bool'),
            channels=dict(default=[], type='list'),
        )
    )

    state = module.params['state']
    rhn.username = module.params['username']
    rhn.password = module.params['password']
    rhn.configure(module.params['server_url'])
    activationkey = module.params['activationkey']
    profilename = module.params['profilename']
    channels = module.params['channels']
    rhn.module = module

    # Ensure system is registered
    if state == 'present':

        # Check for missing parameters ...
        if not (activationkey or rhn.username or rhn.password):
            module.fail_json(msg="Missing arguments, must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, rhn.username, rhn.password))
        # The check below requires BOTH credentials, so say "and", not "or".
        if not activationkey and not (rhn.username and rhn.password):
            module.fail_json(msg="Missing arguments, If registering without an activationkey, must supply username and password")

        # Register system
        if rhn.is_registered:
            module.exit_json(changed=False, msg="System already registered.")
        else:
            try:
                rhn.enable()
                # BUG FIX: profilename was parsed above but never forwarded
                # to register(), so the option silently had no effect.
                rhn.register(module.params['enable_eus'] == True, activationkey, profilename)
                rhn.subscribe(channels)
            except Exception as e:
                module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, e))

            module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname)

    # Ensure system is *not* registered
    if state == 'absent':
        if not rhn.is_registered:
            module.exit_json(changed=False, msg="System already unregistered.")
        else:
            try:
                rhn.unregister()
            except Exception as e:
                module.fail_json(msg="Failed to unregister: %s" % e)

            module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname)

main()
|
gpl-3.0
|
denbedilov/ATTENDER
|
server/attender-mobile/lib/requests/packages/chardet/charsetgroupprober.py
|
2929
|
3791
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .charsetprober import CharSetProber
class CharSetGroupProber(CharSetProber):
    """Meta-prober that fans input out to a group of child probers.

    Child probers are populated by subclasses into ``self._mProbers``.
    The group's answer is the child with the highest confidence, or the
    first child that positively identifies the charset.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        self._mActiveNum = 0
        self._mProbers = []
        self._mBestGuessProber = None

    def reset(self):
        """Reset the group and every child prober, reactivating them all."""
        CharSetProber.reset(self)
        self._mActiveNum = 0
        for child in self._mProbers:
            if not child:
                continue
            child.reset()
            child.active = True
            self._mActiveNum += 1
        self._mBestGuessProber = None

    def get_charset_name(self):
        """Return the best child's charset name (None if undecided)."""
        if not self._mBestGuessProber:
            # Force a confidence pass to elect a best guess.
            self.get_confidence()
            if not self._mBestGuessProber:
                return None
        #       self._mBestGuessProber = self._mProbers[0]
        return self._mBestGuessProber.get_charset_name()

    def feed(self, aBuf):
        """Feed data to every active child; short-circuit on a definite hit."""
        for child in self._mProbers:
            if not child or not child.active:
                continue
            status = child.feed(aBuf)
            if not status:
                continue
            if status == constants.eFoundIt:
                # A child is certain -- adopt it immediately.
                self._mBestGuessProber = child
                return self.get_state()
            if status == constants.eNotMe:
                # Deactivate the child; give up entirely once none remain.
                child.active = False
                self._mActiveNum -= 1
                if self._mActiveNum <= 0:
                    self._mState = constants.eNotMe
                    return self.get_state()
        return self.get_state()

    def get_confidence(self):
        """Return the highest child confidence, electing the best child."""
        state = self.get_state()
        if state == constants.eFoundIt:
            return 0.99
        if state == constants.eNotMe:
            return 0.01
        bestConf = 0.0
        self._mBestGuessProber = None
        for child in self._mProbers:
            if not child:
                continue
            if not child.active:
                if constants._debug:
                    sys.stderr.write(child.get_charset_name()
                                     + ' not active\n')
                continue
            conf = child.get_confidence()
            if constants._debug:
                sys.stderr.write('%s confidence = %s\n' %
                                 (child.get_charset_name(), conf))
            if bestConf < conf:
                bestConf = conf
                self._mBestGuessProber = child
        if not self._mBestGuessProber:
            return 0.0
        return bestConf
        # else:
        #     self._mBestGuessProber = self._mProbers[0]
        #     return self._mBestGuessProber.get_confidence()
|
mit
|
radianbaskoro/mathdoku-solver
|
mathdokusolver/Solver.py
|
1
|
13846
|
'''
MathDoku solver module.
@author: Radian Baskoro
'''
import itertools
from datetime import datetime
from Utility import Utility
class Solver:
    '''
    Solver class used to solve the MathDoku problem.

    Workflow: call initialize()/initializeFromFile() to load a puzzle,
    then solve() to run constraint propagation followed by depth-first
    search over the remaining candidate values.
    '''

    __debugLevel = 0       # 0 = silent, 1 = stats, 2 = full trace
    __iterationCount = 0   # number of search-tree nodes visited
    __initFlag = False     # set once initialize() has succeeded

    boardSize = 0
    cages = None

    def __init__(self, debugLevel=0):
        '''
        Constructor for Solver class.
        Accepts debugLevel:
            0 - No debug information
            1 - Number of iterations and elapsed time only
            2 - All information
        '''
        self.__debugLevel = debugLevel

    def initializeFromFile(self, filePath):
        '''
        Initializes the problem from file.
        '''
        inputDataFile = open(filePath, 'r')
        inputData = ''.join(inputDataFile.readlines())
        inputDataFile.close()
        self.initialize(inputData)

    def initialize(self, inputData):
        '''
        Initializes the problem from string.

        Expected format: line 0 is "<boardSize> <cageCount>"; the next
        boardSize lines give the cage index of every cell; the following
        cageCount lines are "<cageIndex> <op> <targetValue>".
        '''
        lines = inputData.split('\n')

        parts = lines[0].split()
        self.boardSize = int(parts[0])
        cageCount = int(parts[1])
        self.cages = [None] * cageCount

        try:
            # Cage definitions follow the board rows.
            for i in range(self.boardSize + 1, self.boardSize + cageCount + 1):
                parts = lines[i].split()
                cage = Cage()
                cage.op = parts[1]
                cage.value = int(parts[2])
                self.cages[int(parts[0])] = cage

            # Attach each board cell to its cage.
            for i in range(0, self.boardSize):
                parts = lines[i + 1].split()
                for j in range(0, self.boardSize):
                    cageIndex = int(parts[j])
                    self.cages[cageIndex].cells.append([i, j])
        except Exception:
            raise InputError('Expecting board size of %d and %d cages.' % (self.boardSize, cageCount))

        self.__validate()
        self.__initFlag = True

    def solve(self):
        '''
        Solves the initialized problem.
        Returns the solution in a 2 dimensional list or None if no solution is found.
        '''
        if not self.__initFlag:
            raise SolverError('Solver not initialized')

        startTime = datetime.now()

        # domain[x][y] = candidate values still possible for cell (x, y);
        # conf[x][y]   = assigned value or None.
        # list(range(...)) keeps domains mutable on Python 3 as well.
        domain = [[list(range(1, self.boardSize + 1)) for y in range(0, self.boardSize)] for x in range(0, self.boardSize)]
        conf = [[None] * self.boardSize for x in range(0, self.boardSize)]

        for cage in self.cages:
            # First pass - assign all single cell cages
            if len(cage.cells) == 1:
                x = cage.cells[0][0]
                y = cage.cells[0][1]
                self.__setValue(domain, conf, x, y, cage.value)
            # Second pass - heuristics
            # Cage using multiplication operation: domains must be a factor of the cage value
            elif cage.op == Op.MULTIPLICATION:
                for cell in cage.cells:
                    x = cell[0]
                    y = cell[1]
                    toRemove = set()
                    for d in domain[x][y]:
                        if cage.value % d != 0:
                            toRemove.add(d)
                    domain[x][y] = list(set(domain[x][y]) - toRemove)

        # Propagating constraints also removes all infeasible number combinations from each cage
        self.__propagateConstraints(domain, conf)

        # Third pass - depth-first search over the remaining candidates
        tree = SolverNode(domain, conf)
        if self.__solveTree(tree):
            if self.__debugLevel >= 1:
                elapsedTime = datetime.now() - startTime
                print('Solved in %d iterations.' % self.__iterationCount)
                print('Elapsed time: %d seconds %d microseconds' %
                      (elapsedTime.seconds, elapsedTime.microseconds))
            return tree.conf
        else:
            if self.__debugLevel >= 1:
                print('No solution found.')
            return None

    def __solveTree(self, node):
        '''
        Recursively solve the problem tree.  Returns True when a complete,
        constraint-satisfying assignment has been found (left in node.conf).
        '''
        self.__iterationCount += 1
        if self.__debugLevel >= 2:
            print('----------------')
            print('Iteration %d' % self.__iterationCount)
            print('----------------')
            print(Utility.formatSolution(node.conf))

        # Assign next value, and check constraints
        if node.value != None:
            self.__setValue(node.domain, node.conf, node.x, node.y, node.value)
        if not ConstraintStore.checkConstraints(self.boardSize, self.cages, node.domain, node.conf, debugLevel=self.__debugLevel):
            return False

        # Done if all cells are assigned.
        # list(...) so len() also works on Python 3 where filter() is lazy.
        unassignedCells = list(filter(lambda x: node.conf[x[0]][x[1]] == None, itertools.product(range(0, self.boardSize), range(0, self.boardSize))))
        if len(unassignedCells) == 0:
            if self.__debugLevel >= 2:
                print('Solution found!')
            return True

        # Get next cell - most constrained (smallest domain) first
        unassignedCells = sorted(unassignedCells, key=lambda x: len(node.domain[x[0]][x[1]]))
        cell = unassignedCells[0]
        x = cell[0]
        y = cell[1]

        # Try each domain value
        for value in node.domain[x][y]:
            childNode = SolverNode(node.domain, node.conf, x, y, value)
            if self.__solveTree(childNode):
                node.domain = childNode.domain
                node.conf = childNode.conf
                return True

        # No solution found in this subtree
        return False

    def __setValue(self, domain, conf, x, y, value):
        '''
        Sets the value at the given location and propagate constraints.
        '''
        conf[x][y] = value
        domain[x][y] = [value]
        if self.__debugLevel >= 2:
            print('(%d,%d) = %d' % (x, y, value))
        self.__propagateConstraints(domain, conf, x=x, y=y, value=value)

    def __propagateConstraints(self, domain, conf, x=None, y=None, value=None):
        '''
        Limits the domain values based on the known constraints.

        When (x, y, value) is given, the value is first removed from every
        other domain in the same row and column; then every multi-cell cage
        is reduced to the values appearing in some feasible combination.
        '''
        removeCount = 0

        if x != None and y != None and value != None:
            for i in range(0, self.boardSize):
                if y != i:
                    # Propagate to row
                    d = domain[x][i]
                    if value in d:
                        removeCount += 1
                        d.remove(value)
                        if len(d) == 1:
                            # Domain collapsed to one value: assign it.
                            self.__setValue(domain, conf, x, i, d[0])
                if x != i:
                    # Propagate to column
                    d = domain[i][y]
                    if value in d:
                        removeCount += 1
                        d.remove(value)
                        if len(d) == 1:
                            self.__setValue(domain, conf, i, y, d[0])

        # Propagate to all cages
        for cage in self.cages:
            cellCount = len(cage.cells)
            if len(cage.cells) > 1:
                # Try every possible combination of the domain of each cells
                # and only keep feasible values
                d = tuple(map(lambda cell: domain[cell[0]][cell[1]], cage.cells))
                feasibleDomain = [set() for x in range(0, cellCount)]
                comb = list(itertools.product(*d))
                for c in comb:
                    cageCalcValue = cage.func(*c)
                    if float(cage.value) == cageCalcValue:
                        for i in range(0, cellCount):
                            feasibleDomain[i].add(c[i])
                for i in range(0, cellCount):
                    cell = cage.cells[i]
                    x = cell[0]
                    y = cell[1]
                    newDomain = list(set(domain[x][y]) & feasibleDomain[i])
                    removeCount += len(domain[x][y]) - len(newDomain)
                    domain[x][y] = newDomain

        if self.__debugLevel >= 2:
            print("%d infeasible values removed from the domain." % removeCount)

    def __validate(self):
        '''
        Initial validation of the problem.
        '''
        # All cages' cells must be attached to one another.
        for i in range(0, len(self.cages)):
            cage = self.cages[i]
            if len(cage.cells) > 1:
                valid = [False] * len(cage.cells)
                # BUG FIX: the inner loops previously reused `i`, shadowing
                # the cage index and making the error messages below report
                # the wrong cage number.
                for ci in range(0, len(cage.cells)):
                    if not valid[ci]:
                        c1 = cage.cells[ci]
                        for cj in range(0, len(cage.cells)):
                            c2 = cage.cells[cj]
                            # Manhattan distance 1 means the cells touch.
                            if abs(c1[0] - c2[0]) + abs(c1[1] - c2[1]) == 1:
                                valid[ci] = True
                                valid[cj] = True
                if False in valid:
                    raise InputError('Cage #%d cells are not attached.' % i)
            # Check for invalid operation
            if not cage.op in [Op.ADDITION, Op.SUBTRACTION, Op.MULTIPLICATION, Op.DIVISION]:
                raise InputError('Invalid operation in cage #%d: %s' % (i, cage.op))
class Cage:
    '''
    Represents a cage of MathDoku cells with an operation and value.
    '''

    cells = None
    op = None
    value = None

    def __init__(self):
        self.cells = list()

    def func(self, *n):
        '''
        Executes the cage operation on the given values tuple.

        Operands are processed largest-first so that subtraction and
        division are well defined regardless of argument order.
        '''
        ordered = sorted(n, reverse=True)
        result = float(ordered[0])
        for operand in ordered[1:]:
            if self.op == Op.ADDITION:
                result = result + operand
            elif self.op == Op.SUBTRACTION:
                result = result - operand
            elif self.op == Op.MULTIPLICATION:
                result = result * operand
            elif self.op == Op.DIVISION:
                result = result / operand
        return result
class Op:
    '''
    Valid operators enumeration.
    '''
    # The cage's cell values must combine to its target with this operation.
    # Subtraction and division are applied largest-operand-first (Cage.func).
    ADDITION = '+'
    SUBTRACTION = "-"
    MULTIPLICATION = '*'
    DIVISION = '/'
class SolverNode:
    '''
    Represents the solver tree node.

    Holds a deep copy of the candidate-value grid and the current
    assignment, plus the (x, y, value) move this node will try.
    '''

    domain = None
    conf = None
    x = None
    y = None
    value = None

    def __init__(self, domain, conf, x=None, y=None, value=None):
        size = len(domain)
        # Deep-copy the (square) domain grid and the assignment rows so that
        # exploring a child node never mutates its parent's state.
        self.domain = [[list(domain[r][c]) for c in range(size)]
                       for r in range(size)]
        self.conf = [list(conf[r]) for r in range(size)]
        self.x = x
        self.y = y
        self.value = value
class ConstraintStore:
    '''
    Constraint store static class for checking feasibility.
    '''

    @staticmethod
    def checkConstraints(boardSize, cages, domain, conf, debugLevel=0):
        '''
        Returns true if none of the constraints are broken, false otherwise.

        Checks, in order:
          1. assigned values in each row are all different,
          2. assigned values in each column are all different,
          3. the union of remaining domains in each row covers 1..boardSize,
          4. likewise for each column,
          5. every fully-assigned cage evaluates to its target value.

        (print statements use function-call form so the module also parses
        on Python 3; with the default debugLevel=0 they never fire.)
        '''
        completeDomain = set(range(1, boardSize + 1))
        for i in range(0, boardSize):
            rowUsed = list()
            colUsed = list()
            rowDomain = set()
            colDomain = set()
            for j in range(0, boardSize):
                # 1 - Row values are all different
                value = conf[i][j]
                if value is not None:
                    if value in rowUsed:
                        if debugLevel >= 2:
                            print('Row #%d constraint violated.' % i)
                        return False
                    else:
                        rowUsed.append(value)
                # 2 - Column values are all different
                value = conf[j][i]
                if value is not None:
                    if value in colUsed:
                        if debugLevel >= 2:
                            print('Column #%d constraint violated.' % i)
                        return False
                    else:
                        colUsed.append(value)
                rowDomain = rowDomain.union(set(domain[i][j]))
                colDomain = colDomain.union(set(domain[j][i]))
            # 3 - Row must contain all numbers
            if rowDomain != completeDomain:
                if debugLevel >= 2:
                    print('Row #%i domain constraint violated.' % i)
                return False
            # 4 - Column must contain all numbers
            if colDomain != completeDomain:
                if debugLevel >= 2:
                    print('Column #%i domain constraint violated.' % i)
                return False
        # 5 - Cage calculation is correct (fixed duplicated "3" numbering)
        for i in range(0, len(cages)):
            cage = cages[i]
            values = tuple(map(lambda cell: conf[cell[0]][cell[1]], cage.cells))
            if None not in values:
                cageCalcValue = cage.func(*values)
                if float(cage.value) != cageCalcValue:
                    if debugLevel >= 2:
                        print('Cage #%d constraint violated.' % i)
                    return False
        return True
class InputError(Exception):
    '''
    Represents an error in the problem space.
    '''
    def __init__(self, message):
        # BUG FIX: forward the message to Exception so str(e) / e.args work;
        # previously str(InputError('x')) was an empty string.
        Exception.__init__(self, message)
        self.message = message
class SolverError(Exception):
    '''
    Represents an error in the solver.
    '''
    def __init__(self, message):
        # BUG FIX: forward the message to Exception so str(e) / e.args work;
        # previously str(SolverError('x')) was an empty string.
        Exception.__init__(self, message)
        self.message = message
|
apache-2.0
|
jhoos/django
|
django/contrib/admin/helpers.py
|
7
|
14344
|
from __future__ import unicode_literals
import warnings
from django import forms
from django.conf import settings
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.admin.utils import (
display_for_field, flatten_fieldsets, help_text_for_field, label_for_field,
lookup_field,
)
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.fields.related import ManyToManyRel
from django.forms.utils import flatatt
from django.template.defaultfilters import capfirst, linebreaksbr
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text, smart_text
from django.utils.functional import cached_property
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
# POST parameter name of the per-row selection checkboxes on the admin
# change-list page; ActionForm submissions read the selected PKs from it.
ACTION_CHECKBOX_NAME = '_selected_action'
class ActionForm(forms.Form):
    """Form rendered above the admin change list to choose a bulk action."""
    action = forms.ChoiceField(label=_('Action:'))
    # Hidden flag toggled by JavaScript when "select all across pages" is used.
    select_across = forms.BooleanField(label='', required=False, initial=0,
        widget=forms.HiddenInput({'class': 'select-across'}))

# Shared per-row selection checkbox widget; the check_test callable always
# returns False so rows render unchecked by default.
checkbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False)
class AdminForm(object):
    """Wraps a form plus its fieldset/prepopulated-field configuration for
    rendering on the admin change form."""

    def __init__(self, form, fieldsets, prepopulated_fields, readonly_fields=None, model_admin=None):
        self.form = form
        self.fieldsets = fieldsets
        # Resolve each prepopulated field and its source fields to BoundFields.
        self.prepopulated_fields = [
            {'field': form[name], 'dependencies': [form[dep] for dep in deps]}
            for name, deps in prepopulated_fields.items()
        ]
        self.model_admin = model_admin
        self.readonly_fields = () if readonly_fields is None else readonly_fields

    def __iter__(self):
        # Yield one Fieldset wrapper per configured fieldset.
        for title, options in self.fieldsets:
            yield Fieldset(
                self.form, title,
                readonly_fields=self.readonly_fields,
                model_admin=self.model_admin,
                **options
            )

    def _media(self):
        # Combine the form's media with that of every fieldset.
        combined = self.form.media
        for fieldset in self:
            combined = combined + fieldset.media
        return combined
    media = property(_media)
class Fieldset(object):
    """One named, styled group of field lines on the admin change form."""

    def __init__(self, form, name=None, readonly_fields=(), fields=(), classes=(),
                 description=None, model_admin=None):
        self.form = form
        self.name = name
        self.fields = fields
        self.classes = ' '.join(classes)
        self.description = description
        self.model_admin = model_admin
        self.readonly_fields = readonly_fields

    def _media(self):
        # Only collapsible fieldsets need extra JavaScript.
        if 'collapse' not in self.classes:
            return forms.Media()
        extra = '' if settings.DEBUG else '.min'
        scripts = ['vendor/jquery/jquery%s.js' % extra,
                   'jquery.init.js',
                   'collapse%s.js' % extra]
        return forms.Media(js=[static('admin/js/%s' % path) for path in scripts])
    media = property(_media)

    def __iter__(self):
        for field in self.fields:
            yield Fieldline(self.form, field, self.readonly_fields,
                            model_admin=self.model_admin)
class Fieldline(object):
    """A single rendered line, which may hold one field or a tuple of them."""

    def __init__(self, form, field, readonly_fields=None, model_admin=None):
        self.form = form  # A django.forms.Form instance
        # Normalize to a list: a lone field name becomes a one-element line.
        if hasattr(field, "__iter__") and not isinstance(field, six.text_type):
            self.fields = field
        else:
            self.fields = [field]
        # Visible iff at least one field is readonly (not on the form) or has
        # a non-hidden widget (De Morgan of the original not-all form).
        self.has_visible_field = any(
            f not in self.form.fields or not self.form.fields[f].widget.is_hidden
            for f in self.fields
        )
        self.model_admin = model_admin
        self.readonly_fields = () if readonly_fields is None else readonly_fields

    def __iter__(self):
        for index, f in enumerate(self.fields):
            first = (index == 0)
            if f in self.readonly_fields:
                yield AdminReadonlyField(self.form, f, is_first=first,
                                         model_admin=self.model_admin)
            else:
                yield AdminField(self.form, f, is_first=first)

    def errors(self):
        # Concatenate the error lists of all editable fields on this line.
        rendered = [self.form[f].errors.as_ul()
                    for f in self.fields if f not in self.readonly_fields]
        return mark_safe('\n'.join(rendered).strip('\n'))
class AdminField(object):
    """Wraps an editable BoundField for rendering on the change form."""

    def __init__(self, form, field, is_first):
        self.field = form[field]  # A django.forms.BoundField instance
        self.is_first = is_first  # Whether this field is first on the line
        self.is_checkbox = isinstance(self.field.field.widget, forms.CheckboxInput)
        self.is_readonly = False

    def label_tag(self):
        css_classes = []
        if self.is_checkbox:
            css_classes.append('vCheckboxLabel')
        if self.field.field.required:
            css_classes.append('required')
        if not self.is_first:
            css_classes.append('inline')
        attrs = {'class': ' '.join(css_classes)} if css_classes else {}
        contents = conditional_escape(force_text(self.field.label))
        # checkboxes should not have a label suffix as the checkbox appears
        # to the left of the label.
        suffix = '' if self.is_checkbox else None
        return self.field.label_tag(contents=mark_safe(contents), attrs=attrs,
                                    label_suffix=suffix)

    def errors(self):
        return mark_safe(self.field.errors.as_ul())
class AdminReadonlyField(object):
    """Stand-in for AdminField used when a field appears in readonly_fields.

    ``self.field`` is a plain dict (not a BoundField) carrying just enough
    metadata for the admin templates to render a label and a static value.
    """

    def __init__(self, form, field, is_first, model_admin=None):
        # Make self.field look a little bit like a field. This means that
        # {{ field.name }} must be a useful class name to identify the field.
        # For convenience, store other field-related data here too.
        if callable(field):
            class_name = field.__name__ if field.__name__ != '<lambda>' else ''
        else:
            class_name = field

        # Labels/help texts declared on the form's Meta win over the
        # model-derived defaults.
        if form._meta.labels and class_name in form._meta.labels:
            label = form._meta.labels[class_name]
        else:
            label = label_for_field(field, form._meta.model, model_admin)

        if form._meta.help_texts and class_name in form._meta.help_texts:
            help_text = form._meta.help_texts[class_name]
        else:
            help_text = help_text_for_field(class_name, form._meta.model)

        self.field = {
            'name': class_name,
            'label': label,
            'help_text': help_text,
            'field': field,
        }
        self.form = form
        self.model_admin = model_admin
        self.is_first = is_first
        self.is_checkbox = False
        self.is_readonly = True
        self.empty_value_display = model_admin.get_empty_value_display()

    def label_tag(self):
        # Render "<label>Label:</label>"; fields after the first on a line
        # get the 'inline' CSS class.
        attrs = {}
        if not self.is_first:
            attrs["class"] = "inline"
        label = self.field['label']
        return format_html('<label{}>{}:</label>',
                           flatatt(attrs),
                           capfirst(force_text(label)))

    def contents(self):
        # Resolve the readonly value via lookup_field and format it the same
        # way the change list does (boolean icons, M2M joining, etc.).
        from django.contrib.admin.templatetags.admin_list import _boolean_icon
        field, obj, model_admin = self.field['field'], self.form.instance, self.model_admin
        try:
            f, attr, value = lookup_field(field, obj, model_admin)
        except (AttributeError, ValueError, ObjectDoesNotExist):
            result_repr = self.empty_value_display
        else:
            if f is None:
                # Not a model field: a callable or an admin/model attribute.
                boolean = getattr(attr, "boolean", False)
                if boolean:
                    result_repr = _boolean_icon(value)
                else:
                    result_repr = smart_text(value)
                    if getattr(attr, "allow_tags", False):
                        result_repr = mark_safe(result_repr)
                    else:
                        result_repr = linebreaksbr(result_repr)
            else:
                if isinstance(f.remote_field, ManyToManyRel) and value is not None:
                    result_repr = ", ".join(map(six.text_type, value.all()))
                else:
                    result_repr = display_for_field(value, f, self.empty_value_display)
        return conditional_escape(result_repr)
class InlineAdminFormSet(object):
    """
    A wrapper around an inline formset for use in the admin system.
    """
    def __init__(self, inline, formset, fieldsets, prepopulated_fields=None,
                 readonly_fields=None, model_admin=None):
        # `inline` is the InlineModelAdmin options object for this formset.
        self.opts = inline
        self.formset = formset
        self.fieldsets = fieldsets
        self.model_admin = model_admin
        if readonly_fields is None:
            readonly_fields = ()
        self.readonly_fields = readonly_fields
        if prepopulated_fields is None:
            prepopulated_fields = {}
        self.prepopulated_fields = prepopulated_fields

    def __iter__(self):
        # Yield an InlineAdminForm for, in order: the initial (bound) forms
        # paired with their original objects, the extra (unbound) forms, and
        # finally the empty_form used as a JavaScript template.
        for form, original in zip(self.formset.initial_forms, self.formset.get_queryset()):
            view_on_site_url = self.opts.get_view_on_site_url(original)
            yield InlineAdminForm(self.formset, form, self.fieldsets,
                self.prepopulated_fields, original, self.readonly_fields,
                model_admin=self.opts, view_on_site_url=view_on_site_url)
        for form in self.formset.extra_forms:
            yield InlineAdminForm(self.formset, form, self.fieldsets,
                self.prepopulated_fields, None, self.readonly_fields,
                model_admin=self.opts)
        yield InlineAdminForm(self.formset, self.formset.empty_form,
            self.fieldsets, self.prepopulated_fields, None,
            self.readonly_fields, model_admin=self.opts)

    def fields(self):
        # Describe each visible column for the inline template.
        fk = getattr(self.formset, "fk", None)
        for i, field_name in enumerate(flatten_fieldsets(self.fieldsets)):
            if fk and fk.name == field_name:
                # Skip the foreign key pointing back at the parent object.
                continue
            if field_name in self.readonly_fields:
                # Readonly columns have no form field; fake the attributes
                # the template expects.
                yield {
                    'label': label_for_field(field_name, self.opts.model, self.opts),
                    'widget': {
                        'is_hidden': False
                    },
                    'required': False,
                    'help_text': help_text_for_field(field_name, self.opts.model),
                }
            else:
                yield self.formset.form.base_fields[field_name]

    def _media(self):
        # Combined media of the inline options, the formset and every form.
        media = self.opts.media + self.formset.media
        for fs in self:
            media = media + fs.media
        return media
    media = property(_media)
class InlineAdminForm(AdminForm):
    """
    A wrapper around an inline form for use in the admin system.
    """
    def __init__(self, formset, form, fieldsets, prepopulated_fields, original,
                 readonly_fields=None, model_admin=None, view_on_site_url=None):
        self.formset = formset
        self.model_admin = model_admin
        # `original` is the existing model instance for initial forms, or
        # None for extra/empty forms.
        self.original = original
        self.show_url = original and view_on_site_url is not None
        self.absolute_url = view_on_site_url
        super(InlineAdminForm, self).__init__(form, fieldsets, prepopulated_fields,
            readonly_fields, model_admin)

    @cached_property
    def original_content_type_id(self):
        # Deprecated accessor kept only for backward compatibility; see the
        # warning text below for the replacement.
        warnings.warn(
            'InlineAdminForm.original_content_type_id is deprecated and will be '
            'removed in Django 2.0. If you were using this attribute to construct '
            'the "view on site" URL, use the `absolute_url` attribute instead.',
            RemovedInDjango20Warning, stacklevel=2
        )
        if self.original is not None:
            # Since this module gets imported in the application's root package,
            # it cannot import models from other applications at the module level.
            from django.contrib.contenttypes.models import ContentType
            return ContentType.objects.get_for_model(self.original).pk
        raise AttributeError

    def __iter__(self):
        for name, options in self.fieldsets:
            yield InlineFieldset(self.formset, self.form, name,
                self.readonly_fields, model_admin=self.model_admin, **options)

    def needs_explicit_pk_field(self):
        # Auto fields are editable (oddly), so need to check for auto or non-editable pk
        if self.form._meta.model._meta.has_auto_field or not self.form._meta.model._meta.pk.editable:
            return True
        # Also search any parents for an auto field. (The pk info is propagated to child
        # models so that does not need to be checked in parents.)
        for parent in self.form._meta.model._meta.get_parent_list():
            if parent._meta.has_auto_field:
                return True
        return False

    def pk_field(self):
        # Hidden primary-key field for this inline form.
        return AdminField(self.form, self.formset._pk_field.name, False)

    def fk_field(self):
        # Hidden foreign-key field back to the parent, if the formset has one.
        fk = getattr(self.formset, "fk", None)
        if fk:
            return AdminField(self.form, fk.name, False)
        else:
            return ""

    def deletion_field(self):
        from django.forms.formsets import DELETION_FIELD_NAME
        return AdminField(self.form, DELETION_FIELD_NAME, False)

    def ordering_field(self):
        from django.forms.formsets import ORDERING_FIELD_NAME
        return AdminField(self.form, ORDERING_FIELD_NAME, False)
class InlineFieldset(Fieldset):
    """Fieldset for inline forms; hides the foreign key back to the parent."""

    def __init__(self, formset, *args, **kwargs):
        self.formset = formset
        super(InlineFieldset, self).__init__(*args, **kwargs)

    def __iter__(self):
        fk = getattr(self.formset, "fk", None)
        for field_name in self.fields:
            # Never render the FK column pointing at the parent object.
            if fk and fk.name == field_name:
                continue
            yield Fieldline(self.form, field_name, self.readonly_fields,
                            model_admin=self.model_admin)
class AdminErrorList(forms.utils.ErrorList):
    """
    Stores all errors for the form/formsets in an add/change stage view.
    """

    def __init__(self, form, inline_formsets):
        super(AdminErrorList, self).__init__()
        # Unbound forms cannot have validation errors; nothing to collect.
        if not form.is_bound:
            return
        self.extend(form.errors.values())
        for formset in inline_formsets:
            self.extend(formset.non_form_errors())
            for form_errors in formset.errors:
                self.extend(form_errors.values())
|
bsd-3-clause
|
WoLpH/EventGhost
|
lib27/site-packages/requests/packages/chardet/euctwfreq.py
|
3133
|
34872
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# EUCTW frequency table
# Converted from big5 work
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Idea Distribution Ratio = 0.74851/(1-0.74851) =2.98
# Random Distribution Ration = 512/(5401-512)=0.105
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75
# Number of entries in the char-to-frequency-order table below that are
# meaningful for detection (entries past this index are unused padding,
# per the "no interest for detection" marker further down).
EUCTW_TABLE_SIZE = 8102
EUCTWCharToFreqOrder = (
1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
#Everything below is of no interest for detection purpose
2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118
2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134
8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150
8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166
8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198
8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214
8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230
8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246
8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262
8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294
8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310
8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326
8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342
8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358
8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374
8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390
8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406
8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422
8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438
8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454
8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470
8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486
8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502
8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518
8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550
8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566
8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582
8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598
8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614
8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630
8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646
8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662
8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678
8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694
8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710
8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726
8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742
# flake8: noqa
|
gpl-2.0
|
great-expectations/great_expectations
|
tests/execution_engine/test_sqlalchemy_execution_engine.py
|
1
|
23179
|
import logging
import os
import pandas as pd
import pytest
from great_expectations.core.batch_spec import (
RuntimeQueryBatchSpec,
SqlAlchemyDatasourceBatchSpec,
)
from great_expectations.data_context.util import file_relative_path
from great_expectations.exceptions import GreatExpectationsError
from great_expectations.exceptions.exceptions import InvalidConfigError
from great_expectations.exceptions.metric_exceptions import MetricProviderError
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.execution_engine.sqlalchemy_execution_engine import (
SqlAlchemyExecutionEngine,
)
# Function to test for spark dataframe equality
from great_expectations.self_check.util import build_sa_engine
from great_expectations.validator.validation_graph import MetricConfiguration
from tests.expectations.test_util import get_table_columns_metric
from tests.test_utils import get_sqlite_table_names, get_sqlite_temp_table_names
try:
sqlalchemy = pytest.importorskip("sqlalchemy")
except ImportError:
sqlalchemy = None
def test_instantiation_via_connection_string(sa, test_db_connection_string):
    """An engine built from a connection string stores it verbatim, leaves
    credentials/url unset, and can fetch batch data with limit sampling.

    Fix: compare against None with ``is None`` rather than ``== None`` (PEP 8);
    behavior is unchanged for the engine attributes being checked.
    """
    my_execution_engine = SqlAlchemyExecutionEngine(
        connection_string=test_db_connection_string
    )
    assert my_execution_engine.connection_string == test_db_connection_string
    assert my_execution_engine.credentials is None
    assert my_execution_engine.url is None
    # Smoke-check batch retrieval: sample 5 rows from main.table_1.
    my_execution_engine.get_batch_data_and_markers(
        batch_spec=SqlAlchemyDatasourceBatchSpec(
            table_name="table_1",
            schema_name="main",
            sampling_method="_sample_using_limit",
            sampling_kwargs={"n": 5},
        )
    )
def test_instantiation_via_url(sa):
    """An engine built from a SQLite URL records the URL (and nothing else)
    and can fetch sampled batch data from the referenced database file."""
    sqlite_path = file_relative_path(
        __file__,
        os.path.join("..", "test_sets", "test_cases_for_sql_data_connector.db"),
    )
    engine = SqlAlchemyExecutionEngine(url="sqlite:///" + sqlite_path)
    assert engine.connection_string is None
    assert engine.credentials is None
    # Only the trailing file name is stable across checkouts, so compare that.
    assert engine.url[-36:] == "test_cases_for_sql_data_connector.db"
    batch_spec = SqlAlchemyDatasourceBatchSpec(
        table_name="table_partitioned_by_date_column__A",
        sampling_method="_sample_using_limit",
        sampling_kwargs={"n": 5},
    )
    engine.get_batch_data_and_markers(batch_spec=batch_spec)
def test_instantiation_via_credentials(sa, test_backends, test_df):
    """An engine built from a credentials dict keeps the credentials (minus
    the drivername) and leaves connection_string/url unset.

    Skipped unless the postgresql test backend is available.
    """
    if "postgresql" not in test_backends:
        pytest.skip("test_database_store_backend_get_url_for_key requires postgresql")
    my_execution_engine = SqlAlchemyExecutionEngine(
        credentials={
            "drivername": "postgresql",
            "username": "postgres",
            "password": "",
            "host": os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"),
            "port": "5432",
            "database": "test_ci",
        }
    )
    assert my_execution_engine.connection_string is None
    # The stored credentials omit "drivername" (consumed when building the URL).
    assert my_execution_engine.credentials == {
        "username": "postgres",
        "password": "",
        "host": os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"),
        "port": "5432",
        "database": "test_ci",
    }
    assert my_execution_engine.url is None
    # Note Abe 20201116: Let's add an actual test of get_batch_data_and_markers, which will require setting up test
    # fixtures
    # my_execution_engine.get_batch_data_and_markers(batch_spec=BatchSpec(
    #     table_name="main.table_1",
    #     sampling_method="_sample_using_limit",
    #     sampling_kwargs={
    #         "n": 5
    #     }
    # ))
def test_instantiation_error_states(sa, test_db_connection_string):
    """Constructing the engine with no connection info must raise InvalidConfigError."""
    with pytest.raises(InvalidConfigError):
        SqlAlchemyExecutionEngine()
# Testing batching of aggregate metrics
def test_sa_batch_aggregate_metrics(caplog, sa):
    """Aggregate metrics sharing a domain are batched into a single query.

    Resolves four column min/max metrics in two passes (partial aggregate
    functions, then the full metrics) and checks both the metric values and
    the engine's debug log line proving all four were computed on one domain.
    """
    import datetime
    engine = build_sa_engine(
        pd.DataFrame({"a": [1, 2, 1, 2, 3, 3], "b": [4, 4, 4, 4, 4, 4]}), sa
    )
    metrics: dict = {}
    table_columns_metric: MetricConfiguration
    results: dict
    table_columns_metric, results = get_table_columns_metric(engine=engine)
    metrics.update(results)
    # First pass: the *.aggregate_fn partials, which only build SQL expressions.
    desired_metric_1 = MetricConfiguration(
        metric_name="column.max.aggregate_fn",
        metric_domain_kwargs={"column": "a"},
        metric_value_kwargs=dict(),
        metric_dependencies={
            "table.columns": table_columns_metric,
        },
    )
    desired_metric_2 = MetricConfiguration(
        metric_name="column.min.aggregate_fn",
        metric_domain_kwargs={"column": "a"},
        metric_value_kwargs=dict(),
        metric_dependencies={
            "table.columns": table_columns_metric,
        },
    )
    desired_metric_3 = MetricConfiguration(
        metric_name="column.max.aggregate_fn",
        metric_domain_kwargs={"column": "b"},
        metric_value_kwargs=dict(),
        metric_dependencies={
            "table.columns": table_columns_metric,
        },
    )
    desired_metric_4 = MetricConfiguration(
        metric_name="column.min.aggregate_fn",
        metric_domain_kwargs={"column": "b"},
        metric_value_kwargs=dict(),
        metric_dependencies={
            "table.columns": table_columns_metric,
        },
    )
    results = engine.resolve_metrics(
        metrics_to_resolve=(
            desired_metric_1,
            desired_metric_2,
            desired_metric_3,
            desired_metric_4,
        ),
        metrics=metrics,
    )
    metrics.update(results)
    # Second pass: the full metrics, each depending on its partial above.
    desired_metric_1 = MetricConfiguration(
        metric_name="column.max",
        metric_domain_kwargs={"column": "a"},
        metric_value_kwargs=dict(),
        metric_dependencies={
            "metric_partial_fn": desired_metric_1,
            "table.columns": table_columns_metric,
        },
    )
    desired_metric_2 = MetricConfiguration(
        metric_name="column.min",
        metric_domain_kwargs={"column": "a"},
        metric_value_kwargs=dict(),
        metric_dependencies={
            "metric_partial_fn": desired_metric_2,
            "table.columns": table_columns_metric,
        },
    )
    desired_metric_3 = MetricConfiguration(
        metric_name="column.max",
        metric_domain_kwargs={"column": "b"},
        metric_value_kwargs=dict(),
        metric_dependencies={
            "metric_partial_fn": desired_metric_3,
            "table.columns": table_columns_metric,
        },
    )
    desired_metric_4 = MetricConfiguration(
        metric_name="column.min",
        metric_domain_kwargs={"column": "b"},
        metric_value_kwargs=dict(),
        metric_dependencies={
            "metric_partial_fn": desired_metric_4,
            "table.columns": table_columns_metric,
        },
    )
    # Capture the engine's debug logging so we can inspect the batching message.
    caplog.clear()
    caplog.set_level(logging.DEBUG, logger="great_expectations")
    start = datetime.datetime.now()
    results = engine.resolve_metrics(
        metrics_to_resolve=(
            desired_metric_1,
            desired_metric_2,
            desired_metric_3,
            desired_metric_4,
        ),
        metrics=metrics,
    )
    metrics.update(results)
    end = datetime.datetime.now()
    print("t1")
    print(end - start)
    assert results[desired_metric_1.id] == 3
    assert results[desired_metric_2.id] == 1
    assert results[desired_metric_3.id] == 4
    assert results[desired_metric_4.id] == 4
    # Check that all four of these metrics were computed on a single domain
    found_message = False
    for record in caplog.records:
        if (
            record.message
            == "SqlAlchemyExecutionEngine computed 4 metrics on domain_id ()"
        ):
            found_message = True
    assert found_message
# Ensuring functionality of compute_domain when no domain kwargs are given
def test_get_compute_domain_with_no_domain_kwargs(sa):
    """With empty domain kwargs and a table domain, the data is untouched and
    both compute and accessor kwargs come back empty."""
    engine = build_sa_engine(
        pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None]}), sa
    )
    data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
        domain_kwargs={}, domain_type="table"
    )
    # Seeing if raw data is the same as the data after condition has been applied - checking post computation data
    raw_data = engine.engine.execute(
        sa.select(["*"]).select_from(engine.active_batch_data.selectable)
    ).fetchall()
    domain_data = engine.engine.execute(sa.select(["*"]).select_from(data)).fetchall()
    # Ensuring that with no domain nothing happens to the data itself
    assert raw_data == domain_data, "Data does not match after getting compute domain"
    assert compute_kwargs == {}, "Compute domain kwargs should be existent"
    assert accessor_kwargs == {}, "Accessor kwargs have been modified"
# Testing for only untested use case - column_pair
def test_get_compute_domain_with_column_pair(sa):
    """For a column_pair domain the column names land in accessor kwargs;
    for an identity domain they land in compute kwargs instead."""
    engine = build_sa_engine(
        pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None]}), sa
    )
    # Fetching data, compute_domain_kwargs, accessor_kwargs
    data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
        domain_kwargs={"column_A": "a", "column_B": "b"}, domain_type="column_pair"
    )
    # Seeing if raw data is the same as the data after condition has been applied - checking post computation data
    raw_data = engine.engine.execute(
        sa.select(["*"]).select_from(engine.active_batch_data.selectable)
    ).fetchall()
    domain_data = engine.engine.execute(sa.select(["*"]).select_from(data)).fetchall()
    # Ensuring that with no domain nothing happens to the data itself
    assert raw_data == domain_data, "Data does not match after getting compute domain"
    assert (
        "column_A" not in compute_kwargs.keys()
        and "column_B" not in compute_kwargs.keys()
    ), "domain kwargs should be existent"
    assert accessor_kwargs == {
        "column_A": "a",
        "column_B": "b",
    }, "Accessor kwargs have been modified"
    # Building new engine so that values still found
    engine = build_sa_engine(
        pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None]}), sa
    )
    data2, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
        domain_kwargs={"column_A": "a", "column_B": "b"}, domain_type="identity"
    )
    # Seeing if raw data is the same as the data after condition has been applied - checking post computation data
    raw_data = engine.engine.execute(
        sa.select([sa.column("a"), sa.column("b")]).select_from(
            engine.active_batch_data.selectable
        )
    ).fetchall()
    domain_data = engine.engine.execute(sa.select(["*"]).select_from(data2)).fetchall()
    # Ensuring that with no domain nothing happens to the data itself
    assert raw_data == domain_data, "Data does not match after getting compute domain"
    assert compute_kwargs == {
        "column_A": "a",
        "column_B": "b",
    }, "Compute domain kwargs should be existent"
    assert accessor_kwargs == {}, "Accessor kwargs have been modified"
# Testing for only untested use case - multicolumn
def test_get_compute_domain_with_multicolumn(sa):
    """For a multicolumn domain the column list lands in accessor kwargs;
    for an identity domain it lands in compute kwargs instead."""
    engine = build_sa_engine(
        pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None], "c": [1, 2, 3, None]}),
        sa,
    )
    # Obtaining compute domain
    data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
        domain_kwargs={"columns": ["a", "b", "c"]}, domain_type="multicolumn"
    )
    # Seeing if raw data is the same as the data after condition has been applied - checking post computation data
    raw_data = engine.engine.execute(
        sa.select(["*"]).select_from(engine.active_batch_data.selectable)
    ).fetchall()
    domain_data = engine.engine.execute(sa.select(["*"]).select_from(data)).fetchall()
    # Ensuring that with no domain nothing happens to the data itself
    assert raw_data == domain_data, "Data does not match after getting compute domain"
    assert compute_kwargs is not None, "Compute domain kwargs should be existent"
    assert accessor_kwargs == {
        "columns": ["a", "b", "c"]
    }, "Accessor kwargs have been modified"
    # Checking for identity
    data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
        domain_kwargs={"columns": ["a", "b", "c"]}, domain_type="identity"
    )
    # Seeing if raw data is the same as the data after condition has been applied - checking post computation data
    raw_data = engine.engine.execute(
        sa.select([sa.column("a"), sa.column("b"), sa.column("c")]).select_from(
            engine.active_batch_data.selectable
        )
    ).fetchall()
    domain_data = engine.engine.execute(sa.select(["*"]).select_from(data)).fetchall()
    # Ensuring that with no domain nothing happens to the data itself
    assert raw_data == domain_data, "Data does not match after getting compute domain"
    assert compute_kwargs == {
        "columns": ["a", "b", "c"]
    }, "Compute domain kwargs should be existent"
    assert accessor_kwargs == {}, "Accessor kwargs have been modified"
# Testing whether compute domain is properly calculated, but this time obtaining a column
def test_get_compute_domain_with_column_domain(sa):
    """For a COLUMN domain the column name lands in accessor kwargs; for an
    IDENTITY domain it lands in compute kwargs and the data is narrowed."""
    engine = build_sa_engine(
        pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None]}), sa
    )
    # Loading batch data
    data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
        domain_kwargs={"column": "a"}, domain_type=MetricDomainTypes.COLUMN
    )
    # Seeing if raw data is the same as the data after condition has been applied - checking post computation data
    raw_data = engine.engine.execute(
        sa.select(["*"]).select_from(engine.active_batch_data.selectable)
    ).fetchall()
    domain_data = engine.engine.execute(sa.select(["*"]).select_from(data)).fetchall()
    # Ensuring that column domain is now an accessor kwarg, and data remains unmodified
    assert raw_data == domain_data, "Data does not match after getting compute domain"
    assert compute_kwargs == {}, "Compute domain kwargs should be existent"
    assert accessor_kwargs == {"column": "a"}, "Accessor kwargs have been modified"
    # Testing for identity
    engine = build_sa_engine(
        pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None]}), sa
    )
    # Loading batch data
    data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
        domain_kwargs={"column": "a"}, domain_type=MetricDomainTypes.IDENTITY
    )
    # Seeing if raw data is the same as the data after condition has been applied - checking post computation data
    raw_data = engine.engine.execute(
        sa.select([sa.column("a")]).select_from(engine.active_batch_data.selectable)
    ).fetchall()
    domain_data = engine.engine.execute(sa.select(["*"]).select_from(data)).fetchall()
    # Ensuring that column domain is now an accessor kwarg, and data remains unmodified
    assert raw_data == domain_data, "Data does not match after getting compute domain"
    assert compute_kwargs == {"column": "a"}, "Compute domain kwargs should be existent"
    assert accessor_kwargs == {}, "Accessor kwargs have been modified"
# What happens when we filter such that no value meets the condition?
def test_get_compute_domain_with_unmeetable_row_condition(sa):
    """A row_condition that matches no rows still yields a valid (empty)
    domain, with the condition preserved in compute kwargs."""
    engine = build_sa_engine(
        pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None]}), sa
    )
    data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
        domain_kwargs={
            "row_condition": 'col("b") > 24',
            "condition_parser": "great_expectations__experimental__",
        },
        domain_type="identity",
    )
    # Seeing if raw data is the same as the data after condition has been applied - checking post computation data
    raw_data = engine.engine.execute(
        sa.select(["*"])
        .select_from(engine.active_batch_data.selectable)
        .where(sa.column("b") > 24)
    ).fetchall()
    domain_data = engine.engine.execute(sa.select(["*"]).select_from(data)).fetchall()
    # Ensuring that column domain is now an accessor kwarg, and data remains unmodified
    assert raw_data == domain_data, "Data does not match after getting compute domain"
    # Ensuring compute kwargs have not been modified
    assert (
        "row_condition" in compute_kwargs.keys()
    ), "Row condition should be located within compute kwargs"
    assert accessor_kwargs == {}, "Accessor kwargs have been modified"
# Testing to ensure that great expectation experimental parser also works in terms of defining a compute domain
def test_get_compute_domain_with_ge_experimental_condition_parser(sa):
    """The experimental condition parser filters rows for both column and
    identity domains; the column name is an accessor kwarg only for the
    column domain."""
    engine = build_sa_engine(
        pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None]}), sa
    )
    # Obtaining data from computation
    data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
        domain_kwargs={
            "column": "b",
            "row_condition": 'col("b") == 2',
            "condition_parser": "great_expectations__experimental__",
        },
        domain_type="column",
    )
    # Seeing if raw data is the same as the data after condition has been applied - checking post computation data
    raw_data = engine.engine.execute(
        sa.select(["*"])
        .select_from(engine.active_batch_data.selectable)
        .where(sa.column("b") == 2)
    ).fetchall()
    domain_data = engine.engine.execute(sa.select(["*"]).select_from(data)).fetchall()
    # Ensuring that column domain is now an accessor kwarg, and data remains unmodified
    assert raw_data == domain_data, "Data does not match after getting compute domain"
    # Ensuring compute kwargs have not been modified
    assert (
        "row_condition" in compute_kwargs.keys()
    ), "Row condition should be located within compute kwargs"
    assert accessor_kwargs == {"column": "b"}, "Accessor kwargs have been modified"
    # Should react differently for domain type identity
    data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
        domain_kwargs={
            "column": "b",
            "row_condition": 'col("b") == 2',
            "condition_parser": "great_expectations__experimental__",
        },
        domain_type="identity",
    )
    # Ensuring data has been properly queried
    # Seeing if raw data is the same as the data after condition has been applied - checking post computation data
    engine = build_sa_engine(
        pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None]}), sa
    )
    raw_data = engine.engine.execute(
        sa.select(["*"])
        .select_from(engine.active_batch_data.selectable)
        .where(sa.column("b") == 2)
    ).fetchall()
    domain_data = engine.engine.execute(sa.select(["*"]).select_from(data)).fetchall()
    # Ensuring compute kwargs have not been modified
    assert (
        "row_condition" in compute_kwargs.keys()
    ), "Row condition should be located within compute kwargs"
    assert accessor_kwargs == {}, "Accessor kwargs have been modified"
def test_get_compute_domain_with_nonexistent_condition_parser(sa):
    """An unknown condition_parser must raise GreatExpectationsError."""
    engine = build_sa_engine(
        pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 3, 4, None]}), sa
    )
    # Expect GreatExpectationsError because parser doesn't exist
    with pytest.raises(GreatExpectationsError) as e:
        data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
            domain_kwargs={
                "row_condition": "b > 24",
                "condition_parser": "nonexistent",
            },
            domain_type=MetricDomainTypes.TABLE,
        )
# Ensuring that we can properly inform user when metric doesn't exist - should get a metric provider error
def test_resolve_metric_bundle_with_nonexistent_metric(sa):
    """Resolving a bundle containing an unregistered metric name must raise
    MetricProviderError (so users get a clear error for typos)."""
    engine = build_sa_engine(
        pd.DataFrame({"a": [1, 2, 1, 2, 3, 3], "b": [4, 4, 4, 4, 4, 4]}), sa
    )
    desired_metric_1 = MetricConfiguration(
        metric_name="column_values.unique",
        metric_domain_kwargs={"column": "a"},
        metric_value_kwargs=dict(),
    )
    desired_metric_2 = MetricConfiguration(
        metric_name="column.min",
        metric_domain_kwargs={"column": "a"},
        metric_value_kwargs=dict(),
    )
    desired_metric_3 = MetricConfiguration(
        metric_name="column.max",
        metric_domain_kwargs={"column": "b"},
        metric_value_kwargs=dict(),
    )
    # This metric name is not registered anywhere.
    desired_metric_4 = MetricConfiguration(
        metric_name="column.does_not_exist",
        metric_domain_kwargs={"column": "b"},
        metric_value_kwargs=dict(),
    )
    # Ensuring a metric provider error is raised if metric does not exist
    with pytest.raises(MetricProviderError) as e:
        res = engine.resolve_metrics(
            metrics_to_resolve=(
                desired_metric_1,
                desired_metric_2,
                desired_metric_3,
                desired_metric_4,
            )
        )
        print(e)
def test_get_batch_data_and_markers_using_query(sqlite_view_engine, test_df):
    """A RuntimeQueryBatchSpec materializes the query result as a temp table
    and returns batch markers including a load timestamp."""
    my_execution_engine: SqlAlchemyExecutionEngine = SqlAlchemyExecutionEngine(
        engine=sqlite_view_engine
    )
    test_df.to_sql("test_table_0", con=my_execution_engine.engine)
    query: str = "SELECT * FROM test_table_0"
    batch_data, batch_markers = my_execution_engine.get_batch_data_and_markers(
        batch_spec=RuntimeQueryBatchSpec(
            query=query,
        )
    )
    # One pre-existing temp view plus the new temp table created for the query.
    assert len(get_sqlite_temp_table_names(sqlite_view_engine)) == 2
    assert batch_markers.get("ge_load_time") is not None
def test_sa_batch_unexpected_condition_temp_table(caplog, sa):
    """Resolving unexpected-condition metrics must not leave ge_tmp_* tables
    (temporary or permanent) behind in the database."""
    def validate_tmp_tables():
        # Helper: assert no ge_tmp_* temp tables or tables currently exist.
        temp_tables = [
            name
            for name in get_sqlite_temp_table_names(engine.engine)
            if name.startswith("ge_tmp_")
        ]
        tables = [
            name
            for name in get_sqlite_table_names(engine.engine)
            if name.startswith("ge_tmp_")
        ]
        assert len(temp_tables) == 0
        assert len(tables) == 0
    engine = build_sa_engine(
        pd.DataFrame({"a": [1, 2, 1, 2, 3, 3], "b": [4, 4, 4, 4, 4, 4]}), sa
    )
    metrics: dict = {}
    table_columns_metric: MetricConfiguration
    results: dict
    table_columns_metric, results = get_table_columns_metric(engine=engine)
    metrics.update(results)
    validate_tmp_tables()
    condition_metric = MetricConfiguration(
        metric_name="column_values.unique.condition",
        metric_domain_kwargs={"column": "a"},
        metric_value_kwargs=dict(),
        metric_dependencies={
            "table.columns": table_columns_metric,
        },
    )
    results = engine.resolve_metrics(
        metrics_to_resolve=(condition_metric,), metrics=metrics
    )
    metrics.update(results)
    validate_tmp_tables()
    desired_metric = MetricConfiguration(
        metric_name="column_values.unique.unexpected_count",
        metric_domain_kwargs={"column": "a"},
        metric_value_kwargs=dict(),
        metric_dependencies={
            "unexpected_condition": condition_metric,
        },
    )
    results = engine.resolve_metrics(
        metrics_to_resolve=(desired_metric,), metrics=metrics
    )
    validate_tmp_tables()
|
apache-2.0
|
ThiefMaster/indico
|
indico/modules/designer/pdf.py
|
4
|
6842
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import re
from collections import namedtuple
from io import BytesIO
from PIL import Image
from reportlab.lib import colors, pagesizes
from reportlab.lib.enums import TA_CENTER, TA_JUSTIFY, TA_LEFT, TA_RIGHT
from reportlab.lib.styles import ParagraphStyle
from reportlab.lib.units import cm
from reportlab.lib.utils import ImageReader
from reportlab.pdfgen.canvas import Canvas
from reportlab.platypus import Paragraph
from indico.legacy.pdfinterface.base import setTTFonts
from indico.modules.designer import PageOrientation
from indico.util.string import strip_tags
FONT_STYLES = {
'serif': ['Times-Roman', 'Times-Bold', 'Times-Italic', 'Times-Bold-Italic'],
'courier': ['Courier', 'Courier-Bold', 'Courier-Italic', 'Courier-Bold-Italic'],
'sans-serif': ['Sans', 'Sans-Bold', 'Sans-Italic', 'Sans-Bold-Italic'],
'LinuxLibertine': ['LinuxLibertine', 'LinuxLibertine-Bold', 'LinuxLibertine-Italic', 'LinuxLibertine-Bold-Italic'],
'Kochi-Mincho': ['Kochi-Mincho', 'Kochi-Mincho', 'Kochi-Mincho', 'Kochi-Mincho'],
'Kochi-Gothic': ['Kochi-Gothic', 'Kochi-Gothic', 'Kochi-Gothic', 'Kochi-Gothic'],
'Uming-CN': ['Uming-CN', 'Uming-CN', 'Uming-CN', 'Uming-CN']
}
ALIGNMENTS = {
'left': TA_LEFT,
'right': TA_RIGHT,
'center': TA_CENTER,
'justified': TA_JUSTIFY
}
AVAILABLE_COLOR_NAMES = {'black', 'red', 'blue', 'green', 'yellow', 'brown', 'cyan', 'gold', 'pink', 'gray', 'white'}
COLORS = {k: getattr(colors, k) for k in AVAILABLE_COLOR_NAMES}
PIXELS_CM = 50
FONT_SIZE_RE = re.compile(r'(\d+)(pt)?')
TplData = namedtuple('TplData', ['width', 'height', 'items', 'background_position', 'width_cm', 'height_cm'])
def _extract_font_size(text):
    """Return the numeric point size parsed from a size string such as '12pt'."""
    match = FONT_SIZE_RE.match(text)
    return int(match.group(1))
class DesignerPDFBase:
    """Base class for PDFs generated from designer templates.

    Subclasses must implement `_build_config` (turn the raw config dict into a
    structured object) and `_build_pdf` (draw the document on the canvas).

    Fix: `_build_config` previously did ``return NotImplementedError`` — it
    handed the exception *class* back to ``__init__`` instead of raising,
    silently producing a broken ``self.config``. It now raises, consistent
    with `_build_pdf`.
    """

    def __init__(self, template, config):
        self.config = self._build_config(config)
        self.template = template
        self.tpl_data = self._process_tpl_data(template.data)
        self.backside_tpl_data = None
        if template.backside_template:
            self.backside_tpl_data = self._process_tpl_data(template.backside_template.data)
        self.page_size = self.config.page_size.size
        if self.config.page_orientation == PageOrientation.landscape:
            self.page_size = pagesizes.landscape(self.page_size)
        self.width, self.height = self.page_size
        setTTFonts()

    def _process_tpl_data(self, tpl_data):
        # Template dimensions are stored in designer pixels; expose cm as well.
        return TplData(width_cm=(float(tpl_data['width']) / PIXELS_CM),
                       height_cm=(float(tpl_data['height']) / PIXELS_CM),
                       **tpl_data)

    def _remove_transparency(self, fd):
        """Remove transparency from an image and replace it with white."""
        img = Image.open(fd)
        # alpha-channel PNG: replace the transparent areas with plain white
        if img.mode == 'RGBA':
            new = Image.new(img.mode[:-1], img.size, (255, 255, 255))
            new.paste(img, img.split()[-1])
            fd = BytesIO()
            new.save(fd, 'JPEG')
            # XXX: this code does not handle palette (type P) images, such as
            # 8-bit PNGs, but luckily they are somewhat rare nowadays
            fd.seek(0)
        return fd

    def get_pdf(self):
        """Render the document and return it as a BytesIO ready for reading."""
        data = BytesIO()
        canvas = Canvas(data, pagesize=self.page_size)
        self._build_pdf(canvas)
        canvas.save()
        data.seek(0)
        return data

    def _draw_item(self, canvas, item, tpl_data, content, margin_x, margin_y):
        """Draw one template item (image or styled text) on the canvas.

        Coordinates in `item` are in designer pixels measured from the
        top-left; reportlab's origin is bottom-left, hence the
        ``self.height - ...`` conversions.
        """
        style = ParagraphStyle({})
        style.alignment = ALIGNMENTS[item['text_align']]
        style.textColor = COLORS[item['color']]
        style.fontSize = _extract_font_size(item['font_size'])
        style.leading = style.fontSize
        # Pick the font variant matching the bold/italic flags.
        if item['bold'] and item['italic']:
            style.fontName = FONT_STYLES[item['font_family']][3]
        elif item['italic']:
            style.fontName = FONT_STYLES[item['font_family']][2]
        elif item['bold']:
            style.fontName = FONT_STYLES[item['font_family']][1]
        else:
            style.fontName = FONT_STYLES[item['font_family']][0]
        item_x = float(item['x']) / PIXELS_CM * cm
        item_y = float(item['y']) / PIXELS_CM * cm
        item_width = item['width'] / PIXELS_CM * cm
        item_height = (item['height'] / PIXELS_CM * cm) if item.get('height') is not None else None
        if isinstance(content, Image.Image):
            canvas.drawImage(ImageReader(content), margin_x + item_x, self.height - margin_y - item_height - item_y,
                             item_width, item_height)
        else:
            content = strip_tags(content)
            for line in content.splitlines():
                p = Paragraph(line, style)
                available_height = (tpl_data.height_cm - (item_y / PIXELS_CM)) * cm
                w, h = p.wrap(item_width, available_height)
                if w > item_width or h > available_height:
                    # TODO: add warning
                    pass
                p.drawOn(canvas, margin_x + item_x, self.height - margin_y - item_y - h)
                item_y += h  # advance past the line just drawn

    def _draw_background(self, canvas, img_reader, tpl_data, pos_x, pos_y, width, height):
        """Draw the background image, either stretched over the page or
        centered and scaled down (preserving aspect ratio) to fit."""
        img_width, img_height = img_reader.getSize()
        if tpl_data.background_position == 'stretch':
            bg_x = pos_x
            bg_y = pos_y
            bg_width = width
            bg_height = height
        else:
            bg_width = img_width
            bg_height = img_height
            page_width = width
            page_height = height
            bg_x = pos_x + (page_width - bg_width) / 2.0
            bg_y = pos_y + (page_height - bg_height) / 2.0
            # Scale down (keeping aspect ratio) if the image overflows the page.
            if bg_width > page_width:
                ratio = float(page_width) / bg_width
                bg_width = page_width
                bg_height *= ratio
                bg_x = pos_x
                bg_y = pos_y + (page_height - bg_height) / 2.0
            if bg_height > page_height:
                ratio = float(page_height) / bg_height
                bg_height = page_height
                bg_width *= ratio
                bg_x = pos_x + (page_width - bg_width) / 2.0
                bg_y = pos_y
        canvas.drawImage(img_reader, bg_x, bg_y, bg_width, bg_height)

    def _build_config(self, config_data):
        """Build a structured configuration object.

        Should be implemented by inheriting classes
        """
        raise NotImplementedError

    def _build_pdf(self, canvas):
        """Generate the actual PDF.

        Should be implemented by inheriting classes.
        """
        raise NotImplementedError
|
mit
|
gurneyalex/odoo
|
doc/_extensions/odoo_ext/switcher.py
|
27
|
1543
|
from docutils import nodes, utils
from docutils.parsers.rst import Directive
from pygments.lexers import get_lexer_by_name, PythonLexer
PythonLexer.name = 'Python 2'
def setup(app):
    """Sphinx entry point: register the switcher/case directives."""
    for name, directive in (('switcher', SwitcherDirective), ('case', CaseDirective)):
        app.add_directive(name, directive)
class SwitcherDirective(Directive):
    """Directive rendering its child cases as a tabbed content switcher.

    Each child must be either a literal (code) block — whose tab title is the
    lexer name — or a named compound (see CaseDirective) — whose tab title is
    its name(s).
    """
    has_content = True
    def run(self):
        self.assert_has_content()
        # Parse the directive body into a compound node holding all cases.
        body = nodes.compound('\n'.join(self.content), classes=['tabs'])
        self.state.nested_parse(self.content, self.content_offset, body)
        titles = []
        for child in body.children:
            if isinstance(child, nodes.literal_block):
                # Code block: use the pretty lexer name as the tab title.
                titles.append(get_lexer_by_name(child['language']).name)
            else:
                assert child['names'], ("A switcher case must be either a "\
                    "code block or a compound with a name")
                titles.append(' '.join(child['names']))
        # One bullet list item per tab title; CSS/JS turn these into tabs.
        tabs = nodes.bullet_list('', *[
            nodes.list_item('', nodes.Text(title))
            for title in titles
        ])
        node = nodes.compound('', tabs, body, classes=['content-switcher'])
        return [node]
class CaseDirective(Directive):
    """A named case inside a switcher; the (single) argument is its tab title."""
    required_arguments = 1
    final_argument_whitespace = True
    has_content = True
    def run(self):
        self.assert_has_content()
        # Store the title in the node's names so SwitcherDirective can read it.
        node = nodes.compound('\n'.join(self.content), names=[self.arguments[0]])
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
|
agpl-3.0
|
thatchristoph/namebench
|
nb_third_party/httplib2/__init__.py
|
451
|
51082
|
from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "$Rev$"
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import base64
import os
import copy
import calendar
import time
import random
# remove depracated warning in python2.6
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
import socks
except ImportError:
socks = None
# Build the appropriate socket wrapper for ssl
try:
    import ssl # python 2.6
    _ssl_wrap_socket = ssl.wrap_socket
except ImportError:
    # Pre-2.6 fallback: wrap with the legacy socket.ssl API and present
    # it through httplib's FakeSocket adapter so HTTPSConnection can use it.
    def _ssl_wrap_socket(sock, key_file, cert_file):
        ssl_sock = socket.ssl(sock, key_file, cert_file)
        return httplib.FakeSocket(sock, ssl_sock)
if sys.version_info >= (2,3):
    from iri2uri import iri2uri
else:
    # Python < 2.3: no iri2uri module available; pass IRIs through unchanged.
    def iri2uri(uri):
        return uri
def has_timeout(timeout): # python 2.6
    """Return True when *timeout* is an actual timeout value.

    None never counts as a timeout, and neither does the socket module's
    global-default sentinel (present only on Python 2.6+).
    """
    if timeout is None:
        return False
    sentinel = getattr(socket, '_GLOBAL_DEFAULT_TIMEOUT', None)
    return timeout is not sentinel
__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error',
'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent',
'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError',
'debuglevel']
# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0
# Python 2.3 support
if sys.version_info < (2,4):
    # Stand-in for the 2.4+ sorted() builtin.
    # NOTE(review): unlike the real builtin this sorts *seq* in place and
    # accepts no key/reverse arguments — callers in this module only need
    # the simple form.
    def sorted(seq):
        seq.sort()
        return seq

# Python 2.3 support
def HTTPResponse__getheaders(self):
    """Return list of (header, value) tuples."""
    if self.msg is None:
        raise httplib.ResponseNotReady()
    return self.msg.items()

if not hasattr(httplib.HTTPResponse, 'getheaders'):
    # Backport: graft getheaders() onto HTTPResponse for Python < 2.4.
    httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass

# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
    def __init__(self, desc, response, content):
        # Keep the partial response/content so callers (or the
        # force_exception_to_status_code path) can recover them.
        self.response = response
        self.content = content
        HttpLib2Error.__init__(self, desc)

class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass   # 3xx without a Location header
class RedirectLimit(HttpLib2ErrorWithResponse): pass             # too many redirects followed
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass # gzip/deflate body would not decode
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass

class RelativeURIError(HttpLib2Error): pass    # an absolute URI was required
class ServerNotFoundError(HttpLib2Error): pass # DNS lookup failed
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
    """Return the names of *response* headers that are end-to-end.

    Hop-by-hop headers are the static HOP_BY_HOP list plus anything the
    response itself names in its Connection header.
    """
    excluded = list(HOP_BY_HOP)
    for token in response.get('connection', '').split(','):
        excluded.append(token.strip())
    return [name for name in response.keys() if name not in excluded]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
    """Parses a URI using the regex given in Appendix B of RFC 3986.

        (scheme, authority, path, query, fragment) = parse_uri(uri)

    Missing components come back as None.
    """
    match = URI.match(uri)
    # Group layout: 2=scheme, 4=authority, 5=path, 7=query, 9=fragment
    # (the odd-numbered groups carry the delimiters).
    return (match.group(2), match.group(4), match.group(5),
            match.group(7), match.group(9))
def urlnorm(uri):
    """Normalize an absolute *uri* for cache-key purposes.

    Lowercases the scheme and authority, defaults an empty path to "/",
    and returns (scheme, authority, request_uri, defrag_uri) where
    defrag_uri drops the fragment.  Raises RelativeURIError for
    non-absolute URIs.
    """
    (scheme, authority, path, query, fragment) = parse_uri(uri)
    if not scheme or not authority:
        raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
    scheme = scheme.lower()
    authority = authority.lower()
    if not path:
        path = "/"
    # Could do syntax based normalization of the URI before
    # computing the digest. See Section 6.2.2 of Std 66.
    if query:
        request_uri = "?".join([path, query])
    else:
        request_uri = path
    defrag_uri = "%s://%s%s" % (scheme, authority, request_uri)
    return scheme, authority, request_uri, defrag_uri
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')
re_slash = re.compile(r'[?/:|]+')
def safename(filename):
    """Return a filename suitable for the cache.

    Strips dangerous and common characters to create a filename we
    can use to store the cache in.  The result is "<mangled-name>,<md5>"
    so distinct URLs never collide even after mangling/truncation.
    """
    try:
        # IDNA-encode URL-looking keys so equivalent non-ASCII hostnames
        # map to the same cache entry (Python 2 str/unicode handling).
        if re_url_scheme.match(filename):
            if isinstance(filename,str):
                filename = filename.decode('utf-8')
                filename = filename.encode('idna')
            else:
                filename = filename.encode('idna')
    except UnicodeError:
        pass
    if isinstance(filename,unicode):
        filename=filename.encode('utf-8')
    # Hash the *original* (pre-mangling) name; appended below for uniqueness.
    filemd5 = _md5(filename).hexdigest()
    filename = re_url_scheme.sub("", filename)
    filename = re_slash.sub(",", filename)

    # limit length of filename to keep within filesystem name limits
    if len(filename)>200:
        filename=filename[:200]
    return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.iteritems()])
def _parse_cache_control(headers):
    """Parse the Cache-Control header (if any) into a dict.

    Directives with an argument map to their lowercased argument string;
    bare directives map to 1.  Returns {} when the header is absent.
    """
    directives = {}
    if 'cache-control' in headers:
        parts = headers['cache-control'].split(',')
        # Two passes to preserve the original override order:
        # bare directives win over valued ones of the same name.
        for part in parts:
            if "=" in part:
                name, arg = part.split("=", 1)
                directives[name.strip().lower()] = arg.strip().lower()
        for part in parts:
            if "=" not in part:
                directives[part.strip().lower()] = 1
    return directives
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on, usefull for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
    """Returns a dictionary of dictionaries, one dict
    per auth_scheme.

    Each inner dict maps lowercased auth-param names to their unquoted
    values.  Parsing strictness is controlled by the module flag
    USE_WWW_AUTH_STRICT_PARSING.
    """
    retval = {}
    if headers.has_key(headername):
        authenticate = headers[headername].strip()
        www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
        while authenticate:
            # Break off the scheme at the beginning of the line
            if headername == 'authentication-info':
                # Authentication-Info carries no scheme token; it is
                # implicitly Digest.
                (auth_scheme, the_rest) = ('digest', authenticate)
            else:
                (auth_scheme, the_rest) = authenticate.split(" ", 1)
            # Now loop over all the key value pairs that come after the scheme,
            # being careful not to roll into the next scheme
            match = www_auth.search(the_rest)
            auth_params = {}
            while match:
                if match and len(match.groups()) == 3:
                    (key, value, the_rest) = match.groups()
                    # Undo backslash escaping inside quoted-strings.
                    auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
                match = www_auth.search(the_rest)
            retval[auth_scheme.lower()] = auth_params
            # Whatever the param regex did not consume starts the next scheme.
            authenticate = the_rest.strip()
    return retval
def _entry_disposition(response_headers, request_headers):
    """Determine freshness from the Date, Expires and Cache-Control headers.

    We don't handle the following:

    1. Cache-Control: max-stale
    2. Age: headers are not used in the calculations.

    Not that this algorithm is simpler than you might think
    because we are operating as a private (non-shared) cache.
    This lets us ignore 's-maxage'. We can also ignore
    'proxy-invalidate' since we aren't a proxy.
    We will never return a stale document as
    fresh as a design decision, and thus the non-implementation
    of 'max-stale'. This also lets us safely ignore 'must-revalidate'
    since we operate as if every server has sent 'must-revalidate'.
    Since we are private we get to ignore both 'public' and
    'private' parameters. We also ignore 'no-transform' since
    we don't do any transformations.
    The 'no-store' parameter is handled at a higher level.
    So the only Cache-Control parameters we look at are:

    no-cache
    only-if-cached
    max-age
    min-fresh

    Returns one of the strings "STALE", "FRESH" or "TRANSPARENT".
    """
    retval = "STALE"
    cc = _parse_cache_control(request_headers)
    cc_response = _parse_cache_control(response_headers)

    if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1:
        # HTTP/1.0-style no-cache: bypass the cache and also force a
        # Cache-Control: no-cache onto the outgoing request headers.
        retval = "TRANSPARENT"
        if 'cache-control' not in request_headers:
            request_headers['cache-control'] = 'no-cache'
    elif cc.has_key('no-cache'):
        retval = "TRANSPARENT"
    elif cc_response.has_key('no-cache'):
        retval = "STALE"
    elif cc.has_key('only-if-cached'):
        retval = "FRESH"
    elif response_headers.has_key('date'):
        # Freshness lifetime precedence: response max-age, then Expires,
        # else zero; a request max-age overrides all of them.
        date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
        now = time.time()
        current_age = max(0, now - date)
        if cc_response.has_key('max-age'):
            try:
                freshness_lifetime = int(cc_response['max-age'])
            except ValueError:
                freshness_lifetime = 0
        elif response_headers.has_key('expires'):
            expires = email.Utils.parsedate_tz(response_headers['expires'])
            if None == expires:
                freshness_lifetime = 0
            else:
                freshness_lifetime = max(0, calendar.timegm(expires) - date)
        else:
            freshness_lifetime = 0
        if cc.has_key('max-age'):
            try:
                freshness_lifetime = int(cc['max-age'])
            except ValueError:
                freshness_lifetime = 0
        if cc.has_key('min-fresh'):
            # min-fresh: treat the entry as this many seconds older than
            # it really is.
            try:
                min_fresh = int(cc['min-fresh'])
            except ValueError:
                min_fresh = 0
            current_age += min_fresh
        if freshness_lifetime > current_age:
            retval = "FRESH"
    return retval
def _decompressContent(response, new_content):
    """Decompress *new_content* according to the response's
    Content-Encoding (gzip or deflate).

    On success, fixes up 'content-length' and records the original
    encoding under '-content-encoding' (removing 'content-encoding' so
    the body and headers stay consistent).  Raises
    FailedToDecompressContent when the body cannot be decoded.
    """
    content = new_content
    try:
        encoding = response.get('content-encoding', None)
        if encoding in ['gzip', 'deflate']:
            if encoding == 'gzip':
                content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
            if encoding == 'deflate':
                content = zlib.decompress(content)
            response['content-length'] = str(len(content))
            # Record the historical presence of the encoding in a way the won't interfere.
            response['-content-encoding'] = response['content-encoding']
            del response['content-encoding']
    except (IOError, zlib.error):
        # Bug fix: a corrupt deflate body raises zlib.error, which the
        # original IOError-only handler let escape instead of converting
        # it to FailedToDecompressContent like the gzip path.
        content = ""
        raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
    return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
    """Store (or purge) a response in *cache* under *cachekey*.

    Honors 'no-store' on either the request or the response by deleting
    any existing entry.  Headers named by Vary are recorded as
    '-varied-<header>' annotations so a later request can be matched
    against the values it was varied on.  The entry is serialized as a
    status line, CRLF-normalized headers, then the raw body.
    """
    if cachekey:
        cc = _parse_cache_control(request_headers)
        cc_response = _parse_cache_control(response_headers)
        if cc.has_key('no-store') or cc_response.has_key('no-store'):
            cache.delete(cachekey)
        else:
            info = email.Message.Message()
            for key, value in response_headers.iteritems():
                # Hop-specific/body-specific headers must not be replayed.
                if key not in ['status','content-encoding','transfer-encoding']:
                    info[key] = value

            # Add annotations to the cache to indicate what headers
            # are variant for this request.
            vary = response_headers.get('vary', None)
            if vary:
                vary_headers = vary.lower().replace(' ', '').split(',')
                for header in vary_headers:
                    key = '-varied-%s' % header
                    try:
                        info[key] = request_headers[header]
                    except KeyError:
                        pass

            status = response_headers.status
            if status == 304:
                status = 200

            # Bug fix: format the remapped status (304 -> 200).  The
            # original formatted response_headers.status, so the remap
            # above was dead code and replayed entries claimed 304.
            status_header = 'status: %d\r\n' % status

            header_str = info.as_string()
            # Normalize bare CR or LF to CRLF.
            header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
            text = "".join([status_header, header_str, content])

            cache.set(cachekey, text)
def _cnonce():
    """Return a 16-hex-char client nonce for Digest/HMACDigest auth."""
    # md5 of the current time plus 20 pseudo-random digits.
    # NOTE(review): randrange(0, 9) never yields 9, and it is the digit
    # *list's repr* (not a joined string) that gets hashed — harmless for
    # nonce purposes, but surprising; confirm before "fixing".
    dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest()
    return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
    # WSSE UsernameToken password digest: Base64(SHA1(nonce + created + password)).
    return base64.b64encode(_sha("%s%s%s" % (cnonce, iso_now, password)).digest()).strip()
# For credentials we need two things, first
# a pool of credential to try (not necesarily tied to BAsic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.

class Authentication(object):
    """Base class for one authentication scheme bound to a (host, path) scope."""

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        self.path = path
        self.host = host
        self.credentials = credentials
        self.http = http

    def depth(self, request_uri):
        # Segments below this auth's scope; used to pick the most
        # specific applicable Authentication object.
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        return request_uri[len(self.path):].count("/")

    def inscope(self, host, request_uri):
        # XXX Should we normalize the request_uri?
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        return (host == self.host) and path.startswith(self.path)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header. Override this in sub-classes."""
        pass

    def response(self, response, content):
        """Gives us a chance to update with new nonces
        or such returned from the last authorized response.
        Override this in sub-classes if necessary.

        Return True if the request is to be retried, for
        example Digest may return stale=true.
        """
        return False
class BasicAuthentication(Authentication):
    """RFC 2617 Basic auth: a static base64(user:password) header."""

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Add the Basic Authorization header to *headers*."""
        userpass = "%s:%s" % self.credentials
        headers['authorization'] = 'Basic ' + base64.b64encode(userpass).strip()
class DigestAuthentication(Authentication):
    """RFC 2617 Digest auth.  Only do qop='auth' and MD5, since that
    is all Apache currently implements."""

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['digest']
        qop = self.challenge.get('qop', 'auth')
        self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
        if self.challenge['qop'] is None:
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
        if self.challenge['algorithm'] != 'MD5':
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        # A1 = username ":" realm ":" password (RFC 2617 section 3.2.2.2).
        self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
        # Nonce-use count; incremented every time we sign a request.
        self.challenge['nc'] = 1

    def request(self, method, request_uri, headers, content, cnonce = None):
        """Compute the Digest response and set the Authorization header.

        *cnonce* may be supplied for testing; otherwise a fresh one is
        generated per request.
        """
        H = lambda x: _md5(x).hexdigest()
        KD = lambda s, d: H("%s:%s" % (s, d))
        A2 = "".join([method, ":", request_uri])
        self.challenge['cnonce'] = cnonce or _cnonce()
        # response = KD(H(A1), nonce:nc:cnonce:qop:H(A2)) per RFC 2617.
        request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'],
                    '%08x' % self.challenge['nc'],
                    self.challenge['cnonce'],
                    self.challenge['qop'], H(A2)
                    ))
        headers['Authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
                self.credentials[0],
                self.challenge['realm'],
                self.challenge['nonce'],
                request_uri,
                self.challenge['algorithm'],
                request_digest,
                self.challenge['qop'],
                self.challenge['nc'],
                self.challenge['cnonce'],
                )
        self.challenge['nc'] += 1

    def response(self, response, content):
        if not response.has_key('authentication-info'):
            # No Authentication-Info header: look for a stale-nonce
            # challenge, which means retry with the fresh nonce.
            challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
            if 'true' == challenge.get('stale'):
                self.challenge['nonce'] = challenge['nonce']
                self.challenge['nc'] = 1
                return True
        else:
            # The server may proactively hand us the next nonce to use.
            updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})
            if updated_challenge.has_key('nextnonce'):
                self.challenge['nonce'] = updated_challenge['nextnonce']
                self.challenge['nc'] = 1
        return False
class HmacDigestAuthentication(Authentication):
    """Adapted from Robert Sayre's code and DigestAuthentication above."""
    __author__ = "Thomas Broyer (t.broyer@ltgt.net)"

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['hmacdigest']
        # TODO: self.challenge['domain']
        self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
        if self.challenge['reason'] not in ['unauthorized', 'integrity']:
            self.challenge['reason'] = 'unauthorized'
        self.challenge['salt'] = self.challenge.get('salt', '')
        if not self.challenge.get('snonce'):
            raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
        if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
        if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
        if self.challenge['algorithm'] == 'HMAC-MD5':
            self.hashmod = _md5
        else:
            self.hashmod = _sha
        if self.challenge['pw-algorithm'] == 'MD5':
            self.pwhashmod = _md5
        else:
            self.pwhashmod = _sha
        # Derived key: hash("username:hash(password+salt):realm").
        # NOTE(review): .new() assumes the pre-hashlib md5/sha modules
        # (_sha/_md5 may be hashlib constructors on 2.6+, which have no
        # .new attribute) — confirm on the targeted Python versions.
        self.key = "".join([self.credentials[0], ":",
                    self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
                    ":", self.challenge['realm']
                    ])
        self.key = self.pwhashmod.new(self.key).hexdigest().lower()

    def request(self, method, request_uri, headers, content):
        """Sign the end-to-end headers and set the HMACDigest Authorization header."""
        keys = _get_end2end_headers(headers)
        keylist = "".join(["%s " % k for k in keys])
        headers_val = "".join([headers[k] for k in keys])
        created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
        cnonce = _cnonce()
        request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
        request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
        headers['Authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
                self.credentials[0],
                self.challenge['realm'],
                self.challenge['snonce'],
                cnonce,
                request_uri,
                created,
                request_digest,
                keylist,
                )

    def response(self, response, content):
        # Retry when the server reports an integrity failure or a stale nonce.
        challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
        if challenge.get('reason') in ['integrity', 'stale']:
            return True
        return False
class WsseAuthentication(Authentication):
    """This is thinly tested and should not be relied upon.
    At this time there isn't any third party server to test against.
    Blogger and TypePad implemented this algorithm at one point
    but Blogger has since switched to Basic over HTTPS and
    TypePad has implemented it wrong, by never issuing a 401
    challenge but instead requiring your client to telepathically know that
    their endpoint is expecting WSSE profile="UsernameToken"."""

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        headers['Authorization'] = 'WSSE profile="UsernameToken"'
        iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        cnonce = _cnonce()
        # PasswordDigest = Base64(SHA1(nonce + created + password)).
        password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
        headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
                self.credentials[0],
                password_digest,
                cnonce,
                iso_now)
class GoogleLoginAuthentication(Authentication):
    """Google ClientLogin auth: trades the credentials for an Auth token
    at construction time, then sends it as 'GoogleLogin Auth=<token>'."""

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        from urllib import urlencode
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        service = challenge['googlelogin'].get('service', 'xapi')
        # Bloggger actually returns the service in the challenge
        # For the rest we guess based on the URI
        if service == 'xapi' and request_uri.find("calendar") > 0:
            service = "cl"
        # No point in guessing Base or Spreadsheet
        #elif request_uri.find("spreadsheets") > 0:
        #   service = "wise"

        auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
        # NOTE: this performs a blocking network POST to ClientLogin
        # while the object is being constructed.
        resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
        lines = content.split('\n')
        d = dict([tuple(line.split("=", 1)) for line in lines if line])
        if resp.status == 403:
            # Login refused; send an empty token rather than fail here.
            self.Auth = ""
        else:
            self.Auth = d['Auth']

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
AUTH_SCHEME_CLASSES = {
"basic": BasicAuthentication,
"wsse": WsseAuthentication,
"digest": DigestAuthentication,
"hmacdigest": HmacDigestAuthentication,
"googlelogin": GoogleLoginAuthentication
}
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
    """Uses a local directory as a store for cached files.
    Not really safe to use if multiple threads or processes are going to
    be running on the same cache.
    """
    def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
        self.cache = cache
        self.safe = safe
        if not os.path.exists(cache):
            os.makedirs(self.cache)

    def _path_for(self, key):
        # Map a cache key to its on-disk filename.
        return os.path.join(self.cache, self.safe(key))

    def get(self, key):
        """Return the cached bytes for *key*, or None when absent/unreadable."""
        try:
            f = open(self._path_for(key), "rb")
            try:
                return f.read()
            finally:
                f.close()
        except IOError:
            return None

    def set(self, key, value):
        """Write *value* for *key*, replacing any existing entry."""
        f = open(self._path_for(key), "wb")
        try:
            f.write(value)
        finally:
            f.close()

    def delete(self, key):
        """Remove the entry for *key* if it exists."""
        path = self._path_for(key)
        if os.path.exists(path):
            os.remove(path)
class Credentials(object):
    """An ordered pool of (domain, name, password) records.

    An empty domain acts as a wildcard that matches every host.
    """
    def __init__(self):
        self.credentials = []

    def add(self, name, password, domain=""):
        """Register a name/password pair, optionally scoped to *domain*."""
        self.credentials.append((domain.lower(), name, password))

    def clear(self):
        """Forget every registered credential."""
        self.credentials = []

    def iter(self, domain):
        """Yield (name, password) pairs applicable to *domain*, in insertion order."""
        for record in self.credentials:
            (cdomain, name, password) = record
            if cdomain == "" or domain == cdomain:
                yield (name, password)
class KeyCerts(Credentials):
    """Identical to Credentials except that
    name/password are mapped to key/cert (client SSL key file and
    certificate file paths, scoped by domain)."""
    pass
class ProxyInfo(object):
    """Collect information required to use a proxy."""

    def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None):
        """The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX
        constants. For example:

        p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000)
        """
        self.proxy_type = proxy_type
        self.proxy_host = proxy_host
        self.proxy_port = proxy_port
        self.proxy_rdns = proxy_rdns
        self.proxy_user = proxy_user
        self.proxy_pass = proxy_pass

    def astuple(self):
        """Return the settings in the order socks.socksocket.setproxy expects."""
        return (self.proxy_type, self.proxy_host, self.proxy_port,
                self.proxy_rdns, self.proxy_user, self.proxy_pass)

    def isgood(self):
        """True when the socks module is importable and host/port are set."""
        return socks and (self.proxy_host != None) and (self.proxy_port != None)
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
    """HTTPConnection subclass that supports timeouts (and optionally a
    SOCKS proxy via proxy_info)."""

    def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
        httplib.HTTPConnection.__init__(self, host, port, strict)
        self.timeout = timeout
        self.proxy_info = proxy_info

    def connect(self):
        """Connect to the host and port specified in __init__."""
        # Mostly verbatim from httplib.py.
        msg = "getaddrinfo returns an empty list"
        # Try each resolved address family/sockaddr until one connects.
        for res in socket.getaddrinfo(self.host, self.port, 0,
                socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                if self.proxy_info and self.proxy_info.isgood():
                    # Route the connection through the configured SOCKS proxy.
                    self.sock = socks.socksocket(af, socktype, proto)
                    self.sock.setproxy(*self.proxy_info.astuple())
                else:
                    self.sock = socket.socket(af, socktype, proto)
                # Different from httplib: support timeouts.
                if has_timeout(self.timeout):
                    self.sock.settimeout(self.timeout)
                # End of difference from httplib.
                if self.debuglevel > 0:
                    print "connect: (%s, %s)" % (self.host, self.port)
                self.sock.connect(sa)
            except socket.error, msg:
                if self.debuglevel > 0:
                    print 'connect fail:', (self.host, self.port)
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            # Every candidate address failed; re-raise the last error.
            raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
    """This class allows communication via SSL, with optional timeout and
    SOCKS-proxy support."""

    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 strict=None, timeout=None, proxy_info=None):
        httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file,
                cert_file=cert_file, strict=strict)
        self.timeout = timeout
        self.proxy_info = proxy_info

    def connect(self):
        "Connect to a host on a given (SSL) port."
        if self.proxy_info and self.proxy_info.isgood():
            # Tunnel through the configured SOCKS proxy.
            sock = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
            sock.setproxy(*self.proxy_info.astuple())
        else:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if has_timeout(self.timeout):
            sock.settimeout(self.timeout)
        sock.connect((self.host, self.port))
        # Wrap the plain socket in SSL after the TCP connection is up.
        self.sock = _ssl_wrap_socket(sock, self.key_file, self.cert_file)
class Http(object):
"""An HTTP client that handles:
- all methods
- caching
- ETags
- compression,
- HTTPS
- Basic
- Digest
- WSSE
and more.
"""
    def __init__(self, cache=None, timeout=None, proxy_info=None):
        """The value of proxy_info is a ProxyInfo instance.

        If 'cache' is a string then it is used as a directory name
        for a disk cache. Otherwise it must be an object that supports
        the same interface as FileCache.

        'timeout' is a per-connection socket timeout in seconds, or None
        for the platform default.
        """
        self.proxy_info = proxy_info
        # Map domain name to an httplib connection
        self.connections = {}
        # The location of the cache, for now a directory
        # where cached responses are held.
        if cache and isinstance(cache, str):
            self.cache = FileCache(cache)
        else:
            self.cache = cache

        # Name/password
        self.credentials = Credentials()

        # Key/cert
        self.certificates = KeyCerts()

        # authorization objects
        self.authorizations = []

        # If set to False then no redirects are followed, even safe ones.
        self.follow_redirects = True

        # Which HTTP methods do we apply optimistic concurrency to, i.e.
        # which methods get an "if-match:" etag header added to them.
        self.optimistic_concurrency_methods = ["PUT"]

        # If 'follow_redirects' is True, and this is set to True then
        # all redirecs are followed, including unsafe ones.
        self.follow_all_redirects = False

        # If True, ETags from cached entries are not sent as If-None-Match.
        self.ignore_etag = False

        # If True, transport-level exceptions are converted into
        # synthetic (response, content) pairs instead of being raised.
        self.force_exception_to_status_code = False

        self.timeout = timeout
    def _auth_from_challenge(self, host, request_uri, headers, response, content):
        """A generator that creates Authorization objects
        that can be applied to requests.
        """
        challenges = _parse_www_authenticate(response, 'www-authenticate')
        for cred in self.credentials.iter(host):
            for scheme in AUTH_SCHEME_ORDER:
                # For each applicable credential, yield a handler for each
                # scheme the server offered, strongest scheme first.
                if challenges.has_key(scheme):
                    yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
    def add_credentials(self, name, password, domain=""):
        """Add a name and password that will be used
        any time a request requires authentication.

        An empty *domain* applies the credential to every host.
        """
        self.credentials.add(name, password, domain)
    def add_certificate(self, key, cert, domain):
        """Add a key and cert that will be used
        any time a request requires authentication
        (client SSL key file and certificate file for *domain*).
        """
        self.certificates.add(key, cert, domain)
    def clear_credentials(self):
        """Remove all the names and passwords
        that are used for authentication.

        Also drops any Authorization objects already negotiated with
        servers, since they were built from those credentials.
        """
        self.credentials.clear()
        self.authorizations = []
    def _conn_request(self, conn, request_uri, method, body, headers):
        """Issue one request on *conn*, retrying once after reconnecting
        if the connection was dropped.  Returns (Response, content);
        the content of non-HEAD responses is decompressed.
        """
        for i in range(2):
            try:
                conn.request(method, request_uri, body, headers)
            except socket.gaierror:
                # DNS failure: no point retrying on the same host.
                conn.close()
                raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
            except (socket.error, httplib.HTTPException):
                # Just because the server closed the connection doesn't apparently mean
                # that the server didn't send a response.
                pass
            try:
                response = conn.getresponse()
            except (socket.error, httplib.HTTPException):
                if i == 0:
                    # First failure: reconnect and retry the request once.
                    conn.close()
                    conn.connect()
                    continue
                else:
                    raise
            else:
                content = ""
                if method == "HEAD":
                    # HEAD has no body; just release the connection.
                    response.close()
                else:
                    content = response.read()
                response = Response(response)
                if method != "HEAD":
                    content = _decompressContent(response, content)
            break
        return (response, content)
    def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
        """Do the actual request using the connection object
        and also follow one level of redirects if necessary.

        Handles pre-emptive authorization, 401 challenge/response, and
        caching of 200/203/301 results.  Returns (Response, content).
        """

        # Pick the most specific in-scope authorization we already hold
        # and apply it pre-emptively.
        auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
        auth = auths and sorted(auths)[0][1] or None
        if auth:
            auth.request(method, request_uri, headers, body)

        (response, content) = self._conn_request(conn, request_uri, method, body, headers)

        if auth:
            if auth.response(response, body):
                # e.g. Digest stale=true: re-sign with the new nonce and retry once.
                auth.request(method, request_uri, headers, body)
                (response, content) = self._conn_request(conn, request_uri, method, body, headers )
                response._stale_digest = 1

        if response.status == 401:
            # Try each credential/scheme combination until one is accepted.
            for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
                authorization.request(method, request_uri, headers, body)
                (response, content) = self._conn_request(conn, request_uri, method, body, headers, )
                if response.status != 401:
                    self.authorizations.append(authorization)
                    authorization.response(response, body)
                    break

        if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
            if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
                # Pick out the location header and basically start from the beginning
                # remembering first to strip the ETag header and decrement our 'depth'
                if redirections:
                    if not response.has_key('location') and response.status != 300:
                        raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
                    # Fix-up relative redirects (which violate an RFC 2616 MUST)
                    if response.has_key('location'):
                        location = response['location']
                        (scheme, authority, path, query, fragment) = parse_uri(location)
                        if authority == None:
                            response['location'] = urlparse.urljoin(absolute_uri, location)
                    if response.status == 301 and method in ["GET", "HEAD"]:
                        # Permanent redirect: remember it in the cache so the
                        # next request can skip straight to the new URL.
                        response['-x-permanent-redirect-url'] = response['location']
                        if not response.has_key('content-location'):
                            response['content-location'] = absolute_uri
                        _updateCache(headers, response, content, self.cache, cachekey)
                    # Conditional headers must not leak into the redirected request.
                    if headers.has_key('if-none-match'):
                        del headers['if-none-match']
                    if headers.has_key('if-modified-since'):
                        del headers['if-modified-since']
                    if response.has_key('location'):
                        location = response['location']
                        old_response = copy.deepcopy(response)
                        if not old_response.has_key('content-location'):
                            old_response['content-location'] = absolute_uri
                        # 303 forces a GET on the new location (except for GET/HEAD).
                        redirect_method = ((response.status == 303) and (method not in ["GET", "HEAD"])) and "GET" or method
                        (response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
                        # Chain the redirect history onto the final response.
                        response.previous = old_response
                else:
                    raise RedirectLimit( _("Redirected more times than rediection_limit allows."), response, content)
        elif response.status in [200, 203] and method == "GET":
            # Don't cache 206's since we aren't going to handle byte range requests
            if not response.has_key('content-location'):
                response['content-location'] = absolute_uri
            _updateCache(headers, response, content, self.cache, cachekey)

        return (response, content)
def _normalize_headers(self, headers):
    # Thin instance-level wrapper around the module-level helper of the
    # same name: lower-cases header names so later lookups (e.g.
    # 'if-none-match', 'accept-encoding') are case-insensitive.
    return _normalize_headers(headers)
# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.
def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
    """ Performs a single HTTP request.

    The 'uri' is the URI of the HTTP resource and can begin
    with either 'http' or 'https'. The value of 'uri' must be an absolute URI.

    The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc.
    There is no restriction on the methods allowed.

    The 'body' is the entity body to be sent with the request. It is a string
    object.

    Any extra headers that are to be sent with the request should be provided in the
    'headers' dictionary.

    The maximum number of redirects to follow before raising an
    exception is 'redirections'. The default is 5.

    The return value is a tuple of (response, content), the first
    being an instance of the 'Response' class, the second being
    a string that contains the response entity body.
    """
    try:
        # Normalize caller-supplied headers to lower-case names so all
        # later membership tests are case-insensitive.
        if headers is None:
            headers = {}
        else:
            headers = self._normalize_headers(headers)

        if not headers.has_key('user-agent'):
            headers['user-agent'] = "Python-httplib2/%s" % __version__

        # Convert an IRI to a plain-ASCII URI, then split and normalize it.
        uri = iri2uri(uri)

        (scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
        domain_port = authority.split(":")[0:2]
        # An explicit :443 on an http URI is treated as https.
        if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
            scheme = 'https'
            authority = domain_port[0]

        # Reuse a pooled connection per scheme+authority when available.
        conn_key = scheme+":"+authority
        if conn_key in self.connections:
            conn = self.connections[conn_key]
        else:
            if not connection_type:
                connection_type = (scheme == 'https') and HTTPSConnectionWithTimeout or HTTPConnectionWithTimeout
            certs = list(self.certificates.iter(authority))
            # Use client certificates registered for this authority, if any.
            if scheme == 'https' and certs:
                conn = self.connections[conn_key] = connection_type(authority, key_file=certs[0][0],
                    cert_file=certs[0][1], timeout=self.timeout, proxy_info=self.proxy_info)
            else:
                conn = self.connections[conn_key] = connection_type(authority, timeout=self.timeout, proxy_info=self.proxy_info)
            conn.set_debuglevel(debuglevel)

        # Advertise compression only for safe methods without a Range header.
        if method in ["GET", "HEAD"] and 'range' not in headers and 'accept-encoding' not in headers:
            headers['accept-encoding'] = 'gzip, deflate'

        info = email.Message.Message()
        cached_value = None
        if self.cache:
            cachekey = defrag_uri
            cached_value = self.cache.get(cachekey)
            if cached_value:
                # info = email.message_from_string(cached_value)
                #
                # Need to replace the line above with the kludge below
                # to fix the non-existent bug not fixed in this
                # bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
                try:
                    info, content = cached_value.split('\r\n\r\n', 1)
                    feedparser = email.FeedParser.FeedParser()
                    feedparser.feed(info)
                    info = feedparser.close()
                    feedparser._parse = None
                except IndexError:
                    # Corrupt cache entry: drop it and fall through to a
                    # normal uncached request.
                    self.cache.delete(cachekey)
                    cachekey = None
                    cached_value = None
        else:
            cachekey = None

        if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
            # http://www.w3.org/1999/04/Editing/
            headers['if-match'] = info['etag']

        if method not in ["GET", "HEAD"] and self.cache and cachekey:
            # RFC 2616 Section 13.10
            self.cache.delete(cachekey)

        # Check the vary header in the cache to see if this request
        # matches what varies in the cache.
        if method in ['GET', 'HEAD'] and 'vary' in info:
            vary = info['vary']
            vary_headers = vary.lower().replace(' ', '').split(',')
            for header in vary_headers:
                key = '-varied-%s' % header
                value = info[key]
                if headers.get(header, '') != value:
                    # The stored response varied on a header whose value
                    # differs in this request, so the cache entry is unusable.
                    cached_value = None
                    break

        if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
            if info.has_key('-x-permanent-redirect-url'):
                # Should cached permanent redirects be counted in our redirection count? For now, yes.
                (response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
                response.previous = Response(info)
                response.previous.fromcache = True
            else:
                # Determine our course of action:
                #   Is the cached entry fresh or stale?
                #   Has the client requested a non-cached response?
                #
                # There seems to be three possible answers:
                # 1. [FRESH] Return the cache entry w/o doing a GET
                # 2. [STALE] Do the GET (but add in cache validators if available)
                # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
                entry_disposition = _entry_disposition(info, headers)

                if entry_disposition == "FRESH":
                    if not cached_value:
                        info['status'] = '504'
                        content = ""
                    response = Response(info)
                    if cached_value:
                        response.fromcache = True
                    return (response, content)

                if entry_disposition == "STALE":
                    # Attach validators so the server can answer 304.
                    if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
                        headers['if-none-match'] = info['etag']
                    if info.has_key('last-modified') and not 'last-modified' in headers:
                        headers['if-modified-since'] = info['last-modified']
                elif entry_disposition == "TRANSPARENT":
                    pass

                (response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)

                if response.status == 304 and method == "GET":
                    # Rewrite the cache entry with the new end-to-end headers
                    # Take all headers that are in response
                    # and overwrite their values in info.
                    # unless they are hop-by-hop, or are listed in the connection header.
                    for key in _get_end2end_headers(response):
                        info[key] = response[key]
                    merged_response = Response(info)
                    if hasattr(response, "_stale_digest"):
                        merged_response._stale_digest = response._stale_digest
                    _updateCache(headers, merged_response, content, self.cache, cachekey)
                    response = merged_response
                    response.status = 200
                    response.fromcache = True
                elif response.status == 200:
                    content = new_content
                else:
                    self.cache.delete(cachekey)
                    content = new_content
        else:
            cc = _parse_cache_control(headers)
            if cc.has_key('only-if-cached'):
                # RFC 2616: only-if-cached with no usable entry -> 504.
                info['status'] = '504'
                response = Response(info)
                content = ""
            else:
                (response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
    except Exception, e:
        # Optionally convert any failure into a synthetic Response
        # instead of raising, so callers can treat errors uniformly.
        if self.force_exception_to_status_code:
            if isinstance(e, HttpLib2ErrorWithResponse):
                response = e.response
                content = e.content
                response.status = 500
                response.reason = str(e)
            elif isinstance(e, socket.timeout):
                content = "Request Timeout"
                response = Response( {
                    "content-type": "text/plain",
                    "status": "408",
                    "content-length": len(content)
                    })
                response.reason = "Request Timeout"
            else:
                content = str(e)
                response = Response( {
                    "content-type": "text/plain",
                    "status": "400",
                    "content-length": len(content)
                    })
                response.reason = "Bad Request"
        else:
            raise
    return (response, content)
class Response(dict):
    """An object more like email.Message than httplib.HTTPResponse.

    Behaves as a dict of lower-cased header names plus a few attributes
    (status, reason, version, fromcache, previous).
    """

    """Is this response from our local cache"""
    fromcache = False

    """HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """
    version = 11

    "Status code returned by server. "
    status = 200

    """Reason phrase returned by server."""
    reason = "Ok"

    # Previous Response in a redirect chain, or None for a direct answer.
    previous = None

    def __init__(self, info):
        # info is either an email.Message or
        # an httplib.HTTPResponse object.
        if isinstance(info, httplib.HTTPResponse):
            for key, value in info.getheaders():
                # Header names are stored lower-cased for uniform lookup.
                self[key.lower()] = value
            self.status = info.status
            self['status'] = str(self.status)
            self.reason = info.reason
            self.version = info.version
        elif isinstance(info, email.Message.Message):
            for key, value in info.items():
                self[key] = value
            self.status = int(self['status'])
        else:
            # Fall back to treating info as a plain mapping (e.g. a dict
            # built for a synthetic error response).
            for key, value in info.iteritems():
                self[key] = value
            self.status = int(self.get('status', self.status))

    def __getattr__(self, name):
        # Legacy accessor: resp.dict returns the mapping itself.
        if name == 'dict':
            return self
        else:
            raise AttributeError, name
|
apache-2.0
|
paulmadore/Eric-IDE
|
6-6.0.9/eric/Helpviewer/UserAgent/UserAgentManager.py
|
2
|
7095
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a user agent manager.
"""
from __future__ import unicode_literals
import os
from PyQt5.QtCore import pyqtSignal, QObject, QXmlStreamReader
from E5Gui import E5MessageBox
from Utilities.AutoSaver import AutoSaver
import Utilities
class UserAgentManager(QObject):
    """
    Class implementing a user agent manager.

    Persists a per-host mapping of user agent strings to an XML file and
    saves it lazily through an AutoSaver bound to the changed() signal.

    @signal changed() emitted to indicate a change
    @signal userAgentSettingsSaved() emitted after the user agent settings
        were saved
    """
    changed = pyqtSignal()
    userAgentSettingsSaved = pyqtSignal()

    def __init__(self, parent=None):
        """
        Constructor

        @param parent reference to the parent object (QObject)
        """
        super(UserAgentManager, self).__init__(parent)

        self.__agents = {}
        # dictionary with agent strings indexed by host name
        self.__loaded = False
        # loading is deferred until the first read access

        self.__saveTimer = AutoSaver(self, self.save)

        # any change schedules a deferred save via the AutoSaver
        self.changed.connect(self.__saveTimer.changeOccurred)

    def getFileName(self):
        """
        Public method to get the file name of the user agents file.

        @return name of the user agents file (string)
        """
        return os.path.join(
            Utilities.getConfigDir(), "browser", "userAgentSettings.xml")

    def save(self):
        """
        Public slot to save the user agent entries to disk.
        """
        # Never write before a load: saving an empty dict would wipe the
        # user's stored settings.
        if not self.__loaded:
            return

        from .UserAgentWriter import UserAgentWriter
        agentFile = self.getFileName()
        writer = UserAgentWriter()
        if not writer.write(agentFile, self.__agents):
            E5MessageBox.critical(
                None,
                self.tr("Saving user agent data"),
                self.tr(
                    """<p>User agent data could not be saved to"""
                    """ <b>{0}</b></p>""").format(agentFile))
        else:
            self.userAgentSettingsSaved.emit()

    def __load(self):
        """
        Private method to load the saved user agent settings.
        """
        agentFile = self.getFileName()
        if not os.path.exists(agentFile):
            # No XML file yet: try converting the legacy non-XML file
            # (same path without the .xml suffix).
            self.__loadNonXml(os.path.splitext(agentFile)[0])
        else:
            from .UserAgentReader import UserAgentReader
            reader = UserAgentReader()
            self.__agents = reader.read(agentFile)
            if reader.error() != QXmlStreamReader.NoError:
                E5MessageBox.warning(
                    None,
                    self.tr("Loading user agent data"),
                    self.tr("""Error when loading user agent data on"""
                            """ line {0}, column {1}:\n{2}""")
                    .format(reader.lineNumber(),
                            reader.columnNumber(),
                            reader.errorString()))

        self.__loaded = True

    def __loadNonXml(self, agentFile):
        """
        Private method to load non-XML user agent files.

        This method is to convert from the old, non-XML format to the new
        XML based format.

        @param agentFile name of the non-XML user agent file (string)
        """
        if os.path.exists(agentFile):
            try:
                f = open(agentFile, "r", encoding="utf-8")
                lines = f.read()
                f.close()
            except IOError as err:
                E5MessageBox.critical(
                    None,
                    self.tr("Loading user agent data"),
                    self.tr("""<p>User agent data could not be loaded """
                            """from <b>{0}</b></p>"""
                            """<p>Reason: {1}</p>""")
                    .format(agentFile, str(err)))
                return

            # Legacy line format: "<host>@@<agent>"; blank lines and
            # '#' comment lines are skipped.
            for line in lines.splitlines():
                if not line or \
                   line.startswith("#") or \
                   "@@" not in line:
                    continue
                host, agent = line.split("@@", 1)
                self.__agents[host] = agent

            # the legacy file is removed after conversion
            os.remove(agentFile)

        self.__loaded = True

        # this does the conversion
        self.save()

    def reload(self):
        """
        Public method to reload the user agent settings.
        """
        if not self.__loaded:
            return

        self.__agents = {}
        self.__load()

    def close(self):
        """
        Public method to close the user agents manager.
        """
        self.__saveTimer.saveIfNeccessary()

    def removeUserAgent(self, host):
        """
        Public method to remove a user agent entry.

        @param host host name (string)
        """
        if host in self.__agents:
            del self.__agents[host]
            self.changed.emit()

    def allHostNames(self):
        """
        Public method to get a list of all host names we a user agent setting
        for.

        @return sorted list of all host names (list of strings)
        """
        if not self.__loaded:
            self.__load()

        return sorted(self.__agents.keys())

    def hostsCount(self):
        """
        Public method to get the number of available user agent settings.

        @return number of user agent settings (integer)
        """
        if not self.__loaded:
            self.__load()

        return len(self.__agents)

    def userAgent(self, host):
        """
        Public method to get the user agent setting for a host.

        @param host host name (string)
        @return user agent string (string)
        """
        if not self.__loaded:
            self.__load()

        if host not in self.__agents:
            return ""

        return self.__agents[host]

    def setUserAgent(self, host, agent):
        """
        Public method to set the user agent string for a host.

        @param host host name (string)
        @param agent user agent string (string)
        """
        # both values must be non-empty; empty strings are silently ignored
        if host != "" and agent != "":
            self.__agents[host] = agent
            self.changed.emit()

    def userAgentForUrl(self, url):
        """
        Public method to determine the user agent for the given URL.

        @param url URL to determine user agent for (QUrl)
        @return user agent string (string)
        """
        if url.isValid():
            host = url.host()
            return self.userAgent(host)

        return ""

    def setUserAgentForUrl(self, url, agent):
        """
        Public method to set the user agent string for an URL.

        @param url URL to register user agent setting for (QUrl)
        @param agent new current user agent string (string)
        """
        if url.isValid():
            host = url.host()
            self.setUserAgent(host, agent)
|
gpl-3.0
|
xq262144/hue
|
desktop/core/ext-py/Django-1.6.10/django/db/backends/dummy/base.py
|
114
|
2198
|
"""
Dummy database backend for Django.
Django uses this if the database ENGINE setting is empty (None or empty string).
Each of these API functions, except connection.close(), raises
ImproperlyConfigured.
"""
from django.core.exceptions import ImproperlyConfigured
from django.db.backends import *
from django.db.backends.creation import BaseDatabaseCreation
def complain(*args, **kwargs):
    """Stand-in callable for any backend operation the dummy backend
    cannot perform; always raises ImproperlyConfigured."""
    message = (
        "settings.DATABASES is improperly configured. "
        "Please supply the ENGINE value. Check "
        "settings documentation for more details."
    )
    raise ImproperlyConfigured(message)
def ignore(*args, **kwargs):
    """Stand-in callable that accepts any arguments and does nothing."""
    return None
class DatabaseError(Exception):
    """Generic database error raised by the dummy backend."""


class IntegrityError(DatabaseError):
    """Integrity-constraint error; a DatabaseError subclass per PEP 249."""
class DatabaseOperations(BaseDatabaseOperations):
    # Any attempt to quote an identifier raises ImproperlyConfigured.
    quote_name = complain
class DatabaseClient(BaseDatabaseClient):
    # Launching a database shell is impossible without an ENGINE.
    runshell = complain
class DatabaseCreation(BaseDatabaseCreation):
    # Test-database management is silently skipped so the test runner
    # can still tear down cleanly with a dummy backend configured.
    create_test_db = ignore
    destroy_test_db = ignore
class DatabaseIntrospection(BaseDatabaseIntrospection):
    # Every introspection entry point raises ImproperlyConfigured.
    get_table_list = complain
    get_table_description = complain
    get_relations = complain
    get_indexes = complain
    get_key_columns = complain
class DatabaseWrapper(BaseDatabaseWrapper):
    # No SQL is ever generated, so no lookup operators are defined.
    operators = {}
    # Override the base class implementations with null
    # implementations. Anything that tries to actually
    # do something raises complain; anything that tries
    # to rollback or undo something is silently ignored.
    _cursor = complain
    _commit = complain
    _rollback = ignore
    _close = ignore
    _savepoint = ignore
    _savepoint_commit = complain
    _savepoint_rollback = ignore
    _set_autocommit = complain
    set_dirty = complain
    set_clean = complain

    def __init__(self, *args, **kwargs):
        # Wire up the stub components so attribute access on the wrapper
        # (connection.ops, connection.client, ...) keeps working.
        super(DatabaseWrapper, self).__init__(*args, **kwargs)

        self.features = BaseDatabaseFeatures(self)
        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = BaseDatabaseValidation(self)

    def is_usable(self):
        # The dummy connection is always "usable"; real work fails later
        # with ImproperlyConfigured via the complain placeholders.
        return True
|
apache-2.0
|
lintzc/gpdb
|
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/filerep/Filerep_Resync/test_filerep_resync.py
|
9
|
4156
|
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tinctest
from tinctest.lib import local_path
from tinctest.models.scenario import ScenarioTestCase
from tinctest.main import TINCException
from mpp.lib.PSQL import PSQL
from mpp.lib.gprecoverseg import GpRecover
# Raised for failures specific to the filerep-resync scenario.
class FilerepResyncException(TINCException): pass
'''
Filerep Resync scenario
'''
class FilerepResync(ScenarioTestCase):
    """
    @description test cases for MPP-11167
    @created 2013-03-15 10:10:10
    @modified 2013-05-07 17:10:15
    @tags persistent tables schedule_filerep
    @product_version gpdb:
    """
    # NOTE: the docstring @tags above are tinc discovery metadata; do not
    # edit them casually.

    @classmethod
    def setUpClass(cls):
        super(FilerepResync,cls).setUpClass()
        tinctest.logger.info('Setting up the filerep resync test.')

    def wait_till_insync_transition(self):
        # Block until all mirrors have transitioned back to in-sync.
        self.gpr = GpRecover()
        self.gpr.wait_till_insync_transition()

    def test_filerep_resysnc(self):
        # Builds the scenario as an ordered list of steps; each appended
        # list is one step, and test cases within a list run concurrently.
        # (Method name keeps the historical 'resysnc' spelling.)

        #Step 1: Create an append-only table
        test_case_list1 = []
        test_case_list1.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.schema.SchemaTest.AOTable")
        self.test_case_scenario.append(test_case_list1)

        #Step 2:1 Begin a transaction & insert values into created table
        test_case_list2 = []
        test_case_list2.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.runsql.TransactionTest.Transaction")
        #Step 2:2 Start a concurrent process to kill all the mirror processes.
        #         It should start only after the begin & insert are performed
        test_case_list2.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.fault.FaultTest.ProcessKill")
        self.test_case_scenario.append(test_case_list2)

        #Step 3: Check the persistent table for duplicate entries
        test_case_list3 = []
        test_case_list3.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.schema.SchemaTest.DuplicateEntries.test_duplicate_entries_after_hitting_fault")
        self.test_case_scenario.append(test_case_list3)

        #Step 4: Perform incremental recovery
        test_case_list4 = []
        test_case_list4.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.fault.FaultTest.Recovery")
        self.test_case_scenario.append(test_case_list4)

        #Step 5: Check if the mirror segments are up or not
        test_case_list5 = []
        test_case_list5.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.fault.FaultTest.Health")
        self.test_case_scenario.append(test_case_list5)

        #Step 6: Re-check the persistent table for duplicate entries
        test_case_list6 = []
        test_case_list6.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.schema.SchemaTest.DuplicateEntries.test_duplicate_entries_after_recovery")
        self.test_case_scenario.append(test_case_list6)

        #Step 7: Check the Sate of DB and Cluster
        test_case_list7 = []
        test_case_list7.append("mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.check_catalog")
        self.test_case_scenario.append(test_case_list7)

        # Step 8: wait for mirrors to finish the resync transition
        test_case_list8 = []
        test_case_list8.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.test_filerep_resync.FilerepResync.wait_till_insync_transition")
        self.test_case_scenario.append(test_case_list8)

        # Step 9: verify primary/mirror integrity
        test_case_list9 = []
        test_case_list9.append("mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.check_mirrorintegrity")
        self.test_case_scenario.append(test_case_list9)
|
apache-2.0
|
apollo13/ansible
|
test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/inventory/statichost.py
|
44
|
2221
|
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
inventory: statichost
short_description: Add a single host
description: Add a single host
extends_documentation_fragment:
- inventory_cache
options:
plugin:
description: plugin name (must be statichost)
required: true
hostname:
description: Toggle display of stderr even when script was successful
required: True
'''
from ansible.errors import AnsibleParserError
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable
class InventoryModule(BaseInventoryPlugin, Cacheable):
    # Minimal inventory plugin used by the collections integration tests:
    # it adds exactly one host (from the 'hostname' option) and exercises
    # the inventory cache machinery.

    NAME = 'testns.content_adj.statichost'

    def __init__(self):
        super(InventoryModule, self).__init__()

        self._hosts = set()

    def verify_file(self, path):
        ''' Verify if file is usable by this plugin, base does minimal accessibility check '''

        # only accept files named *.statichost.yml / *.statichost.yaml
        if not path.endswith('.statichost.yml') and not path.endswith('.statichost.yaml'):
            return False
        return super(InventoryModule, self).verify_file(path)

    def parse(self, inventory, loader, path, cache=None):
        ''' Parse the inventory source and populate it with the configured host. '''

        super(InventoryModule, self).parse(inventory, loader, path)

        # Initialize and validate options
        self._read_config_data(path)

        # Exercise cache: read the host name from the plugin cache when
        # caching is enabled and a cached value exists; otherwise fall
        # back to the 'hostname' option and refresh the cache.
        cache_key = self.get_cache_key(path)
        attempt_to_read_cache = self.get_option('cache') and cache
        cache_needs_update = self.get_option('cache') and not cache

        if attempt_to_read_cache:
            try:
                host_to_add = self._cache[cache_key]
            except KeyError:
                # cache was enabled but entry is missing; repopulate below
                cache_needs_update = True
        if not attempt_to_read_cache or cache_needs_update:
            host_to_add = self.get_option('hostname')

        # this is where the magic happens
        self.inventory.add_host(host_to_add, 'all')
        self._cache[cache_key] = host_to_add

        # self.inventory.add_group()...
        # self.inventory.add_child()...
        # self.inventory.set_variable()..
|
gpl-3.0
|
csantoshc/Synapse-Detection
|
Tools/libsvm-3.12/libsvm-3.12/tools/easy.py
|
152
|
2699
|
#!/usr/bin/env python

# libsvm "easy" driver: scale the data, grid-search C and gamma via
# grid.py, train a model, and (optionally) scale + predict a test file.
# NOTE(review): written for Python 2 era tooling; under Python 3 the
# un-decoded Popen pipe yields bytes lines, so map(float, ...) on the
# grid.py output would need a text-mode pipe -- verify before porting.

import sys
import os
from subprocess import *

if len(sys.argv) <= 1:
    print('Usage: {0} training_file [testing_file]'.format(sys.argv[0]))
    raise SystemExit

# svm, grid, and gnuplot executable files

is_win32 = (sys.platform == 'win32')
if not is_win32:
    svmscale_exe = "../svm-scale"
    svmtrain_exe = "../svm-train"
    svmpredict_exe = "../svm-predict"
    grid_py = "./grid.py"
    gnuplot_exe = "/usr/bin/gnuplot"
else:
    # example for windows
    svmscale_exe = r"..\windows\svm-scale.exe"
    svmtrain_exe = r"..\windows\svm-train.exe"
    svmpredict_exe = r"..\windows\svm-predict.exe"
    gnuplot_exe = r"c:\tmp\gnuplot\binary\pgnuplot.exe"
    grid_py = r".\grid.py"

# Fail fast if any required tool is missing.
assert os.path.exists(svmscale_exe),"svm-scale executable not found"
assert os.path.exists(svmtrain_exe),"svm-train executable not found"
assert os.path.exists(svmpredict_exe),"svm-predict executable not found"
assert os.path.exists(gnuplot_exe),"gnuplot executable not found"
assert os.path.exists(grid_py),"grid.py not found"

train_pathname = sys.argv[1]
assert os.path.exists(train_pathname),"training file not found"
file_name = os.path.split(train_pathname)[1]
scaled_file = file_name + ".scale"
model_file = file_name + ".model"
range_file = file_name + ".range"

if len(sys.argv) > 2:
    test_pathname = sys.argv[2]
    file_name = os.path.split(test_pathname)[1]
    assert os.path.exists(test_pathname),"testing file not found"
    scaled_test_file = file_name + ".scale"
    predict_test_file = file_name + ".predict"

# Scale training data, saving the feature ranges for reuse on test data.
cmd = '{0} -s "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, train_pathname, scaled_file)
print('Scaling training data...')
Popen(cmd, shell = True, stdout = PIPE).communicate()

# Grid-search (C, gamma); grid.py's final stdout line is "c g rate".
cmd = '{0} -svmtrain "{1}" -gnuplot "{2}" "{3}"'.format(grid_py, svmtrain_exe, gnuplot_exe, scaled_file)
print('Cross validation...')
f = Popen(cmd, shell = True, stdout = PIPE).stdout

# Keep only the last non-empty line of grid.py's output.
line = ''
while True:
    last_line = line
    line = f.readline()
    if not line: break
c,g,rate = map(float,last_line.split())

print('Best c={0}, g={1} CV rate={2}'.format(c,g,rate))

# Train the final model with the best parameters.
cmd = '{0} -c {1} -g {2} "{3}" "{4}"'.format(svmtrain_exe,c,g,scaled_file,model_file)
print('Training...')
Popen(cmd, shell = True, stdout = PIPE).communicate()

print('Output model: {0}'.format(model_file))

if len(sys.argv) > 2:
    # Scale the test data with the training ranges, then predict.
    cmd = '{0} -r "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, test_pathname, scaled_test_file)
    print('Scaling testing data...')
    Popen(cmd, shell = True, stdout = PIPE).communicate()

    cmd = '{0} "{1}" "{2}" "{3}"'.format(svmpredict_exe, scaled_test_file, model_file, predict_test_file)
    print('Testing...')
    Popen(cmd, shell = True).communicate()

    print('Output prediction: {0}'.format(predict_test_file))
|
mit
|
Bismarrck/pymatgen
|
pymatgen/alchemy/transmuters.py
|
1
|
16738
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
"""
This module implements various transmuter classes.
Transmuters are essentially classes that generate TransformedStructures from
various data sources. They enable the high-throughput generation of new
structures and input files.
It also includes the helper function, batch_write_vasp_input to generate an
entire directory of vasp input files for running.
"""
from six.moves import filter, map
__author__ = "Shyue Ping Ong, Will Richards"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 4, 2012"
import os
import re

from multiprocessing import Pool

from pymatgen.alchemy.materials import TransformedStructure
# Merge conflict resolved: MPRelaxSet is kept because the post-merge
# batch_write_vasp_input signature uses it as a default argument.
from pymatgen.io.vasp.sets import MPRelaxSet
class StandardTransmuter(object):
"""
An example of a Transmuter object, which performs a sequence of
transformations on many structures to generate TransformedStructures.
.. attribute: transformed_structures
List of all transformed structures.
"""
def __init__(self, transformed_structures, transformations=None,
extend_collection=0, ncores=None):
"""
Initializes a transmuter from an initial list of
:class:`pymatgen.alchemy.materials.TransformedStructure`.
Args:
transformed_structures ([TransformedStructure]): Input transformed
structures
transformations ([Transformations]): New transformations to be
applied to all structures.
extend_collection (int): Whether to use more than one output
structure from one-to-many transformations. extend_collection
can be an int, which determines the maximum branching for each
transformation.
ncores (int): Number of cores to use for applying transformations.
Uses multiprocessing.Pool. Default is None, which implies
serial.
"""
self.transformed_structures = transformed_structures
self.ncores = ncores
if transformations is not None:
for trans in transformations:
self.append_transformation(trans,
extend_collection=extend_collection)
def __getitem__(self, index):
return self.transformed_structures[index]
def __getattr__(self, name):
return [getattr(x, name) for x in self.transformed_structures]
def undo_last_change(self):
"""
Undo the last transformation in the TransformedStructure.
Raises:
IndexError if already at the oldest change.
"""
for x in self.transformed_structures:
x.undo_last_change()
def redo_next_change(self):
"""
Redo the last undone transformation in the TransformedStructure.
Raises:
IndexError if already at the latest change.
"""
for x in self.transformed_structures:
x.redo_next_change()
def __len__(self):
return len(self.transformed_structures)
def append_transformation(self, transformation, extend_collection=False,
clear_redo=True):
"""
Appends a transformation to all TransformedStructures.
Args:
transformation: Transformation to append
extend_collection: Whether to use more than one output structure
from one-to-many transformations. extend_collection can be a
number, which determines the maximum branching for each
transformation.
clear_redo (bool): Whether to clear the redo list. By default,
this is True, meaning any appends clears the history of
undoing. However, when using append_transformation to do a
redo, the redo list should not be cleared to allow multiple
redos.
Returns:
List of booleans corresponding to initial transformed structures
each boolean describes whether the transformation altered the
structure
"""
if self.ncores and transformation.use_multiprocessing:
p = Pool(self.ncores)
# need to condense arguments into single tuple to use map
z = map(
lambda x: (x, transformation, extend_collection, clear_redo),
self.transformed_structures)
new_tstructs = p.map(_apply_transformation, z, 1)
self.transformed_structures = []
for ts in new_tstructs:
self.transformed_structures.extend(ts)
else:
new_structures = []
for x in self.transformed_structures:
new = x.append_transformation(transformation,
extend_collection,
clear_redo=clear_redo)
if new is not None:
new_structures.extend(new)
self.transformed_structures.extend(new_structures)
def extend_transformations(self, transformations):
"""
Extends a sequence of transformations to the TransformedStructure.
Args:
transformations: Sequence of Transformations
"""
for t in transformations:
self.append_transformation(t)
def apply_filter(self, structure_filter):
"""
Applies a structure_filter to the list of TransformedStructures
in the transmuter.
Args:
structure_filter: StructureFilter to apply.
"""
def test_transformed_structure(ts):
return structure_filter.test(ts.final_structure)
self.transformed_structures = list(filter(test_transformed_structure,
self.transformed_structures))
for ts in self.transformed_structures:
ts.append_filter(structure_filter)
<<<<<<< HEAD
def write_vasp_input(self, vasp_input_set, output_dir,
create_directory=True, subfolder=None,
include_cif=False):
=======
def write_vasp_input(self, **kwargs):
>>>>>>> a41cc069c865a5d0f35d0731f92c547467395b1b
"""
Batch write vasp input for a sequence of transformed structures to
output_dir, following the format output_dir/{formula}_{number}.
Args:
\\*\\*kwargs: All kwargs supported by batch_write_vasp_input.
"""
<<<<<<< HEAD
batch_write_vasp_input(self.transformed_structures, vasp_input_set,
output_dir, create_directory, subfolder,
include_cif)
=======
batch_write_vasp_input(self.transformed_structures, **kwargs)
>>>>>>> a41cc069c865a5d0f35d0731f92c547467395b1b
def set_parameter(self, key, value):
"""
Add parameters to the transmuter. Additional parameters are stored in
the as_dict() output.
Args:
key: The key for the parameter.
value: The value for the parameter.
"""
for x in self.transformed_structures:
x.other_parameters[key] = value
def add_tags(self, tags):
"""
Add tags for the structures generated by the transmuter.
Args:
tags: A sequence of tags. Note that this should be a sequence of
strings, e.g., ["My awesome structures", "Project X"].
"""
self.set_parameter("tags", tags)
def __str__(self):
output = ["Current structures", "------------"]
for x in self.transformed_structures:
output.append(str(x.final_structure))
return "\n".join(output)
def append_transformed_structures(self, tstructs_or_transmuter):
"""
Method is overloaded to accept either a list of transformed structures
or transmuter, it which case it appends the second transmuter"s
structures.
Args:
tstructs_or_transmuter: A list of transformed structures or a
transmuter.
"""
if isinstance(tstructs_or_transmuter, self.__class__):
self.transformed_structures.extend(tstructs_or_transmuter
.transformed_structures)
else:
for ts in tstructs_or_transmuter:
assert isinstance(ts, TransformedStructure)
self.transformed_structures.extend(tstructs_or_transmuter)
@staticmethod
def from_structures(structures, transformations=None, extend_collection=0):
"""
Alternative constructor from structures rather than
TransformedStructures.
Args:
structures: Sequence of structures
transformations: New transformations to be applied to all
structures
extend_collection: Whether to use more than one output structure
from one-to-many transformations. extend_collection can be a
number, which determines the maximum branching for each
transformation.
Returns:
StandardTransmuter
"""
tstruct = [TransformedStructure(s, []) for s in structures]
return StandardTransmuter(tstruct, transformations, extend_collection)
class CifTransmuter(StandardTransmuter):
    """
    A Transmuter initialized from a cif string that may contain one or
    more structures.
    """

    def __init__(self, cif_string, transformations=None, primitive=True,
                 extend_collection=False):
        """
        Args:
            cif_string: A string containing a cif or a series of cifs.
            transformations: New transformations to be applied to all
                structures.
            primitive: Whether to generate the primitive cell from the cif.
            extend_collection: Whether to use more than one output structure
                from one-to-many transformations. May be a number giving the
                maximum branching for each transformation.
        """
        # Partition the raw text into one chunk per "data_..." section;
        # anything before the first data marker is ignored.
        chunks = []
        collecting = False
        for raw_line in cif_string.split("\n"):
            if re.match(r"^\s*data", raw_line):
                chunks.append([])
                collecting = True
            if collecting:
                chunks[-1].append(raw_line)
        tstructs = [
            TransformedStructure.from_cif_string("\n".join(chunk), [],
                                                 primitive)
            for chunk in chunks
        ]
        super(CifTransmuter, self).__init__(tstructs, transformations,
                                            extend_collection)

    @staticmethod
    def from_filenames(filenames, transformations=None, primitive=True,
                       extend_collection=False):
        """
        Build a CifTransmuter from one or more cif files, each of which may
        itself contain multiple structures.

        Args:
            filenames: List of cif file names.
            transformations: New transformations to be applied to all
                structures.
            primitive: Same meaning as in __init__.
            extend_collection: Same meaning as in __init__.
        """
        contents = []
        for fname in filenames:
            with open(fname, "r") as f:
                contents.append(f.read())
        return CifTransmuter("\n".join(contents), transformations,
                             primitive=primitive,
                             extend_collection=extend_collection)
class PoscarTransmuter(StandardTransmuter):
    """
    A Transmuter seeded from a POSCAR string.

    Args:
        poscar_string: A single POSCAR string.
        transformations: New transformations to be applied to all
            structures.
        extend_collection: Whether to use more than one output structure
            from one-to-many transformations.
    """

    def __init__(self, poscar_string, transformations=None,
                 extend_collection=False):
        seed = TransformedStructure.from_poscar_string(poscar_string, [])
        super(PoscarTransmuter, self).__init__(
            [seed], transformations, extend_collection=extend_collection)

    @staticmethod
    def from_filenames(poscar_filenames, transformations=None,
                       extend_collection=False):
        """
        Convenience constructor building a POSCAR transmuter from a list of
        POSCAR file names.

        Args:
            poscar_filenames: List of POSCAR file names.
            transformations: New transformations to be applied to all
                structures.
            extend_collection: Same meaning as in __init__.
        """
        structs = []
        for fname in poscar_filenames:
            with open(fname, "r") as f:
                structs.append(
                    TransformedStructure.from_poscar_string(f.read(), []))
        return StandardTransmuter(structs, transformations,
                                  extend_collection=extend_collection)
def batch_write_vasp_input(transformed_structures, vasp_input_set, output_dir,
                           create_directory=True, subfolder=None,
                           include_cif=False, **kwargs):
    """
    Batch write vasp input for a sequence of transformed structures to
    output_dir, following the format output_dir/{group}/{formula}_{number}.

    (Fixed: this function previously contained unresolved git merge-conflict
    markers, which made the module unimportable. The conflict is resolved by
    keeping the original required-argument signature and adopting the
    incoming branch's **kwargs pass-through to write_vasp_input.)

    Args:
        transformed_structures: Sequence of TransformedStructures.
        vasp_input_set: pymatgen.io.vaspio_set.VaspInputSet used to create
            vasp input files from structures.
        output_dir: Directory to output files to.
        create_directory (bool): Create the directory if not present.
            Defaults to True.
        subfolder: Function to create subdirectory name from
            transformed_structure.
            e.g., lambda x: x.other_parameters["tags"][0] to use the first
            tag.
        include_cif (bool): Whether to output a CIF as well. CIF files are
            generally better supported in visualization programs.
        **kwargs: Additional keyword arguments forwarded to
            TransformedStructure.write_vasp_input.
    """
    for i, s in enumerate(transformed_structures):
        # Formula with whitespace removed, e.g. "Fe2 O3" -> "Fe2O3".
        formula = re.sub(r"\s+", "", s.final_structure.formula)
        if subfolder is not None:
            subdir = subfolder(s)
            dirname = os.path.join(output_dir, subdir,
                                   "{}_{}".format(formula, i))
        else:
            dirname = os.path.join(output_dir, "{}_{}".format(formula, i))
        s.write_vasp_input(vasp_input_set, dirname,
                           create_directory=create_directory, **kwargs)
        if include_cif:
            # Imported lazily so the CIF machinery is only loaded on demand.
            from pymatgen.io.cif import CifWriter
            writer = CifWriter(s.final_structure)
            writer.write_file(os.path.join(dirname, "{}.cif".format(formula)))
def _apply_transformation(inputs):
"""
Helper method for multiprocessing of apply_transformation. Must not be
in the class so that it can be pickled.
Args:
inputs: Tuple containing the transformed structure, the transformation
to be applied, a boolean indicating whether to extend the
collection, and a boolean indicating whether to clear the redo
Returns:
List of output structures (the modified initial structure, plus
any new structures created by a one-to-many transformation)
"""
ts, transformation, extend_collection, clear_redo = inputs
new = ts.append_transformation(transformation, extend_collection,
clear_redo=clear_redo)
o = [ts]
if new:
o.extend(new)
return o
|
mit
|
hrishioa/Aviato
|
flask/Lib/site-packages/setuptools/_backport/hashlib/_sha.py
|
77
|
9982
|
# -*- coding: iso-8859-1 -*-
"""A sample implementation of SHA-1 in pure Python.
Framework adapted from Dinu Gherman's MD5 implementation by
J. Hallén and L. Creighton. SHA-1 implementation based directly on
the text of the NIST standard FIPS PUB 180-1.
"""
__date__ = '2004-11-17'
__version__ = 0.91 # Modernised by J. Hallén and L. Creighton for Pypy
import struct, copy
# ======================================================================
# Bit-Manipulation helpers
#
# _long2bytes() was contributed by Barry Warsaw
# and is reused here with tiny modifications.
# ======================================================================
def _long2bytesBigEndian(n, blocksize=0):
    """Convert a long integer to a big-endian byte string.

    If optional blocksize is given and greater than zero, pad the front
    of the byte string with binary zeros so that the length is a multiple
    of blocksize.

    NOTE: relies on Python 2 semantics — struct.pack returns an 8-bit
    str there, so concatenating it with '' works; under Python 3 this
    would mix bytes and str and raise TypeError.
    """
    # After much testing, this algorithm was deemed to be the fastest.
    s = ''
    pack = struct.pack
    while n > 0:
        # Emit the low 32 bits and prepend, so the result is big-endian.
        s = pack('>I', n & 0xffffffff) + s
        n = n >> 32

    # Strip off leading zeros.
    for i in range(len(s)):
        if s[i] != '\000':
            break
    else:
        # Only happens when n == 0.
        s = '\000'
        i = 0

    s = s[i:]

    # Add back some pad bytes. This could be done more efficiently
    # w.r.t. the de-padding being done above, but sigh...
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * '\000' + s

    return s
def _bytelist2longBigEndian(list):
"Transform a list of characters into a list of longs."
imax = len(list) // 4
hl = [0] * imax
j = 0
i = 0
while i < imax:
b0 = ord(list[j]) << 24
b1 = ord(list[j+1]) << 16
b2 = ord(list[j+2]) << 8
b3 = ord(list[j+3])
hl[i] = b0 | b1 | b2 | b3
i = i+1
j = j+4
return hl
def _rotateLeft(x, n):
"Rotate x (32 bit) left n bits circularly."
return (x << n) | (x >> (32-n))
# ======================================================================
# The SHA transformation functions
#
# ======================================================================
def f0_19(B, C, D):
    """Rounds 0-19: "choose" — bits of B select between C and D."""
    return (B & C) | ((~ B) & D)


def f20_39(B, C, D):
    """Rounds 20-39: bitwise parity of the three words."""
    return B ^ C ^ D


def f40_59(B, C, D):
    """Rounds 40-59: bitwise majority of the three words."""
    return (B & C) | (B & D) | (C & D)


def f60_79(B, C, D):
    """Rounds 60-79: parity again (same as rounds 20-39)."""
    return B ^ C ^ D


# Round functions from FIPS PUB 180-1, indexed by t // 20.
f = [f0_19, f20_39, f40_59, f60_79]

# Per-round additive constants from FIPS PUB 180-1, indexed by t // 20.
K = [
    0x5A827999,  # ( 0 <= t <= 19)
    0x6ED9EBA1,  # (20 <= t <= 39)
    0x8F1BBCDC,  # (40 <= t <= 59)
    0xCA62C1D6,  # (60 <= t <= 79)
]
class sha:
    """An implementation of the SHA-1 hash function in pure Python.

    NOTE: relies on Python 2 str semantics throughout — the message is a
    sequence of 8-bit characters and the digest is returned as a str.
    (The class docstring previously said "MD5", a copy-paste leftover
    from the framework this file was adapted from.)
    """

    # Digest size in bytes (160 bits), mirroring the stdlib sha module API.
    digest_size = digestsize = 20
    block_size = 1

    def __init__(self):
        "Initialisation."

        # Initial message length in bits(!).
        self.length = 0
        # 64-bit count of message bits, kept as two 32-bit halves
        # [high, low].
        self.count = [0, 0]

        # Initial empty message as a sequence of bytes (8 bit characters).
        self.input = []

        # Call a separate init function, that can be used repeatedly
        # to start from scratch on the same object.
        self.init()

    def init(self):
        "Initialize the message-digest and set all fields to zero."

        self.length = 0
        self.input = []

        # Initial 160 bit message digest (5 times 32 bit), per FIPS 180-1.
        self.H0 = 0x67452301
        self.H1 = 0xEFCDAB89
        self.H2 = 0x98BADCFE
        self.H3 = 0x10325476
        self.H4 = 0xC3D2E1F0

    def _transform(self, W):
        """Run the SHA-1 compression function over one 16-word block W."""

        # Expand the 16 input words into the 80-word message schedule.
        for t in range(16, 80):
            W.append(_rotateLeft(
                W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1) & 0xffffffff)

        A = self.H0
        B = self.H1
        C = self.H2
        D = self.H3
        E = self.H4

        """
        This loop was unrolled to gain about 10% in speed
        for t in range(0, 80):
            TEMP = _rotateLeft(A, 5) + f[t/20] + E + W[t] + K[t/20]
            E = D
            D = C
            C = _rotateLeft(B, 30) & 0xffffffff
            B = A
            A = TEMP & 0xffffffff
        """

        # Rounds 0-19: "choose" function with constant K[0].
        for t in range(0, 20):
            TEMP = _rotateLeft(A, 5) + ((B & C) | ((~ B) & D)) + E + W[t] + K[0]
            E = D
            D = C
            C = _rotateLeft(B, 30) & 0xffffffff
            B = A
            A = TEMP & 0xffffffff

        # Rounds 20-39: parity function with constant K[1].
        for t in range(20, 40):
            TEMP = _rotateLeft(A, 5) + (B ^ C ^ D) + E + W[t] + K[1]
            E = D
            D = C
            C = _rotateLeft(B, 30) & 0xffffffff
            B = A
            A = TEMP & 0xffffffff

        # Rounds 40-59: majority function with constant K[2].
        for t in range(40, 60):
            TEMP = _rotateLeft(A, 5) + ((B & C) | (B & D) | (C & D)) + E + W[t] + K[2]
            E = D
            D = C
            C = _rotateLeft(B, 30) & 0xffffffff
            B = A
            A = TEMP & 0xffffffff

        # Rounds 60-79: parity function with constant K[3].
        for t in range(60, 80):
            TEMP = _rotateLeft(A, 5) + (B ^ C ^ D) + E + W[t] + K[3]
            E = D
            D = C
            C = _rotateLeft(B, 30) & 0xffffffff
            B = A
            A = TEMP & 0xffffffff

        # Fold the working variables back into the chaining state.
        self.H0 = (self.H0 + A) & 0xffffffff
        self.H1 = (self.H1 + B) & 0xffffffff
        self.H2 = (self.H2 + C) & 0xffffffff
        self.H3 = (self.H3 + D) & 0xffffffff
        self.H4 = (self.H4 + E) & 0xffffffff

    # Down from here all methods follow the Python Standard Library
    # API of the sha module.

    def update(self, inBuf):
        """Add to the current message.

        Update the sha object with the string arg. Repeated calls
        are equivalent to a single call with the concatenation of all
        the arguments, i.e. m.update(a); m.update(b) is equivalent
        to m.update(a+b).

        The hash is immediately calculated for all full blocks. The final
        calculation is made in digest(). It will calculate 1-2 blocks,
        depending on how much padding we have to add. This allows us to
        keep an intermediate value for the hash, so that we only need to
        make minimal recalculation if we call update() to add more data
        to the hashed string.
        """

        leninBuf = len(inBuf)

        # Compute number of bytes mod 64.
        index = (self.count[1] >> 3) & 0x3F

        # Update number of bits.
        self.count[1] = self.count[1] + (leninBuf << 3)
        if self.count[1] < (leninBuf << 3):
            # The low 32-bit counter wrapped; carry into the high word.
            self.count[0] = self.count[0] + 1
        self.count[0] = self.count[0] + (leninBuf >> 29)

        partLen = 64 - index

        if leninBuf >= partLen:
            # Complete the buffered partial block and hash it, then hash
            # every further full 64-byte block directly from inBuf.
            self.input[index:] = list(inBuf[:partLen])
            self._transform(_bytelist2longBigEndian(self.input))
            i = partLen
            while i + 63 < leninBuf:
                self._transform(_bytelist2longBigEndian(list(inBuf[i:i+64])))
                i = i + 64
            else:
                # while-else: runs when the loop exits normally; keeps the
                # remaining tail (< 64 bytes) buffered for later calls.
                self.input = list(inBuf[i:leninBuf])
        else:
            # Not enough data for a full block; just buffer it.
            i = 0
            self.input = self.input + list(inBuf)

    def digest(self):
        """Terminate the message-digest computation and return digest.

        Return the digest of the strings passed to the update()
        method so far. This is a 20-byte string which may contain
        non-ASCII characters, including null bytes.
        (Previously documented as "16-byte", another MD5 leftover.)
        """

        # Snapshot the state so that update() can continue afterwards.
        H0 = self.H0
        H1 = self.H1
        H2 = self.H2
        H3 = self.H3
        H4 = self.H4
        input = [] + self.input
        count = [] + self.count

        index = (self.count[1] >> 3) & 0x3f

        # Pad out to 56 bytes mod 64 (room for the 8-byte length field).
        if index < 56:
            padLen = 56 - index
        else:
            padLen = 120 - index

        padding = ['\200'] + ['\000'] * 63
        self.update(padding[:padLen])

        # Append length (before padding).
        bits = _bytelist2longBigEndian(self.input[:56]) + count

        self._transform(bits)

        # Store state in digest.
        digest = _long2bytesBigEndian(self.H0, 4) + \
                 _long2bytesBigEndian(self.H1, 4) + \
                 _long2bytesBigEndian(self.H2, 4) + \
                 _long2bytesBigEndian(self.H3, 4) + \
                 _long2bytesBigEndian(self.H4, 4)

        # Restore the pre-padding state so further update() calls work.
        self.H0 = H0
        self.H1 = H1
        self.H2 = H2
        self.H3 = H3
        self.H4 = H4
        self.input = input
        self.count = count

        return digest

    def hexdigest(self):
        """Terminate and return digest in HEX form.

        Like digest() except the digest is returned as a string of
        length 40, containing only hexadecimal digits. This may be
        used to exchange the value safely in email or other non-
        binary environments.
        (Previously documented as "length 32", an MD5 leftover.)
        """
        return ''.join(['%02x' % ord(c) for c in self.digest()])

    def copy(self):
        """Return a clone object.

        Return a copy ('clone') of the sha object. This can be used
        to efficiently compute the digests of strings that share
        a common initial substring.
        """

        return copy.deepcopy(self)
# ======================================================================
# Mimic Python top-level functions from standard library API
# for consistency with the _sha module of the standard library.
# ======================================================================
# These are mandatory variables in the module. They have constant values
# in the SHA standard.
# Mandatory module-level constants with the values fixed by the SHA
# standard, mirroring the stdlib _sha module API.
digest_size = 20
digestsize = 20
blocksize = 1


def new(arg=None):
    """Return a new sha crypto object.

    If arg is present, the method call update(arg) is made.
    """
    hasher = sha()
    if arg:
        hasher.update(arg)
    return hasher
if __name__ == "__main__":
    # Self-test against known SHA-1 test vectors (Python 2 only: update()
    # expects str input of 8-bit characters; see module docstring).
    a_str = "just a test string"

    # Empty-message digest, single message, and repeated message.
    assert 'da39a3ee5e6b4b0d3255bfef95601890afd80709' == new().hexdigest()
    assert '3f0cf2e3d9e5903e839417dfc47fed6bfa6457f6' == new(a_str).hexdigest()
    assert '0852b254078fe3772568a4aba37b917f3d4066ba' == new(a_str*7).hexdigest()

    # Incremental update must equal hashing the concatenation.
    s = new(a_str)
    s.update(a_str)
    assert '8862c1b50967f39d3db6bdc2877d9ccebd3102e5' == s.hexdigest()
|
gpl-2.0
|
rrooij/youtube-dl
|
youtube_dl/extractor/wdr.py
|
23
|
12523
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urlparse,
)
from ..utils import (
determine_ext,
ExtractorError,
js_to_json,
strip_jsonp,
try_get,
unified_strdate,
update_url_query,
urlhandle_detect_ext,
)
class WDRIE(InfoExtractor):
    """Extractor for single WDR media items addressed by their
    deviceids-medp JSONP metadata URL."""

    _VALID_URL = r'https?://deviceids-medp\.wdr\.de/ondemand/\d+/(?P<id>\d+)\.js'
    _GEO_COUNTRIES = ['DE']
    _TEST = {
        'url': 'http://deviceids-medp.wdr.de/ondemand/155/1557833.js',
        'info_dict': {
            'id': 'mdb-1557833',
            'ext': 'mp4',
            'title': 'Biathlon-Staffel verpasst Podest bei Olympia-Generalprobe',
            'upload_date': '20180112',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # The URL itself is a JSONP document; strip the callback wrapper.
        metadata = self._download_json(
            url, video_id, transform_source=strip_jsonp)

        is_live = metadata.get('mediaType') == 'live'

        tracker_data = metadata['trackerData']
        media_resource = metadata['mediaResource']

        formats = []

        # check if the metadata contains a direct URL to a file
        # NOTE(review): the loop below rebinds `media_resource` to each
        # sub-dict ('dflt'/'alt'), so the `captionURL` lookup after the
        # loop reads the LAST iterated sub-dict, not the original
        # mediaResource dict. Looks unintentional — confirm against the
        # actual WDR metadata layout before changing.
        for kind, media_resource in media_resource.items():
            if kind not in ('dflt', 'alt'):
                continue

            for tag_name, medium_url in media_resource.items():
                if tag_name not in ('videoURL', 'audioURL'):
                    continue

                # Dispatch on the manifest/file type of the medium URL.
                ext = determine_ext(medium_url)
                if ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        medium_url, video_id, 'mp4', 'm3u8_native',
                        m3u8_id='hls'))
                elif ext == 'f4m':
                    manifest_url = update_url_query(
                        medium_url, {'hdcore': '3.2.0', 'plugin': 'aasp-3.2.0.77.18'})
                    formats.extend(self._extract_f4m_formats(
                        manifest_url, video_id, f4m_id='hds', fatal=False))
                elif ext == 'smil':
                    formats.extend(self._extract_smil_formats(
                        medium_url, 'stream', fatal=False))
                else:
                    a_format = {
                        'url': medium_url
                    }
                    if ext == 'unknown_video':
                        # Probe the server to determine the real extension.
                        urlh = self._request_webpage(
                            medium_url, video_id, note='Determining extension')
                        ext = urlhandle_detect_ext(urlh)
                        a_format['ext'] = ext
                    formats.append(a_format)

        self._sort_formats(formats)

        subtitles = {}
        caption_url = media_resource.get('captionURL')
        if caption_url:
            subtitles['de'] = [{
                'url': caption_url,
                'ext': 'ttml',
            }]

        title = tracker_data['trackerClipTitle']

        return {
            'id': tracker_data.get('trackerClipId', video_id),
            'title': self._live_title(title) if is_live else title,
            'alt_title': tracker_data.get('trackerClipSubcategory'),
            'formats': formats,
            'subtitles': subtitles,
            'upload_date': unified_strdate(tracker_data.get('trackerClipAirTime')),
            'is_live': is_live,
        }
class WDRPageIE(InfoExtractor):
    """Extractor for WDR/sportschau/wdrmaus HTML pages; finds embedded
    players on the page and delegates each to WDRIE (or recurses into
    linked pages for playlist-style pages)."""

    _CURRENT_MAUS_URL = r'https?://(?:www\.)wdrmaus.de/(?:[^/]+/){1,2}[^/?#]+\.php5'
    _PAGE_REGEX = r'/(?:mediathek/)?(?:[^/]+/)*(?P<display_id>[^/]+)\.html'
    _VALID_URL = r'https?://(?:www\d?\.)?(?:wdr\d?|sportschau)\.de' + _PAGE_REGEX + '|' + _CURRENT_MAUS_URL

    _TESTS = [
        {
            'url': 'http://www1.wdr.de/mediathek/video/sendungen/doku-am-freitag/video-geheimnis-aachener-dom-100.html',
            # HDS download, MD5 is unstable
            'info_dict': {
                'id': 'mdb-1058683',
                'ext': 'flv',
                'display_id': 'doku-am-freitag/video-geheimnis-aachener-dom-100',
                'title': 'Geheimnis Aachener Dom',
                'alt_title': 'Doku am Freitag',
                'upload_date': '20160304',
                'description': 'md5:87be8ff14d8dfd7a7ee46f0299b52318',
                'is_live': False,
                'subtitles': {'de': [{
                    'url': 'http://ondemand-ww.wdr.de/medp/fsk0/105/1058683/1058683_12220974.xml',
                    'ext': 'ttml',
                }]},
            },
            'skip': 'HTTP Error 404: Not Found',
        },
        {
            'url': 'http://www1.wdr.de/mediathek/audio/wdr3/wdr3-gespraech-am-samstag/audio-schriftstellerin-juli-zeh-100.html',
            'md5': 'f4c1f96d01cf285240f53ea4309663d8',
            'info_dict': {
                'id': 'mdb-1072000',
                'ext': 'mp3',
                'display_id': 'wdr3-gespraech-am-samstag/audio-schriftstellerin-juli-zeh-100',
                'title': 'Schriftstellerin Juli Zeh',
                'alt_title': 'WDR 3 Gespräch am Samstag',
                'upload_date': '20160312',
                'description': 'md5:e127d320bc2b1f149be697ce044a3dd7',
                'is_live': False,
                'subtitles': {}
            },
            'skip': 'HTTP Error 404: Not Found',
        },
        {
            'url': 'http://www1.wdr.de/mediathek/video/live/index.html',
            'info_dict': {
                'id': 'mdb-1406149',
                'ext': 'mp4',
                'title': r're:^WDR Fernsehen im Livestream \(nur in Deutschland erreichbar\) [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
                'alt_title': 'WDR Fernsehen Live',
                'upload_date': '20150101',
                'is_live': True,
            },
            'params': {
                'skip_download': True,  # m3u8 download
            },
        },
        {
            'url': 'http://www1.wdr.de/mediathek/video/sendungen/aktuelle-stunde/aktuelle-stunde-120.html',
            'playlist_mincount': 7,
            'info_dict': {
                'id': 'aktuelle-stunde-120',
            },
        },
        {
            'url': 'http://www.wdrmaus.de/aktuelle-sendung/index.php5',
            'info_dict': {
                'id': 'mdb-1552552',
                'ext': 'mp4',
                'upload_date': 're:^[0-9]{8}$',
                'title': 're:^Die Sendung mit der Maus vom [0-9.]{10}$',
            },
            'skip': 'The id changes from week to week because of the new episode'
        },
        {
            'url': 'http://www.wdrmaus.de/filme/sachgeschichten/achterbahn.php5',
            'md5': '803138901f6368ee497b4d195bb164f2',
            'info_dict': {
                'id': 'mdb-186083',
                'ext': 'mp4',
                'upload_date': '20130919',
                'title': 'Sachgeschichte - Achterbahn ',
            },
        },
        {
            'url': 'http://www1.wdr.de/radio/player/radioplayer116~_layout-popupVersion.html',
            # Live stream, MD5 unstable
            'info_dict': {
                'id': 'mdb-869971',
                'ext': 'mp4',
                'title': r're:^COSMO Livestream [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
                'upload_date': '20160101',
            },
            'params': {
                'skip_download': True,  # m3u8 download
            }
        },
        {
            'url': 'http://www.sportschau.de/handballem2018/handball-nationalmannschaft-em-stolperstein-vorrunde-100.html',
            'info_dict': {
                'id': 'mdb-1556012',
                'ext': 'mp4',
                'title': 'DHB-Vizepräsident Bob Hanning - "Die Weltspitze ist extrem breit"',
                'upload_date': '20180111',
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.sportschau.de/handballem2018/audio-vorschau---die-handball-em-startet-mit-grossem-favoritenfeld-100.html',
            'only_matching': True,
        }
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        display_id = mobj.group('display_id')
        webpage = self._download_webpage(url, display_id)

        entries = []

        # Article with several videos
        #
        # for wdr.de the data-extension is in a tag with the class "mediaLink"
        # for wdr.de radio players, in a tag with the class "wdrrPlayerPlayBtn"
        # for wdrmaus, in a tag with the class "videoButton" (previously a link
        # to the page in a multiline "videoLink"-tag)
        for mobj in re.finditer(
            r'''(?sx)class=
                    (?:
                        (["\'])(?:mediaLink|wdrrPlayerPlayBtn|videoButton)\b.*?\1[^>]+|
                        (["\'])videoLink\b.*?\2[\s]*>\n[^\n]*
                    )data-extension=(["\'])(?P<data>(?:(?!\3).)+)\3
                    ''', webpage):
            # data-extension holds relaxed JSON; normalize it before parsing.
            media_link_obj = self._parse_json(
                mobj.group('data'), display_id, transform_source=js_to_json,
                fatal=False)
            if not media_link_obj:
                continue
            jsonp_url = try_get(
                media_link_obj, lambda x: x['mediaObj']['url'], compat_str)
            if jsonp_url:
                entries.append(self.url_result(jsonp_url, ie=WDRIE.ie_key()))

        # Playlist (e.g. https://www1.wdr.de/mediathek/video/sendungen/aktuelle-stunde/aktuelle-stunde-120.html)
        if not entries:  # Playlist page
            entries = [
                self.url_result(
                    compat_urlparse.urljoin(url, mobj.group('href')),
                    ie=WDRPageIE.ie_key())
                for mobj in re.finditer(
                    r'<a[^>]+\bhref=(["\'])(?P<href>(?:(?!\1).)+)\1[^>]+\bdata-extension=',
                    webpage) if re.match(self._PAGE_REGEX, mobj.group('href'))
            ]

        return self.playlist_result(entries, playlist_id=display_id)
class WDRElefantIE(InfoExtractor):
    """Extractor for the wdrmaus.de "Elefantenseite"; resolves the URL
    fragment against the site's table of contents and hands the resulting
    mediaObj URL to WDRIE."""

    _VALID_URL = r'https?://(?:www\.)wdrmaus\.de/elefantenseite/#(?P<id>.+)'
    _TEST = {
        'url': 'http://www.wdrmaus.de/elefantenseite/#folge_ostern_2015',
        'info_dict': {
            'title': 'Folge Oster-Spezial 2015',
            'id': 'mdb-1088195',
            'ext': 'mp4',
            'age_limit': None,
            'upload_date': '20150406'
        },
        'params': {
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)

        # Table of Contents seems to always be at this address, so fetch it
        # directly instead of going through configurationJS.php5, which the
        # website itself fetches first and which links here.
        toc = self._download_json(
            'https://www.wdrmaus.de/elefantenseite/data/tableOfContentsJS.php5',
            display_id)
        if display_id not in toc:
            raise ExtractorError(
                'No entry in site\'s table of contents for this URL. '
                'Is the fragment part of the URL (after the #) correct?',
                expected=True)

        xml_path = toc[display_id]['xmlPath']
        movie_xml = self._download_xml(
            'https://www.wdrmaus.de/elefantenseite/' + xml_path,
            display_id)
        zmdb_node = movie_xml.find('./movie/zmdb_url')
        if zmdb_node is None:
            raise ExtractorError(
                '%s is not a video' % display_id, expected=True)

        return self.url_result(zmdb_node.text, ie=WDRIE.ie_key())
class WDRMobileIE(InfoExtractor):
    """Extractor for WDR mobile on-demand URLs. All required metadata
    (id, title, age limit) is encoded in the URL itself, so no page
    download is performed."""

    _VALID_URL = r'''(?x)
        https?://mobile-ondemand\.wdr\.de/
        .*?/fsk(?P<age_limit>[0-9]+)
        /[0-9]+/[0-9]+/
        (?P<id>[0-9]+)_(?P<title>[0-9]+)'''
    IE_NAME = 'wdr:mobile'
    _TEST = {
        'url': 'http://mobile-ondemand.wdr.de/CMS2010/mdb/ondemand/weltweit/fsk0/42/421735/421735_4283021.mp4',
        'info_dict': {
            'title': '4283021',
            'id': '421735',
            'ext': 'mp4',
            'age_limit': 0,
        },
        'skip': 'Problems with loading data.'
    }

    def _real_extract(self, url):
        matched = re.match(self._VALID_URL, url)
        info = {
            'id': matched.group('id'),
            'title': matched.group('title'),
            'age_limit': int(matched.group('age_limit')),
            'url': url,
            # The server only serves the file to mobile user agents.
            'http_headers': {
                'User-Agent': 'mobile',
            },
        }
        return info
|
unlicense
|
spectra-cluster/spectra-cluster-py
|
spectra_cluster/ui/clustering_stats.py
|
1
|
4988
|
"""clustering_stats
Extracts basic statistics (ie. number of clusters, incorrectly clustered spectra) from .clustering files.
This script only creates meaningful results if the .clustering file contains identification data which
will be used to evaluate correctly and incorrectly clustered spectra.
Usage:
clustering_stats --output <stats.tsv> --min_size <3> CLUSTERING_FILE...
clustering_stats (--help | --usage)
Options:
-o, --output <stats.tsv> Name of the result file to be created.
-s, --min_size <3> The minimum size a cluster must have to be evaluated [default: 3]
-h, --help Show this help message.
--usage Show usage information
"""
import os
import sys
from docopt import docopt
# make the spectra_cluster packages available
sys.path.insert(0, os.path.abspath('..') + os.path.sep + "..")
import spectra_cluster.clustering_parser
class ClusteringStatistics:
    """Plain value object holding the basic statistics extracted from a
    single .clustering file."""

    def __init__(self, filename, min_size, total_clusters, total_spectra,
                 identified_spectra, clustered_spectra, correct_spectra,
                 incorrect_spectra, min_size_clusters,
                 clustered_identified_spectra):
        # Source file and the minimum cluster size used for evaluation.
        self.filename = filename
        self.min_size = min_size
        # Global counts over the whole file.
        self.total_spectra = total_spectra
        self.total_clusters = total_clusters
        self.identified_spectra = identified_spectra
        # Counts restricted to clusters of at least min_size identified
        # spectra.
        self.clustered_spectra = clustered_spectra
        self.correct_spectra = correct_spectra
        self.incorrect_spectra = incorrect_spectra
        self.min_size_clusters = min_size_clusters
        self.clustered_identified_spectra = clustered_identified_spectra
def determine_clustering_accuracy(clustering_file, min_size=3):
    """
    Determine the clustering accuracy and number of clustered spectra
    in the analysed .clustering file.

    :param clustering_file: The .clustering file to process
    :param min_size: The minimum number of identified spectra a cluster
        must have to be evaluated
    :return: A ClusteringStatistics object with the accumulated counts
    """
    parser = spectra_cluster.clustering_parser.ClusteringParser(clustering_file)

    n_clusters = 0
    n_spectra = 0
    n_identified = 0
    n_clustered = 0
    n_clustered_identified = 0
    n_correct = 0
    n_incorrect = 0
    n_min_size_clusters = 0

    # Accumulate per-cluster counts; only sufficiently large clusters
    # contribute to the correct/incorrect tallies.
    for cluster in parser:
        n_clusters += 1
        n_spectra += cluster.n_spectra
        n_identified += cluster.identified_spectra

        if cluster.identified_spectra >= min_size:
            n_min_size_clusters += 1
            n_clustered += cluster.n_spectra
            n_clustered_identified += cluster.identified_spectra
            # max_il_ratio is the fraction of identified spectra that agree
            # with the cluster's majority identification.
            n_correct += cluster.max_il_ratio * cluster.identified_spectra
            n_incorrect += (1 - cluster.max_il_ratio) * cluster.identified_spectra

    return ClusteringStatistics(
        filename=clustering_file, min_size=min_size,
        total_clusters=n_clusters, total_spectra=n_spectra,
        identified_spectra=n_identified, clustered_spectra=n_clustered,
        correct_spectra=n_correct, incorrect_spectra=n_incorrect,
        min_size_clusters=n_min_size_clusters,
        clustered_identified_spectra=n_clustered_identified)
def main():
    """Command line entry point: parse arguments, process every
    .clustering file and write the tab-separated result table."""
    args = docopt(__doc__)
    result_file = args["--output"]
    min_size = int(args["--min_size"])

    # Refuse to overwrite an existing result file.
    if os.path.isfile(result_file):
        print("Error: Output file exists")
        sys.exit(1)

    # Create the clustering statistics, one entry per input file.
    stats = []
    for clustering_file in args["CLUSTERING_FILE"]:
        print("Processing " + clustering_file + "...")
        stats.append(determine_clustering_accuracy(clustering_file,
                                                   min_size=min_size))

    # Write the result table (header + one row per processed file).
    columns = ["filename", "min_size", "total_clusters", "total_spectra",
               "identified_spectra", "clustered_spectra",
               "clustered_identified_spectra", "correct_spectra",
               "incorrect_spectra", "min_sized_clusters"]
    with open(result_file, "w") as writer:
        writer.write("\t".join(columns) + "\n")

        for result in stats:
            row = [result.filename,
                   str(result.min_size),
                   str(result.total_clusters),
                   str(result.total_spectra),
                   str(result.identified_spectra),
                   str(result.clustered_spectra),
                   str(result.clustered_identified_spectra),
                   str(result.correct_spectra),
                   str(result.incorrect_spectra),
                   str(result.min_size_clusters)]
            writer.write("\t".join(row) + "\n")

    print("Results written to " + result_file)


if __name__ == "__main__":
    main()
|
apache-2.0
|
benjaminrigaud/django
|
django/utils/numberformat.py
|
337
|
1775
|
from django.conf import settings
from django.utils.safestring import mark_safe
from django.utils import six
def format(number, decimal_sep, decimal_pos=None, grouping=0, thousand_sep='',
           force_grouping=False):
    """
    Get a number (as a number or string) and return it as a string,
    using formats defined as arguments:

    * decimal_sep: Decimal separator symbol (for example ".")
    * decimal_pos: Number of decimal positions
    * grouping: Number of digits in every group limited by thousand separator
    * thousand_sep: Thousand separator symbol (for example ",")
    """
    use_grouping = (settings.USE_L10N and settings.USE_THOUSAND_SEPARATOR) \
        or force_grouping
    use_grouping = use_grouping and grouping > 0
    # Make the common case fast: plain ints with no grouping and no fixed
    # decimal positions need no string surgery at all.
    if isinstance(number, int) and not use_grouping and not decimal_pos:
        return mark_safe(six.text_type(number))

    text = six.text_type(number)

    # Split off a leading minus sign, if any.
    sign = ''
    if text.startswith('-'):
        sign, text = '-', text[1:]

    # Separate the integer and decimal parts.
    if '.' in text:
        int_part, dec_part = text.split('.')
        if decimal_pos is not None:
            # Truncate (not round) to the requested number of decimals.
            dec_part = dec_part[:decimal_pos]
    else:
        int_part, dec_part = text, ''
    if decimal_pos is not None:
        # Zero-pad the decimal part up to the requested width.
        dec_part += '0' * (decimal_pos - len(dec_part))
    if dec_part:
        dec_part = decimal_sep + dec_part

    # Insert the thousand separator every `grouping` digits, counting from
    # the right: chunk the reversed digit string, join, and reverse back.
    if use_grouping:
        reversed_digits = int_part[::-1]
        chunks = [reversed_digits[i:i + grouping]
                  for i in range(0, len(reversed_digits), grouping)]
        int_part = thousand_sep.join(chunks)[::-1]

    return sign + int_part + dec_part
|
bsd-3-clause
|
alon/servo
|
tests/wpt/web-platform-tests/cors/resources/status.py
|
220
|
1223
|
def main(request, response):
    """CORS test helper: echo request details into response headers and
    return the status code / body requested via query parameters.

    (Fixed: the preflight code parsing used ``except KeyError, ValueError:``
    — Python-2-only syntax that is a SyntaxError on Python 3 and, on
    Python 2, only caught KeyError while rebinding the name ValueError.)
    """
    response.headers.set("Access-Control-Allow-Origin", request.headers.get("origin"))
    response.headers.set("Access-Control-Expose-Headers", "X-Request-Method")

    if request.method == 'OPTIONS':
        response.headers.set("Access-Control-Allow-Methods", "GET, CHICKEN, HEAD, POST, PUT")
        if 'headers' in request.GET:
            response.headers.set("Access-Control-Allow-Headers", request.GET.first('headers'))
    response.headers.set("X-Request-Method", request.method)
    response.headers.set("X-A-C-Request-Method", request.headers.get("Access-Control-Request-Method", ""))

    # This should reasonably work for most response codes.
    try:
        code = int(request.GET.first("code", 200))
    except ValueError:
        code = 200

    text = request.GET.first("text", "OMG")

    if request.method == "OPTIONS" and "preflight" in request.GET:
        try:
            code = int(request.GET.first('preflight'))
        except (KeyError, ValueError):
            # Missing or non-numeric preflight code: keep the default.
            pass

    status = code, text

    if "type" in request.GET:
        response.headers.set("Content-Type", request.GET.first('type'))

    body = request.GET.first('content', "")

    return status, [], body
|
mpl-2.0
|
tdhopper/scikit-learn
|
examples/linear_model/plot_ols_ridge_variance.py
|
387
|
2060
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squared function. The penalising `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression
"""
print(__doc__)


# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn import linear_model

# Two 1-D training points and the x-range used for the prediction lines.
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T

# Fixed seed so the noisy resamples below are reproducible.
np.random.seed(0)

# The two estimators being compared: plain OLS vs. an L2-penalized fit.
classifiers = dict(ols=linear_model.LinearRegression(),
                   ridge=linear_model.Ridge(alpha=.1))

fignum = 1
for name, clf in classifiers.items():
    fig = plt.figure(fignum, figsize=(4, 3))
    plt.clf()
    plt.title(name)
    ax = plt.axes([.12, .12, .8, .8])

    # Fit six noisy resamples of the training data; the spread of the grey
    # lines visualizes the variance of each estimator.
    for _ in range(6):
        this_X = .1 * np.random.normal(size=(2, 1)) + X_train
        clf.fit(this_X, y_train)

        ax.plot(X_test, clf.predict(X_test), color='.5')
        ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)

    # Fit on the noise-free training data for the reference (blue) line.
    clf.fit(X_train, y_train)
    ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
    ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)

    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_ylim((0, 1.6))
    ax.set_xlabel('X')
    ax.set_ylabel('y')
    ax.set_xlim(0, 2)
    fignum += 1

plt.show()
|
bsd-3-clause
|
seanli9jan/tensorflow
|
tensorflow/python/kernel_tests/decode_bmp_op_test.py
|
8
|
3135
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeBmpOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.platform import test
class DecodeBmpOpTest(test.TestCase):
  """Tests decode_bmp against hand-encoded BMP byte streams."""

  def testex1(self):
    """Decode a 2x2 24-bit BMP and compare against the expected pixels."""
    # Expected decoded pixels (rows top-to-bottom, BGR byte order as
    # stored in the file — TODO confirm channel order against decode_bmp).
    img_bytes = [[[0, 0, 255], [0, 255, 0]], [[255, 0, 0], [255, 255, 255]]]
    # Encoded BMP bytes from Wikipedia
    encoded_bytes = [
        0x42, 0x40,          # file header magic
        0x46, 0, 0, 0,       # file size
        0, 0,                # reserved
        0, 0,                # reserved
        0x36, 0, 0, 0,       # pixel-data offset (54)
        0x28, 0, 0, 0,       # DIB header size (40 = BITMAPINFOHEADER)
        0x2, 0, 0, 0,        # width = 2
        0x2, 0, 0, 0,        # height = 2
        0x1, 0,              # planes
        0x18, 0,             # bits per pixel = 24
        0, 0, 0, 0,          # compression (none)
        0x10, 0, 0, 0,       # image size
        0x13, 0xb, 0, 0,     # horizontal resolution
        0x13, 0xb, 0, 0,     # vertical resolution
        0, 0, 0, 0,          # palette size
        0, 0, 0, 0,          # important colors
        # Pixel rows (bottom-up), each padded to a 4-byte boundary.
        0, 0, 0xff,
        0xff, 0xff, 0xff,
        0, 0,
        0xff, 0, 0,
        0, 0xff, 0,
        0, 0,
    ]

    byte_string = bytes(bytearray(encoded_bytes))
    img_in = constant_op.constant(byte_string, dtype=dtypes.string)
    # decode_bmp returns shape (h, w, channels); squeeze drops no dims here
    # for 3-channel data but keeps the test uniform.
    decode = array_ops.squeeze(image_ops.decode_bmp(img_in))

    with self.cached_session():
      decoded = decode.eval()
      self.assertAllEqual(decoded, img_bytes)

  def testGrayscale(self):
    """Decode a 2x2 8-bit (grayscale) BMP."""
    img_bytes = [[[255], [0]], [[255], [0]]]
    # Same layout as above: file header, 40-byte DIB header (width=2,
    # height=2, 8 bits per pixel), then bottom-up padded pixel rows.
    encoded_bytes = [
        0x42, 0x40,          # file header magic
        0x3d, 0, 0, 0,       # file size
        0, 0,                # reserved
        0, 0,                # reserved
        0x36, 0, 0, 0,       # pixel-data offset
        0x28, 0, 0, 0,       # DIB header size
        0x2, 0, 0, 0,        # width = 2
        0x2, 0, 0, 0,        # height = 2
        0x1, 0,              # planes
        0x8, 0,              # bits per pixel = 8
        0, 0, 0, 0,          # compression (none)
        0x10, 0, 0, 0,       # image size
        0x13, 0xb, 0, 0,     # horizontal resolution
        0x13, 0xb, 0, 0,     # vertical resolution
        0, 0, 0, 0,          # palette size
        0, 0, 0, 0,          # important colors
        # Pixel rows (bottom-up), padded to 4 bytes.
        0xff, 0, 0, 0,
        0xff, 0, 0, 0,
    ]

    byte_string = bytes(bytearray(encoded_bytes))
    img_in = constant_op.constant(byte_string, dtype=dtypes.string)
    decode = image_ops.decode_bmp(img_in)

    with self.cached_session():
      decoded = decode.eval()
      self.assertAllEqual(decoded, img_bytes)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
LinuxChristian/home-assistant
|
homeassistant/components/scene/__init__.py
|
10
|
3131
|
"""
Allow users to set and activate scenes.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/scene/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_PLATFORM, SERVICE_TURN_ON)
from homeassistant.loader import bind_hass
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.state import HASS_DOMAIN
from homeassistant.loader import get_platform
DOMAIN = 'scene'  # component domain
STATE = 'scening'  # constant state reported by Scene entities
STATES = 'states'  # config key holding the target entity states
def _hass_domain_validator(config):
"""Validate platform in config for homeassistant domain."""
if CONF_PLATFORM not in config:
config = {
CONF_PLATFORM: HASS_DOMAIN, STATES: config}
return config
def _platform_validator(config):
    """Run the platform's own PLATFORM_SCHEMA over the config, if any."""
    platform = get_platform(DOMAIN, config[CONF_PLATFORM])
    if not hasattr(platform, 'PLATFORM_SCHEMA'):
        # Platform defines no schema of its own; accept config as-is.
        return config
    return platform.PLATFORM_SCHEMA(config)
# Platform configs are normalized first (bare state mappings are wrapped
# into a homeassistant-platform entry by _hass_domain_validator), then the
# platform name is validated, then the platform's own schema is applied.
PLATFORM_SCHEMA = vol.Schema(
    vol.All(
        _hass_domain_validator,
        vol.Schema({
            vol.Required(CONF_PLATFORM): cv.platform_validator(DOMAIN)
        }, extra=vol.ALLOW_EXTRA),
        _platform_validator
    ), extra=vol.ALLOW_EXTRA)
# Service calls may target specific scene entities via entity_id.
SCENE_SERVICE_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
@bind_hass
def activate(hass, entity_id=None):
    """Activate the scene(s) matched by entity_id (all scenes if omitted)."""
    service_data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
    hass.services.call(DOMAIN, SERVICE_TURN_ON, service_data)
@asyncio.coroutine
def async_setup(hass, config):
    """Set up the scenes."""
    logger = logging.getLogger(__name__)
    component = EntityComponent(logger, DOMAIN, hass)
    # Load all configured scene platforms and create their entities.
    yield from component.async_setup(config)
    @asyncio.coroutine
    def async_handle_scene_service(service):
        """Handle calls to the switch services."""
        # Resolve the optional entity_id filter to concrete Scene entities.
        target_scenes = component.async_extract_from_service(service)
        tasks = [scene.async_activate() for scene in target_scenes]
        if tasks:
            # Activate all targeted scenes concurrently.
            yield from asyncio.wait(tasks, loop=hass.loop)
    hass.services.async_register(
        DOMAIN, SERVICE_TURN_ON, async_handle_scene_service,
        schema=SCENE_SERVICE_SCHEMA)
    return True
class Scene(Entity):
    """A scene is a group of entities and the states we want them to be."""

    @property
    def state(self):
        """Every scene reports the fixed 'scening' state."""
        return STATE

    @property
    def should_poll(self):
        """Scenes have a static state, so polling is unnecessary."""
        return False

    def activate(self):
        """Activate scene. Try to get entities into requested state."""
        raise NotImplementedError()

    def async_activate(self):
        """Activate scene. Try to get entities into requested state.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(self.activate)
|
apache-2.0
|
ScorpionResponse/freelancefinder
|
freelancefinder/notifications/models.py
|
1
|
5186
|
"""Define the notification type and track sent notifications."""
import logging
from smtplib import SMTPException
from future.utils import python_2_unicode_compatible
from model_utils import Choices
from model_utils.fields import MonitorField
from model_utils.models import TimeStampedModel
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.db import models
from django.template import Context, Template, loader
# Module-level logger shared by the models below.
logger = logging.getLogger(__name__)
@python_2_unicode_compatible
class Message(models.Model):
    """Define a message to the user."""
    # Unique URL path fragment used to look a message up; see help_text
    # for the expected format.
    url = models.CharField(max_length=100, db_index=True, unique=True, help_text="URL path starts with 'notifications/ and should have a trailing slash /. Eg. 'welcome/'")
    # subject/email_body/slack_body store Django template source rendered
    # with a {'user', 'message'} context (see Notification.get_email_message).
    subject = models.CharField(max_length=300, help_text="Can contain django template variables 'user' and 'message'")
    email_body = models.TextField(blank=True, help_text="Can contain django template variables 'user' and 'message'")
    slack_body = models.TextField(blank=True, help_text="Can contain django template variables 'user' and 'message'")
    def __str__(self):
        """Representation for a Message."""
        return u"<Message ID:{}; URL:{}; Subject:{}>".format(self.pk, self.url, self.subject)
@python_2_unicode_compatible
class Notification(models.Model):
    """Define a pending notification."""
    # Closed set of notification categories; one_time is the default.
    TYPES = Choices('signup', 'welcome_package', 'one_time', 'expiration')
    notification_type = models.CharField(choices=TYPES, default=TYPES.one_time, max_length=50)
    # SET_NULL keeps the notification row alive if the message is deleted.
    message = models.ForeignKey(Message, on_delete=models.SET_NULL, null=True, related_name="notifications")
    # user may be blank: a user-less notification is fanned out by
    # schedule_for_all_users() below.
    user = models.ForeignKey(User, on_delete=models.SET_NULL, blank=True, null=True, related_name="notifications")
    def __str__(self):
        """Representation for a Notification."""
        return u"<Notification ID:{}; Type:{}; User: {}; Message:{}>".format(self.pk, self.notification_type, self.user, self.message)
    def get_email_message(self, user=None):
        """Render the templates to create the message.

        Returns a (subject, html_message, txt_message) tuple for *user*
        (defaults to self.user).  Raises RuntimeError when no user is
        given and self.user is unset.
        """
        if user is None:
            if self.user is None:
                raise RuntimeError('No user defined for this message.')
            user = self.user
        # Subject and email body come from template source stored on the
        # related Message, all rendered with the same context.
        subject_template = Template(self.message.subject)
        subject_context = Context({'user': user, 'message': self.message})
        subject = subject_template.render(subject_context)
        email_template = Template(self.message.email_body)
        email_context = Context({'user': user, 'message': self.message})
        email_message = email_template.render(email_context)
        # The html template context must be the same as in
        # notifications.views.NotificationView.get_context_data
        html_template = loader.get_template('notifications/base.html')
        html_context = {'user': user, 'message': self.message, 'subject': subject, 'email_message': email_message}
        html_message = html_template.render(html_context)
        txt_template = loader.get_template('notifications/base.txt')
        txt_context = {'user': user, 'message': self.message, 'subject': subject, 'email_message': email_message}
        txt_message = txt_template.render(txt_context)
        return (subject, html_message, txt_message)
    def schedule_for_all_users(self):
        """Send this notification to all users.

        NOTE(review): despite the name, when no specific user is set this
        only schedules members of the "Paid" group; with a user set, only
        that user is scheduled.
        """
        if self.user:
            self.history.create(user=self.user)
        else:
            for user in User.objects.filter(groups__name="Paid"):
                self.history.create(user=user)
@python_2_unicode_compatible
class NotificationHistory(TimeStampedModel):
    """Tracks whether each user has received a notification."""
    # Added by TimeStampedModel
    # created = models.DateTimeField(auto_now_add=True)
    # modified = models.DateTimeField(auto_now=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="notification_history")
    notification = models.ForeignKey(Notification, on_delete=models.CASCADE, related_name="history")
    sent = models.BooleanField(default=False)
    # Records the timestamp automatically when `sent` flips to True.
    sent_at = MonitorField(monitor='sent', when=[True])
    def __str__(self):
        """Representation for a NotificationHistory."""
        return u"<NotificationHistory ID:{}; User:{}; Notification:{}; Sent:{}>".format(self.pk, self.user, self.notification, self.sent)
    def send(self):
        """Send a notification.

        Emails the rendered notification to this row's user and marks the
        row sent on success.  An SMTPException is logged and swallowed,
        leaving `sent` False so the send can be retried later.
        """
        (subject, html_message, txt_message) = self.notification.get_email_message(self.user)
        try:
            send_mail(
                subject=subject,
                message=txt_message,
                from_email=settings.DEFAULT_FROM_EMAIL,
                recipient_list=[self.user.email],
                html_message=html_message
            )
            self.sent = True
            self.save()
        except SMTPException as smtpe:
            logger.exception("Error sending notification: %s; Error: %s", self, smtpe)
    class Meta:
        """Meta info for history."""
        # Each user receives a given notification at most once.
        unique_together = ('user', 'notification')
        verbose_name_plural = 'notifications history'
|
bsd-3-clause
|
Shiroy/servo
|
tests/wpt/css-tests/tools/html5lib/html5lib/treewalkers/pulldom.py
|
1729
|
2302
|
from __future__ import absolute_import, division, unicode_literals
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
COMMENT, IGNORABLE_WHITESPACE, CHARACTERS
from . import _base
from ..constants import voidElements
class TreeWalker(_base.TreeWalker):
    """Tree walker driven by an xml.dom.pulldom event stream."""
    def __iter__(self):
        # Events are processed with one event of lookahead: an event is only
        # emitted once its successor is known, because tokens() needs the
        # next event to decide how to emit a void element.
        # `ignore_until` holds the DOM node of a void element whose EmptyTag
        # token has already been emitted; events are skipped until that
        # node's END_ELEMENT comes around.
        ignore_until = None
        previous = None
        for event in self.tree:
            if previous is not None and \
                (ignore_until is None or previous[1] is ignore_until):
                if previous[1] is ignore_until:
                    ignore_until = None
                for token in self.tokens(previous, event):
                    yield token
                    if token["type"] == "EmptyTag":
                        ignore_until = previous[1]
            previous = event
        # Flush the final buffered event.
        # NOTE(review): an empty event stream would leave previous as None
        # and raise TypeError on previous[1]; pulldom streams appear to
        # always yield at least one event -- confirm.
        if ignore_until is None or previous[1] is ignore_until:
            for token in self.tokens(previous, None):
                yield token
        elif ignore_until is not None:
            raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")
    def tokens(self, event, next):
        # Convert one pulldom event into html5lib tokens.  `next` is the
        # following event (or None at end of stream); it is only consulted
        # for void elements, to flag ones that (illegally) have children.
        type, node = event
        if type == START_ELEMENT:
            name = node.nodeName
            namespace = node.namespaceURI
            attrs = {}
            for attr in list(node.attributes.keys()):
                attr = node.getAttributeNode(attr)
                attrs[(attr.namespaceURI, attr.localName)] = attr.value
            if name in voidElements:
                # Last arg is True when the next event does not immediately
                # close this node, i.e. the void element has content.
                for token in self.emptyTag(namespace,
                                           name,
                                           attrs,
                                           not next or next[1] is not node):
                    yield token
            else:
                yield self.startTag(namespace, name, attrs)
        elif type == END_ELEMENT:
            name = node.nodeName
            namespace = node.namespaceURI
            # END_ELEMENT of a void element was already represented by the
            # EmptyTag token, so emit nothing for it.
            if name not in voidElements:
                yield self.endTag(namespace, name)
        elif type == COMMENT:
            yield self.comment(node.nodeValue)
        elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
            for token in self.text(node.nodeValue):
                yield token
        else:
            yield self.unknown(type)
|
mpl-2.0
|
Jusedawg/SickRage
|
sickbeard/metadata/tivo.py
|
7
|
13573
|
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# Author: Gordon Turner <gordonturner@gordonturner.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import io
import os
import datetime
import sickbeard
from sickbeard import logger, helpers
from sickbeard.metadata import generic
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import ex, ShowNotFoundException
class TIVOMetadata(generic.GenericMetadata):
    """
    Metadata generation class for TIVO
    The following file structure is used:
    show_root/Season ##/filename.ext (*)
    show_root/Season ##/.meta/filename.ext.txt (episode metadata)
    This class only generates episode specific metadata files, it does NOT generate a default.txt file.
    """
    def __init__(self,
                 show_metadata=False,
                 episode_metadata=False,
                 fanart=False,
                 poster=False,
                 banner=False,
                 episode_thumbnails=False,
                 season_posters=False,
                 season_banners=False,
                 season_all_poster=False,
                 season_all_banner=False):
        # All flags are forwarded to the generic base class; only episode
        # metadata is actually produced by this class (see the no-op
        # overrides below).
        generic.GenericMetadata.__init__(self,
                                         show_metadata,
                                         episode_metadata,
                                         fanart,
                                         poster,
                                         banner,
                                         episode_thumbnails,
                                         season_posters,
                                         season_banners,
                                         season_all_poster,
                                         season_all_banner)
        self.name = 'TIVO'
        # Episode metadata files use a .txt extension (pyTivo convention).
        self._ep_nfo_extension = "txt"
        # web-ui metadata template
        self.eg_show_metadata = "<i>not supported</i>"
        self.eg_episode_metadata = "Season##\\.meta\\<i>filename</i>.ext.txt"
        self.eg_fanart = "<i>not supported</i>"
        self.eg_poster = "<i>not supported</i>"
        self.eg_banner = "<i>not supported</i>"
        self.eg_episode_thumbnails = "<i>not supported</i>"
        self.eg_season_posters = "<i>not supported</i>"
        self.eg_season_banners = "<i>not supported</i>"
        self.eg_season_all_poster = "<i>not supported</i>"
        self.eg_season_all_banner = "<i>not supported</i>"
    # Override with empty methods for unsupported features
    def retrieveShowMetadata(self, folder):
        """Unsupported for TiVo; always returns (None, None, None)."""
        # no show metadata generated, we abort this lookup function
        return None, None, None
    def create_show_metadata(self, show_obj):
        """Unsupported for TiVo; no-op."""
        pass
    def update_show_indexer_metadata(self, show_obj):
        """Unsupported for TiVo; no-op."""
        pass
    def get_show_file_path(self, show_obj):
        """Unsupported for TiVo; no-op (returns None)."""
        pass
    def create_fanart(self, show_obj):
        """Unsupported for TiVo; no-op."""
        pass
    def create_poster(self, show_obj):
        """Unsupported for TiVo; no-op."""
        pass
    def create_banner(self, show_obj):
        """Unsupported for TiVo; no-op."""
        pass
    def create_episode_thumb(self, ep_obj):
        """Unsupported for TiVo; no-op."""
        pass
    @staticmethod
    def get_episode_thumb_path(ep_obj):
        """Unsupported for TiVo; no-op (returns None)."""
        pass
    def create_season_posters(self, ep_obj):
        """Unsupported for TiVo; no-op."""
        pass
    def create_season_banners(self, ep_obj):
        """Unsupported for TiVo; no-op."""
        pass
    def create_season_all_poster(self, show_obj):
        """Unsupported for TiVo; no-op."""
        pass
    def create_season_all_banner(self, show_obj):
        """Unsupported for TiVo; no-op."""
        pass
    # Override generic class
    def get_episode_file_path(self, ep_obj):
        """
        Returns a full show dir/.meta/episode.txt path for Tivo
        episode metadata files.
        Note, that pyTivo requires the metadata filename to include the original extention.
        ie If the episode name is foo.avi, the metadata name is foo.avi.txt
        ep_obj: a TVEpisode object to get the path for
        Returns '' when the episode's video file does not exist on disk.
        """
        if ek(os.path.isfile, ep_obj.location):
            metadata_file_name = ek(os.path.basename, ep_obj.location) + "." + self._ep_nfo_extension
            metadata_dir_name = ek(os.path.join, ek(os.path.dirname, ep_obj.location), '.meta')
            metadata_file_path = ek(os.path.join, metadata_dir_name, metadata_file_name)
        else:
            logger.log(u"Episode location doesn't exist: " + str(ep_obj.location), logger.DEBUG)
            return ''
        return metadata_file_path
    def _ep_data(self, ep_obj):
        """
        Creates a key value structure for a Tivo episode metadata file and
        returns the resulting data object.
        ep_obj: a TVEpisode instance to create the metadata file for.
        Lookup the show in http://thetvdb.com/ using the python library:
        https://github.com/dbr/indexer_api/
        The results are saved in the object myShow.
        The key values for the tivo metadata file are from:
        http://pytivo.sourceforge.net/wiki/index.php/Metadata
        Returns the metadata text, None when episode info is missing, or
        False when the indexer could not be reached.
        """
        # "key : value\n" lines accumulated into the final metadata text.
        data = ""
        # The episode plus any episodes sharing the same video file.
        eps_to_write = [ep_obj] + ep_obj.relatedEps
        indexer_lang = ep_obj.show.lang
        try:
            lINDEXER_API_PARMS = sickbeard.indexerApi(ep_obj.show.indexer).api_params.copy()
            lINDEXER_API_PARMS['actors'] = True
            lINDEXER_API_PARMS['language'] = indexer_lang or sickbeard.INDEXER_DEFAULT_LANGUAGE
            if ep_obj.show.dvdorder:
                lINDEXER_API_PARMS['dvdorder'] = True
            t = sickbeard.indexerApi(ep_obj.show.indexer).indexer(**lINDEXER_API_PARMS)
            myShow = t[ep_obj.show.indexerid]
        except sickbeard.indexer_shownotfound as e:
            raise ShowNotFoundException(str(e))
        except sickbeard.indexer_error as e:
            logger.log(u"Unable to connect to " + sickbeard.indexerApi(
                ep_obj.show.indexer).name + " while creating meta files - skipping - " + str(e), logger.ERROR)
            # NOTE(review): returns False here but None on later failures;
            # write_ep_file only tests truthiness, so both behave alike.
            return False
        for curEpToWrite in eps_to_write:
            try:
                myEp = myShow[curEpToWrite.season][curEpToWrite.episode]
            except (sickbeard.indexer_episodenotfound, sickbeard.indexer_seasonnotfound):
                logger.log(u"Unable to find episode {0:d}x{1:d} on {2}... has it been removed? Should I delete from db?".format(curEpToWrite.season, curEpToWrite.episode, sickbeard.indexerApi(ep_obj.show.indexer).name))
                return None
            # Specials (season 0) may lack an air date; fake the epoch date
            # so the firstaired check below does not reject them.
            if ep_obj.season == 0 and not getattr(myEp, 'firstaired', None):
                myEp["firstaired"] = str(datetime.date.fromordinal(1))
            if not (getattr(myEp, 'episodename', None) and getattr(myEp, 'firstaired', None)):
                return None
            if getattr(myShow, 'seriesname', None):
                data += ("title : " + myShow["seriesname"] + "\n")
                data += ("seriesTitle : " + myShow["seriesname"] + "\n")
            data += ("episodeTitle : " + curEpToWrite._format_pattern('%Sx%0E %EN') + "\n")
            # This should be entered for episodic shows and omitted for movies. The standard tivo format is to enter
            # the season number followed by the episode number for that season. For example, enter 201 for season 2
            # episode 01.
            # This only shows up if you go into the Details from the Program screen.
            # This seems to disappear once the video is transferred to TiVo.
            # NOTE: May not be correct format, missing season, but based on description from wiki leaving as is.
            data += ("episodeNumber : " + str(curEpToWrite.episode) + "\n")
            # Must be entered as true or false. If true, the year from originalAirDate will be shown in parentheses
            # after the episode's title and before the description on the Program screen.
            # FIXME: Hardcode isEpisode to true for now, not sure how to handle movies
            data += "isEpisode : true\n"
            # Write the synopsis of the video here
            # Microsoft Word's smartquotes can die in a fire.
            sanitizedDescription = curEpToWrite.description
            # Replace double curly quotes
            sanitizedDescription = sanitizedDescription.replace(u"\u201c", "\"").replace(u"\u201d", "\"")
            # Replace single curly quotes
            sanitizedDescription = sanitizedDescription.replace(u"\u2018", "'").replace(u"\u2019", "'").replace(u"\u02BC", "'")
            data += ("description : " + sanitizedDescription + "\n")
            # Usually starts with "SH" and followed by 6-8 digits.
            # Tivo uses zap2it for their data, so the series id is the zap2it_id.
            if getattr(myShow, 'zap2it_id', None):
                data += ("seriesId : " + myShow["zap2it_id"] + "\n")
            # This is the call sign of the channel the episode was recorded from.
            if getattr(myShow, 'network', None):
                data += ("callsign : " + myShow["network"] + "\n")
            # This must be entered as yyyy-mm-ddThh:mm:ssZ (the t is capitalized and never changes, the Z is also
            # capitalized and never changes). This is the original air date of the episode.
            # NOTE: Hard coded the time to T00:00:00Z as we really don't know when during the day the first run happened.
            if curEpToWrite.airdate != datetime.date.fromordinal(1):
                data += ("originalAirDate : " + str(curEpToWrite.airdate) + "T00:00:00Z\n")
            # This shows up at the beginning of the description on the Program screen and on the Details screen.
            if getattr(myShow, '_actors', None):
                for actor in myShow["_actors"]:
                    if 'name' in actor and actor['name'].strip():
                        data += ("vActor : " + actor['name'].strip() + "\n")
            # This is shown on both the Program screen and the Details screen.
            if getattr(myEp, 'rating', None):
                try:
                    rating = float(myEp['rating'])
                except ValueError:
                    rating = 0.0
                # convert 10 to 4 star rating. 4 * rating / 10
                # only whole numbers or half numbers work. multiply by 2, round, divide by 2.0
                rating = round(8 * rating / 10) / 2.0
                data += ("starRating : " + str(rating) + "\n")
            # This is shown on both the Program screen and the Details screen.
            # It uses the standard TV rating system of: TV-Y7, TV-Y, TV-G, TV-PG, TV-14, TV-MA and TV-NR.
            if getattr(myShow, 'contentrating', None):
                data += ("tvRating : " + str(myShow["contentrating"]) + "\n")
            # This field can be repeated as many times as necessary or omitted completely.
            if ep_obj.show.genre:
                for genre in ep_obj.show.genre.split('|'):
                    if genre:
                        data += ("vProgramGenre : " + str(genre) + "\n")
            # NOTE: The following are metadata keywords are not used
            # displayMajorNumber
            # showingBits
            # displayMinorNumber
            # colorCode
            # vSeriesGenre
            # vGuestStar, vDirector, vExecProducer, vProducer, vWriter, vHost, vChoreographer
            # partCount
            # partIndex
        return data
    def write_ep_file(self, ep_obj):
        """
        Generates and writes ep_obj's metadata under the given path with the
        given filename root. Uses the episode's name with the extension in
        _ep_nfo_extension.
        ep_obj: TVEpisode object for which to create the metadata
        file_name_path: The file name to use for this metadata. Note that the extension
        will be automatically added based on _ep_nfo_extension. This should
        include an absolute path.
        Returns True on success, False when metadata could not be built or
        the file could not be written.
        """
        data = self._ep_data(ep_obj)
        if not data:
            return False
        nfo_file_path = self.get_episode_file_path(ep_obj)
        nfo_file_dir = ek(os.path.dirname, nfo_file_path)
        try:
            if not ek(os.path.isdir, nfo_file_dir):
                logger.log(u"Metadata dir didn't exist, creating it at " + nfo_file_dir, logger.DEBUG)
                ek(os.makedirs, nfo_file_dir)
                helpers.chmodAsParent(nfo_file_dir)
            logger.log(u"Writing episode nfo file to " + nfo_file_path, logger.DEBUG)
            with io.open(nfo_file_path, 'wb') as nfo_file:
                # Calling encode directly, b/c often descriptions have wonky characters.
                nfo_file.write(data.encode("utf-8"))
            helpers.chmodAsParent(nfo_file_path)
        except EnvironmentError as e:
            logger.log(u"Unable to write file to " + nfo_file_path + " - are you sure the folder is writable? " + ex(e),
                       logger.ERROR)
            return False
        return True
# present a standard "interface" from the module
metadata_class = TIVOMetadata
|
gpl-3.0
|
OneRom/external_skia
|
tools/skp/page_sets/skia_ugamsolutions_desktop.py
|
30
|
1252
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class SkiaBuildbotDesktopPage(page_module.Page):
  """A single desktop page load used when recording/replaying SKP captures."""
  def __init__(self, url, page_set):
    super(SkiaBuildbotDesktopPage, self).__init__(
        url=url,
        page_set=page_set,
        credentials_path='data/credentials.json')
    self.user_agent_type = 'desktop'
    self.archive_data_file = 'data/skia_ugamsolutions_desktop.json'
  def RunNavigateSteps(self, action_runner):
    # Navigate, then give the page time to settle before capture.
    action_runner.NavigateToPage(self)
    action_runner.Wait(15)
class SkiaUgamsolutionsDesktopPageSet(page_set_module.PageSet):
  """ Pages designed to represent the median, not highly optimized web """
  def __init__(self):
    # Shares the archive file with SkiaBuildbotDesktopPage above.
    super(SkiaUgamsolutionsDesktopPageSet, self).__init__(
        user_agent_type='desktop',
        archive_data_file='data/skia_ugamsolutions_desktop.json')
    urls_list = [
      # Why: for crbug.com/447291
      'http://www.ugamsolutions.com',
    ]
    for url in urls_list:
      self.AddUserStory(SkiaBuildbotDesktopPage(url, self))
|
bsd-3-clause
|
BaichuanWu/Blog_on_django
|
site-packages/django/contrib/redirects/tests.py
|
112
|
3358
|
from django import http
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, modify_settings, override_settings
from django.utils import six
from .middleware import RedirectFallbackMiddleware
from .models import Redirect
@modify_settings(MIDDLEWARE_CLASSES={'append':
    'django.contrib.redirects.middleware.RedirectFallbackMiddleware'})
@override_settings(APPEND_SLASH=False, SITE_ID=1)
class RedirectTests(TestCase):
    """Exercise the Redirect model and RedirectFallbackMiddleware."""

    def setUp(self):
        self.site = Site.objects.get(pk=settings.SITE_ID)

    def _redirect(self, old_path, new_path):
        """Create a Redirect for the current site."""
        return Redirect.objects.create(
            site=self.site, old_path=old_path, new_path=new_path)

    def test_model(self):
        redirect = self._redirect('/initial', '/new_target')
        self.assertEqual(six.text_type(redirect), "/initial ---> /new_target")

    def test_redirect(self):
        self._redirect('/initial', '/new_target')
        self.assertRedirects(
            self.client.get('/initial'), '/new_target',
            status_code=301, target_status_code=404)

    @override_settings(APPEND_SLASH=True)
    def test_redirect_with_append_slash(self):
        self._redirect('/initial/', '/new_target/')
        self.assertRedirects(
            self.client.get('/initial'), '/new_target/',
            status_code=301, target_status_code=404)

    @override_settings(APPEND_SLASH=True)
    def test_redirect_with_append_slash_and_query_string(self):
        self._redirect('/initial/?foo', '/new_target/')
        self.assertRedirects(
            self.client.get('/initial?foo'), '/new_target/',
            status_code=301, target_status_code=404)

    def test_response_gone(self):
        """When the redirect target is '', return a 410"""
        self._redirect('/initial', '')
        self.assertEqual(self.client.get('/initial').status_code, 410)

    @modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
    def test_sites_not_installed(self):
        with self.assertRaises(ImproperlyConfigured):
            RedirectFallbackMiddleware()
class OverriddenRedirectFallbackMiddleware(RedirectFallbackMiddleware):
    # Use HTTP responses different from the defaults
    # (403 instead of gone, 302 instead of the permanent redirect).
    response_gone_class = http.HttpResponseForbidden
    response_redirect_class = http.HttpResponseRedirect
@modify_settings(MIDDLEWARE_CLASSES={'append':
    'django.contrib.redirects.tests.OverriddenRedirectFallbackMiddleware'})
@override_settings(SITE_ID=1)
class OverriddenRedirectMiddlewareTests(TestCase):
    """Check that the overridden response classes are honoured."""

    def setUp(self):
        self.site = Site.objects.get(pk=settings.SITE_ID)

    def _redirect_to(self, new_path):
        Redirect.objects.create(
            site=self.site, old_path='/initial/', new_path=new_path)

    def test_response_gone_class(self):
        self._redirect_to('')
        self.assertEqual(self.client.get('/initial/').status_code, 403)

    def test_response_redirect_class(self):
        self._redirect_to('/new_target/')
        self.assertEqual(self.client.get('/initial/').status_code, 302)
|
mit
|
crafty78/ansible
|
lib/ansible/modules/utilities/logic/include_role.py
|
34
|
2684
|
#!/usr/bin/python
# -*- mode: python -*-
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
author:
- "Ansible Core Team (@ansible)"
module: include_role
short_description: Load and execute a role
description:
- "Loads and executes a role as a task, this frees roles from the `role:` directive and allows them to be treated more as tasks."
version_added: "2.2"
options:
name:
description:
- The name of the role to be executed.
required: True
tasks_from:
description:
- "File to load from a Role's tasks/ directory."
required: False
default: 'main'
vars_from:
description:
- "File to load from a Role's vars/ directory."
required: False
default: 'main'
defaults_from:
description:
- "File to load from a Role's defaults/ directory."
required: False
default: 'main'
allow_duplicates:
description:
- Overrides the role's metadata setting to allow using a role more than once with the same parameters.
required: False
default: True
private:
description:
- If True the variables from defaults/ and vars/ in a role will not be made available to the rest of the play.
default: None
notes:
- Handlers are made available to the whole play.
- simple dependencies seem to work fine.
- As with C(include) this task can be static or dynamic, If static it implies that it won't need templating nor loops nor conditionals and will show included tasks in the --list options. Ansible will try to autodetect what is needed, but you can set `static` to `yes` or `no` at task level to control this.
'''
EXAMPLES = """
- include_role:
name: myrole
- name: Run tasks/other.yml instead of 'main'
include_role:
name: myrole
tasks_from: other
- name: Pass variables to role
include_role:
name: myrole
vars:
rolevar1: 'value from task'
- name: Use role in loop
include_role:
name: myrole
with_items:
- '{{ roleinput1 }}'
- '{{ roleinput2 }}'
loop_control:
loop_var: roleinputvar
- name: conditional role
include_role:
name: myrole
when: not idontwanttorun
"""
RETURN = """
# this module does not return anything except tasks to execute
"""
|
gpl-3.0
|
gearbox/GAE
|
lib/flask/config.py
|
781
|
6234
|
# -*- coding: utf-8 -*-
"""
flask.config
~~~~~~~~~~~~
Implements the configuration related objects.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import errno
import imp
import os
import types

from werkzeug.utils import import_string

from ._compat import string_types
class ConfigAttribute(object):
    """Descriptor that forwards attribute access to ``obj.config``."""

    def __init__(self, name, get_converter=None):
        # The config key this attribute maps to.
        self.__name__ = name
        # Optional callable applied to the value on read.
        self.get_converter = get_converter

    def __get__(self, obj, type=None):
        if obj is None:
            # Class-level access returns the descriptor itself.
            return self
        value = obj.config[self.__name__]
        if self.get_converter is None:
            return value
        return self.get_converter(value)

    def __set__(self, obj, value):
        obj.config[self.__name__] = value
class Config(dict):
"""Works exactly like a dict but provides ways to fill it from files
or special dictionaries. There are two common patterns to populate the
config.
Either you can fill the config from a config file::
app.config.from_pyfile('yourconfig.cfg')
Or alternatively you can define the configuration options in the
module that calls :meth:`from_object` or provide an import path to
a module that should be loaded. It is also possible to tell it to
use the same module and with that provide the configuration values
just before the call::
DEBUG = True
SECRET_KEY = 'development key'
app.config.from_object(__name__)
In both cases (loading from any Python file or loading from modules),
only uppercase keys are added to the config. This makes it possible to use
lowercase values in the config file for temporary values that are not added
to the config or to define the config keys in the same file that implements
the application.
Probably the most interesting way to load configurations is from an
environment variable pointing to a file::
app.config.from_envvar('YOURAPPLICATION_SETTINGS')
In this case before launching the application you have to set this
environment variable to the file you want to use. On Linux and OS X
use the export statement::
export YOURAPPLICATION_SETTINGS='/path/to/config/file'
On windows use `set` instead.
:param root_path: path to which files are read relative from. When the
config object is created by the application, this is
the application's :attr:`~flask.Flask.root_path`.
:param defaults: an optional dictionary of default values
"""
def __init__(self, root_path, defaults=None):
dict.__init__(self, defaults or {})
self.root_path = root_path
def from_envvar(self, variable_name, silent=False):
"""Loads a configuration from an environment variable pointing to
a configuration file. This is basically just a shortcut with nicer
error messages for this line of code::
app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
:param variable_name: name of the environment variable
:param silent: set to `True` if you want silent failure for missing
files.
:return: bool. `True` if able to load config, `False` otherwise.
"""
rv = os.environ.get(variable_name)
if not rv:
if silent:
return False
raise RuntimeError('The environment variable %r is not set '
'and as such configuration could not be '
'loaded. Set this variable and make it '
'point to a configuration file' %
variable_name)
return self.from_pyfile(rv, silent=silent)
def from_pyfile(self, filename, silent=False):
"""Updates the values in the config from a Python file. This function
behaves as if the file was imported as module with the
:meth:`from_object` function.
:param filename: the filename of the config. This can either be an
absolute filename or a filename relative to the
root path.
:param silent: set to `True` if you want silent failure for missing
files.
.. versionadded:: 0.7
`silent` parameter.
"""
filename = os.path.join(self.root_path, filename)
d = imp.new_module('config')
d.__file__ = filename
try:
with open(filename) as config_file:
exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
except IOError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
self.from_object(d)
return True
def from_object(self, obj):
    """Update the config from the uppercase attributes of *obj*.

    *obj* can be one of two types:
    - a string: the object with that name is imported first
    - an actual object reference: used directly
    Objects are usually either modules or classes.  Only uppercase
    attributes are copied into the config.

    Use this for configuration *defaults*; the actual configuration
    should be loaded with :meth:`from_pyfile`, ideally from a location
    outside the package.

    :param obj: an import name or object
    """
    if isinstance(obj, string_types):
        obj = import_string(obj)
    for attr in dir(obj):
        if not attr.isupper():
            continue
        self[attr] = getattr(obj, attr)
def __repr__(self):
    """Debug representation: class name followed by the dict contents."""
    return '<{0} {1}>'.format(type(self).__name__, dict.__repr__(self))
|
apache-2.0
|
samvarankashyap/linch-pin
|
linchpin/provision/roles/openshift/files/inventory.py
|
3
|
1896
|
#!/usr/bin/env python
from __future__ import absolute_import
from collections import OrderedDict
from linchpin.InventoryFilters.InventoryFilter import InventoryFilter
class Inventory(InventoryFilter):
    # Fields tried, in order, when deriving a hostname for a resource.
    DEFAULT_HOSTNAMES = ['metadata.name']

    def get_host_data(self, res, cfgs):
        """
        Returns a dict of hostnames or IP addresses for use in an Ansible
        inventory file, based on available data. Only a single hostname or IP
        address will be returned per instance, so as to avoid duplicate runs of
        Ansible on the same host via the generated inventory file.

        Each hostname contains mappings of any variable that was defined in the
        cfgs section of the PinFile (e.g. __IP__) to the value in the field that
        corresponds with that variable in the cfgs.

        :param res:
            linchpin openshift resource data
        :param cfgs:
            map of config options from PinFile
        """
        # list() is required: on Python 3 dict.keys() returns a view,
        # which has no remove() method.
        res_keys = list(res.keys())
        res_keys.remove('resource_group')
        # this is the only remaining key after removing resource type;
        # it corresponds with the name of the pod
        key = res_keys[0]
        host_data = OrderedDict()
        if res['resource_group'] != 'openshift':
            return host_data
        var_data = cfgs.get('openshift', {})
        if var_data is None:
            var_data = {}
        hostname_var, hostname = self.get_hostname(res[key], var_data,
                                                   self.DEFAULT_HOSTNAMES)
        host_data[hostname] = {}
        if '__IP__' not in var_data:
            var_data['__IP__'] = hostname_var
        self.set_config_values(host_data[hostname], res[key], var_data)
        return host_data
|
gpl-3.0
|
debkbanerji/super-mem
|
ocrtest/myenv/lib/python2.7/site-packages/pip/compat/__init__.py
|
342
|
4672
|
"""Stuff that differs in different Python versions and platform
distributions."""
from __future__ import absolute_import, division
import os
import sys
from pip._vendor.six import text_type
try:
from logging.config import dictConfig as logging_dictConfig
except ImportError:
from pip.compat.dictconfig import dictConfig as logging_dictConfig
try:
from collections import OrderedDict
except ImportError:
from pip._vendor.ordereddict import OrderedDict
try:
import ipaddress
except ImportError:
try:
from pip._vendor import ipaddress
except ImportError:
import ipaddr as ipaddress
ipaddress.ip_address = ipaddress.IPAddress
ipaddress.ip_network = ipaddress.IPNetwork
# Prefer the modern ``sysconfig`` module; fall back to
# ``distutils.sysconfig`` on interpreters that lack it.
try:
    import sysconfig

    def get_stdlib():
        # Directories holding the standard library (pure-Python and
        # platform-specific), with empty entries dropped.
        paths = [
            sysconfig.get_path("stdlib"),
            sysconfig.get_path("platstdlib"),
        ]
        return set(filter(bool, paths))
except ImportError:
    from distutils import sysconfig

    def get_stdlib():
        # distutils spelling of the same two locations.
        paths = [
            sysconfig.get_python_lib(standard_lib=True),
            sysconfig.get_python_lib(standard_lib=True, plat_specific=True),
        ]
        return set(filter(bool, paths))
__all__ = [
"logging_dictConfig", "ipaddress", "uses_pycache", "console_to_str",
"native_str", "get_path_uid", "stdlib_pkgs", "WINDOWS", "samefile",
"OrderedDict",
]
if sys.version_info >= (3, 4):
    # PEP 3147 __pycache__ layout is always available on 3.4+.
    uses_pycache = True
    from importlib.util import cache_from_source
else:
    import imp
    uses_pycache = hasattr(imp, 'cache_from_source')
    cache_from_source = imp.cache_from_source if uses_pycache else None

if sys.version_info >= (3,):
    def console_to_str(s):
        """Decode console ``bytes`` output to text, tolerating bad bytes."""
        try:
            return s.decode(sys.__stdout__.encoding)
        except UnicodeDecodeError:
            return s.decode('utf_8')

    def native_str(s, replace=False):
        """Coerce *s* to the native ``str`` type (text on Python 3)."""
        if isinstance(s, bytes):
            return s.decode('utf-8', 'replace' if replace else 'strict')
        return s
else:
    def console_to_str(s):
        # Console output is already a native string on Python 2.
        return s

    def native_str(s, replace=False):
        # Replace is ignored -- unicode to UTF-8 can't fail
        if isinstance(s, text_type):
            return s.encode('utf-8')
        return s
def total_seconds(td):
    """Return the duration of timedelta *td* in (fractional) seconds.

    Uses ``timedelta.total_seconds`` where available (Python 2.7+) and
    falls back to the equivalent arithmetic on older interpreters.
    """
    try:
        return td.total_seconds()
    except AttributeError:
        micros = ((td.days * 24 * 3600) + td.seconds) * 10 ** 6 + td.microseconds
        return micros / 10 ** 6
def get_path_uid(path):
    """
    Return path's uid.
    Does not follow symlinks:
        https://github.com/pypa/pip/pull/935#discussion_r5307003
    Placed this function in compat due to differences on AIX and
    Jython, that should eventually go away.
    :raises OSError: When path is a symlink or can't be read.
    """
    if hasattr(os, 'O_NOFOLLOW'):
        # O_NOFOLLOW makes open() itself fail on a symlink.
        fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW)
        file_uid = os.fstat(fd).st_uid
        os.close(fd)
        return file_uid
    # AIX and Jython: no O_NOFOLLOW, so check-then-stat.
    # WARNING: time of check vulnerability, but best we can do w/o NOFOLLOW
    if os.path.islink(path):
        # raise OSError for parity with os.O_NOFOLLOW above
        raise OSError(
            "%s is a symlink; Will not return uid for symlinks" % path
        )
    # older versions of Jython don't have `os.fstat`
    return os.stat(path).st_uid
def expanduser(path):
    """
    Expand ~ and ~user constructions.
    Includes a workaround for http://bugs.python.org/issue14768
    """
    result = os.path.expanduser(path)
    # On some platforms expanding '~/...' for a root home of '/'
    # yields a spurious leading double slash; strip one.
    if path[:2] == '~/' and result[:2] == '//':
        result = result[1:]
    return result
# packages in the stdlib that may have installation metadata, but should not be
# considered 'installed'. this theoretically could be determined based on
# dist.location (py27:`sysconfig.get_paths()['stdlib']`,
# py26:sysconfig.get_config_vars('LIBDEST')), but fear platform variation may
# make this ineffective, so hard-coding
stdlib_pkgs = ('python', 'wsgiref')
if sys.version_info >= (2, 7):
    # argparse entered the standard library in 2.7.
    stdlib_pkgs += ('argparse',)
# windows detection, covers cpython and ironpython
WINDOWS = (sys.platform.startswith("win") or
           (sys.platform == 'cli' and os.name == 'nt'))
def samefile(file1, file2):
    """Provide an alternative for os.path.samefile on Windows/Python2."""
    impl = getattr(os.path, 'samefile', None)
    if impl is not None:
        return impl(file1, file2)
    # Fallback: compare normalized absolute paths textually.
    canonical = [os.path.normcase(os.path.abspath(f)) for f in (file1, file2)]
    return canonical[0] == canonical[1]
|
mit
|
captiosus/treadmill
|
treadmill/sproc/appmonitor.py
|
1
|
6275
|
"""Syncronizes cell Zookeeper with LDAP data.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import itertools
import logging
import math
import time
import click
import six
from treadmill import context
from treadmill import restclient
from treadmill import utils
from treadmill import yamlwrapper as yaml
from treadmill import zknamespace as z
from treadmill import zkutils
from treadmill import zkwatchers
_LOGGER = logging.getLogger(__name__)
# Allow 2 * count tokens to accumulate during 1 hour.
_INTERVAL = float(60 * 60)
def reevaluate(api_url, state):
    """Evaluate state and adjust app instance counts based on monitors.

    ``state['scheduled']`` maps app name -> scheduled instance names;
    ``state['monitors']`` maps app name -> token-bucket dict with keys
    ``count``, ``available``, ``rate`` and ``last_update``.  Instance
    creation is rate limited by the per-monitor token bucket, which
    refills at ``rate`` tokens/sec up to twice the target count.

    :param api_url: cell API url used for instance create/delete calls.
    :param state: shared mutable state maintained by the ZK watches.
    :returns: ``True`` -- REST failures are logged, never fatal.
    """
    # Disable too many branches warning.
    #
    # pylint: disable=R0912
    grouped = dict(state['scheduled'])
    monitors = dict(state['monitors'])
    now = time.time()
    # Increase available tokens.
    for name, conf in monitors.items():
        # Max value reached, nothing to do.
        max_value = conf['count'] * 2
        available = conf['available']
        if available < max_value:
            delta = conf['rate'] * (now - conf['last_update'])
            conf['available'] = min(available + delta, max_value)
        conf['last_update'] = now
    success = True
    # Allow every application to evaluate.
    for name, conf in monitors.items():
        count = conf['count']
        available = conf['available']
        current_count = len(grouped.get(name, []))
        _LOGGER.debug('App: %r current: %d, target %d',
                      name, current_count, count)
        if count == current_count:
            continue
        if count > current_count:
            # Scale up, but never past what the token bucket allows.
            needed = count - current_count
            allowed = int(min(needed, math.floor(available)))
            if allowed <= 0:
                continue
            try:
                # Response payload is unused; only success matters.
                # (The original bound it to an unused `_scheduled` local.)
                restclient.post(
                    [api_url],
                    '/instance/{}?count={}'.format(name, allowed),
                    payload={},
                    headers={'X-Treadmill-Trusted-Agent': 'monitor'}
                )
                conf['available'] -= allowed
            except Exception:  # pylint: disable=W0703
                _LOGGER.exception('Unable to create instances: %s: %s',
                                  name, needed)
        else:
            # Scale down: delete the surplus instances in bulk.
            extra = grouped[name][:current_count - count]
            try:
                response = restclient.post(
                    [api_url], '/instance/_bulk/delete',
                    payload=dict(instances=list(extra)),
                    headers={'X-Treadmill-Trusted-Agent': 'monitor'}
                )
                _LOGGER.info('deleted: %r - %s', extra, response)
            except Exception:  # pylint: disable=W0703
                _LOGGER.exception('Unable to delete instances: %r', extra)
    return success
def _run_sync(api_url):
    """Sync app monitor count with instance count.

    Registers Zookeeper watches that keep a shared ``state`` dict up to
    date, then loops forever calling :func:`reevaluate` once a second.

    :param api_url: cell API url, passed through to ``reevaluate``.
    """
    zkclient = context.GLOBAL.zk.conn
    # Shared mutable state: updated in place by the watch callbacks
    # below, read by reevaluate().
    state = {
        'scheduled': {},
        'monitors': {}
    }

    @zkclient.ChildrenWatch(z.path.scheduled())
    @utils.exit_on_unhandled
    def _scheduled_watch(children):
        """Watch scheduled instances."""
        scheduled = sorted(children)
        # Group instance names by app name (the part before the '#').
        grouped = collections.defaultdict(
            list,
            {
                k: list(v)
                for k, v in itertools.groupby(
                    scheduled,
                    lambda n: n.rpartition('#')[0]
                )
            }
        )
        state['scheduled'] = grouped
        return True

    def _watch_monitor(name):
        """Watch monitor."""
        # Establish data watch on each monitor.
        @zkwatchers.ExistingDataWatch(zkclient, z.path.appmonitor(name))
        @utils.exit_on_unhandled
        def _monitor_data_watch(data, stat, event):
            """Monitor individual monitor."""
            if (event is not None and event.type == 'DELETED') or stat is None:
                _LOGGER.info('Removing watch on deleted monitor: %s', name)
                return
            try:
                count = yaml.load(data)['count']
            except Exception:  # pylint: disable=W0703
                _LOGGER.exception('Invalid monitor: %s', name)
                return
            _LOGGER.info('Reconfigure monitor: %s, count: %s', name, count)
            # Token bucket starts full (2 * count) and refills at
            # 2 * count tokens per _INTERVAL.
            state['monitors'][name] = {
                'count': count,
                'available': 2.0 * count,
                'last_update': time.time(),
                'rate': (2.0 * count / _INTERVAL)
            }

    @zkclient.ChildrenWatch(z.path.appmonitor())
    @utils.exit_on_unhandled
    def _appmonitors_watch(children):
        """Watch app monitors."""
        monitors = set(children)
        # Drop state for monitors that no longer exist in ZK...
        extra = six.viewkeys(state['monitors']) - monitors
        for name in extra:
            _LOGGER.info('Removing extra monitor: %r', name)
            if state['monitors'].pop(name, None) is None:
                _LOGGER.warning(
                    'Failed to remove non-existent monitor: %r', name
                )
        # ...and establish data watches for monitors that appeared.
        missing = monitors - six.viewkeys(state['monitors'])
        for name in missing:
            _LOGGER.info('Adding missing monitor: %s', name)
            _watch_monitor(name)

    _LOGGER.info('Ready')
    while True:
        time.sleep(1)
        # NOTE(review): reevaluate() as written always returns True, so
        # this error branch appears unreachable -- confirm intent.
        if not reevaluate(api_url, state):
            _LOGGER.error('Unhandled exception while evaluating state.')
            break
def init():
    """Return top level command handler."""

    @click.command()
    @click.option('--no-lock', is_flag=True, default=False,
                  help='Run without lock.')
    @click.option('--api', required=True, help='Cell API url.')
    def top(no_lock, api):
        """Sync LDAP data with Zookeeper data."""
        if not no_lock:
            # Serialize through a ZK election lock so only one instance
            # per cell runs the sync loop at a time.
            lock = zkutils.make_lock(context.GLOBAL.zk.conn,
                                     z.path.election(__name__))
            _LOGGER.info('Waiting for leader lock.')
            with lock:
                _run_sync(api)
        else:
            _LOGGER.info('Running without lock.')
            _run_sync(api)

    return top
|
apache-2.0
|
kustodian/ansible
|
lib/ansible/module_utils/facts/system/platform.py
|
66
|
4075
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import socket
import platform
from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.collector import BaseFactCollector
# i86pc is a Solaris and derivatives-ism
SOLARIS_I86_RE_PATTERN = r'i([3456]86|86pc)'
solaris_i86_re = re.compile(SOLARIS_I86_RE_PATTERN)
class PlatformFactCollector(BaseFactCollector):
    """Collect platform/architecture facts via the stdlib ``platform`` module."""
    name = 'platform'
    _fact_ids = set(['system',
                     'kernel',
                     'kernel_version',
                     'machine',
                     'python_version',
                     'architecture',
                     'machine_id'])

    def collect(self, module=None, collected_facts=None):
        """Return a dict of platform facts.

        ``module`` is only used on AIX (to run getconf/bootinfo); on
        other systems it may be None.
        """
        platform_facts = {}
        # platform.system() can be Linux, Darwin, Java, or Windows
        platform_facts['system'] = platform.system()
        platform_facts['kernel'] = platform.release()
        platform_facts['kernel_version'] = platform.version()
        platform_facts['machine'] = platform.machine()
        platform_facts['python_version'] = platform.python_version()
        platform_facts['fqdn'] = socket.getfqdn()
        # hostname is the first label only; nodename is as reported by the OS.
        platform_facts['hostname'] = platform.node().split('.')[0]
        platform_facts['nodename'] = platform.node()
        platform_facts['domain'] = '.'.join(platform_facts['fqdn'].split('.')[1:])
        arch_bits = platform.architecture()[0]
        platform_facts['userspace_bits'] = arch_bits.replace('bit', '')
        if platform_facts['machine'] == 'x86_64':
            platform_facts['architecture'] = platform_facts['machine']
            if platform_facts['userspace_bits'] == '64':
                platform_facts['userspace_architecture'] = 'x86_64'
            elif platform_facts['userspace_bits'] == '32':
                platform_facts['userspace_architecture'] = 'i386'
        elif solaris_i86_re.search(platform_facts['machine']):
            # Solaris-style i86pc / i386-686 machine names map to i386.
            platform_facts['architecture'] = 'i386'
            if platform_facts['userspace_bits'] == '64':
                platform_facts['userspace_architecture'] = 'x86_64'
            elif platform_facts['userspace_bits'] == '32':
                platform_facts['userspace_architecture'] = 'i386'
        else:
            platform_facts['architecture'] = platform_facts['machine']
        if platform_facts['system'] == 'AIX':
            # Attempt to use getconf to figure out architecture
            # fall back to bootinfo if needed
            getconf_bin = module.get_bin_path('getconf')
            if getconf_bin:
                rc, out, err = module.run_command([getconf_bin, 'MACHINE_ARCHITECTURE'])
                data = out.splitlines()
                platform_facts['architecture'] = data[0]
            else:
                # NOTE(review): bootinfo_bin may be None if the binary is
                # missing, which would make run_command fail -- confirm.
                bootinfo_bin = module.get_bin_path('bootinfo')
                rc, out, err = module.run_command([bootinfo_bin, '-p'])
                data = out.splitlines()
                platform_facts['architecture'] = data[0]
        elif platform_facts['system'] == 'OpenBSD':
            platform_facts['architecture'] = platform.uname()[5]
        # Machine id from the D-Bus location, else /etc/machine-id.
        machine_id = get_file_content("/var/lib/dbus/machine-id") or get_file_content("/etc/machine-id")
        if machine_id:
            machine_id = machine_id.splitlines()[0]
            platform_facts["machine_id"] = machine_id
        return platform_facts
|
gpl-3.0
|
postlund/home-assistant
|
tests/components/zwave/test_switch.py
|
11
|
2347
|
"""Test Z-Wave switches."""
from unittest.mock import patch
from homeassistant.components.zwave import switch
from tests.mock.zwave import MockEntityValues, MockNode, MockValue, value_changed
def test_get_device_detects_switch(mock_openzwave):
    """Test get_device returns a Z-Wave switch."""
    # A minimal mock node/value pair is enough for device detection.
    node = MockNode()
    value = MockValue(data=0, node=node)
    values = MockEntityValues(primary=value)
    device = switch.get_device(node=node, values=values, node_config={})
    assert isinstance(device, switch.ZwaveSwitch)
def test_switch_turn_on_and_off(mock_openzwave):
    """Test turning on a Z-Wave switch."""
    node = MockNode()
    value = MockValue(data=0, node=node)
    values = MockEntityValues(primary=value)
    device = switch.get_device(node=node, values=values, node_config={})
    device.turn_on()
    assert node.set_switch.called
    # set_switch is invoked with positional args (value_id, state).
    value_id, state = node.set_switch.mock_calls[0][1]
    assert value_id == value.value_id
    assert state is True
    # Reset call history before exercising the off path.
    node.reset_mock()
    device.turn_off()
    assert node.set_switch.called
    value_id, state = node.set_switch.mock_calls[0][1]
    assert value_id == value.value_id
    assert state is False
def test_switch_value_changed(mock_openzwave):
    """Test value changed for Z-Wave switch."""
    node = MockNode()
    value = MockValue(data=False, node=node)
    values = MockEntityValues(primary=value)
    device = switch.get_device(node=node, values=values, node_config={})
    assert not device.is_on
    # A value update pushed through the network should flip is_on.
    value.data = True
    value_changed(value)
    assert device.is_on
@patch("time.perf_counter")
def test_switch_refresh_on_update(mock_counter, mock_openzwave):
    """Test value changed for refresh on update Z-Wave switch."""
    # This manufacturer/product id combination presumably matches a
    # device that requires an explicit state refresh after updates --
    # confirm against the component's workaround list.
    mock_counter.return_value = 10
    node = MockNode(manufacturer_id="013c", product_type="0001", product_id="0005")
    value = MockValue(data=False, node=node, instance=1)
    values = MockEntityValues(primary=value)
    device = switch.get_device(node=node, values=values, node_config={})
    assert not device.is_on
    # 5 fake seconds after creation: state updates, no refresh issued.
    mock_counter.return_value = 15
    value.data = True
    value_changed(value)
    assert device.is_on
    assert not node.request_state.called
    # 30 fake seconds later: a refresh (request_state) is expected too.
    mock_counter.return_value = 45
    value.data = False
    value_changed(value)
    assert not device.is_on
    assert node.request_state.called
|
apache-2.0
|
leighpauls/k2cro4
|
third_party/webdriver/pylib/test/selenium/test_default_server.py
|
16
|
2107
|
"""
Copyright 2011 Software Freedom Conservancy.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from selenium import selenium
import unittest
import sys, time
class TestDefaultServer(unittest.TestCase):
    """Smoke test against a locally running Selenium RC server (Python 2)."""
    # Selenium RC server connection settings.
    seleniumHost = 'localhost'
    seleniumPort = str(4444)
    #browserStartCommand = "c:\\program files\\internet explorer\\iexplore.exe"
    browserStartCommand = "*firefox"
    browserURL = "http://localhost:4444"

    def setUp(self):
        # Python 2 print statement: this module targets Python 2 only.
        print "Using selenium server at " + self.seleniumHost + ":" + self.seleniumPort
        self.selenium = selenium(self.seleniumHost, self.seleniumPort, self.browserStartCommand, self.browserURL)
        self.selenium.start()

    def testLinks(self):
        # Exercises link text, get_all_links ordering, and forward/back
        # navigation against pages bundled with the selenium server.
        selenium = self.selenium
        selenium.open("/selenium-server/tests/html/test_click_page1.html")
        self.failUnless(selenium.get_text("link").find("Click here for next page") != -1, "link 'link' doesn't contain expected text")
        links = selenium.get_all_links()
        self.failUnless(len(links) > 3)
        self.assertEqual("linkToAnchorOnThisPage", links[3])
        selenium.click("link")
        selenium.wait_for_page_to_load(5000)
        self.failUnless(selenium.get_location().endswith("/selenium-server/tests/html/test_click_page2.html"))
        selenium.click("previousPage")
        selenium.wait_for_page_to_load(5000)
        self.failUnless(selenium.get_location().endswith("/selenium-server/tests/html/test_click_page1.html"))

    def tearDown(self):
        self.selenium.stop()

if __name__ == "__main__":
    unittest.main()
|
bsd-3-clause
|
MuckRock/muckrock
|
muckrock/crowdsource/rules.py
|
1
|
2301
|
"""Rules based permissions for the crowdsource app"""
# pylint: disable=missing-docstring, unused-argument, invalid-unary-operand-type
# Third Party
from rules import add_perm, always_deny, is_staff, predicate
# MuckRock
from muckrock.foia.rules import has_feature_level, skip_if_not_obj, user_authenticated
@predicate
@user_authenticated
def is_experimental(user):
    # Opt-in flag on the user's profile enabling experimental features.
    return user.profile.experimental


@predicate
@skip_if_not_obj
@user_authenticated
def is_owner(user, crowdsource):
    # The user who created this crowdsource.
    return crowdsource.user == user


@predicate
@skip_if_not_obj
def is_project_only(user, crowdsource):
    # Restricted to project members AND actually attached to a project.
    return crowdsource.project_only and crowdsource.project


@predicate
@skip_if_not_obj
@user_authenticated
def is_contributor(user, crowdsource):
    # Member of the project this crowdsource belongs to, if any.
    return crowdsource.project and crowdsource.project.has_contributor(user)


@predicate
@skip_if_not_obj
@user_authenticated
def is_project_admin(user, crowdsource):
    # Project contributors get admin rights when project_admin is set.
    return (
        crowdsource.project_admin
        and crowdsource.project
        and crowdsource.project.has_contributor(user)
    )


@predicate
@skip_if_not_obj
def has_gallery(user, crowdsource):
    # At least one field is publicly displayed in a gallery.
    return crowdsource.fields.filter(gallery=True).exists()
# Composite predicates: owners, staff and project admins administer.
is_crowdsource_admin = is_owner | is_staff | is_project_admin
# Viewable once a gallery field exists; admins can always view.
can_view = has_gallery | is_crowdsource_admin

add_perm("crowdsource.add_crowdsource", has_feature_level(1) | is_experimental)
add_perm("crowdsource.change_crowdsource", is_crowdsource_admin)
add_perm("crowdsource.view_crowdsource", can_view)
add_perm("crowdsource.delete_crowdsource", always_deny)
add_perm(
    "crowdsource.form_crowdsource",
    ~is_project_only | is_contributor | is_crowdsource_admin,
)
def crowdsource_perm(perm):
    """Build a predicate delegating to the ``crowdsource.<perm>_crowdsource`` permission."""
    perm_name = "crowdsource.{}_crowdsource".format(perm)

    @predicate("crowdsource_perm:{}".format(perm))
    def inner(user, crowdsource_response):
        # The response object is ignored; the check is on the user only.
        return user.has_perm(perm_name)

    return inner
@predicate
@skip_if_not_obj
def is_gallery(user, response):
    # Response has been marked for public gallery display.
    return response.gallery


add_perm("crowdsource.add_crowdsourceresponse", has_feature_level(1) | is_experimental)
add_perm("crowdsource.change_crowdsourceresponse", crowdsource_perm("change"))
add_perm(
    "crowdsource.view_crowdsourceresponse", is_gallery | crowdsource_perm("change")
)
add_perm("crowdsource.delete_crowdsourceresponse", crowdsource_perm("delete"))
|
agpl-3.0
|
BaconPancakes/valor
|
lib/youtube_dl/extractor/metacritic.py
|
73
|
2675
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
fix_xml_ampersands,
)
class MetacriticIE(InfoExtractor):
    """Extractor for metacritic.com trailer pages."""
    # Trailer URLs look like: metacritic.com/<category>/<title>/trailers/<id>
    _VALID_URL = r'https?://(?:www\.)?metacritic\.com/.+?/trailers/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.metacritic.com/game/playstation-4/infamous-second-son/trailers/3698222',
        'info_dict': {
            'id': '3698222',
            'ext': 'mp4',
            'title': 'inFamous: Second Son - inSide Sucker Punch: Smoke & Mirrors',
            'description': 'Take a peak behind-the-scenes to see how Sucker Punch brings smoke into the universe of inFAMOUS Second Son on the PS4.',
            'duration': 221,
        },
        'skip': 'Not providing trailers anymore',
    }, {
        'url': 'http://www.metacritic.com/game/playstation-4/tales-from-the-borderlands-a-telltale-game-series/trailers/5740315',
        'info_dict': {
            'id': '5740315',
            'ext': 'mp4',
            'title': 'Tales from the Borderlands - Finale: The Vault of the Traveler',
            'description': 'In the final episode of the season, all hell breaks loose. Jack is now in control of Helios\' systems, and he\'s ready to reclaim his rightful place as king of Hyperion (with or without you).',
            'duration': 114,
        },
    }]

    def _real_extract(self, url):
        """Return an info dict (formats + metadata) for a trailer id."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)
        # The xml is not well formatted, there are raw '&'
        info = self._download_xml('http://www.metacritic.com/video_data?video=' + video_id,
                                  video_id, 'Downloading info xml', transform_source=fix_xml_ampersands)
        # Select the clip in the playlist whose id matches the request.
        clip = next(c for c in info.findall('playList/clip') if c.find('id').text == video_id)
        formats = []
        for videoFile in clip.findall('httpURI/videoFile'):
            rate_str = videoFile.find('rate').text
            video_url = videoFile.find('filePath').text
            formats.append({
                'url': video_url,
                'ext': 'mp4',
                'format_id': rate_str,
                # rate doubles as the total bitrate hint.
                'tbr': int(rate_str),
            })
        self._sort_formats(formats)
        description = self._html_search_regex(r'<b>Description:</b>(.*?)</p>',
                                              webpage, 'description', flags=re.DOTALL)
        return {
            'id': video_id,
            'title': clip.find('title').text,
            'formats': formats,
            'description': description,
            'duration': int(clip.find('duration').text),
        }
|
gpl-3.0
|
tzewangdorje/SIPserv
|
Twisted-13.1.0/twisted/names/test/test_common.py
|
9
|
4287
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.names.common}.
"""
from __future__ import division, absolute_import
from zope.interface.verify import verifyClass
from twisted.internet.interfaces import IResolver
from twisted.trial.unittest import SynchronousTestCase
from twisted.python.failure import Failure
from twisted.names.common import ResolverBase
from twisted.names.dns import EFORMAT, ESERVER, ENAME, ENOTIMP, EREFUSED, Query
from twisted.names.error import DNSFormatError, DNSServerError, DNSNameError
from twisted.names.error import DNSNotImplementedError, DNSQueryRefusedError
from twisted.names.error import DNSUnknownError
class ExceptionForCodeTests(SynchronousTestCase):
    """
    Tests for L{ResolverBase.exceptionForCode}.

    Each test maps one DNS response code to its expected exception class.
    """
    def setUp(self):
        self.exceptionForCode = ResolverBase().exceptionForCode

    def test_eformat(self):
        """
        L{ResolverBase.exceptionForCode} converts L{EFORMAT} to
        L{DNSFormatError}.
        """
        self.assertIdentical(self.exceptionForCode(EFORMAT), DNSFormatError)

    def test_eserver(self):
        """
        L{ResolverBase.exceptionForCode} converts L{ESERVER} to
        L{DNSServerError}.
        """
        self.assertIdentical(self.exceptionForCode(ESERVER), DNSServerError)

    def test_ename(self):
        """
        L{ResolverBase.exceptionForCode} converts L{ENAME} to L{DNSNameError}.
        """
        self.assertIdentical(self.exceptionForCode(ENAME), DNSNameError)

    def test_enotimp(self):
        """
        L{ResolverBase.exceptionForCode} converts L{ENOTIMP} to
        L{DNSNotImplementedError}.
        """
        self.assertIdentical(
            self.exceptionForCode(ENOTIMP), DNSNotImplementedError)

    def test_erefused(self):
        """
        L{ResolverBase.exceptionForCode} converts L{EREFUSED} to
        L{DNSQueryRefusedError}.
        """
        self.assertIdentical(
            self.exceptionForCode(EREFUSED), DNSQueryRefusedError)

    def test_other(self):
        """
        L{ResolverBase.exceptionForCode} converts any other response code to
        L{DNSUnknownError}.
        """
        # object() is guaranteed not to match any known response code.
        self.assertIdentical(
            self.exceptionForCode(object()), DNSUnknownError)
class QueryTests(SynchronousTestCase):
    """
    Tests for L{ResolverBase.query}.
    """
    def test_resolverBaseProvidesIResolver(self):
        """
        L{ResolverBase} provides the L{IResolver} interface.
        """
        verifyClass(IResolver, ResolverBase)

    def test_typeToMethodDispatch(self):
        """
        L{ResolverBase.query} looks up a method to invoke using the type of the
        query passed to it and the C{typeToMethod} mapping on itself.
        """
        results = []
        resolver = ResolverBase()
        # Capture the (name, timeout) pair the dispatch passes through.
        resolver.typeToMethod = {
            12345: lambda query, timeout: results.append((query, timeout))}
        query = Query(name=b"example.com", type=12345)
        resolver.query(query, 123)
        self.assertEqual([(b"example.com", 123)], results)

    def test_typeToMethodResult(self):
        """
        L{ResolverBase.query} returns a L{Deferred} which fires with the result
        of the method found in the C{typeToMethod} mapping for the type of the
        query passed to it.
        """
        expected = object()
        resolver = ResolverBase()
        resolver.typeToMethod = {54321: lambda query, timeout: expected}
        query = Query(name=b"example.com", type=54321)
        queryDeferred = resolver.query(query, 123)
        result = []
        queryDeferred.addBoth(result.append)
        self.assertEqual(expected, result[0])

    def test_unknownQueryType(self):
        """
        L{ResolverBase.query} returns a L{Deferred} which fails with
        L{NotImplementedError} when called with a query of a type not present in
        its C{typeToMethod} dictionary.
        """
        resolver = ResolverBase()
        resolver.typeToMethod = {}
        query = Query(name=b"example.com", type=12345)
        queryDeferred = resolver.query(query, 123)
        result = []
        queryDeferred.addBoth(result.append)
        # The Deferred must fail, and specifically with NotImplementedError.
        self.assertIsInstance(result[0], Failure)
        result[0].trap(NotImplementedError)
|
gpl-3.0
|
tumf/litecoin
|
contrib/testgen/base58.py
|
2139
|
2818
|
'''
Bitcoin base58 encoding and decoding.
Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
    # PyCrypto-style alias: SHA256.new() creates a hashlib sha256 object.
    new = hashlib.sha256

if str != bytes:
    # Python 3.x
    # Shadow ord/chr so the byte-oriented code below works unchanged:
    # iterating bytes already yields ints, and chr must produce bytes.
    def ord(c):
        return c

    def chr(n):
        return bytes((n,))
# Bitcoin's base58 alphabet: no 0, O, I, or l to avoid confusion.
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars


def b58encode(v):
    """ encode v, which is a string of bytes, to base58.
    """
    # Accumulate the input as one big-endian integer (Horner's method).
    acc = 0
    for byte in v:
        acc = acc * 256 + ord(byte)
    # Peel off base-58 digits, least significant first.
    encoded = ''
    while acc >= __b58base:
        acc, digit = divmod(acc, __b58base)
        encoded = __b58chars[digit] + encoded
    encoded = __b58chars[acc] + encoded
    # Bitcoin does a little leading-zero-compression:
    # leading 0-bytes in the input become leading-1s
    pad = 0
    for byte in v:
        if byte != '\0':
            break
        pad += 1
    return __b58chars[0] * pad + encoded
def b58decode(v, length=None):
    """ decode v into a string of len bytes
    """
    # Fold base-58 digits into one big-endian integer (Horner's method);
    # note .find() yields -1 for characters outside the alphabet.
    acc = 0
    for ch in v:
        acc = acc * __b58base + __b58chars.find(ch)
    # Convert the integer back to bytes, most significant first.
    decoded = bytes()
    while acc >= 256:
        acc, rem = divmod(acc, 256)
        decoded = chr(rem) + decoded
    decoded = chr(acc) + decoded
    # Leading '1' characters re-expand to leading zero bytes.
    pad = 0
    for ch in v:
        if ch != __b58chars[0]:
            break
        pad += 1
    decoded = chr(0) * pad + decoded
    # Optional fixed-width check for callers expecting exact payloads.
    if length is not None and len(decoded) != length:
        return None
    return decoded
def checksum(v):
    """Return the 4-byte (32-bit) double-SHA256 checksum of v."""
    # Call hashlib directly instead of going through the module-local
    # SHA256 shim -- identical digests, one less indirection.
    return hashlib.sha256(hashlib.sha256(v).digest()).digest()[0:4]
def b58encode_chk(v):
    """b58encode a string, with 32-bit checksum"""
    # Append the 4-byte double-SHA256 checksum before encoding.
    return b58encode(v + checksum(v))
def b58decode_chk(v):
    """decode a base58 string, check and remove checksum

    Returns the payload (checksum stripped), or None if decoding fails
    or the trailing 4-byte checksum does not match.
    """
    result = b58decode(v)
    if result is None:
        return None
    # Compute the expected checksum once; the original computed it twice
    # and left the first result (`h3`) unused.
    if result[-4:] == checksum(result[:-4]):
        return result[:-4]
    return None
def get_bcaddress_version(strAddress):
    """Return the integer version byte of a Bitcoin address.

    Returns None if *strAddress* is not a valid Base58Check address.
    """
    addr = b58decode_chk(strAddress)
    if addr is None:
        return None
    # A valid payload is 1 version byte + 20 bytes of hash.
    if len(addr) != 21:
        return None
    return ord(addr[0])
if __name__ == '__main__':
    # Test case (from http://gitorious.org/bitcoin/python-base58.git)
    # FIX: use ==, not `is`, for int comparison — identity of small int
    # literals is a CPython implementation detail and a SyntaxWarning on
    # modern interpreters.
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
    _ohai = 'o hai'.encode('ascii')
    _tmp = b58encode(_ohai)
    assert _tmp == 'DYB3oMS'
    assert b58decode(_tmp, 5) == _ohai
    print("Tests passed")
|
mit
|
ghostlines/ghostlines-robofont
|
src/lib/site-packages/requests/packages/idna/idnadata.py
|
155
|
35172
|
# This file is automatically generated by build-idnadata.py
scripts = {
'Greek': (
(0x370, 0x374),
(0x375, 0x378),
(0x37a, 0x37e),
(0x384, 0x385),
(0x386, 0x387),
(0x388, 0x38b),
(0x38c, 0x38d),
(0x38e, 0x3a2),
(0x3a3, 0x3e2),
(0x3f0, 0x400),
(0x1d26, 0x1d2b),
(0x1d5d, 0x1d62),
(0x1d66, 0x1d6b),
(0x1dbf, 0x1dc0),
(0x1f00, 0x1f16),
(0x1f18, 0x1f1e),
(0x1f20, 0x1f46),
(0x1f48, 0x1f4e),
(0x1f50, 0x1f58),
(0x1f59, 0x1f5a),
(0x1f5b, 0x1f5c),
(0x1f5d, 0x1f5e),
(0x1f5f, 0x1f7e),
(0x1f80, 0x1fb5),
(0x1fb6, 0x1fc5),
(0x1fc6, 0x1fd4),
(0x1fd6, 0x1fdc),
(0x1fdd, 0x1ff0),
(0x1ff2, 0x1ff5),
(0x1ff6, 0x1fff),
(0x2126, 0x2127),
(0x10140, 0x1018b),
(0x1d200, 0x1d246),
),
'Han': (
(0x2e80, 0x2e9a),
(0x2e9b, 0x2ef4),
(0x2f00, 0x2fd6),
(0x3005, 0x3006),
(0x3007, 0x3008),
(0x3021, 0x302a),
(0x3038, 0x303c),
(0x3400, 0x4db6),
(0x4e00, 0x9fcd),
(0xf900, 0xfa6e),
(0xfa70, 0xfada),
(0x20000, 0x2a6d7),
(0x2a700, 0x2b735),
(0x2b740, 0x2b81e),
(0x2f800, 0x2fa1e),
),
'Hebrew': (
(0x591, 0x5c8),
(0x5d0, 0x5eb),
(0x5f0, 0x5f5),
(0xfb1d, 0xfb37),
(0xfb38, 0xfb3d),
(0xfb3e, 0xfb3f),
(0xfb40, 0xfb42),
(0xfb43, 0xfb45),
(0xfb46, 0xfb50),
),
'Hiragana': (
(0x3041, 0x3097),
(0x309d, 0x30a0),
(0x1b001, 0x1b002),
(0x1f200, 0x1f201),
),
'Katakana': (
(0x30a1, 0x30fb),
(0x30fd, 0x3100),
(0x31f0, 0x3200),
(0x32d0, 0x32ff),
(0x3300, 0x3358),
(0xff66, 0xff70),
(0xff71, 0xff9e),
(0x1b000, 0x1b001),
),
}
joining_types = {
0x600: 'U',
0x601: 'U',
0x602: 'U',
0x603: 'U',
0x604: 'U',
0x608: 'U',
0x60b: 'U',
0x620: 'D',
0x621: 'U',
0x622: 'R',
0x623: 'R',
0x624: 'R',
0x625: 'R',
0x626: 'D',
0x627: 'R',
0x628: 'D',
0x629: 'R',
0x62a: 'D',
0x62b: 'D',
0x62c: 'D',
0x62d: 'D',
0x62e: 'D',
0x62f: 'R',
0x630: 'R',
0x631: 'R',
0x632: 'R',
0x633: 'D',
0x634: 'D',
0x635: 'D',
0x636: 'D',
0x637: 'D',
0x638: 'D',
0x639: 'D',
0x63a: 'D',
0x63b: 'D',
0x63c: 'D',
0x63d: 'D',
0x63e: 'D',
0x63f: 'D',
0x640: 'C',
0x641: 'D',
0x642: 'D',
0x643: 'D',
0x644: 'D',
0x645: 'D',
0x646: 'D',
0x647: 'D',
0x648: 'R',
0x649: 'D',
0x64a: 'D',
0x66e: 'D',
0x66f: 'D',
0x671: 'R',
0x672: 'R',
0x673: 'R',
0x674: 'U',
0x675: 'R',
0x676: 'R',
0x677: 'R',
0x678: 'D',
0x679: 'D',
0x67a: 'D',
0x67b: 'D',
0x67c: 'D',
0x67d: 'D',
0x67e: 'D',
0x67f: 'D',
0x680: 'D',
0x681: 'D',
0x682: 'D',
0x683: 'D',
0x684: 'D',
0x685: 'D',
0x686: 'D',
0x687: 'D',
0x688: 'R',
0x689: 'R',
0x68a: 'R',
0x68b: 'R',
0x68c: 'R',
0x68d: 'R',
0x68e: 'R',
0x68f: 'R',
0x690: 'R',
0x691: 'R',
0x692: 'R',
0x693: 'R',
0x694: 'R',
0x695: 'R',
0x696: 'R',
0x697: 'R',
0x698: 'R',
0x699: 'R',
0x69a: 'D',
0x69b: 'D',
0x69c: 'D',
0x69d: 'D',
0x69e: 'D',
0x69f: 'D',
0x6a0: 'D',
0x6a1: 'D',
0x6a2: 'D',
0x6a3: 'D',
0x6a4: 'D',
0x6a5: 'D',
0x6a6: 'D',
0x6a7: 'D',
0x6a8: 'D',
0x6a9: 'D',
0x6aa: 'D',
0x6ab: 'D',
0x6ac: 'D',
0x6ad: 'D',
0x6ae: 'D',
0x6af: 'D',
0x6b0: 'D',
0x6b1: 'D',
0x6b2: 'D',
0x6b3: 'D',
0x6b4: 'D',
0x6b5: 'D',
0x6b6: 'D',
0x6b7: 'D',
0x6b8: 'D',
0x6b9: 'D',
0x6ba: 'D',
0x6bb: 'D',
0x6bc: 'D',
0x6bd: 'D',
0x6be: 'D',
0x6bf: 'D',
0x6c0: 'R',
0x6c1: 'D',
0x6c2: 'D',
0x6c3: 'R',
0x6c4: 'R',
0x6c5: 'R',
0x6c6: 'R',
0x6c7: 'R',
0x6c8: 'R',
0x6c9: 'R',
0x6ca: 'R',
0x6cb: 'R',
0x6cc: 'D',
0x6cd: 'R',
0x6ce: 'D',
0x6cf: 'R',
0x6d0: 'D',
0x6d1: 'D',
0x6d2: 'R',
0x6d3: 'R',
0x6d5: 'R',
0x6dd: 'U',
0x6ee: 'R',
0x6ef: 'R',
0x6fa: 'D',
0x6fb: 'D',
0x6fc: 'D',
0x6ff: 'D',
0x710: 'R',
0x712: 'D',
0x713: 'D',
0x714: 'D',
0x715: 'R',
0x716: 'R',
0x717: 'R',
0x718: 'R',
0x719: 'R',
0x71a: 'D',
0x71b: 'D',
0x71c: 'D',
0x71d: 'D',
0x71e: 'R',
0x71f: 'D',
0x720: 'D',
0x721: 'D',
0x722: 'D',
0x723: 'D',
0x724: 'D',
0x725: 'D',
0x726: 'D',
0x727: 'D',
0x728: 'R',
0x729: 'D',
0x72a: 'R',
0x72b: 'D',
0x72c: 'R',
0x72d: 'D',
0x72e: 'D',
0x72f: 'R',
0x74d: 'R',
0x74e: 'D',
0x74f: 'D',
0x750: 'D',
0x751: 'D',
0x752: 'D',
0x753: 'D',
0x754: 'D',
0x755: 'D',
0x756: 'D',
0x757: 'D',
0x758: 'D',
0x759: 'R',
0x75a: 'R',
0x75b: 'R',
0x75c: 'D',
0x75d: 'D',
0x75e: 'D',
0x75f: 'D',
0x760: 'D',
0x761: 'D',
0x762: 'D',
0x763: 'D',
0x764: 'D',
0x765: 'D',
0x766: 'D',
0x767: 'D',
0x768: 'D',
0x769: 'D',
0x76a: 'D',
0x76b: 'R',
0x76c: 'R',
0x76d: 'D',
0x76e: 'D',
0x76f: 'D',
0x770: 'D',
0x771: 'R',
0x772: 'D',
0x773: 'R',
0x774: 'R',
0x775: 'D',
0x776: 'D',
0x777: 'D',
0x778: 'R',
0x779: 'R',
0x77a: 'D',
0x77b: 'D',
0x77c: 'D',
0x77d: 'D',
0x77e: 'D',
0x77f: 'D',
0x7ca: 'D',
0x7cb: 'D',
0x7cc: 'D',
0x7cd: 'D',
0x7ce: 'D',
0x7cf: 'D',
0x7d0: 'D',
0x7d1: 'D',
0x7d2: 'D',
0x7d3: 'D',
0x7d4: 'D',
0x7d5: 'D',
0x7d6: 'D',
0x7d7: 'D',
0x7d8: 'D',
0x7d9: 'D',
0x7da: 'D',
0x7db: 'D',
0x7dc: 'D',
0x7dd: 'D',
0x7de: 'D',
0x7df: 'D',
0x7e0: 'D',
0x7e1: 'D',
0x7e2: 'D',
0x7e3: 'D',
0x7e4: 'D',
0x7e5: 'D',
0x7e6: 'D',
0x7e7: 'D',
0x7e8: 'D',
0x7e9: 'D',
0x7ea: 'D',
0x7fa: 'C',
0x840: 'R',
0x841: 'D',
0x842: 'D',
0x843: 'D',
0x844: 'D',
0x845: 'D',
0x846: 'R',
0x847: 'D',
0x848: 'D',
0x849: 'R',
0x84a: 'D',
0x84b: 'D',
0x84c: 'D',
0x84d: 'D',
0x84e: 'D',
0x84f: 'R',
0x850: 'D',
0x851: 'D',
0x852: 'D',
0x853: 'D',
0x854: 'R',
0x855: 'D',
0x856: 'U',
0x857: 'U',
0x858: 'U',
0x8a0: 'D',
0x8a2: 'D',
0x8a3: 'D',
0x8a4: 'D',
0x8a5: 'D',
0x8a6: 'D',
0x8a7: 'D',
0x8a8: 'D',
0x8a9: 'D',
0x8aa: 'R',
0x8ab: 'R',
0x8ac: 'R',
0x1806: 'U',
0x1807: 'D',
0x180a: 'C',
0x180e: 'U',
0x1820: 'D',
0x1821: 'D',
0x1822: 'D',
0x1823: 'D',
0x1824: 'D',
0x1825: 'D',
0x1826: 'D',
0x1827: 'D',
0x1828: 'D',
0x1829: 'D',
0x182a: 'D',
0x182b: 'D',
0x182c: 'D',
0x182d: 'D',
0x182e: 'D',
0x182f: 'D',
0x1830: 'D',
0x1831: 'D',
0x1832: 'D',
0x1833: 'D',
0x1834: 'D',
0x1835: 'D',
0x1836: 'D',
0x1837: 'D',
0x1838: 'D',
0x1839: 'D',
0x183a: 'D',
0x183b: 'D',
0x183c: 'D',
0x183d: 'D',
0x183e: 'D',
0x183f: 'D',
0x1840: 'D',
0x1841: 'D',
0x1842: 'D',
0x1843: 'D',
0x1844: 'D',
0x1845: 'D',
0x1846: 'D',
0x1847: 'D',
0x1848: 'D',
0x1849: 'D',
0x184a: 'D',
0x184b: 'D',
0x184c: 'D',
0x184d: 'D',
0x184e: 'D',
0x184f: 'D',
0x1850: 'D',
0x1851: 'D',
0x1852: 'D',
0x1853: 'D',
0x1854: 'D',
0x1855: 'D',
0x1856: 'D',
0x1857: 'D',
0x1858: 'D',
0x1859: 'D',
0x185a: 'D',
0x185b: 'D',
0x185c: 'D',
0x185d: 'D',
0x185e: 'D',
0x185f: 'D',
0x1860: 'D',
0x1861: 'D',
0x1862: 'D',
0x1863: 'D',
0x1864: 'D',
0x1865: 'D',
0x1866: 'D',
0x1867: 'D',
0x1868: 'D',
0x1869: 'D',
0x186a: 'D',
0x186b: 'D',
0x186c: 'D',
0x186d: 'D',
0x186e: 'D',
0x186f: 'D',
0x1870: 'D',
0x1871: 'D',
0x1872: 'D',
0x1873: 'D',
0x1874: 'D',
0x1875: 'D',
0x1876: 'D',
0x1877: 'D',
0x1880: 'U',
0x1881: 'U',
0x1882: 'U',
0x1883: 'U',
0x1884: 'U',
0x1885: 'U',
0x1886: 'U',
0x1887: 'D',
0x1888: 'D',
0x1889: 'D',
0x188a: 'D',
0x188b: 'D',
0x188c: 'D',
0x188d: 'D',
0x188e: 'D',
0x188f: 'D',
0x1890: 'D',
0x1891: 'D',
0x1892: 'D',
0x1893: 'D',
0x1894: 'D',
0x1895: 'D',
0x1896: 'D',
0x1897: 'D',
0x1898: 'D',
0x1899: 'D',
0x189a: 'D',
0x189b: 'D',
0x189c: 'D',
0x189d: 'D',
0x189e: 'D',
0x189f: 'D',
0x18a0: 'D',
0x18a1: 'D',
0x18a2: 'D',
0x18a3: 'D',
0x18a4: 'D',
0x18a5: 'D',
0x18a6: 'D',
0x18a7: 'D',
0x18a8: 'D',
0x18aa: 'D',
0x200c: 'U',
0x200d: 'C',
0x2066: 'U',
0x2067: 'U',
0x2068: 'U',
0x2069: 'U',
0xa840: 'D',
0xa841: 'D',
0xa842: 'D',
0xa843: 'D',
0xa844: 'D',
0xa845: 'D',
0xa846: 'D',
0xa847: 'D',
0xa848: 'D',
0xa849: 'D',
0xa84a: 'D',
0xa84b: 'D',
0xa84c: 'D',
0xa84d: 'D',
0xa84e: 'D',
0xa84f: 'D',
0xa850: 'D',
0xa851: 'D',
0xa852: 'D',
0xa853: 'D',
0xa854: 'D',
0xa855: 'D',
0xa856: 'D',
0xa857: 'D',
0xa858: 'D',
0xa859: 'D',
0xa85a: 'D',
0xa85b: 'D',
0xa85c: 'D',
0xa85d: 'D',
0xa85e: 'D',
0xa85f: 'D',
0xa860: 'D',
0xa861: 'D',
0xa862: 'D',
0xa863: 'D',
0xa864: 'D',
0xa865: 'D',
0xa866: 'D',
0xa867: 'D',
0xa868: 'D',
0xa869: 'D',
0xa86a: 'D',
0xa86b: 'D',
0xa86c: 'D',
0xa86d: 'D',
0xa86e: 'D',
0xa86f: 'D',
0xa870: 'D',
0xa871: 'D',
0xa872: 'L',
0xa873: 'U',
}
codepoint_classes = {
'PVALID': (
(0x2d, 0x2e),
(0x30, 0x3a),
(0x61, 0x7b),
(0xdf, 0xf7),
(0xf8, 0x100),
(0x101, 0x102),
(0x103, 0x104),
(0x105, 0x106),
(0x107, 0x108),
(0x109, 0x10a),
(0x10b, 0x10c),
(0x10d, 0x10e),
(0x10f, 0x110),
(0x111, 0x112),
(0x113, 0x114),
(0x115, 0x116),
(0x117, 0x118),
(0x119, 0x11a),
(0x11b, 0x11c),
(0x11d, 0x11e),
(0x11f, 0x120),
(0x121, 0x122),
(0x123, 0x124),
(0x125, 0x126),
(0x127, 0x128),
(0x129, 0x12a),
(0x12b, 0x12c),
(0x12d, 0x12e),
(0x12f, 0x130),
(0x131, 0x132),
(0x135, 0x136),
(0x137, 0x139),
(0x13a, 0x13b),
(0x13c, 0x13d),
(0x13e, 0x13f),
(0x142, 0x143),
(0x144, 0x145),
(0x146, 0x147),
(0x148, 0x149),
(0x14b, 0x14c),
(0x14d, 0x14e),
(0x14f, 0x150),
(0x151, 0x152),
(0x153, 0x154),
(0x155, 0x156),
(0x157, 0x158),
(0x159, 0x15a),
(0x15b, 0x15c),
(0x15d, 0x15e),
(0x15f, 0x160),
(0x161, 0x162),
(0x163, 0x164),
(0x165, 0x166),
(0x167, 0x168),
(0x169, 0x16a),
(0x16b, 0x16c),
(0x16d, 0x16e),
(0x16f, 0x170),
(0x171, 0x172),
(0x173, 0x174),
(0x175, 0x176),
(0x177, 0x178),
(0x17a, 0x17b),
(0x17c, 0x17d),
(0x17e, 0x17f),
(0x180, 0x181),
(0x183, 0x184),
(0x185, 0x186),
(0x188, 0x189),
(0x18c, 0x18e),
(0x192, 0x193),
(0x195, 0x196),
(0x199, 0x19c),
(0x19e, 0x19f),
(0x1a1, 0x1a2),
(0x1a3, 0x1a4),
(0x1a5, 0x1a6),
(0x1a8, 0x1a9),
(0x1aa, 0x1ac),
(0x1ad, 0x1ae),
(0x1b0, 0x1b1),
(0x1b4, 0x1b5),
(0x1b6, 0x1b7),
(0x1b9, 0x1bc),
(0x1bd, 0x1c4),
(0x1ce, 0x1cf),
(0x1d0, 0x1d1),
(0x1d2, 0x1d3),
(0x1d4, 0x1d5),
(0x1d6, 0x1d7),
(0x1d8, 0x1d9),
(0x1da, 0x1db),
(0x1dc, 0x1de),
(0x1df, 0x1e0),
(0x1e1, 0x1e2),
(0x1e3, 0x1e4),
(0x1e5, 0x1e6),
(0x1e7, 0x1e8),
(0x1e9, 0x1ea),
(0x1eb, 0x1ec),
(0x1ed, 0x1ee),
(0x1ef, 0x1f1),
(0x1f5, 0x1f6),
(0x1f9, 0x1fa),
(0x1fb, 0x1fc),
(0x1fd, 0x1fe),
(0x1ff, 0x200),
(0x201, 0x202),
(0x203, 0x204),
(0x205, 0x206),
(0x207, 0x208),
(0x209, 0x20a),
(0x20b, 0x20c),
(0x20d, 0x20e),
(0x20f, 0x210),
(0x211, 0x212),
(0x213, 0x214),
(0x215, 0x216),
(0x217, 0x218),
(0x219, 0x21a),
(0x21b, 0x21c),
(0x21d, 0x21e),
(0x21f, 0x220),
(0x221, 0x222),
(0x223, 0x224),
(0x225, 0x226),
(0x227, 0x228),
(0x229, 0x22a),
(0x22b, 0x22c),
(0x22d, 0x22e),
(0x22f, 0x230),
(0x231, 0x232),
(0x233, 0x23a),
(0x23c, 0x23d),
(0x23f, 0x241),
(0x242, 0x243),
(0x247, 0x248),
(0x249, 0x24a),
(0x24b, 0x24c),
(0x24d, 0x24e),
(0x24f, 0x2b0),
(0x2b9, 0x2c2),
(0x2c6, 0x2d2),
(0x2ec, 0x2ed),
(0x2ee, 0x2ef),
(0x300, 0x340),
(0x342, 0x343),
(0x346, 0x34f),
(0x350, 0x370),
(0x371, 0x372),
(0x373, 0x374),
(0x377, 0x378),
(0x37b, 0x37e),
(0x390, 0x391),
(0x3ac, 0x3cf),
(0x3d7, 0x3d8),
(0x3d9, 0x3da),
(0x3db, 0x3dc),
(0x3dd, 0x3de),
(0x3df, 0x3e0),
(0x3e1, 0x3e2),
(0x3e3, 0x3e4),
(0x3e5, 0x3e6),
(0x3e7, 0x3e8),
(0x3e9, 0x3ea),
(0x3eb, 0x3ec),
(0x3ed, 0x3ee),
(0x3ef, 0x3f0),
(0x3f3, 0x3f4),
(0x3f8, 0x3f9),
(0x3fb, 0x3fd),
(0x430, 0x460),
(0x461, 0x462),
(0x463, 0x464),
(0x465, 0x466),
(0x467, 0x468),
(0x469, 0x46a),
(0x46b, 0x46c),
(0x46d, 0x46e),
(0x46f, 0x470),
(0x471, 0x472),
(0x473, 0x474),
(0x475, 0x476),
(0x477, 0x478),
(0x479, 0x47a),
(0x47b, 0x47c),
(0x47d, 0x47e),
(0x47f, 0x480),
(0x481, 0x482),
(0x483, 0x488),
(0x48b, 0x48c),
(0x48d, 0x48e),
(0x48f, 0x490),
(0x491, 0x492),
(0x493, 0x494),
(0x495, 0x496),
(0x497, 0x498),
(0x499, 0x49a),
(0x49b, 0x49c),
(0x49d, 0x49e),
(0x49f, 0x4a0),
(0x4a1, 0x4a2),
(0x4a3, 0x4a4),
(0x4a5, 0x4a6),
(0x4a7, 0x4a8),
(0x4a9, 0x4aa),
(0x4ab, 0x4ac),
(0x4ad, 0x4ae),
(0x4af, 0x4b0),
(0x4b1, 0x4b2),
(0x4b3, 0x4b4),
(0x4b5, 0x4b6),
(0x4b7, 0x4b8),
(0x4b9, 0x4ba),
(0x4bb, 0x4bc),
(0x4bd, 0x4be),
(0x4bf, 0x4c0),
(0x4c2, 0x4c3),
(0x4c4, 0x4c5),
(0x4c6, 0x4c7),
(0x4c8, 0x4c9),
(0x4ca, 0x4cb),
(0x4cc, 0x4cd),
(0x4ce, 0x4d0),
(0x4d1, 0x4d2),
(0x4d3, 0x4d4),
(0x4d5, 0x4d6),
(0x4d7, 0x4d8),
(0x4d9, 0x4da),
(0x4db, 0x4dc),
(0x4dd, 0x4de),
(0x4df, 0x4e0),
(0x4e1, 0x4e2),
(0x4e3, 0x4e4),
(0x4e5, 0x4e6),
(0x4e7, 0x4e8),
(0x4e9, 0x4ea),
(0x4eb, 0x4ec),
(0x4ed, 0x4ee),
(0x4ef, 0x4f0),
(0x4f1, 0x4f2),
(0x4f3, 0x4f4),
(0x4f5, 0x4f6),
(0x4f7, 0x4f8),
(0x4f9, 0x4fa),
(0x4fb, 0x4fc),
(0x4fd, 0x4fe),
(0x4ff, 0x500),
(0x501, 0x502),
(0x503, 0x504),
(0x505, 0x506),
(0x507, 0x508),
(0x509, 0x50a),
(0x50b, 0x50c),
(0x50d, 0x50e),
(0x50f, 0x510),
(0x511, 0x512),
(0x513, 0x514),
(0x515, 0x516),
(0x517, 0x518),
(0x519, 0x51a),
(0x51b, 0x51c),
(0x51d, 0x51e),
(0x51f, 0x520),
(0x521, 0x522),
(0x523, 0x524),
(0x525, 0x526),
(0x527, 0x528),
(0x559, 0x55a),
(0x561, 0x587),
(0x591, 0x5be),
(0x5bf, 0x5c0),
(0x5c1, 0x5c3),
(0x5c4, 0x5c6),
(0x5c7, 0x5c8),
(0x5d0, 0x5eb),
(0x5f0, 0x5f3),
(0x610, 0x61b),
(0x620, 0x640),
(0x641, 0x660),
(0x66e, 0x675),
(0x679, 0x6d4),
(0x6d5, 0x6dd),
(0x6df, 0x6e9),
(0x6ea, 0x6f0),
(0x6fa, 0x700),
(0x710, 0x74b),
(0x74d, 0x7b2),
(0x7c0, 0x7f6),
(0x800, 0x82e),
(0x840, 0x85c),
(0x8a0, 0x8a1),
(0x8a2, 0x8ad),
(0x8e4, 0x8ff),
(0x900, 0x958),
(0x960, 0x964),
(0x966, 0x970),
(0x971, 0x978),
(0x979, 0x980),
(0x981, 0x984),
(0x985, 0x98d),
(0x98f, 0x991),
(0x993, 0x9a9),
(0x9aa, 0x9b1),
(0x9b2, 0x9b3),
(0x9b6, 0x9ba),
(0x9bc, 0x9c5),
(0x9c7, 0x9c9),
(0x9cb, 0x9cf),
(0x9d7, 0x9d8),
(0x9e0, 0x9e4),
(0x9e6, 0x9f2),
(0xa01, 0xa04),
(0xa05, 0xa0b),
(0xa0f, 0xa11),
(0xa13, 0xa29),
(0xa2a, 0xa31),
(0xa32, 0xa33),
(0xa35, 0xa36),
(0xa38, 0xa3a),
(0xa3c, 0xa3d),
(0xa3e, 0xa43),
(0xa47, 0xa49),
(0xa4b, 0xa4e),
(0xa51, 0xa52),
(0xa5c, 0xa5d),
(0xa66, 0xa76),
(0xa81, 0xa84),
(0xa85, 0xa8e),
(0xa8f, 0xa92),
(0xa93, 0xaa9),
(0xaaa, 0xab1),
(0xab2, 0xab4),
(0xab5, 0xaba),
(0xabc, 0xac6),
(0xac7, 0xaca),
(0xacb, 0xace),
(0xad0, 0xad1),
(0xae0, 0xae4),
(0xae6, 0xaf0),
(0xb01, 0xb04),
(0xb05, 0xb0d),
(0xb0f, 0xb11),
(0xb13, 0xb29),
(0xb2a, 0xb31),
(0xb32, 0xb34),
(0xb35, 0xb3a),
(0xb3c, 0xb45),
(0xb47, 0xb49),
(0xb4b, 0xb4e),
(0xb56, 0xb58),
(0xb5f, 0xb64),
(0xb66, 0xb70),
(0xb71, 0xb72),
(0xb82, 0xb84),
(0xb85, 0xb8b),
(0xb8e, 0xb91),
(0xb92, 0xb96),
(0xb99, 0xb9b),
(0xb9c, 0xb9d),
(0xb9e, 0xba0),
(0xba3, 0xba5),
(0xba8, 0xbab),
(0xbae, 0xbba),
(0xbbe, 0xbc3),
(0xbc6, 0xbc9),
(0xbca, 0xbce),
(0xbd0, 0xbd1),
(0xbd7, 0xbd8),
(0xbe6, 0xbf0),
(0xc01, 0xc04),
(0xc05, 0xc0d),
(0xc0e, 0xc11),
(0xc12, 0xc29),
(0xc2a, 0xc34),
(0xc35, 0xc3a),
(0xc3d, 0xc45),
(0xc46, 0xc49),
(0xc4a, 0xc4e),
(0xc55, 0xc57),
(0xc58, 0xc5a),
(0xc60, 0xc64),
(0xc66, 0xc70),
(0xc82, 0xc84),
(0xc85, 0xc8d),
(0xc8e, 0xc91),
(0xc92, 0xca9),
(0xcaa, 0xcb4),
(0xcb5, 0xcba),
(0xcbc, 0xcc5),
(0xcc6, 0xcc9),
(0xcca, 0xcce),
(0xcd5, 0xcd7),
(0xcde, 0xcdf),
(0xce0, 0xce4),
(0xce6, 0xcf0),
(0xcf1, 0xcf3),
(0xd02, 0xd04),
(0xd05, 0xd0d),
(0xd0e, 0xd11),
(0xd12, 0xd3b),
(0xd3d, 0xd45),
(0xd46, 0xd49),
(0xd4a, 0xd4f),
(0xd57, 0xd58),
(0xd60, 0xd64),
(0xd66, 0xd70),
(0xd7a, 0xd80),
(0xd82, 0xd84),
(0xd85, 0xd97),
(0xd9a, 0xdb2),
(0xdb3, 0xdbc),
(0xdbd, 0xdbe),
(0xdc0, 0xdc7),
(0xdca, 0xdcb),
(0xdcf, 0xdd5),
(0xdd6, 0xdd7),
(0xdd8, 0xde0),
(0xdf2, 0xdf4),
(0xe01, 0xe33),
(0xe34, 0xe3b),
(0xe40, 0xe4f),
(0xe50, 0xe5a),
(0xe81, 0xe83),
(0xe84, 0xe85),
(0xe87, 0xe89),
(0xe8a, 0xe8b),
(0xe8d, 0xe8e),
(0xe94, 0xe98),
(0xe99, 0xea0),
(0xea1, 0xea4),
(0xea5, 0xea6),
(0xea7, 0xea8),
(0xeaa, 0xeac),
(0xead, 0xeb3),
(0xeb4, 0xeba),
(0xebb, 0xebe),
(0xec0, 0xec5),
(0xec6, 0xec7),
(0xec8, 0xece),
(0xed0, 0xeda),
(0xede, 0xee0),
(0xf00, 0xf01),
(0xf0b, 0xf0c),
(0xf18, 0xf1a),
(0xf20, 0xf2a),
(0xf35, 0xf36),
(0xf37, 0xf38),
(0xf39, 0xf3a),
(0xf3e, 0xf43),
(0xf44, 0xf48),
(0xf49, 0xf4d),
(0xf4e, 0xf52),
(0xf53, 0xf57),
(0xf58, 0xf5c),
(0xf5d, 0xf69),
(0xf6a, 0xf6d),
(0xf71, 0xf73),
(0xf74, 0xf75),
(0xf7a, 0xf81),
(0xf82, 0xf85),
(0xf86, 0xf93),
(0xf94, 0xf98),
(0xf99, 0xf9d),
(0xf9e, 0xfa2),
(0xfa3, 0xfa7),
(0xfa8, 0xfac),
(0xfad, 0xfb9),
(0xfba, 0xfbd),
(0xfc6, 0xfc7),
(0x1000, 0x104a),
(0x1050, 0x109e),
(0x10d0, 0x10fb),
(0x10fd, 0x1100),
(0x1200, 0x1249),
(0x124a, 0x124e),
(0x1250, 0x1257),
(0x1258, 0x1259),
(0x125a, 0x125e),
(0x1260, 0x1289),
(0x128a, 0x128e),
(0x1290, 0x12b1),
(0x12b2, 0x12b6),
(0x12b8, 0x12bf),
(0x12c0, 0x12c1),
(0x12c2, 0x12c6),
(0x12c8, 0x12d7),
(0x12d8, 0x1311),
(0x1312, 0x1316),
(0x1318, 0x135b),
(0x135d, 0x1360),
(0x1380, 0x1390),
(0x13a0, 0x13f5),
(0x1401, 0x166d),
(0x166f, 0x1680),
(0x1681, 0x169b),
(0x16a0, 0x16eb),
(0x1700, 0x170d),
(0x170e, 0x1715),
(0x1720, 0x1735),
(0x1740, 0x1754),
(0x1760, 0x176d),
(0x176e, 0x1771),
(0x1772, 0x1774),
(0x1780, 0x17b4),
(0x17b6, 0x17d4),
(0x17d7, 0x17d8),
(0x17dc, 0x17de),
(0x17e0, 0x17ea),
(0x1810, 0x181a),
(0x1820, 0x1878),
(0x1880, 0x18ab),
(0x18b0, 0x18f6),
(0x1900, 0x191d),
(0x1920, 0x192c),
(0x1930, 0x193c),
(0x1946, 0x196e),
(0x1970, 0x1975),
(0x1980, 0x19ac),
(0x19b0, 0x19ca),
(0x19d0, 0x19da),
(0x1a00, 0x1a1c),
(0x1a20, 0x1a5f),
(0x1a60, 0x1a7d),
(0x1a7f, 0x1a8a),
(0x1a90, 0x1a9a),
(0x1aa7, 0x1aa8),
(0x1b00, 0x1b4c),
(0x1b50, 0x1b5a),
(0x1b6b, 0x1b74),
(0x1b80, 0x1bf4),
(0x1c00, 0x1c38),
(0x1c40, 0x1c4a),
(0x1c4d, 0x1c7e),
(0x1cd0, 0x1cd3),
(0x1cd4, 0x1cf7),
(0x1d00, 0x1d2c),
(0x1d2f, 0x1d30),
(0x1d3b, 0x1d3c),
(0x1d4e, 0x1d4f),
(0x1d6b, 0x1d78),
(0x1d79, 0x1d9b),
(0x1dc0, 0x1de7),
(0x1dfc, 0x1e00),
(0x1e01, 0x1e02),
(0x1e03, 0x1e04),
(0x1e05, 0x1e06),
(0x1e07, 0x1e08),
(0x1e09, 0x1e0a),
(0x1e0b, 0x1e0c),
(0x1e0d, 0x1e0e),
(0x1e0f, 0x1e10),
(0x1e11, 0x1e12),
(0x1e13, 0x1e14),
(0x1e15, 0x1e16),
(0x1e17, 0x1e18),
(0x1e19, 0x1e1a),
(0x1e1b, 0x1e1c),
(0x1e1d, 0x1e1e),
(0x1e1f, 0x1e20),
(0x1e21, 0x1e22),
(0x1e23, 0x1e24),
(0x1e25, 0x1e26),
(0x1e27, 0x1e28),
(0x1e29, 0x1e2a),
(0x1e2b, 0x1e2c),
(0x1e2d, 0x1e2e),
(0x1e2f, 0x1e30),
(0x1e31, 0x1e32),
(0x1e33, 0x1e34),
(0x1e35, 0x1e36),
(0x1e37, 0x1e38),
(0x1e39, 0x1e3a),
(0x1e3b, 0x1e3c),
(0x1e3d, 0x1e3e),
(0x1e3f, 0x1e40),
(0x1e41, 0x1e42),
(0x1e43, 0x1e44),
(0x1e45, 0x1e46),
(0x1e47, 0x1e48),
(0x1e49, 0x1e4a),
(0x1e4b, 0x1e4c),
(0x1e4d, 0x1e4e),
(0x1e4f, 0x1e50),
(0x1e51, 0x1e52),
(0x1e53, 0x1e54),
(0x1e55, 0x1e56),
(0x1e57, 0x1e58),
(0x1e59, 0x1e5a),
(0x1e5b, 0x1e5c),
(0x1e5d, 0x1e5e),
(0x1e5f, 0x1e60),
(0x1e61, 0x1e62),
(0x1e63, 0x1e64),
(0x1e65, 0x1e66),
(0x1e67, 0x1e68),
(0x1e69, 0x1e6a),
(0x1e6b, 0x1e6c),
(0x1e6d, 0x1e6e),
(0x1e6f, 0x1e70),
(0x1e71, 0x1e72),
(0x1e73, 0x1e74),
(0x1e75, 0x1e76),
(0x1e77, 0x1e78),
(0x1e79, 0x1e7a),
(0x1e7b, 0x1e7c),
(0x1e7d, 0x1e7e),
(0x1e7f, 0x1e80),
(0x1e81, 0x1e82),
(0x1e83, 0x1e84),
(0x1e85, 0x1e86),
(0x1e87, 0x1e88),
(0x1e89, 0x1e8a),
(0x1e8b, 0x1e8c),
(0x1e8d, 0x1e8e),
(0x1e8f, 0x1e90),
(0x1e91, 0x1e92),
(0x1e93, 0x1e94),
(0x1e95, 0x1e9a),
(0x1e9c, 0x1e9e),
(0x1e9f, 0x1ea0),
(0x1ea1, 0x1ea2),
(0x1ea3, 0x1ea4),
(0x1ea5, 0x1ea6),
(0x1ea7, 0x1ea8),
(0x1ea9, 0x1eaa),
(0x1eab, 0x1eac),
(0x1ead, 0x1eae),
(0x1eaf, 0x1eb0),
(0x1eb1, 0x1eb2),
(0x1eb3, 0x1eb4),
(0x1eb5, 0x1eb6),
(0x1eb7, 0x1eb8),
(0x1eb9, 0x1eba),
(0x1ebb, 0x1ebc),
(0x1ebd, 0x1ebe),
(0x1ebf, 0x1ec0),
(0x1ec1, 0x1ec2),
(0x1ec3, 0x1ec4),
(0x1ec5, 0x1ec6),
(0x1ec7, 0x1ec8),
(0x1ec9, 0x1eca),
(0x1ecb, 0x1ecc),
(0x1ecd, 0x1ece),
(0x1ecf, 0x1ed0),
(0x1ed1, 0x1ed2),
(0x1ed3, 0x1ed4),
(0x1ed5, 0x1ed6),
(0x1ed7, 0x1ed8),
(0x1ed9, 0x1eda),
(0x1edb, 0x1edc),
(0x1edd, 0x1ede),
(0x1edf, 0x1ee0),
(0x1ee1, 0x1ee2),
(0x1ee3, 0x1ee4),
(0x1ee5, 0x1ee6),
(0x1ee7, 0x1ee8),
(0x1ee9, 0x1eea),
(0x1eeb, 0x1eec),
(0x1eed, 0x1eee),
(0x1eef, 0x1ef0),
(0x1ef1, 0x1ef2),
(0x1ef3, 0x1ef4),
(0x1ef5, 0x1ef6),
(0x1ef7, 0x1ef8),
(0x1ef9, 0x1efa),
(0x1efb, 0x1efc),
(0x1efd, 0x1efe),
(0x1eff, 0x1f08),
(0x1f10, 0x1f16),
(0x1f20, 0x1f28),
(0x1f30, 0x1f38),
(0x1f40, 0x1f46),
(0x1f50, 0x1f58),
(0x1f60, 0x1f68),
(0x1f70, 0x1f71),
(0x1f72, 0x1f73),
(0x1f74, 0x1f75),
(0x1f76, 0x1f77),
(0x1f78, 0x1f79),
(0x1f7a, 0x1f7b),
(0x1f7c, 0x1f7d),
(0x1fb0, 0x1fb2),
(0x1fb6, 0x1fb7),
(0x1fc6, 0x1fc7),
(0x1fd0, 0x1fd3),
(0x1fd6, 0x1fd8),
(0x1fe0, 0x1fe3),
(0x1fe4, 0x1fe8),
(0x1ff6, 0x1ff7),
(0x214e, 0x214f),
(0x2184, 0x2185),
(0x2c30, 0x2c5f),
(0x2c61, 0x2c62),
(0x2c65, 0x2c67),
(0x2c68, 0x2c69),
(0x2c6a, 0x2c6b),
(0x2c6c, 0x2c6d),
(0x2c71, 0x2c72),
(0x2c73, 0x2c75),
(0x2c76, 0x2c7c),
(0x2c81, 0x2c82),
(0x2c83, 0x2c84),
(0x2c85, 0x2c86),
(0x2c87, 0x2c88),
(0x2c89, 0x2c8a),
(0x2c8b, 0x2c8c),
(0x2c8d, 0x2c8e),
(0x2c8f, 0x2c90),
(0x2c91, 0x2c92),
(0x2c93, 0x2c94),
(0x2c95, 0x2c96),
(0x2c97, 0x2c98),
(0x2c99, 0x2c9a),
(0x2c9b, 0x2c9c),
(0x2c9d, 0x2c9e),
(0x2c9f, 0x2ca0),
(0x2ca1, 0x2ca2),
(0x2ca3, 0x2ca4),
(0x2ca5, 0x2ca6),
(0x2ca7, 0x2ca8),
(0x2ca9, 0x2caa),
(0x2cab, 0x2cac),
(0x2cad, 0x2cae),
(0x2caf, 0x2cb0),
(0x2cb1, 0x2cb2),
(0x2cb3, 0x2cb4),
(0x2cb5, 0x2cb6),
(0x2cb7, 0x2cb8),
(0x2cb9, 0x2cba),
(0x2cbb, 0x2cbc),
(0x2cbd, 0x2cbe),
(0x2cbf, 0x2cc0),
(0x2cc1, 0x2cc2),
(0x2cc3, 0x2cc4),
(0x2cc5, 0x2cc6),
(0x2cc7, 0x2cc8),
(0x2cc9, 0x2cca),
(0x2ccb, 0x2ccc),
(0x2ccd, 0x2cce),
(0x2ccf, 0x2cd0),
(0x2cd1, 0x2cd2),
(0x2cd3, 0x2cd4),
(0x2cd5, 0x2cd6),
(0x2cd7, 0x2cd8),
(0x2cd9, 0x2cda),
(0x2cdb, 0x2cdc),
(0x2cdd, 0x2cde),
(0x2cdf, 0x2ce0),
(0x2ce1, 0x2ce2),
(0x2ce3, 0x2ce5),
(0x2cec, 0x2ced),
(0x2cee, 0x2cf2),
(0x2cf3, 0x2cf4),
(0x2d00, 0x2d26),
(0x2d27, 0x2d28),
(0x2d2d, 0x2d2e),
(0x2d30, 0x2d68),
(0x2d7f, 0x2d97),
(0x2da0, 0x2da7),
(0x2da8, 0x2daf),
(0x2db0, 0x2db7),
(0x2db8, 0x2dbf),
(0x2dc0, 0x2dc7),
(0x2dc8, 0x2dcf),
(0x2dd0, 0x2dd7),
(0x2dd8, 0x2ddf),
(0x2de0, 0x2e00),
(0x2e2f, 0x2e30),
(0x3005, 0x3008),
(0x302a, 0x302e),
(0x303c, 0x303d),
(0x3041, 0x3097),
(0x3099, 0x309b),
(0x309d, 0x309f),
(0x30a1, 0x30fb),
(0x30fc, 0x30ff),
(0x3105, 0x312e),
(0x31a0, 0x31bb),
(0x31f0, 0x3200),
(0x3400, 0x4db6),
(0x4e00, 0x9fcd),
(0xa000, 0xa48d),
(0xa4d0, 0xa4fe),
(0xa500, 0xa60d),
(0xa610, 0xa62c),
(0xa641, 0xa642),
(0xa643, 0xa644),
(0xa645, 0xa646),
(0xa647, 0xa648),
(0xa649, 0xa64a),
(0xa64b, 0xa64c),
(0xa64d, 0xa64e),
(0xa64f, 0xa650),
(0xa651, 0xa652),
(0xa653, 0xa654),
(0xa655, 0xa656),
(0xa657, 0xa658),
(0xa659, 0xa65a),
(0xa65b, 0xa65c),
(0xa65d, 0xa65e),
(0xa65f, 0xa660),
(0xa661, 0xa662),
(0xa663, 0xa664),
(0xa665, 0xa666),
(0xa667, 0xa668),
(0xa669, 0xa66a),
(0xa66b, 0xa66c),
(0xa66d, 0xa670),
(0xa674, 0xa67e),
(0xa67f, 0xa680),
(0xa681, 0xa682),
(0xa683, 0xa684),
(0xa685, 0xa686),
(0xa687, 0xa688),
(0xa689, 0xa68a),
(0xa68b, 0xa68c),
(0xa68d, 0xa68e),
(0xa68f, 0xa690),
(0xa691, 0xa692),
(0xa693, 0xa694),
(0xa695, 0xa696),
(0xa697, 0xa698),
(0xa69f, 0xa6e6),
(0xa6f0, 0xa6f2),
(0xa717, 0xa720),
(0xa723, 0xa724),
(0xa725, 0xa726),
(0xa727, 0xa728),
(0xa729, 0xa72a),
(0xa72b, 0xa72c),
(0xa72d, 0xa72e),
(0xa72f, 0xa732),
(0xa733, 0xa734),
(0xa735, 0xa736),
(0xa737, 0xa738),
(0xa739, 0xa73a),
(0xa73b, 0xa73c),
(0xa73d, 0xa73e),
(0xa73f, 0xa740),
(0xa741, 0xa742),
(0xa743, 0xa744),
(0xa745, 0xa746),
(0xa747, 0xa748),
(0xa749, 0xa74a),
(0xa74b, 0xa74c),
(0xa74d, 0xa74e),
(0xa74f, 0xa750),
(0xa751, 0xa752),
(0xa753, 0xa754),
(0xa755, 0xa756),
(0xa757, 0xa758),
(0xa759, 0xa75a),
(0xa75b, 0xa75c),
(0xa75d, 0xa75e),
(0xa75f, 0xa760),
(0xa761, 0xa762),
(0xa763, 0xa764),
(0xa765, 0xa766),
(0xa767, 0xa768),
(0xa769, 0xa76a),
(0xa76b, 0xa76c),
(0xa76d, 0xa76e),
(0xa76f, 0xa770),
(0xa771, 0xa779),
(0xa77a, 0xa77b),
(0xa77c, 0xa77d),
(0xa77f, 0xa780),
(0xa781, 0xa782),
(0xa783, 0xa784),
(0xa785, 0xa786),
(0xa787, 0xa789),
(0xa78c, 0xa78d),
(0xa78e, 0xa78f),
(0xa791, 0xa792),
(0xa793, 0xa794),
(0xa7a1, 0xa7a2),
(0xa7a3, 0xa7a4),
(0xa7a5, 0xa7a6),
(0xa7a7, 0xa7a8),
(0xa7a9, 0xa7aa),
(0xa7fa, 0xa828),
(0xa840, 0xa874),
(0xa880, 0xa8c5),
(0xa8d0, 0xa8da),
(0xa8e0, 0xa8f8),
(0xa8fb, 0xa8fc),
(0xa900, 0xa92e),
(0xa930, 0xa954),
(0xa980, 0xa9c1),
(0xa9cf, 0xa9da),
(0xaa00, 0xaa37),
(0xaa40, 0xaa4e),
(0xaa50, 0xaa5a),
(0xaa60, 0xaa77),
(0xaa7a, 0xaa7c),
(0xaa80, 0xaac3),
(0xaadb, 0xaade),
(0xaae0, 0xaaf0),
(0xaaf2, 0xaaf7),
(0xab01, 0xab07),
(0xab09, 0xab0f),
(0xab11, 0xab17),
(0xab20, 0xab27),
(0xab28, 0xab2f),
(0xabc0, 0xabeb),
(0xabec, 0xabee),
(0xabf0, 0xabfa),
(0xac00, 0xd7a4),
(0xfa0e, 0xfa10),
(0xfa11, 0xfa12),
(0xfa13, 0xfa15),
(0xfa1f, 0xfa20),
(0xfa21, 0xfa22),
(0xfa23, 0xfa25),
(0xfa27, 0xfa2a),
(0xfb1e, 0xfb1f),
(0xfe20, 0xfe27),
(0xfe73, 0xfe74),
(0x10000, 0x1000c),
(0x1000d, 0x10027),
(0x10028, 0x1003b),
(0x1003c, 0x1003e),
(0x1003f, 0x1004e),
(0x10050, 0x1005e),
(0x10080, 0x100fb),
(0x101fd, 0x101fe),
(0x10280, 0x1029d),
(0x102a0, 0x102d1),
(0x10300, 0x1031f),
(0x10330, 0x10341),
(0x10342, 0x1034a),
(0x10380, 0x1039e),
(0x103a0, 0x103c4),
(0x103c8, 0x103d0),
(0x10428, 0x1049e),
(0x104a0, 0x104aa),
(0x10800, 0x10806),
(0x10808, 0x10809),
(0x1080a, 0x10836),
(0x10837, 0x10839),
(0x1083c, 0x1083d),
(0x1083f, 0x10856),
(0x10900, 0x10916),
(0x10920, 0x1093a),
(0x10980, 0x109b8),
(0x109be, 0x109c0),
(0x10a00, 0x10a04),
(0x10a05, 0x10a07),
(0x10a0c, 0x10a14),
(0x10a15, 0x10a18),
(0x10a19, 0x10a34),
(0x10a38, 0x10a3b),
(0x10a3f, 0x10a40),
(0x10a60, 0x10a7d),
(0x10b00, 0x10b36),
(0x10b40, 0x10b56),
(0x10b60, 0x10b73),
(0x10c00, 0x10c49),
(0x11000, 0x11047),
(0x11066, 0x11070),
(0x11080, 0x110bb),
(0x110d0, 0x110e9),
(0x110f0, 0x110fa),
(0x11100, 0x11135),
(0x11136, 0x11140),
(0x11180, 0x111c5),
(0x111d0, 0x111da),
(0x11680, 0x116b8),
(0x116c0, 0x116ca),
(0x12000, 0x1236f),
(0x13000, 0x1342f),
(0x16800, 0x16a39),
(0x16f00, 0x16f45),
(0x16f50, 0x16f7f),
(0x16f8f, 0x16fa0),
(0x1b000, 0x1b002),
(0x20000, 0x2a6d7),
(0x2a700, 0x2b735),
(0x2b740, 0x2b81e),
),
'CONTEXTJ': (
(0x200c, 0x200e),
),
'CONTEXTO': (
(0xb7, 0xb8),
(0x375, 0x376),
(0x5f3, 0x5f5),
(0x660, 0x66a),
(0x6f0, 0x6fa),
(0x30fb, 0x30fc),
),
}
|
mit
|
miaecle/deepchem
|
examples/hopv/hopv_graph_conv.py
|
4
|
1203
|
"""
Script that trains graph-conv models on HOPV dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
from deepchem.models import GraphConvModel
np.random.seed(123)
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc
from deepchem.molnet import load_hopv
# Load HOPV dataset, pre-featurized for graph convolutions, and unpack
# the standard train/valid/test split plus its transformers.
hopv_tasks, hopv_datasets, transformers = load_hopv(featurizer='GraphConv')
train_dataset, valid_dataset, test_dataset = hopv_datasets

# Fit models
# Regression metrics: Pearson R^2 and mean absolute error, each
# averaged across tasks via np.mean.
metric = [
    dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean, mode="regression"),
    dc.metrics.Metric(
        dc.metrics.mean_absolute_error, np.mean, mode="regression")
]

# Number of features on conv-mols
# NOTE(review): n_feat is never passed to GraphConvModel below, so the
# model uses its own default feature size — confirm whether this was
# meant to be forwarded.
n_feat = 75
# Batch size of models
batch_size = 50
model = GraphConvModel(
    len(hopv_tasks), batch_size=batch_size, mode='regression')
# Fit trained model
model.fit(train_dataset, nb_epoch=25)

print("Evaluating model")
train_scores = model.evaluate(train_dataset, metric, transformers)
valid_scores = model.evaluate(valid_dataset, metric, transformers)

print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
|
mit
|
pulpocoders/pulpo-forms-examples
|
pulpo_example/fields.py
|
2
|
1110
|
from pulpo_forms.fieldtypes import ModelField
from pulpo_forms.fieldtypes import FieldFactory
from .models import PulpoUser, Club, Country
class PulpoUserField(ModelField.ModelField):
    """Form field backed by the PulpoUser model."""
    prp_template_name = "usuario/properties.html"  # properties-panel template
    model = PulpoUser
    name = "PulpoUser"

    # NOTE(review): defined without `self` or @staticmethod — presumably the
    # pulpo_forms framework calls it unbound (ClassName.get_assets());
    # confirm before changing the signature.
    def get_assets():
        return ['pulpo_example/js/fields/PulpoUser.js']

    def __str__(self):
        return "PulpoUser"
FieldFactory.FieldFactory.register('PulpoUserField', PulpoUserField)
class CountryField(ModelField.ModelField):
    """Form field backed by the Country model."""
    # NOTE(review): reuses the Club template ("club/properties.html") —
    # looks like a copy-paste; verify there is no "country/properties.html".
    prp_template_name = "club/properties.html"
    model = Country
    name = "Country"

    # NOTE(review): no `self`/@staticmethod — presumably called unbound by
    # the framework; confirm before changing the signature.
    def get_assets():
        return ['pulpo_example/js/fields/Country.js']

    def __str__(self):
        return "Country"
FieldFactory.FieldFactory.register('CountryField', CountryField)
class ClubField(ModelField.ModelField):
    """Form field backed by the Club model."""
    prp_template_name = "club/properties.html"  # properties-panel template
    model = Club
    name = "Club"

    # NOTE(review): no `self`/@staticmethod — presumably called unbound by
    # the framework; confirm before changing the signature.
    def get_assets():
        return ['pulpo_example/js/fields/Club.js']

    def __str__(self):
        return "Club"
FieldFactory.FieldFactory.register('ClubField', ClubField)
|
apache-2.0
|
chenc10/Spark-PAF
|
dist/ec2/lib/boto-2.34.0/tests/unit/sqs/test_connection.py
|
90
|
12763
|
#!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from tests.unit import AWSMockServiceTestCase, MockServiceWithConfigTestCase
from tests.compat import mock
from boto.sqs.connection import SQSConnection
from boto.sqs.regioninfo import SQSRegionInfo
from boto.sqs.message import RawMessage
from boto.sqs.queue import Queue
from boto.connection import AWSQueryConnection
from nose.plugins.attrib import attr
class SQSAuthParams(AWSMockServiceTestCase):
    """Tests for SQS SigV4 credential-scope handling (service/region names)."""

    connection_class = SQSConnection

    def setUp(self):
        super(SQSAuthParams, self).setUp()

    def default_body(self):
        """Canned CreateQueue XML response served by the mock connection."""
        return """<?xml version="1.0"?>
            <CreateQueueResponse>
              <CreateQueueResult>
                <QueueUrl>
                  https://queue.amazonaws.com/599169622985/myqueue1
                </QueueUrl>
              </CreateQueueResult>
              <ResponseMetadata>
                <RequestId>54d4c94d-2307-54a8-bb27-806a682a5abd</RequestId>
              </ResponseMetadata>
            </CreateQueueResponse>"""

    @attr(sqs=True)
    def test_auth_service_name_override(self):
        self.set_http_response(status_code=200)
        # We can use the auth_service_name to change what service
        # name to use for the credential scope for sigv4.
        self.service_connection.auth_service_name = 'service_override'
        self.service_connection.create_queue('my_queue')
        # Note the service_override value instead.
        self.assertIn('us-east-1/service_override/aws4_request',
                      self.actual_request.headers['Authorization'])

    @attr(sqs=True)
    def test_class_attribute_can_set_service_name(self):
        self.set_http_response(status_code=200)
        # The SQS class has an 'AuthServiceName' param of 'sqs':
        self.assertEqual(self.service_connection.AuthServiceName, 'sqs')
        self.service_connection.create_queue('my_queue')
        # And because of this, the value of 'sqs' will be used instead of
        # 'queue' for the credential scope:
        self.assertIn('us-east-1/sqs/aws4_request',
                      self.actual_request.headers['Authorization'])

    @attr(sqs=True)
    def test_auth_region_name_is_automatically_updated(self):
        region = SQSRegionInfo(name='us-west-2',
                               endpoint='us-west-2.queue.amazonaws.com')
        self.service_connection = SQSConnection(
            https_connection_factory=self.https_connection_factory,
            aws_access_key_id='aws_access_key_id',
            aws_secret_access_key='aws_secret_access_key',
            region=region)
        self.initialize_service_connection()
        self.set_http_response(status_code=200)
        self.service_connection.create_queue('my_queue')
        # Note the region name below is 'us-west-2'.
        self.assertIn('us-west-2/sqs/aws4_request',
                      self.actual_request.headers['Authorization'])

    @attr(sqs=True)
    def test_set_get_auth_service_and_region_names(self):
        self.service_connection.auth_service_name = 'service_name'
        self.service_connection.auth_region_name = 'region_name'
        self.assertEqual(self.service_connection.auth_service_name,
                         'service_name')
        self.assertEqual(self.service_connection.auth_region_name,
                         'region_name')

    @attr(sqs=True)
    def test_get_queue_with_owner_account_id_returns_queue(self):
        self.set_http_response(status_code=200)
        self.service_connection.create_queue('my_queue')
        self.service_connection.get_queue('my_queue', '599169622985')
        # FIX: assertIn gives a proper failure message (a bare `assert`
        # gave none), and assertEqual replaces the deprecated
        # assertEquals alias.
        self.assertIn('QueueOwnerAWSAccountId', self.actual_request.params)
        self.assertEqual(self.actual_request.params['QueueOwnerAWSAccountId'],
                         '599169622985')
class SQSProfileName(MockServiceWithConfigTestCase):
    """Verify that a ``profile_name`` given to SQSConnection is retained."""
    connection_class = SQSConnection
    profile_name = 'prod'

    def setUp(self):
        super(SQSProfileName, self).setUp()
        # Fake boto config exposing credentials under the 'prod' profile.
        self.config = {
            "profile prod": {
                'aws_access_key_id': 'access_key',
                'aws_secret_access_key': 'secret_access',
            }
        }

    @attr(sqs=True)
    def test_profile_name_gets_passed(self):
        region = SQSRegionInfo(name='us-west-2',
                               endpoint='us-west-2.queue.amazonaws.com')
        self.service_connection = SQSConnection(
            https_connection_factory=self.https_connection_factory,
            region=region,
            profile_name=self.profile_name)
        self.initialize_service_connection()
        self.set_http_response(status_code=200)
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12).
        self.assertEqual(self.service_connection.profile_name,
                         self.profile_name)
class SQSMessageAttributesParsing(AWSMockServiceTestCase):
    """Parsing of MessageAttribute elements in a ReceiveMessage response."""
    connection_class = SQSConnection
    def default_body(self):
        # Canned ReceiveMessage response containing two typed attributes
        # ('Count' as Number, 'Foo' as String) alongside the usual fields.
        return """<?xml version="1.0"?>
<ReceiveMessageResponse xmlns="http://queue.amazonaws.com/doc/2012-11-05/">
  <ReceiveMessageResult>
    <Message>
      <Body>This is a test</Body>
      <ReceiptHandle>+eXJYhj5rDql5hp2VwGkXvQVsefdjAlsQe5EGS57gyORPB48KwP1d/3Rfy4DrQXt+MgfRPHUCUH36xL9+Ol/UWD/ylKrrWhiXSY0Ip4EsI8jJNTo/aneEjKE/iZnz/nL8MFP5FmMj8PbDAy5dgvAqsdvX1rm8Ynn0bGnQLJGfH93cLXT65p6Z/FDyjeBN0M+9SWtTcuxOIcMdU8NsoFIwm/6mLWgWAV46OhlYujzvyopCvVwsj+Y8jLEpdSSvTQHNlQEaaY/V511DqAvUwru2p0ZbW7ZzcbhUTn6hHkUROo=</ReceiptHandle>
      <MD5OfBody>ce114e4501d2f4e2dcea3e17b546f339</MD5OfBody>
      <MessageAttribute>
        <Name>Count</Name>
        <Value>
          <DataType>Number</DataType>
          <StringValue>1</StringValue>
        </Value>
      </MessageAttribute>
      <MessageAttribute>
        <Name>Foo</Name>
        <Value>
          <DataType>String</DataType>
          <StringValue>Bar</StringValue>
        </Value>
      </MessageAttribute>
      <MessageId>7049431b-e5f6-430b-93c4-ded53864d02b</MessageId>
      <MD5OfMessageAttributes>324758f82d026ac6ec5b31a3b192d1e3</MD5OfMessageAttributes>
    </Message>
  </ReceiveMessageResult>
  <ResponseMetadata>
    <RequestId>73f978f2-400b-5460-8d38-3316e39e79c6</RequestId>
  </ResponseMetadata>
</ReceiveMessageResponse>"""
    @attr(sqs=True)
    def test_message_attribute_response(self):
        """message_attributes is populated from the XML fixture above."""
        self.set_http_response(status_code=200)
        queue = Queue(
            url='http://sqs.us-east-1.amazonaws.com/123456789012/testQueue/',
            message_class=RawMessage)
        message = self.service_connection.receive_message(queue)[0]
        self.assertEqual(message.get_body(), 'This is a test')
        self.assertEqual(message.id, '7049431b-e5f6-430b-93c4-ded53864d02b')
        self.assertEqual(message.md5, 'ce114e4501d2f4e2dcea3e17b546f339')
        self.assertEqual(message.md5_message_attributes,
                         '324758f82d026ac6ec5b31a3b192d1e3')
        # Both attributes from the fixture must be present with their types.
        mattributes = message.message_attributes
        self.assertEqual(len(mattributes.keys()), 2)
        self.assertEqual(mattributes['Count']['data_type'], 'Number')
        self.assertEqual(mattributes['Foo']['string_value'], 'Bar')
class SQSSendMessageAttributes(AWSMockServiceTestCase):
    """Serialization of ``message_attributes`` into SendMessage parameters."""
    connection_class = SQSConnection
    def default_body(self):
        # Minimal successful SendMessage response body.
        return """<SendMessageResponse>
    <SendMessageResult>
        <MD5OfMessageBody>
            fafb00f5732ab283681e124bf8747ed1
        </MD5OfMessageBody>
        <MD5OfMessageAttributes>
            3ae8f24a165a8cedc005670c81a27295
        </MD5OfMessageAttributes>
        <MessageId>
            5fea7756-0ea4-451a-a703-a558b933e274
        </MessageId>
    </SendMessageResult>
    <ResponseMetadata>
        <RequestId>
            27daac76-34dd-47df-bd01-1f6e873584a0
        </RequestId>
    </ResponseMetadata>
</SendMessageResponse>
"""
    @attr(sqs=True)
    def test_send_message_attributes(self):
        """Each attribute becomes a numbered MessageAttribute.N.* triple."""
        self.set_http_response(status_code=200)
        queue = Queue(
            url='http://sqs.us-east-1.amazonaws.com/123456789012/testQueue/',
            message_class=RawMessage)
        self.service_connection.send_message(queue, 'Test message',
            message_attributes={
                'name1': {
                    'data_type': 'String',
                    'string_value': 'Bob'
                },
                'name2': {
                    'data_type': 'Number',
                    'string_value': '1'
                }
            })
        self.assert_request_parameters({
            'Action': 'SendMessage',
            'MessageAttribute.1.Name': 'name1',
            'MessageAttribute.1.Value.DataType': 'String',
            'MessageAttribute.1.Value.StringValue': 'Bob',
            'MessageAttribute.2.Name': 'name2',
            'MessageAttribute.2.Value.DataType': 'Number',
            'MessageAttribute.2.Value.StringValue': '1',
            'MessageBody': 'Test message',
            'Version': '2012-11-05'
        })
class SQSSendBatchMessageAttributes(AWSMockServiceTestCase):
    """Serialization of per-message attributes in SendMessageBatch."""
    connection_class = SQSConnection
    def default_body(self):
        # Successful batch response for two entries; only the second one
        # carries message attributes (hence its MD5OfMessageAttributes).
        return """<SendMessageBatchResponse>
<SendMessageBatchResult>
    <SendMessageBatchResultEntry>
        <Id>test_msg_001</Id>
        <MessageId>0a5231c7-8bff-4955-be2e-8dc7c50a25fa</MessageId>
        <MD5OfMessageBody>0e024d309850c78cba5eabbeff7cae71</MD5OfMessageBody>
    </SendMessageBatchResultEntry>
    <SendMessageBatchResultEntry>
        <Id>test_msg_002</Id>
        <MessageId>15ee1ed3-87e7-40c1-bdaa-2e49968ea7e9</MessageId>
        <MD5OfMessageBody>7fb8146a82f95e0af155278f406862c2</MD5OfMessageBody>
        <MD5OfMessageAttributes>295c5fa15a51aae6884d1d7c1d99ca50</MD5OfMessageAttributes>
    </SendMessageBatchResultEntry>
</SendMessageBatchResult>
<ResponseMetadata>
    <RequestId>ca1ad5d0-8271-408b-8d0f-1351bf547e74</RequestId>
</ResponseMetadata>
</SendMessageBatchResponse>
"""
    @attr(sqs=True)
    def test_send_message_attributes(self):
        """Attributes inside each batch tuple become numbered parameters."""
        self.set_http_response(status_code=200)
        queue = Queue(
            url='http://sqs.us-east-1.amazonaws.com/123456789012/testQueue/',
            message_class=RawMessage)
        # Each batch entry: (id, body, delay_seconds, message_attributes).
        message1 = (1, 'Message 1', 0, {'name1': {'data_type': 'String',
                                                  'string_value': 'foo'}})
        message2 = (2, 'Message 2', 0, {'name2': {'data_type': 'Number',
                                                  'string_value': '1'}})
        self.service_connection.send_message_batch(queue, (message1, message2))
        self.assert_request_parameters({
            'Action': 'SendMessageBatch',
            'SendMessageBatchRequestEntry.1.DelaySeconds': 0,
            'SendMessageBatchRequestEntry.1.Id': 1,
            'SendMessageBatchRequestEntry.1.MessageAttribute.1.DataType': 'String',
            'SendMessageBatchRequestEntry.1.MessageAttribute.1.Name': 'name1',
            'SendMessageBatchRequestEntry.1.MessageAttribute.1.StringValue': 'foo',
            'SendMessageBatchRequestEntry.1.MessageBody': 'Message 1',
            'SendMessageBatchRequestEntry.2.DelaySeconds': 0,
            'SendMessageBatchRequestEntry.2.Id': 2,
            'SendMessageBatchRequestEntry.2.MessageAttribute.1.DataType': 'Number',
            'SendMessageBatchRequestEntry.2.MessageAttribute.1.Name': 'name2',
            'SendMessageBatchRequestEntry.2.MessageAttribute.1.StringValue': '1',
            'SendMessageBatchRequestEntry.2.MessageBody': 'Message 2',
            'Version': '2012-11-05'
        })
# Allow running this test module directly with `python <module>.py`.
if __name__ == '__main__':
    unittest.main()
|
apache-2.0
|
eliran-stratoscale/inaugurator
|
inaugurator/pyudev/device.py
|
3
|
37036
|
# -*- coding: utf-8 -*-
# Copyright (C) 2011, 2012 Sebastian Wiesner <lunaryorn@gmail.com>
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
pyudev.device
=============
Device class implementation of :mod:`pyudev`.
.. moduleauthor:: Sebastian Wiesner <lunaryorn@gmail.com>
"""
from __future__ import (print_function, division, unicode_literals,
absolute_import)
import os
from collections import Mapping, Container, Iterable
from datetime import timedelta
from inaugurator.pyudev._libudev import libudev
from inaugurator.pyudev._util import (ensure_byte_string, ensure_unicode_string,
udev_list_iterate, string_to_bool,
get_device_type)
__all__ = ['Device', 'Attributes', 'Tags',
'DeviceNotFoundError', 'DeviceNotFoundAtPathError',
'DeviceNotFoundByNameError', 'DeviceNotFoundByNumberError',
'DeviceNotFoundInEnvironmentError']
class DeviceNotFoundError(LookupError):
    """
    An exception indicating that no :class:`Device` was found.

    Base class for the more specific ``DeviceNotFound*`` errors below.

    .. versionchanged:: 0.5
       Rename from ``NoSuchDeviceError`` to its current name.
    """
class DeviceNotFoundAtPathError(DeviceNotFoundError):
    """
    A :exc:`DeviceNotFoundError` indicating that no :class:`Device` was
    found at a given path.
    """

    def __init__(self, sys_path):
        # Store the offending path in ``args`` like any standard exception.
        super(DeviceNotFoundAtPathError, self).__init__(sys_path)

    @property
    def sys_path(self):
        """The path that caused this error as string."""
        return self.args[0]

    def __str__(self):
        return 'No device at %r' % (self.args[0],)
class DeviceNotFoundByNameError(DeviceNotFoundError):
    """
    A :exc:`DeviceNotFoundError` indicating that no :class:`Device` was
    found with a given name.
    """

    def __init__(self, subsystem, sys_name):
        # args = (subsystem, sys_name), exposed via the properties below.
        super(DeviceNotFoundByNameError, self).__init__(subsystem, sys_name)

    @property
    def subsystem(self):
        """The subsystem that caused this error as string."""
        return self.args[0]

    @property
    def sys_name(self):
        """The sys name that caused this error as string."""
        return self.args[1]

    def __str__(self):
        return 'No device %r in %r' % (self.sys_name, self.subsystem)
class DeviceNotFoundByNumberError(DeviceNotFoundError):
    """
    A :exc:`DeviceNotFoundError` indicating, that no :class:`Device` was found
    for a given device number.
    """

    def __init__(self, type, number):
        # NOTE(review): ``type`` shadows the builtin, but the parameter name
        # is part of the public interface and is kept for compatibility.
        super(DeviceNotFoundByNumberError, self).__init__(type, number)

    @property
    def device_type(self):
        """The device type causing this error as string ('char' or 'block')."""
        return self.args[0]

    @property
    def device_number(self):
        """The device number causing this error as integer."""
        return self.args[1]

    def __str__(self):
        return 'No %s device with number %s' % (
            self.device_type, self.device_number)
class DeviceNotFoundInEnvironmentError(DeviceNotFoundError):
    """
    A :exc:`DeviceNotFoundError` indicating, that no :class:`Device` could
    be constructed from the process environment.
    """
    def __str__(self):
        # No argument to report: the whole process environment was the input.
        return 'No device found in environment'
class Device(Mapping):
"""
A single device with attached attributes and properties.
This class subclasses the ``Mapping`` ABC, providing a read-only
dictionary mapping property names to the corresponding values.
Therefore all well-known dicitionary methods and operators
(e.g. ``.keys()``, ``.items()``, ``in``) are available to access device
properties.
Aside of the properties, a device also has a set of udev-specific
attributes like the path inside ``sysfs``.
:class:`Device` objects compare equal and unequal to other devices and
to strings (based on :attr:`device_path`). However, there is no
ordering on :class:`Device` objects, and the corresponding operators
``>``, ``<``, ``<=`` and ``>=`` raise :exc:`~exceptions.TypeError`.
.. warning::
Do **never** use object identity (``is`` operator) to compare
:class:`Device` objects. :mod:`pyudev` may create multiple
:class:`Device` objects for the same device. Instead simply compare
devices by value using ``==`` or ``!=``.
:class:`Device` objects are hashable and can therefore be used as keys
in dictionaries and sets.
They can also be given directly as ``udev_device *`` to functions wrapped
through :mod:`ctypes`.
"""
@classmethod
def from_path(cls, context, path):
"""
Create a device from a device ``path``. The ``path`` may or may not
start with the ``sysfs`` mount point:
>>> from pyudev import Context, Device
>>> context = Context()
>>> Device.from_path(context, '/devices/platform')
Device(u'/sys/devices/platform')
>>> Device.from_path(context, '/sys/devices/platform')
Device(u'/sys/devices/platform')
``context`` is the :class:`Context` in which to search the device.
``path`` is a device path as unicode or byte string.
Return a :class:`Device` object for the device. Raise
:exc:`DeviceNotFoundAtPathError`, if no device was found for ``path``.
.. versionadded:: 0.4
"""
if not path.startswith(context.sys_path):
path = os.path.join(context.sys_path, path.lstrip(os.sep))
return cls.from_sys_path(context, path)
@classmethod
def from_sys_path(cls, context, sys_path):
"""
Create a new device from a given ``sys_path``:
>>> from pyudev import Context, Device
>>> context = Context()
>>> Device.from_path(context, '/sys/devices/platform')
Device(u'/sys/devices/platform')
``context`` is the :class:`Context` in which to search the device.
``sys_path`` is a unicode or byte string containing the path of the
device inside ``sysfs`` with the mount point included.
Return a :class:`Device` object for the device. Raise
:exc:`DeviceNotFoundAtPathError`, if no device was found for
``sys_path``.
.. versionchanged:: 0.4
Raise :exc:`NoSuchDeviceError` instead of returning ``None``, if
no device was found for ``sys_path``.
.. versionchanged:: 0.5
Raise :exc:`DeviceNotFoundAtPathError` instead of
:exc:`NoSuchDeviceError`.
"""
device = libudev.udev_device_new_from_syspath(
context, ensure_byte_string(sys_path))
if not device:
raise DeviceNotFoundAtPathError(sys_path)
return cls(context, device)
@classmethod
def from_name(cls, context, subsystem, sys_name):
"""
Create a new device from a given ``subsystem`` and a given
``sys_name``:
>>> from pyudev import Context, Device
>>> context = Context()
>>> sda = Device.from_name(context, 'block', 'sda')
>>> sda
Device(u'/sys/devices/pci0000:00/0000:00:1f.2/host0/target0:0:0/0:0:0:0/block/sda')
>>> sda == Device.from_path(context, '/block/sda')
``context`` is the :class:`Context` in which to search the device.
``subsystem`` and ``sys_name`` are byte or unicode strings, which
denote the subsystem and the name of the device to create.
Return a :class:`Device` object for the device. Raise
:exc:`DeviceNotFoundByNameError`, if no device was found with the given
name.
.. versionadded:: 0.5
"""
device = libudev.udev_device_new_from_subsystem_sysname(
context, ensure_byte_string(subsystem),
ensure_byte_string(sys_name))
if not device:
raise DeviceNotFoundByNameError(subsystem, sys_name)
return cls(context, device)
@classmethod
def from_device_number(cls, context, type, number):
"""
Create a new device from a device ``number`` with the given device
``type``:
>>> import os
>>> from pyudev import Context, Device
>>> ctx = Context()
>>> major, minor = 8, 0
>>> device = Device.from_device_number(context, 'block',
... os.makedev(major, minor))
>>> device
Device(u'/sys/devices/pci0000:00/0000:00:11.0/host0/target0:0:0/0:0:0:0/block/sda')
>>> os.major(device.device_number), os.minor(device.device_number)
(8, 0)
Use :func:`os.makedev` to construct a device number from a major and a
minor device number, as shown in the example above.
.. warning::
Device numbers are not unique across different device types.
Passing a correct number with a wrong type may silently yield a
wrong device object, so make sure to pass the correct device type.
``context`` is the :class:`Context`, in which to search the device.
``type`` is either ``'char'`` or ``'block'``, according to whether the
device is a character or block device. ``number`` is the device number
as integer.
Return a :class:`Device` object for the device with the given device
``number``. Raise :exc:`DeviceNotFoundByNumberError`, if no device was
found with the given device type and number. Raise
:exc:`~exceptions.ValueError`, if ``type`` is any other string than
``'char'`` or ``'block'``.
.. versionadded:: 0.11
"""
if type not in ('char', 'block'):
raise ValueError('Invalid type: {0!r}. Must be one of "char" '
'or "block".'.format(type))
device = libudev.udev_device_new_from_devnum(
context, ensure_byte_string(type[0]), number)
if not device:
raise DeviceNotFoundByNumberError(type, number)
return cls(context, device)
@classmethod
def from_device_file(cls, context, filename):
"""
Create a new device from the given device file:
>>> from pyudev import Context, Device
>>> context = Context()
>>> device = Device.from_device_file(context, '/dev/sda')
>>> device
Device(u'/sys/devices/pci0000:00/0000:00:0d.0/host2/target2:0:0/2:0:0:0/block/sda')
>>> device.device_node
u'/dev/sda'
.. warning::
Though the example seems to suggest that ``device.device_node ==
filename`` holds with ``device = Device.from_device_file(context,
filename)``, this is only true in a majority of cases. There *can*
be devices, for which this relation is actually false! Thus, do
*not* expect :attr:`~Device.device_node` to be equal to the given
``filename`` for the returned :class:`Device`. Especially, use
:attr:`~Device.device_node` if you need the device file of a
:class:`Device` created with this method afterwards.
``context`` is the :class:`Context` in which to search the device.
``filename`` is a string containing the path of a device file.
Return a :class:`Device` representing the given device file. Raise
:exc:`~exceptions.ValueError` if ``filename`` is no device file at all.
Raise :exc:`~exceptions.EnvironmentError` if ``filename`` does not
exist or if its metadata was inaccessible.
.. versionadded:: 0.15
"""
device_type = get_device_type(filename)
device_number = os.stat(filename).st_rdev
return cls.from_device_number(context, device_type, device_number)
@classmethod
def from_environment(cls, context):
"""
Create a new device from the process environment (as in
:data:`os.environ`).
This only works reliable, if the current process is called from an
udev rule, and is usually used for tools executed from ``IMPORT=``
rules. Use this method to create device objects in Python scripts
called from udev rules.
``context`` is the library :class:`Context`.
Return a :class:`Device` object constructed from the environment.
Raise :exc:`DeviceNotFoundInEnvironmentError`, if no device could be
created from the environment.
.. udevversion:: 152
.. versionadded:: 0.6
"""
device = libudev.udev_device_new_from_environment(context)
if not device:
raise DeviceNotFoundInEnvironmentError()
return cls(context, device)
def __init__(self, context, _device):
self.context = context
self._as_parameter_ = _device
def __del__(self):
libudev.udev_device_unref(self)
def __repr__(self):
return 'Device({0.sys_path!r})'.format(self)
@property
def parent(self):
"""
The parent :class:`Device` or ``None``, if there is no parent
device.
"""
parent = libudev.udev_device_get_parent(self)
if not parent:
return None
# the parent device is not referenced, thus forcibly acquire a
# reference
return Device(self.context, libudev.udev_device_ref(parent))
@property
def children(self):
"""
Yield all direct children of this device.
.. note::
In udev, parent-child relationships are generally ambiguous, i.e.
a parent can have multiple children, *and* a child can have multiple
parents. Hence, `child.parent == parent` does generally *not* hold
for all `child` objects in `parent.children`. In other words,
the :attr:`parent` of a device in this property can be different
from this device!
.. note::
As the underlying library does not provide any means to directly
query the children of a device, this property performs a linear
search through all devices.
Return an iterable yielding a :class:`Device` object for each direct
child of this device.
.. udevversion:: 172
.. versionchanged:: 0.13
Requires udev version 172 now.
"""
for device in self.context.list_devices().match_parent(self):
if device != self:
yield device
@property
def ancestors(self):
"""
Yield all ancestors of this device from bottom to top.
Return an iterator yielding a :class:`Device` object for each
ancestor of this device from bottom to top.
.. versionadded:: 0.16
"""
parent = self.parent
while parent:
yield parent
parent = parent.parent
def find_parent(self, subsystem, device_type=None):
"""
Find the parent device with the given ``subsystem`` and
``device_type``.
``subsystem`` is a byte or unicode string containing the name of the
subsystem, in which to search for the parent. ``device_type`` is a
byte or unicode string holding the expected device type of the parent.
It can be ``None`` (the default), which means, that no specific device
type is expected.
Return a parent :class:`Device` within the given ``subsystem`` and – if
``device_type`` is not ``None`` – with the given ``device_type``, or
``None``, if this device has no parent device matching these
constraints.
.. versionadded:: 0.9
"""
subsystem = ensure_byte_string(subsystem)
if device_type is not None:
device_type = ensure_byte_string(device_type)
parent = libudev.udev_device_get_parent_with_subsystem_devtype(
self, subsystem, device_type)
if not parent:
return None
# parent device is not referenced, thus forcibly acquire a reference
return Device(self.context, libudev.udev_device_ref(parent))
def traverse(self):
"""
Traverse all parent devices of this device from bottom to top.
Return an iterable yielding all parent devices as :class:`Device`
objects, *not* including the current device. The last yielded
:class:`Device` is the top of the device hierarchy.
.. deprecated:: 0.16
Will be removed in 1.0. Use :attr:`ancestors` instead.
"""
import warnings
warnings.warn('Will be removed in 1.0. Use Device.ancestors instead.',
DeprecationWarning)
return self.ancestors
@property
def sys_path(self):
"""
Absolute path of this device in ``sysfs`` including the ``sysfs``
mount point as unicode string.
"""
return ensure_unicode_string(libudev.udev_device_get_syspath(self))
@property
def device_path(self):
"""
Kernel device path as unicode string. This path uniquely identifies
a single device.
Unlike :attr:`sys_path`, this path does not contain the ``sysfs``
mount point. However, the path is absolute and starts with a slash
``'/'``.
"""
return ensure_unicode_string(libudev.udev_device_get_devpath(self))
@property
def subsystem(self):
"""
Name of the subsystem this device is part of as unicode string.
"""
return ensure_unicode_string(libudev.udev_device_get_subsystem(self))
@property
def sys_name(self):
"""
Device file name inside ``sysfs`` as unicode string.
"""
return ensure_unicode_string(libudev.udev_device_get_sysname(self))
@property
def sys_number(self):
"""
The trailing number of the :attr:`sys_name` as unicode string, or
``None``, if the device has no trailing number in its name.
.. note::
The number is returned as unicode string to preserve the exact
format of the number, especially any leading zeros:
>>> from pyudev import Context, Device
>>> context = Context()
>>> device = Device.from_path(context, '/sys/devices/LNXSYSTM:00')
>>> device.sys_number
u'00'
To work with numbers, explicitly convert them to ints:
>>> int(device.sys_number)
0
.. versionadded:: 0.11
"""
number = libudev.udev_device_get_sysnum(self)
if number is not None:
return ensure_unicode_string(number)
@property
def device_type(self):
"""
Device type as unicode string, or ``None``, if the device type is
unknown.
>>> from pyudev import Context
>>> context = Context()
>>> for device in context.list_devices(subsystem='net'):
... '{0} - {1}'.format(device.sys_name, device.device_type or 'ethernet')
...
u'eth0 - ethernet'
u'wlan0 - wlan'
u'lo - ethernet'
u'vboxnet0 - ethernet'
.. versionadded:: 0.10
"""
device_type = libudev.udev_device_get_devtype(self)
if device_type is not None:
return ensure_unicode_string(device_type)
@property
def driver(self):
"""
The driver name as unicode string, or ``None``, if there is no
driver for this device.
.. versionadded:: 0.5
"""
driver = libudev.udev_device_get_driver(self)
if driver:
return ensure_unicode_string(driver)
@property
def device_node(self):
"""
Absolute path to the device node of this device as unicode string or
``None``, if this device doesn't have a device node. The path
includes the device directory (see :attr:`Context.device_path`).
This path always points to the actual device node associated with
this device, and never to any symbolic links to this device node.
See :attr:`device_links` to get a list of symbolic links to this
device node.
.. warning::
For devices created with :meth:`from_device_file()`, the value of
this property is not necessary equal to the ``filename`` given to
:meth:`from_device_file()`.
"""
node = libudev.udev_device_get_devnode(self)
if node:
return ensure_unicode_string(node)
@property
def device_number(self):
"""
The device number of the associated device as integer, or ``0``, if no
device number is associated.
Use :func:`os.major` and :func:`os.minor` to decompose the device
number into its major and minor number:
>>> import os
>>> from pyudev import Context, Device
>>> context = Context()
>>> sda = Device.from_name(context, 'block', 'sda')
>>> sda.device_number
2048L
>>> (os.major(sda.device_number), os.minor(sda.device_number))
(8, 0)
For devices with an associated :attr:`device_node`, this is the same as
the ``st_rdev`` field of the stat result of the :attr:`device_node`:
>>> os.stat(sda.device_node).st_rdev
2048
.. versionadded:: 0.11
"""
return libudev.udev_device_get_devnum(self)
@property
def is_initialized(self):
"""
``True``, if the device is initialized, ``False`` otherwise.
A device is initialized, if udev has already handled this device and
has set up device node permissions and context, or renamed a network
device.
Consequently, this property is only implemented for devices with a
device node or for network devices. On all other devices this property
is always ``True``.
It is *not* recommended, that you use uninitialized devices.
.. seealso:: :attr:`time_since_initialized`
.. udevversion:: 165
.. versionadded:: 0.8
"""
return bool(libudev.udev_device_get_is_initialized(self))
@property
def time_since_initialized(self):
"""
The time elapsed since initialization as :class:`~datetime.timedelta`.
This property is only implemented on devices, which need to store
properties in the udev database. On all other devices this property is
simply zero :class:`~datetime.timedelta`.
.. seealso:: :attr:`is_initialized`
.. udevversion:: 165
.. versionadded:: 0.8
"""
microseconds = libudev.udev_device_get_usec_since_initialized(self)
return timedelta(microseconds=microseconds)
@property
def device_links(self):
"""
An iterator, which yields the absolute paths (including the device
directory, see :attr:`Context.device_path`) of all symbolic links
pointing to the :attr:`device_node` of this device. The paths are
unicode strings.
UDev can create symlinks to the original device node (see
:attr:`device_node`) inside the device directory. This is often
used to assign a constant, fixed device node to devices like
removeable media, which technically do not have a constant device
node, or to map a single device into multiple device hierarchies.
The property provides access to all such symbolic links, which were
created by UDev for this device.
.. warning::
Links are not necessarily resolved by
:meth:`Device.from_device_file()`. Hence do *not* rely on
``Device.from_device_file(context, link).device_path ==
device.device_path`` from any ``link`` in ``device.device_links``.
"""
devlinks = libudev.udev_device_get_devlinks_list_entry(self)
for name, _ in udev_list_iterate(devlinks):
yield ensure_unicode_string(name)
@property
def action(self):
"""
The device event action as string, or ``None``, if this device was not
received from a :class:`Monitor`.
Usual actions are:
``'add'``
A device has been added (e.g. a USB device was plugged in)
``'remove'``
A device has been removed (e.g. a USB device was unplugged)
``'change'``
Something about the device changed (e.g. a device property)
``'online'``
The device is online now
``'offline'``
The device is offline now
.. warning::
Though the actions listed above are the most common, this property
*may* return other values, too, so be prepared to handle unknown
actions!
.. versionadded:: 0.16
"""
action = libudev.udev_device_get_action(self)
if action:
return ensure_unicode_string(action)
@property
def sequence_number(self):
"""
The device event sequence number as integer, or ``0`` if this device
has no sequence number, i.e. was not received from a :class:`Monitor`.
.. versionadded:: 0.16
"""
return libudev.udev_device_get_seqnum(self)
@property
def attributes(self):
"""
The system attributes of this device as read-only
:class:`Attributes` mapping.
System attributes are basically normal files inside the the device
directory. These files contain all sorts of information about the
device, which may not be reflected by properties. These attributes
are commonly used for matching in udev rules, and can be printed
using ``udevadm info --attribute-walk``.
The values of these attributes are not always proper strings, and
can contain arbitrary bytes.
.. versionadded:: 0.5
"""
# do *not* cache the created object in an attribute of this class.
# Doing so creates an uncollectable reference cycle between Device and
# Attributes, because Attributes refers to this object through
# Attributes.device.
return Attributes(self)
@property
def tags(self):
"""
A :class:`Tags` object representing the tags attached to this device.
The :class:`Tags` object supports a test for a single tag as well as
iteration over all tags:
>>> from pyudev import Context, Device
>>> context = Context()
>>> device = next(iter(context.list_devices(tag='systemd')))
>>> 'systemd' in device.tags
True
>>> list(device.tags)
[u'seat', u'systemd', u'uaccess']
Tags are arbitrary classifiers that can be attached to devices by udev
scripts and daemons. For instance, systemd_ uses tags for multi-seat_
support.
.. _systemd: http://freedesktop.org/wiki/Software/systemd
.. _multi-seat: http://www.freedesktop.org/wiki/Software/systemd/multiseat
.. udevversion:: 154
.. versionadded:: 0.6
.. versionchanged:: 0.13
Return a :class:`Tags` object now.
"""
return Tags(self)
def __iter__(self):
"""
Iterate over the names of all properties defined for this device.
Return a generator yielding the names of all properties of this
device as unicode strings.
"""
properties = libudev.udev_device_get_properties_list_entry(self)
for name, _ in udev_list_iterate(properties):
yield ensure_unicode_string(name)
def __len__(self):
"""
Return the amount of properties defined for this device as integer.
"""
properties = libudev.udev_device_get_properties_list_entry(self)
i = 0
for i, _ in enumerate(udev_list_iterate(properties), start=1):
pass
return i
def __getitem__(self, property):
"""
Get the given ``property`` from this device.
``property`` is a unicode or byte string containing the name of the
property.
Return the property value as unicode string, or raise a
:exc:`~exceptions.KeyError`, if the given property is not defined
for this device.
"""
value = libudev.udev_device_get_property_value(
self, ensure_byte_string(property))
if value is None:
raise KeyError(property)
return ensure_unicode_string(value)
def asint(self, property):
"""
Get the given ``property`` from this device as integer.
``property`` is a unicode or byte string containing the name of the
property.
Return the property value as integer. Raise a
:exc:`~exceptions.KeyError`, if the given property is not defined
for this device, or a :exc:`~exceptions.ValueError`, if the property
value cannot be converted to an integer.
"""
return int(self[property])
def asbool(self, property):
"""
Get the given ``property`` from this device as boolean.
A boolean property has either a value of ``'1'`` or of ``'0'``,
where ``'1'`` stands for ``True``, and ``'0'`` for ``False``. Any
other value causes a :exc:`~exceptions.ValueError` to be raised.
``property`` is a unicode or byte string containing the name of the
property.
Return ``True``, if the property value is ``'1'`` and ``False``, if
the property value is ``'0'``. Any other value raises a
:exc:`~exceptions.ValueError`. Raise a :exc:`~exceptions.KeyError`,
if the given property is not defined for this device.
"""
return string_to_bool(self[property])
def __hash__(self):
return hash(self.device_path)
def __eq__(self, other):
if isinstance(other, Device):
return self.device_path == other.device_path
else:
return self.device_path == other
def __ne__(self, other):
if isinstance(other, Device):
return self.device_path != other.device_path
else:
return self.device_path != other
    def __gt__(self, other):
        # Devices have no natural ordering; only ==/!= are meaningful.
        raise TypeError('Device not orderable')
    def __lt__(self, other):
        raise TypeError('Device not orderable')
    def __le__(self, other):
        raise TypeError('Device not orderable')
    def __ge__(self, other):
        raise TypeError('Device not orderable')
class Tags(Iterable, Container):
    """
    A iterable over :class:`Device` tags.
    Subclasses the ``Container`` and the ``Iterable`` ABC.
    """
    def __init__(self, device):
        # The Device whose tags this object exposes.
        self.device = device
    # Pick the membership test at class-definition time: newer libudev
    # exposes udev_device_has_tag(); otherwise fall back to a linear scan
    # over all tags via __iter__.
    if hasattr(libudev, 'udev_device_has_tag'):
        def _has_tag(self, tag):
            return bool(libudev.udev_device_has_tag(
                self.device, ensure_byte_string(tag)))
    else:
        def _has_tag(self, tag):
            return any(t == tag for t in self)
    def __contains__(self, tag):
        """
        Check for existence of ``tag``.
        ``tag`` is a tag as unicode string.
        Return ``True``, if ``tag`` is attached to the device, ``False``
        otherwise.
        """
        return self._has_tag(tag)
    def __iter__(self):
        """
        Iterate over all tags.
        Yield each tag as unicode string.
        """
        tags = libudev.udev_device_get_tags_list_entry(self.device)
        for tag, _ in udev_list_iterate(tags):
            yield ensure_unicode_string(tag)
def _is_attribute_file(filepath):
"""
Check, if ``filepath`` points to a valid udev attribute filename.
Implementation is stolen from udev source code, ``print_all_attributes``
in ``udev/udevadm-info.c``. It excludes hidden files (starting with a
dot), the special files ``dev`` and ``uevent`` and links.
Return ``True``, if ``filepath`` refers to an attribute, ``False``
otherwise.
"""
filename = os.path.basename(filepath)
return not (filename.startswith('.') or
filename in ('dev', 'uevent') or
os.path.islink(filepath))
class Attributes(Mapping):
    """
    A mapping which holds udev attributes for :class:`Device` objects.
    This class subclasses the ``Mapping`` ABC, providing a read-only
    dictionary mapping attribute names to the corresponding values.
    Therefore all well-known dictionary methods and operators
    (e.g. ``.keys()``, ``.items()``, ``in``) are available to access device
    attributes.
    .. versionadded:: 0.5
    """
    def __init__(self, device):
        # The Device whose sysfs attributes this mapping exposes.
        self.device = device
    # Newer libudev can enumerate system attributes directly; otherwise
    # list the device's sysfs directory and keep only entries that are
    # valid attribute files with a readable value.
    if hasattr(libudev, 'udev_device_get_sysattr_list_entry'):
        @property
        def _attributes(self):
            attrs = libudev.udev_device_get_sysattr_list_entry(self.device)
            for attribute, _ in udev_list_iterate(attrs):
                yield ensure_unicode_string(attribute)
    else:
        @property
        def _attributes(self):
            sys_path = self.device.sys_path
            return (fn for fn in os.listdir(sys_path) if
                    _is_attribute_file(os.path.join(sys_path, fn)) and
                    fn in self)
    def __len__(self):
        """
        Return the amount of attributes defined.
        """
        # Exhaust the attribute iterator; ``i`` retains the last index
        # (0 if the iterator is empty).
        i = 0
        for i, _ in enumerate(self._attributes, start=1):
            pass
        return i
    def __iter__(self):
        """
        Iterate over all attributes defined.
        Yield each attribute name as unicode string.
        """
        return self._attributes
    def __contains__(self, attribute):
        # An attribute exists iff libudev returns a non-NULL value for it.
        value = libudev.udev_device_get_sysattr_value(
            self.device, ensure_byte_string(attribute))
        return value is not None
    def __getitem__(self, attribute):
        """
        Get the given system ``attribute`` for the device.
        ``attribute`` is a unicode or byte string containing the name of the
        system attribute.
        Return the attribute value as byte string, or raise a
        :exc:`~exceptions.KeyError`, if the given attribute is not defined
        for this device.
        """
        value = libudev.udev_device_get_sysattr_value(
            self.device, ensure_byte_string(attribute))
        if value is None:
            raise KeyError(attribute)
        return value
    def asstring(self, attribute):
        """
        Get the given ``attribute`` for the device as unicode string.
        Depending on the content of the attribute, this may or may not work.
        Be prepared to catch :exc:`~exceptions.UnicodeDecodeError`.
        ``attribute`` is a unicode or byte string containing the name of the
        attribute.
        Return the attribute value as unicode string. Raise a
        :exc:`~exceptions.KeyError`, if the given attribute is not defined
        for this device, or :exc:`~exceptions.UnicodeDecodeError`, if the
        content of the attribute cannot be decoded into a unicode string.
        """
        return ensure_unicode_string(self[attribute])
    def asint(self, attribute):
        """
        Get the given ``attribute`` as integer.
        ``attribute`` is a unicode or byte string containing the name of the
        attribute.
        Return the attribute value as integer. Raise a
        :exc:`~exceptions.KeyError`, if the given attribute is not defined
        for this device, or a :exc:`~exceptions.ValueError`, if the
        attribute value cannot be converted to an integer.
        """
        return int(self.asstring(attribute))
    def asbool(self, attribute):
        """
        Get the given ``attribute`` from this device as boolean.
        A boolean attribute has either a value of ``'1'`` or of ``'0'``,
        where ``'1'`` stands for ``True``, and ``'0'`` for ``False``. Any
        other value causes a :exc:`~exceptions.ValueError` to be raised.
        ``attribute`` is a unicode or byte string containing the name of the
        attribute.
        Return ``True``, if the attribute value is ``'1'`` and ``False``, if
        the attribute value is ``'0'``. Any other value raises a
        :exc:`~exceptions.ValueError`. Raise a :exc:`~exceptions.KeyError`,
        if the given attribute is not defined for this device.
        """
        return string_to_bool(self.asstring(attribute))
|
apache-2.0
|
Mendeley/mendeley-python-sdk
|
mendeley/models/profiles.py
|
2
|
2355
|
import arrow
from mendeley.models.common import Discipline, Photo, Location, Education, Employment
from mendeley.response import SessionResponseObject
class Profile(SessionResponseObject):
    """
    A Mendeley profile.
    .. attribute:: id
    .. attribute:: first_name
    .. attribute:: last_name
    .. attribute:: display_name
    .. attribute:: email
    .. attribute:: link
    .. attribute:: research_interests
    .. attribute:: academic_status
    .. attribute:: verified
    .. attribute:: user_type
    """
    content_type = 'application/vnd.mendeley-profiles.1+json'
    @property
    def created(self):
        """
        an :class:`Arrow <arrow.arrow.Arrow>` object.
        """
        # Guard-clause style: absent key means no value.
        if 'created' not in self.json:
            return None
        return arrow.get(self.json['created'])
    @property
    def discipline(self):
        """
        a :class:`Discipline <mendeley.models.common.Discipline>`.
        """
        if 'discipline' not in self.json:
            return None
        return Discipline(self.json['discipline'])
    @property
    def photo(self):
        """
        a :class:`Photo <mendeley.models.common.Photo>`.
        """
        if 'photo' not in self.json:
            return None
        return Photo(self.json['photo'])
    @property
    def location(self):
        """
        a :class:`Location <mendeley.models.common.Location>`.
        """
        if 'location' not in self.json:
            return None
        return Location(self.json['location'])
    @property
    def education(self):
        """
        a list of :class:`Education <mendeley.models.common.Education>` objects.
        """
        if 'education' not in self.json:
            return None
        return [Education(entry) for entry in self.json['education']]
    @property
    def employment(self):
        """
        a list of :class:`Employment <mendeley.models.common.Employment>` objects.
        """
        if 'employment' not in self.json:
            return None
        return [Employment(entry) for entry in self.json['employment']]
    @classmethod
    def fields(cls):
        # Field names requested from the Mendeley profiles API.
        return ['id', 'first_name', 'last_name', 'display_name', 'email',
                'link', 'research_interests', 'academic_status', 'verified',
                'user_type']
|
apache-2.0
|
maliciamrg/xbmc-addon-tvtumbler
|
resources/lib/dns/message.py
|
28
|
42234
|
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Messages"""
import cStringIO
import random
import struct
import sys
import time
import dns.edns
import dns.exception
import dns.flags
import dns.name
import dns.opcode
import dns.entropy
import dns.rcode
import dns.rdata
import dns.rdataclass
import dns.rdatatype
import dns.rrset
import dns.renderer
import dns.tsig
import dns.wiredata
class ShortHeader(dns.exception.FormError):
    """Raised when the DNS packet passed to from_wire() is too short."""
class TrailingJunk(dns.exception.FormError):
    """Raised when the DNS packet passed to from_wire() has extra junk
    at the end of it."""
class UnknownHeaderField(dns.exception.DNSException):
    """Raised when a header field name is not recognized while converting
    a message from text."""
class BadEDNS(dns.exception.FormError):
    """Raised when an OPT record occurs somewhere other than the start of
    the additional data section."""
class BadTSIG(dns.exception.FormError):
    """Raised when a TSIG record occurs somewhere other than the end of
    the additional data section."""
class UnknownTSIGKey(dns.exception.DNSException):
    """Raised when a TSIG-signed message arrives but its key is not in
    the keyring."""
class Message(object):
    """A DNS message.
    @ivar id: The query id; the default is a randomly chosen id.
    @type id: int
    @ivar flags: The DNS flags of the message. @see: RFC 1035 for an
    explanation of these flags.
    @type flags: int
    @ivar question: The question section.
    @type question: list of dns.rrset.RRset objects
    @ivar answer: The answer section.
    @type answer: list of dns.rrset.RRset objects
    @ivar authority: The authority section.
    @type authority: list of dns.rrset.RRset objects
    @ivar additional: The additional data section.
    @type additional: list of dns.rrset.RRset objects
    @ivar edns: The EDNS level to use.  The default is -1, no Edns.
    @type edns: int
    @ivar ednsflags: The EDNS flags
    @type ednsflags: long
    @ivar payload: The EDNS payload size.  The default is 0.
    @type payload: int
    @ivar options: The EDNS options
    @type options: list of dns.edns.Option objects
    @ivar request_payload: The associated request's EDNS payload size.
    @type request_payload: int
    @ivar keyring: The TSIG keyring to use.  The default is None.
    @type keyring: dict
    @ivar keyname: The TSIG keyname to use.  The default is None.
    @type keyname: dns.name.Name object
    @ivar keyalgorithm: The TSIG algorithm to use; defaults to
    dns.tsig.default_algorithm.  Constants for TSIG algorithms are defined
    in dns.tsig, and the currently implemented algorithms are
    HMAC_MD5, HMAC_SHA1, HMAC_SHA224, HMAC_SHA256, HMAC_SHA384, and
    HMAC_SHA512.
    @type keyalgorithm: string
    @ivar request_mac: The TSIG MAC of the request message associated with
    this message; used when validating TSIG signatures.   @see: RFC 2845 for
    more information on TSIG fields.
    @type request_mac: string
    @ivar fudge: TSIG time fudge; default is 300 seconds.
    @type fudge: int
    @ivar original_id: TSIG original id; defaults to the message's id
    @type original_id: int
    @ivar tsig_error: TSIG error code; default is 0.
    @type tsig_error: int
    @ivar other_data: TSIG other data.
    @type other_data: string
    @ivar mac: The TSIG MAC for this message.
    @type mac: string
    @ivar xfr: Is the message being used to contain the results of a DNS
    zone transfer?  The default is False.
    @type xfr: bool
    @ivar origin: The origin of the zone in messages which are used for
    zone transfers or for DNS dynamic updates.  The default is None.
    @type origin: dns.name.Name object
    @ivar tsig_ctx: The TSIG signature context associated with this
    message.  The default is None.
    @type tsig_ctx: hmac.HMAC object
    @ivar had_tsig: Did the message decoded from wire format have a TSIG
    signature?
    @type had_tsig: bool
    @ivar multi: Is this message part of a multi-message sequence?  The
    default is false.  This variable is used when validating TSIG signatures
    on messages which are part of a zone transfer.
    @type multi: bool
    @ivar first: Is this message standalone, or the first of a multi
    message sequence?  This variable is used when validating TSIG signatures
    on messages which are part of a zone transfer.
    @type first: bool
    @ivar index: An index of rrsets in the message.  The index key is
    (section, name, rdclass, rdtype, covers, deleting).  Indexing can be
    disabled by setting the index to None.
    @type index: dict
    """
    def __init__(self, id=None):
        # Random id by default to make spoofing harder.
        if id is None:
            self.id = dns.entropy.random_16()
        else:
            self.id = id
        self.flags = 0
        self.question = []
        self.answer = []
        self.authority = []
        self.additional = []
        self.edns = -1
        self.ednsflags = 0
        self.payload = 0
        self.options = []
        self.request_payload = 0
        self.keyring = None
        self.keyname = None
        self.keyalgorithm = dns.tsig.default_algorithm
        self.request_mac = ''
        self.other_data = ''
        self.tsig_error = 0
        self.fudge = 300
        self.original_id = self.id
        self.mac = ''
        self.xfr = False
        self.origin = None
        self.tsig_ctx = None
        self.had_tsig = False
        self.multi = False
        self.first = True
        self.index = {}
    def __repr__(self):
        # Python 2 backquotes are equivalent to repr().
        return '<DNS message, ID ' + `self.id` + '>'
    def __str__(self):
        return self.to_text()
    def to_text(self,  origin=None, relativize=True, **kw):
        """Convert the message to text.
        The I{origin}, I{relativize}, and any other keyword
        arguments are passed to the rrset to_wire() method.
        @rtype: string
        """
        s = cStringIO.StringIO()
        print >> s, 'id %d' % self.id
        print >> s, 'opcode %s' % \
              dns.opcode.to_text(dns.opcode.from_flags(self.flags))
        rc = dns.rcode.from_flags(self.flags, self.ednsflags)
        print >> s, 'rcode %s' % dns.rcode.to_text(rc)
        print >> s, 'flags %s' % dns.flags.to_text(self.flags)
        if self.edns >= 0:
            print >> s, 'edns %s' % self.edns
            if self.ednsflags != 0:
                print >> s, 'eflags %s' % \
                      dns.flags.edns_to_text(self.ednsflags)
            print >> s, 'payload', self.payload
        is_update = dns.opcode.is_update(self.flags)
        # Section names differ between ordinary messages and dynamic
        # updates (RFC 2136 renames them ZONE/PREREQ/UPDATE).
        if is_update:
            print >> s, ';ZONE'
        else:
            print >> s, ';QUESTION'
        for rrset in self.question:
            print >> s, rrset.to_text(origin, relativize, **kw)
        if is_update:
            print >> s, ';PREREQ'
        else:
            print >> s, ';ANSWER'
        for rrset in self.answer:
            print >> s, rrset.to_text(origin, relativize, **kw)
        if is_update:
            print >> s, ';UPDATE'
        else:
            print >> s, ';AUTHORITY'
        for rrset in self.authority:
            print >> s, rrset.to_text(origin, relativize, **kw)
        print >> s, ';ADDITIONAL'
        for rrset in self.additional:
            print >> s, rrset.to_text(origin, relativize, **kw)
        #
        # We strip off the final \n so the caller can print the result without
        # doing weird things to get around eccentricities in Python print
        # formatting
        #
        return s.getvalue()[:-1]
    def __eq__(self, other):
        """Two messages are equal if they have the same content in the
        header, question, answer, and authority sections.
        @rtype: bool"""
        if not isinstance(other, Message):
            return False
        if self.id != other.id:
            return False
        if self.flags != other.flags:
            return False
        # Sections are compared as unordered sets (both directions).
        for n in self.question:
            if n not in other.question:
                return False
        for n in other.question:
            if n not in self.question:
                return False
        for n in self.answer:
            if n not in other.answer:
                return False
        for n in other.answer:
            if n not in self.answer:
                return False
        for n in self.authority:
            if n not in other.authority:
                return False
        for n in other.authority:
            if n not in self.authority:
                return False
        return True
    def __ne__(self, other):
        """Are two messages not equal?
        @rtype: bool"""
        return not self.__eq__(other)
    def is_response(self, other):
        """Is other a response to self?
        @rtype: bool"""
        if other.flags & dns.flags.QR == 0 or \
           self.id != other.id or \
           dns.opcode.from_flags(self.flags) != \
           dns.opcode.from_flags(other.flags):
            return False
        if dns.rcode.from_flags(other.flags, other.ednsflags) != \
               dns.rcode.NOERROR:
            return True
        if dns.opcode.is_update(self.flags):
            return True
        for n in self.question:
            if n not in other.question:
                return False
        for n in other.question:
            if n not in self.question:
                return False
        return True
    def section_number(self, section):
        # Identity (not equality) comparison against the section lists.
        if section is self.question:
            return 0
        elif section is self.answer:
            return 1
        elif section is self.authority:
            return 2
        elif section is self.additional:
            return 3
        else:
            raise ValueError('unknown section')
    def find_rrset(self, section, name, rdclass, rdtype,
                   covers=dns.rdatatype.NONE, deleting=None, create=False,
                   force_unique=False):
        """Find the RRset with the given attributes in the specified section.
        @param section: the section of the message to look in, e.g.
        self.answer.
        @type section: list of dns.rrset.RRset objects
        @param name: the name of the RRset
        @type name: dns.name.Name object
        @param rdclass: the class of the RRset
        @type rdclass: int
        @param rdtype: the type of the RRset
        @type rdtype: int
        @param covers: the covers value of the RRset
        @type covers: int
        @param deleting: the deleting value of the RRset
        @type deleting: int
        @param create: If True, create the RRset if it is not found.
        The created RRset is appended to I{section}.
        @type create: bool
        @param force_unique: If True and create is also True, create a
        new RRset regardless of whether a matching RRset exists already.
        @type force_unique: bool
        @raises KeyError: the RRset was not found and create was False
        @rtype: dns.rrset.RRset object"""
        key = (self.section_number(section),
               name, rdclass, rdtype, covers, deleting)
        if not force_unique:
            if not self.index is None:
                # Fast path: indexed lookup.
                rrset = self.index.get(key)
                if not rrset is None:
                    return rrset
            else:
                # Index disabled: linear scan of the section.
                for rrset in section:
                    if rrset.match(name, rdclass, rdtype, covers, deleting):
                        return rrset
        if not create:
            raise KeyError
        rrset = dns.rrset.RRset(name, rdclass, rdtype, covers, deleting)
        section.append(rrset)
        if not self.index is None:
            self.index[key] = rrset
        return rrset
    def get_rrset(self, section, name, rdclass, rdtype,
                  covers=dns.rdatatype.NONE, deleting=None, create=False,
                  force_unique=False):
        """Get the RRset with the given attributes in the specified section.
        If the RRset is not found, None is returned.
        @param section: the section of the message to look in, e.g.
        self.answer.
        @type section: list of dns.rrset.RRset objects
        @param name: the name of the RRset
        @type name: dns.name.Name object
        @param rdclass: the class of the RRset
        @type rdclass: int
        @param rdtype: the type of the RRset
        @type rdtype: int
        @param covers: the covers value of the RRset
        @type covers: int
        @param deleting: the deleting value of the RRset
        @type deleting: int
        @param create: If True, create the RRset if it is not found.
        The created RRset is appended to I{section}.
        @type create: bool
        @param force_unique: If True and create is also True, create a
        new RRset regardless of whether a matching RRset exists already.
        @type force_unique: bool
        @rtype: dns.rrset.RRset object or None"""
        try:
            rrset = self.find_rrset(section, name, rdclass, rdtype, covers,
                                    deleting, create, force_unique)
        except KeyError:
            rrset = None
        return rrset
    def to_wire(self, origin=None, max_size=0, **kw):
        """Return a string containing the message in DNS compressed wire
        format.
        Additional keyword arguments are passed to the rrset to_wire()
        method.
        @param origin: The origin to be appended to any relative names.
        @type origin: dns.name.Name object
        @param max_size: The maximum size of the wire format output; default
        is 0, which means 'the message's request payload, if nonzero, or
        65536'.
        @type max_size: int
        @raises dns.exception.TooBig: max_size was exceeded
        @rtype: string
        """
        # Clamp the output size to the legal DNS message range [512, 65535].
        if max_size == 0:
            if self.request_payload != 0:
                max_size = self.request_payload
            else:
                max_size = 65535
        if max_size < 512:
            max_size = 512
        elif max_size > 65535:
            max_size = 65535
        r = dns.renderer.Renderer(self.id, self.flags, max_size, origin)
        for rrset in self.question:
            r.add_question(rrset.name, rrset.rdtype, rrset.rdclass)
        for rrset in self.answer:
            r.add_rrset(dns.renderer.ANSWER, rrset, **kw)
        for rrset in self.authority:
            r.add_rrset(dns.renderer.AUTHORITY, rrset, **kw)
        # The OPT pseudo-record goes at the start of the additional section.
        if self.edns >= 0:
            r.add_edns(self.edns, self.ednsflags, self.payload, self.options)
        for rrset in self.additional:
            r.add_rrset(dns.renderer.ADDITIONAL, rrset, **kw)
        r.write_header()
        # The TSIG record, if any, must be rendered last.
        if not self.keyname is None:
            r.add_tsig(self.keyname, self.keyring[self.keyname],
                       self.fudge, self.original_id, self.tsig_error,
                       self.other_data, self.request_mac,
                       self.keyalgorithm)
            self.mac = r.mac
        return r.get_wire()
    def use_tsig(self, keyring, keyname=None, fudge=300,
                 original_id=None, tsig_error=0, other_data='',
                 algorithm=dns.tsig.default_algorithm):
        """When sending, a TSIG signature using the specified keyring
        and keyname should be added.
        @param keyring: The TSIG keyring to use; defaults to None.
        @type keyring: dict
        @param keyname: The name of the TSIG key to use; defaults to None.
        The key must be defined in the keyring.  If a keyring is specified
        but a keyname is not, then the key used will be the first key in the
        keyring.  Note that the order of keys in a dictionary is not defined,
        so applications should supply a keyname when a keyring is used, unless
        they know the keyring contains only one key.
        @type keyname: dns.name.Name or string
        @param fudge: TSIG time fudge; default is 300 seconds.
        @type fudge: int
        @param original_id: TSIG original id; defaults to the message's id
        @type original_id: int
        @param tsig_error: TSIG error code; default is 0.
        @type tsig_error: int
        @param other_data: TSIG other data.
        @type other_data: string
        @param algorithm: The TSIG algorithm to use; defaults to
        dns.tsig.default_algorithm
        """
        self.keyring = keyring
        if keyname is None:
            # Arbitrary pick: dict key order is undefined in Python 2.
            self.keyname = self.keyring.keys()[0]
        else:
            if isinstance(keyname, (str, unicode)):
                keyname = dns.name.from_text(keyname)
            self.keyname = keyname
        self.keyalgorithm = algorithm
        self.fudge = fudge
        if original_id is None:
            self.original_id = self.id
        else:
            self.original_id = original_id
        self.tsig_error = tsig_error
        self.other_data = other_data
    def use_edns(self, edns=0, ednsflags=0, payload=1280, request_payload=None, options=None):
        """Configure EDNS behavior.
        @param edns: The EDNS level to use.  Specifying None, False, or -1
        means 'do not use EDNS', and in this case the other parameters are
        ignored.  Specifying True is equivalent to specifying 0, i.e. 'use
        EDNS0'.
        @type edns: int or bool or None
        @param ednsflags: EDNS flag values.
        @type ednsflags: int
        @param payload: The EDNS sender's payload field, which is the maximum
        size of UDP datagram the sender can handle.
        @type payload: int
        @param request_payload: The EDNS payload size to use when sending
        this message.  If not specified, defaults to the value of payload.
        @type request_payload: int or None
        @param options: The EDNS options
        @type options: None or list of dns.edns.Option objects
        @see: RFC 2671
        """
        if edns is None or edns is False:
            edns = -1
        if edns is True:
            edns = 0
        if request_payload is None:
            request_payload = payload
        if edns < 0:
            # EDNS disabled: zero out every EDNS-related field.
            ednsflags = 0
            payload = 0
            request_payload = 0
            options = []
        else:
            # make sure the EDNS version in ednsflags agrees with edns
            ednsflags &= 0xFF00FFFFL
            ednsflags |= (edns << 16)
            if options is None:
                options = []
        self.edns = edns
        self.ednsflags = ednsflags
        self.payload = payload
        self.options = options
        self.request_payload = request_payload
    def want_dnssec(self, wanted=True):
        """Enable or disable 'DNSSEC desired' flag in requests.
        @param wanted: Is DNSSEC desired?  If True, EDNS is enabled if
        required, and then the DO bit is set.  If False, the DO bit is
        cleared if EDNS is enabled.
        @type wanted: bool
        """
        if wanted:
            if self.edns < 0:
                self.use_edns()
            self.ednsflags |= dns.flags.DO
        elif self.edns >= 0:
            self.ednsflags &= ~dns.flags.DO
    def rcode(self):
        """Return the rcode.
        @rtype: int
        """
        return dns.rcode.from_flags(self.flags, self.ednsflags)
    def set_rcode(self, rcode):
        """Set the rcode.
        @param rcode: the rcode
        @type rcode: int
        """
        (value, evalue) = dns.rcode.to_flags(rcode)
        # Clear the low 4 rcode bits in the header flags, then the
        # extended-rcode byte in ednsflags, and install the new values.
        self.flags &= 0xFFF0
        self.flags |= value
        self.ednsflags &= 0x00FFFFFFL
        self.ednsflags |= evalue
        if self.ednsflags != 0 and self.edns < 0:
            self.edns = 0
    def opcode(self):
        """Return the opcode.
        @rtype: int
        """
        return dns.opcode.from_flags(self.flags)
    def set_opcode(self, opcode):
        """Set the opcode.
        @param opcode: the opcode
        @type opcode: int
        """
        # 0x87FF masks out the 4-bit opcode field (bits 11-14).
        self.flags &= 0x87FF
        self.flags |= dns.opcode.to_flags(opcode)
class _WireReader(object):
    """Wire format reader.
    @ivar wire: the wire-format message.
    @type wire: string
    @ivar message: The message object being built
    @type message: dns.message.Message object
    @ivar current: When building a message object from wire format, this
    variable contains the offset from the beginning of wire of the next octet
    to be read.
    @type current: int
    @ivar updating: Is the message a dynamic update?
    @type updating: bool
    @ivar one_rr_per_rrset: Put each RR into its own RRset?
    @type one_rr_per_rrset: bool
    @ivar ignore_trailing: Ignore trailing junk at end of request?
    @type ignore_trailing: bool
    @ivar zone_rdclass: The class of the zone in messages which are
    DNS dynamic updates.
    @type zone_rdclass: int
    """
    def __init__(self, wire, message, question_only=False,
                 one_rr_per_rrset=False, ignore_trailing=False):
        self.wire = dns.wiredata.maybe_wrap(wire)
        self.message = message
        self.current = 0
        self.updating = False
        self.zone_rdclass = dns.rdataclass.IN
        self.question_only = question_only
        self.one_rr_per_rrset = one_rr_per_rrset
        self.ignore_trailing = ignore_trailing
    def _get_question(self, qcount):
        """Read the next I{qcount} records from the wire data and add them to
        the question section.
        @param qcount: the number of questions in the message
        @type qcount: int"""
        # Dynamic updates may carry at most one "question" (the zone).
        if self.updating and qcount > 1:
            raise dns.exception.FormError
        for i in xrange(0, qcount):
            (qname, used) = dns.name.from_wire(self.wire, self.current)
            if not self.message.origin is None:
                qname = qname.relativize(self.message.origin)
            self.current = self.current + used
            (rdtype, rdclass) = \
                     struct.unpack('!HH',
                                   self.wire[self.current:self.current + 4])
            self.current = self.current + 4
            self.message.find_rrset(self.message.question, qname,
                                    rdclass, rdtype, create=True,
                                    force_unique=True)
            if self.updating:
                self.zone_rdclass = rdclass
    def _get_section(self, section, count):
        """Read the next I{count} records from the wire data and add them to
        the specified section.
        @param section: the section of the message to which to add records
        @type section: list of dns.rrset.RRset objects
        @param count: the number of records to read
        @type count: int"""
        if self.updating or self.one_rr_per_rrset:
            force_unique = True
        else:
            force_unique = False
        seen_opt = False
        for i in xrange(0, count):
            rr_start = self.current
            (name, used) = dns.name.from_wire(self.wire, self.current)
            absolute_name = name
            if not self.message.origin is None:
                name = name.relativize(self.message.origin)
            self.current = self.current + used
            (rdtype, rdclass, ttl, rdlen) = \
                     struct.unpack('!HHIH',
                                   self.wire[self.current:self.current + 10])
            self.current = self.current + 10
            if rdtype == dns.rdatatype.OPT:
                # EDNS OPT pseudo-record: it must appear (once) in the
                # additional section; its class field carries the payload
                # size and its TTL field carries the EDNS flags/version.
                if not section is self.message.additional or seen_opt:
                    raise BadEDNS
                self.message.payload = rdclass
                self.message.ednsflags = ttl
                self.message.edns = (ttl & 0xff0000) >> 16
                self.message.options = []
                current = self.current
                optslen = rdlen
                while optslen > 0:
                    (otype, olen) = \
                            struct.unpack('!HH',
                                          self.wire[current:current + 4])
                    current = current + 4
                    opt = dns.edns.option_from_wire(otype, self.wire, current, olen)
                    self.message.options.append(opt)
                    current = current + olen
                    optslen = optslen - 4 - olen
                seen_opt = True
            elif rdtype == dns.rdatatype.TSIG:
                # TSIG must be the very last record of the additional
                # section; validate it against the configured keyring.
                if not (section is self.message.additional and
                        i == (count - 1)):
                    raise BadTSIG
                if self.message.keyring is None:
                    raise UnknownTSIGKey('got signed message without keyring')
                secret = self.message.keyring.get(absolute_name)
                if secret is None:
                    raise UnknownTSIGKey("key '%s' unknown" % name)
                self.message.tsig_ctx = \
                                      dns.tsig.validate(self.wire,
                                          absolute_name,
                                          secret,
                                          int(time.time()),
                                          self.message.request_mac,
                                          rr_start,
                                          self.current,
                                          rdlen,
                                          self.message.tsig_ctx,
                                          self.message.multi,
                                          self.message.first)
                self.message.had_tsig = True
            else:
                if ttl < 0:
                    ttl = 0
                # In dynamic updates, class ANY/NONE encode deletion
                # semantics; remember the real class from the zone section.
                if self.updating and \
                   (rdclass == dns.rdataclass.ANY or
                    rdclass == dns.rdataclass.NONE):
                    deleting = rdclass
                    rdclass = self.zone_rdclass
                else:
                    deleting = None
                if deleting == dns.rdataclass.ANY or \
                   (deleting == dns.rdataclass.NONE and \
                    section is self.message.answer):
                    covers = dns.rdatatype.NONE
                    rd = None
                else:
                    rd = dns.rdata.from_wire(rdclass, rdtype, self.wire,
                                             self.current, rdlen,
                                             self.message.origin)
                    covers = rd.covers()
                if self.message.xfr and rdtype == dns.rdatatype.SOA:
                    force_unique = True
                rrset = self.message.find_rrset(section, name,
                                                rdclass, rdtype, covers,
                                                deleting, True, force_unique)
                if not rd is None:
                    rrset.add(rd, ttl)
                self.current = self.current + rdlen
    def read(self):
        """Read a wire format DNS message and build a dns.message.Message
        object."""
        l = len(self.wire)
        # A DNS header is always exactly 12 octets.
        if l < 12:
            raise ShortHeader
        (self.message.id, self.message.flags, qcount, ancount,
         aucount, adcount) = struct.unpack('!HHHHHH', self.wire[:12])
        self.current = 12
        if dns.opcode.is_update(self.message.flags):
            self.updating = True
        self._get_question(qcount)
        if self.question_only:
            return
        self._get_section(self.message.answer, ancount)
        self._get_section(self.message.authority, aucount)
        self._get_section(self.message.additional, adcount)
        if not self.ignore_trailing and self.current != l:
            raise TrailingJunk
        # For multi-message TSIG (zone transfers), unsigned intermediate
        # messages still feed the running HMAC context.
        if self.message.multi and self.message.tsig_ctx and \
               not self.message.had_tsig:
            self.message.tsig_ctx.update(self.wire)
def from_wire(wire, keyring=None, request_mac='', xfr=False, origin=None,
              tsig_ctx = None, multi = False, first = True,
              question_only = False, one_rr_per_rrset = False,
              ignore_trailing = False):
    """Convert a DNS wire format message into a message
    object.
    @param keyring: The keyring to use if the message is signed.
    @type keyring: dict
    @param request_mac: If the message is a response to a TSIG-signed
    request, I{request_mac} should be set to the MAC of that request.
    @type request_mac: string
    @param xfr: Is this message part of a zone transfer?
    @type xfr: bool
    @param origin: If the message is part of a zone transfer, I{origin}
    should be the origin name of the zone.
    @type origin: dns.name.Name object
    @param tsig_ctx: The ongoing TSIG context, used when validating zone
    transfers.
    @type tsig_ctx: hmac.HMAC object
    @param multi: Is this message part of a multiple message sequence?
    @type multi: bool
    @param first: Is this message standalone, or the first of a multi
    message sequence?
    @type first: bool
    @param question_only: Read only up to the end of the question section?
    @type question_only: bool
    @param one_rr_per_rrset: Put each RR into its own RRset
    @type one_rr_per_rrset: bool
    @param ignore_trailing: Ignore trailing junk at end of request?
    @type ignore_trailing: bool
    @raises ShortHeader: The message is less than 12 octets long.
    @raises TrailingJunk: There were octets in the message past the end
    of the proper DNS message.
    @raises BadEDNS: An OPT record was in the wrong section, or occurred
    more than once.
    @raises BadTSIG: A TSIG record was not the last record of the
    additional data section.
    @rtype: dns.message.Message object"""
    # Seed a message (id 0 is overwritten from the header) with the
    # TSIG/transfer context, then let the wire reader populate it.
    message = Message(id=0)
    message.keyring = keyring
    message.request_mac = request_mac
    message.xfr = xfr
    message.origin = origin
    message.tsig_ctx = tsig_ctx
    message.multi = multi
    message.first = first
    reader = _WireReader(wire, message, question_only, one_rr_per_rrset,
                         ignore_trailing)
    reader.read()
    return message
class _TextReader(object):
    """Text format reader.
    @ivar tok: the tokenizer
    @type tok: dns.tokenizer.Tokenizer object
    @ivar message: The message object being built
    @type message: dns.message.Message object
    @ivar updating: Is the message a dynamic update?
    @type updating: bool
    @ivar zone_rdclass: The class of the zone in messages which are
    DNS dynamic updates.
    @type zone_rdclass: int
    @ivar last_name: The most recently read name when building a message object
    from text format.
    @type last_name: dns.name.Name object
    """
    def __init__(self, text, message):
        self.message = message
        self.tok = dns.tokenizer.Tokenizer(text)
        # Owner names may be omitted on subsequent lines (leading
        # whitespace); remember the last one seen so those lines reuse it.
        self.last_name = None
        self.zone_rdclass = dns.rdataclass.IN
        self.updating = False
    def _header_line(self, section):
        """Process one line from the text format header section."""
        token = self.tok.get()
        what = token.value
        if what == 'id':
            self.message.id = self.tok.get_int()
        elif what == 'flags':
            # Consume flag mnemonics until a non-identifier token appears.
            while True:
                token = self.tok.get()
                if not token.is_identifier():
                    self.tok.unget(token)
                    break
                self.message.flags = self.message.flags | \
                    dns.flags.from_text(token.value)
            # An UPDATE opcode switches the reader into dynamic-update
            # mode, which changes how later sections are interpreted.
            if dns.opcode.is_update(self.message.flags):
                self.updating = True
        elif what == 'edns':
            self.message.edns = self.tok.get_int()
            # The EDNS version number lives in bits 16-23 of ednsflags.
            self.message.ednsflags = self.message.ednsflags | \
                (self.message.edns << 16)
        elif what == 'eflags':
            # Seeing EDNS flags implies EDNS itself is in use.
            if self.message.edns < 0:
                self.message.edns = 0
            while True:
                token = self.tok.get()
                if not token.is_identifier():
                    self.tok.unget(token)
                    break
                self.message.ednsflags = self.message.ednsflags | \
                    dns.flags.edns_from_text(token.value)
        elif what == 'payload':
            self.message.payload = self.tok.get_int()
            # A payload line likewise implies EDNS is in use.
            if self.message.edns < 0:
                self.message.edns = 0
        elif what == 'opcode':
            text = self.tok.get_string()
            self.message.flags = self.message.flags | \
                dns.opcode.to_flags(dns.opcode.from_text(text))
        elif what == 'rcode':
            text = self.tok.get_string()
            self.message.set_rcode(dns.rcode.from_text(text))
        else:
            raise UnknownHeaderField
        self.tok.get_eol()
    def _question_line(self, section):
        """Process one line from the text format question section."""
        # A leading-whitespace token means the owner name was omitted and
        # the previous one should be reused.
        token = self.tok.get(want_leading = True)
        if not token.is_whitespace():
            self.last_name = dns.name.from_text(token.value, None)
        name = self.last_name
        token = self.tok.get()
        if not token.is_identifier():
            raise dns.exception.SyntaxError
        # Class
        # The class field is optional; if the token isn't a valid class
        # mnemonic, fall back to IN and treat the token as the type.
        try:
            rdclass = dns.rdataclass.from_text(token.value)
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
        except dns.exception.SyntaxError:
            raise dns.exception.SyntaxError
        except:
            # Broad catch is deliberate: any parse failure here just
            # means "no explicit class was given".
            rdclass = dns.rdataclass.IN
        # Type
        rdtype = dns.rdatatype.from_text(token.value)
        self.message.find_rrset(self.message.question, name,
                                rdclass, rdtype, create=True,
                                force_unique=True)
        if self.updating:
            # In a dynamic update the question section is the ZONE
            # section, whose class applies to the whole update.
            self.zone_rdclass = rdclass
        self.tok.get_eol()
    def _rr_line(self, section):
        """Process one line from the text format answer, authority, or
        additional data sections.
        """
        deleting = None
        # Name
        token = self.tok.get(want_leading = True)
        if not token.is_whitespace():
            self.last_name = dns.name.from_text(token.value, None)
        name = self.last_name
        token = self.tok.get()
        if not token.is_identifier():
            raise dns.exception.SyntaxError
        # TTL
        # TTL is optional; on failure to parse, default to 0 and treat
        # the token as the class/type.
        try:
            ttl = int(token.value, 0)
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
        except dns.exception.SyntaxError:
            raise dns.exception.SyntaxError
        except:
            ttl = 0
        # Class
        try:
            rdclass = dns.rdataclass.from_text(token.value)
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
            if rdclass == dns.rdataclass.ANY or rdclass == dns.rdataclass.NONE:
                # ANY/NONE classes encode RR deletion semantics in
                # dynamic updates (RFC 2136); the real class is the
                # zone's class.
                deleting = rdclass
                rdclass = self.zone_rdclass
        except dns.exception.SyntaxError:
            raise dns.exception.SyntaxError
        except:
            rdclass = dns.rdataclass.IN
        # Type
        rdtype = dns.rdatatype.from_text(token.value)
        token = self.tok.get()
        if not token.is_eol_or_eof():
            # There is rdata on the line; push the token back and let the
            # rdata parser consume the rest.
            self.tok.unget(token)
            rd = dns.rdata.from_text(rdclass, rdtype, self.tok, None)
            covers = rd.covers()
        else:
            # No rdata (e.g. a "delete all RRs of this type" update line).
            rd = None
            covers = dns.rdatatype.NONE
        rrset = self.message.find_rrset(section, name,
                                        rdclass, rdtype, covers,
                                        deleting, True, self.updating)
        if not rd is None:
            rrset.add(rd, ttl)
    def read(self):
        """Read a text format DNS message and build a dns.message.Message
        object."""
        # Section-switching comments (";HEADER", ";QUESTION", ...) choose
        # which line parser handles the following lines.
        line_method = self._header_line
        section = None
        while 1:
            token = self.tok.get(True, True)
            if token.is_eol_or_eof():
                break
            if token.is_comment():
                u = token.value.upper()
                if u == 'HEADER':
                    line_method = self._header_line
                elif u == 'QUESTION' or u == 'ZONE':
                    line_method = self._question_line
                    section = self.message.question
                elif u == 'ANSWER' or u == 'PREREQ':
                    line_method = self._rr_line
                    section = self.message.answer
                elif u == 'AUTHORITY' or u == 'UPDATE':
                    line_method = self._rr_line
                    section = self.message.authority
                elif u == 'ADDITIONAL':
                    line_method = self._rr_line
                    section = self.message.additional
                self.tok.get_eol()
                continue
            self.tok.unget(token)
            line_method(section)
def from_text(text):
    """Convert the text format message into a message object.
    @param text: The text format message.
    @type text: string
    @raises UnknownHeaderField:
    @raises dns.exception.SyntaxError:
    @rtype: dns.message.Message object"""
    # A file object is accepted here as well, but that is an
    # implementation detail used by from_file(); the public file
    # interface is from_file().
    message = Message()
    _TextReader(text, message).read()
    return message
def from_file(f):
    """Read the next text format message from the specified file.
    @param f: file or string. If I{f} is a string, it is treated
    as the name of a file to open.
    @raises UnknownHeaderField:
    @raises dns.exception.SyntaxError:
    @rtype: dns.message.Message object"""
    if sys.hexversion >= 0x02030000:
        # Python 2.3+: allow Unicode filenames; turn on universal
        # newline support.
        (str_type, opts) = (basestring, 'rU')
    else:
        (str_type, opts) = (str, 'r')
    want_close = False
    if isinstance(f, str_type):
        # Treat a string argument as a filename; we own the handle.
        f = file(f, opts)
        want_close = True
    try:
        return from_text(f)
    finally:
        # Only close handles we opened ourselves.
        if want_close:
            f.close()
def make_query(qname, rdtype, rdclass = dns.rdataclass.IN, use_edns=None,
               want_dnssec=False, ednsflags=0, payload=1280,
               request_payload=None, options=None):
    """Make a query message.
    The query name, type, and class may each be given either as an
    object of the appropriate type or as a string.
    The query is created with a randomly chosen id and with the RD
    (recursion desired) flag set.
    @param qname: The query name.
    @type qname: dns.name.Name object or string
    @param rdtype: The desired rdata type.
    @type rdtype: int
    @param rdclass: The desired rdata class; the default is class IN.
    @type rdclass: int
    @param use_edns: The EDNS level to use; the default is None (no EDNS).
    See dns.message.Message.use_edns() for the possible values and their
    meanings.
    @type use_edns: int or bool or None
    @param want_dnssec: Should the query indicate that DNSSEC is desired?
    @type want_dnssec: bool
    @param ednsflags: EDNS flag values.
    @type ednsflags: int
    @param payload: The EDNS sender's payload field, which is the maximum
    size of UDP datagram the sender can handle.
    @type payload: int
    @param request_payload: The EDNS payload size to use when sending
    this message.  If not specified, defaults to the value of payload.
    @type request_payload: int or None
    @param options: The EDNS options
    @type options: None or list of dns.edns.Option objects
    @see: RFC 2671
    @rtype: dns.message.Message object"""
    # Promote any string arguments to their typed equivalents.
    if isinstance(qname, (str, unicode)):
        qname = dns.name.from_text(qname)
    if isinstance(rdtype, (str, unicode)):
        rdtype = dns.rdatatype.from_text(rdtype)
    if isinstance(rdclass, (str, unicode)):
        rdclass = dns.rdataclass.from_text(rdclass)
    query = Message()
    query.flags |= dns.flags.RD
    query.find_rrset(query.question, qname, rdclass, rdtype,
                     create=True, force_unique=True)
    query.use_edns(use_edns, ednsflags, payload, request_payload, options)
    query.want_dnssec(want_dnssec)
    return query
def make_response(query, recursion_available=False, our_payload=8192):
    """Make a message which is a response for the specified query.
    The returned message is only a response skeleton: it carries the
    infrastructure a response needs (id, flags, opcode, question,
    EDNS/TSIG mirroring) but no answer content.
    The response's question section is a shallow copy of the query's
    question section, so the query's question RRsets should not be
    changed.
    @param query: the query to respond to
    @type query: dns.message.Message object
    @param recursion_available: should RA be set in the response?
    @type recursion_available: bool
    @param our_payload: payload size to advertise in EDNS responses; default
    is 8192.
    @type our_payload: int
    @rtype: dns.message.Message object"""
    if query.flags & dns.flags.QR:
        raise dns.exception.FormError('specified query message is not a query')
    reply = dns.message.Message(query.id)
    # QR set; RD mirrored from the query.
    reply.flags = dns.flags.QR | (query.flags & dns.flags.RD)
    if recursion_available:
        reply.flags |= dns.flags.RA
    reply.set_opcode(query.opcode())
    reply.question = list(query.question)
    if query.edns >= 0:
        reply.use_edns(0, 0, our_payload, query.payload)
    if query.keyname is not None:
        # Mirror the TSIG context so the response can be signed/verified.
        reply.keyname = query.keyname
        reply.keyring = query.keyring
        reply.request_mac = query.mac
    return reply
|
gpl-3.0
|
DennisDenuto/puppet-commonscripts
|
files/aws_cli/AWS-ElasticBeanstalk-CLI-2.6.3/eb/macosx/python2.7/lib/aws/http_client.py
|
11
|
1992
|
#!/usr/bin/env python
#==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
from httplib import HTTPSConnection
import os
import socket
import ssl
from urllib2 import HTTPSHandler
from scli.constants import CABundle
from lib.utility import shell_utils
# HTTP method names used when issuing requests through this client.
HTTP_GET = 'GET'
HTTP_POST = 'POST'
class CaValidationHttpsConnection(HTTPSConnection):
    '''Override HTTPSConnection to verify server certification'''
    def connect(self):
        # Open the raw TCP connection ourselves so we can wrap it in TLS
        # with CA validation below.
        sock = socket.create_connection((self.host, self.port),
            self.timeout, self.source_address)
        if self._tunnel_host:
            # Going through an HTTP proxy: issue CONNECT over the plain
            # socket first, then wrap the tunneled socket in TLS.
            self.sock = sock
            self._tunnel()
        # CERT_REQUIRED validates the server's chain against the bundled
        # CA file shipped with the CLI.
        # NOTE(review): ssl.wrap_socket does not verify that the
        # certificate's hostname matches self.host -- confirm whether a
        # match_hostname-style check is needed here.
        self.sock = ssl.wrap_socket(sock,
                                    ssl_version = ssl.PROTOCOL_TLSv1,
                                    cert_reqs = ssl.CERT_REQUIRED,
                                    ca_certs = os.path.join(shell_utils.ori_path(),
                                                            CABundle.Path,
                                                            CABundle.Name))
class CaValidationHttpsHandler(HTTPSHandler):
    '''Override HTTPSHandler to use CaValidationHttpsConnection for connection'''
    def https_open(self, req):
        # Route every urllib2 HTTPS request through the CA-validating
        # connection class above.
        return self.do_open(CaValidationHttpsConnection, req)
|
mit
|
misttechnologies/selenium
|
py/test/selenium/webdriver/common/select_class_tests.py
|
19
|
15214
|
#!/usr/bin/python
# Copyright 2011 Software Freedom Conservancy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import unittest
from selenium.common.exceptions import NoSuchElementException, ElementNotSelectableException, UnexpectedTagNameException
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
def not_available_on_remote(func):
    """Decorator that turns a test into a no-op when run against a
    remote WebDriver, since the wrapped behaviour is local-only.

    Fixes: the original compared ``type(self.driver)`` (a class object)
    against the string ``'remote'``, which can never be equal, so the
    guard never fired; it also left a stray debug ``print`` and did not
    preserve the wrapped function's metadata.
    """
    import functools

    @functools.wraps(func)
    def testMethod(self):
        # The remote driver lives in selenium.webdriver.remote.*;
        # skip the body for it, otherwise run the real test.
        if 'remote' in type(self.driver).__module__:
            return None
        return func(self)
    return testMethod
# Fixtures describing the <select> elements on formPage.html: each maps the
# element's name attribute to the option texts the tests expect it to hold.
disabledSelect = { 'name': 'no-select', 'values': ['Foo']}
singleSelectValues1 = { 'name': 'selectomatic', 'values': ['One', 'Two', 'Four', 'Still learning how to count, apparently']}
singleSelectValues2 = { 'name': 'redirect', 'values': ['One', 'Two']}
singleSelectValuesWithSpaces = { 'name': 'select_with_spaces', 'values': ['One', 'Two', 'Four', 'Still learning how to count, apparently']}
multiSelectValues1 = { 'name': 'multi', 'values': ['Eggs', 'Ham', 'Sausages', 'Onion gravy']}
multiSelectValues2 = { 'name': 'select_empty_multiple', 'values': ['select_1', 'select_2', 'select_3', 'select_4']}
class WebDriverSelectSupportTests(unittest.TestCase):
    """Exercises selenium.webdriver.support.ui.Select against the <select>
    elements of formPage.html.

    Assumes ``self.driver`` (a live WebDriver) and ``self.webserver`` are
    attached by the test harness -- neither is set in this file; confirm
    against the runner.  Several multi-select tests are skipped on
    Chrome < 16, which could not deselect preselected options.
    """
    def testSelectByIndexSingle(self):
        self._loadPage("formPage")
        for select in [singleSelectValues1]:
            sel = Select(self.driver.find_element(By.NAME, select['name']))
            for x in range(len(select['values'])):
                sel.select_by_index(x)
                self.assertEqual(sel.first_selected_option.text, select['values'][x])
    @pytest.mark.xfail
    # disabled select isn't immediatedly throwing an ElementNotSelectableException when trying to select
    def testSelectDisabledByIndexShouldThrowException(self):
        self._loadPage("formPage")
        try:
            sel = Select(self.driver.find_element(By.NAME, disabledSelect['name']))
            sel.select_by_index(0)
            raise Exception("Didn't get an expected ElementNotSelectableException exception.")
        except ElementNotSelectableException:
            pass # should get this exception
    def testSelectByValueSingle(self):
        if self.driver.capabilities['browserName'] == 'chrome':
            pytest.xfail("chrome currently doesn't allow css selectors with comma's in them that are not compound");
        self._loadPage("formPage")
        for select in [singleSelectValues1]:
            sel = Select(self.driver.find_element(By.NAME, select['name']))
            for x in range(len(select['values'])):
                # value attributes on the page are the lowercased texts
                sel.select_by_value(select['values'][x].lower())
                self.assertEqual(sel.first_selected_option.text, select['values'][x])
    # disabled select isn't immediatedly throwing an ElementNotSelectableException when trying to select
    @pytest.mark.xfail
    def testSelectDisabledByValueShouldThrowException(self):
        self._loadPage("formPage")
        try:
            sel = Select(self.driver.find_element(By.NAME, disabledSelect['name']))
            sel.select_by_value('foo')
            raise Exception("Didn't get an expected ElementNotSelectableException exception.")
        except ElementNotSelectableException:
            pass
    def testSelectByVisibleTextSingle(self):
        self._loadPage("formPage")
        for select in [singleSelectValues1]:
            sel = Select(self.driver.find_element(By.NAME, select['name']))
            for x in range(len(select['values'])):
                print(select['values'][x])
                sel.select_by_visible_text(select['values'][x])
                self.assertEqual(sel.first_selected_option.text, select['values'][x])
    def testSelectByVisibleTextShouldNormalizeSpaces(self):
        self._loadPage("formPage")
        for select in [singleSelectValuesWithSpaces]:
            sel = Select(self.driver.find_element(By.NAME, select['name']))
            for x in range(len(select['values'])):
                print(select['values'][x])
                sel.select_by_visible_text(select['values'][x])
                self.assertEqual(sel.first_selected_option.text, select['values'][x])
    @pytest.mark.xfail
    # disabled select isn't immediatedly throwing an ElementNotSelectableException when trying to select
    def testSelectDisabledByVisibleTextShouldThrowException(self):
        self._loadPage("formPage")
        try:
            sel = Select(self.driver.find_element(By.NAME, disabledSelect['name']))
            sel.select_by_visible_text('foo')
            raise Exception("Didn't get an expected ElementNotSelectableException exception.")
        except ElementNotSelectableException:
            pass
    def testSelectByIndexMultiple(self):
        if self.driver.capabilities['browserName'] == 'chrome' and int(self.driver.capabilities['version'].split('.')[0]) < 16:
            pytest.skip("deselecting preselected values only works on chrome >= 16")
        self._loadPage("formPage")
        for select in [multiSelectValues1, multiSelectValues2]:
            sel = Select(self.driver.find_element(By.NAME, select['name']))
            sel.deselect_all()
            for x in range(len(select['values'])):
                sel.select_by_index(x)
                selected = sel.all_selected_options
                # selections accumulate: after x+1 picks, x+1 options selected
                self.assertEqual(len(selected), x+1)
                for j in range(len(selected)):
                    self.assertEqual(selected[j].text, select['values'][j])
    def testSelectByValueMultiple(self):
        if self.driver.capabilities['browserName'] == 'chrome' and int(self.driver.capabilities['version'].split('.')[0]) < 16:
            pytest.skip("deselecting preselected values only works on chrome >= 16")
        self._loadPage("formPage")
        for select in [multiSelectValues1, multiSelectValues2]:
            sel = Select(self.driver.find_element(By.NAME, select['name']))
            sel.deselect_all()
            for x in range(len(select['values'])):
                sel.select_by_value(select['values'][x].lower())
                selected = sel.all_selected_options
                self.assertEqual(len(selected), x+1)
                for j in range(len(selected)):
                    self.assertEqual(selected[j].text, select['values'][j])
    def testSelectByVisibleTextMultiple(self):
        if self.driver.capabilities['browserName'] == 'chrome' and int(self.driver.capabilities['version'].split('.')[0]) < 16:
            pytest.skip("deselecting preselected values only works on chrome >= 16")
        self._loadPage("formPage")
        for select in [multiSelectValues1, multiSelectValues2]:
            sel = Select(self.driver.find_element(By.NAME, select['name']))
            sel.deselect_all()
            for x in range(len(select['values'])):
                sel.select_by_visible_text(select['values'][x])
                selected = sel.all_selected_options
                self.assertEqual(len(selected), x+1)
                for j in range(len(selected)):
                    self.assertEqual(selected[j].text, select['values'][j])
    def testDeselectAllSingle(self):
        self._loadPage("formPage")
        for select in [singleSelectValues1, singleSelectValues2]:
            # deselect_all is only legal on multi-selects
            try:
                Select(self.driver.find_element(By.NAME, select['name'])).deselect_all()
                raise Exception("Didn't get an expected NotImplementedError.")
            except NotImplementedError:
                pass # should get this exception
    def testDeselectAllMultiple(self):
        if self.driver.capabilities['browserName'] == 'chrome' and int(self.driver.capabilities['version'].split('.')[0]) < 16:
            pytest.skip("deselecting preselected values only works on chrome >= 16")
        self._loadPage("formPage")
        for select in [multiSelectValues1, multiSelectValues2]:
            sel = Select(self.driver.find_element(By.NAME, select['name']))
            sel.deselect_all()
            self.assertEqual(len(sel.all_selected_options), 0)
    def testDeselectByIndexSingle(self):
        self._loadPage("formPage")
        for select in [singleSelectValues1, singleSelectValues2]:
            try:
                Select(self.driver.find_element(By.NAME, select['name'])).deselect_by_index(0)
                raise Exception("Didn't get an expected NotImplementedError.")
            except NotImplementedError:
                pass # should get this exception
    def testDeselectByValueSingle(self):
        self._loadPage("formPage")
        for select in [singleSelectValues1, singleSelectValues2]:
            try:
                Select(self.driver.find_element(By.NAME, select['name'])).deselect_by_value(select['values'][0].lower())
                raise Exception("Didn't get an expected NotImplementedError.")
            except NotImplementedError:
                pass # should get this exception
    def testDeselectByVisibleTextSingle(self):
        self._loadPage("formPage")
        for select in [singleSelectValues1, singleSelectValues2]:
            try:
                Select(self.driver.find_element(By.NAME, select['name'])).deselect_by_visible_text(select['values'][0])
                raise Exception("Didn't get an expected NotImplementedError.")
            except NotImplementedError:
                pass # should get this exception
    def testDeselectByIndexMultiple(self):
        if self.driver.capabilities['browserName'] == 'chrome' and int(self.driver.capabilities['version'].split('.')[0]) < 16:
            pytest.skip("deselecting preselected values only works on chrome >= 16")
        self._loadPage("formPage")
        for select in [multiSelectValues1, multiSelectValues2]:
            sel = Select(self.driver.find_element(By.NAME, select['name']))
            sel.deselect_all()
            # select options 0-3, deselect 1 and 3; 0 and 2 should remain
            sel.select_by_index(0)
            sel.select_by_index(1)
            sel.select_by_index(2)
            sel.select_by_index(3)
            sel.deselect_by_index(1)
            sel.deselect_by_index(3)
            selected = sel.all_selected_options
            self.assertEqual(len(selected), 2)
            self.assertEqual(selected[0].text, select['values'][0])
            self.assertEqual(selected[1].text, select['values'][2])
    def testDeselectByValueMultiple(self):
        if self.driver.capabilities['browserName'] == 'chrome' and int(self.driver.capabilities['version'].split('.')[0]) < 16:
            pytest.skip("deselecting preselected values only works on chrome >= 16")
        self._loadPage("formPage")
        for select in [multiSelectValues1, multiSelectValues2]:
            sel = Select(self.driver.find_element(By.NAME, select['name']))
            sel.deselect_all()
            sel.select_by_index(0)
            sel.select_by_index(1)
            sel.select_by_index(2)
            sel.select_by_index(3)
            sel.deselect_by_value(select['values'][1].lower())
            sel.deselect_by_value(select['values'][3].lower())
            selected = sel.all_selected_options
            self.assertEqual(len(selected), 2)
            self.assertEqual(selected[0].text, select['values'][0])
            self.assertEqual(selected[1].text, select['values'][2])
    def testDeselectByVisibleTextMultiple(self):
        if self.driver.capabilities['browserName'] == 'chrome' and int(self.driver.capabilities['version'].split('.')[0]) < 16:
            pytest.skip("deselecting preselected values only works on chrome >= 16")
        self._loadPage("formPage")
        for select in [multiSelectValues1, multiSelectValues2]:
            sel = Select(self.driver.find_element(By.NAME, select['name']))
            sel.deselect_all()
            sel.select_by_index(0)
            sel.select_by_index(1)
            sel.select_by_index(2)
            sel.select_by_index(3)
            sel.deselect_by_visible_text(select['values'][1])
            sel.deselect_by_visible_text(select['values'][3])
            selected = sel.all_selected_options
            self.assertEqual(len(selected), 2)
            self.assertEqual(selected[0].text, select['values'][0])
            self.assertEqual(selected[1].text, select['values'][2])
    def testGetOptions(self):
        self._loadPage("formPage")
        for select in [singleSelectValues1, singleSelectValues2, multiSelectValues1, multiSelectValues2]:
            opts = Select(self.driver.find_element(By.NAME, select['name'])).options
            self.assertEqual(len(opts), len(select['values']))
            for i in range(len(opts)):
                self.assertEqual(opts[i].text, select['values'][i])
    def testGetAllSelectedOptionsSingle(self):
        self._loadPage("formPage")
        for select in [singleSelectValues1, singleSelectValues2, disabledSelect]:
            opts = Select(self.driver.find_element(By.NAME, select['name'])).all_selected_options
            self.assertEqual(len(opts), 1)
            self.assertEqual(opts[0].text, select['values'][0])
    def testGetAllSelectedOptionsMultiple(self):
        self._loadPage("formPage")
        opts = Select(self.driver.find_element(By.NAME, multiSelectValues1['name'])).all_selected_options
        self.assertEqual(len(opts), 2)
        self.assertEqual(opts[0].text, multiSelectValues1['values'][0])
        self.assertEqual(opts[1].text, multiSelectValues1['values'][2])
        opts = Select(self.driver.find_element(By.NAME, multiSelectValues2['name'])).all_selected_options
        self.assertEqual(len(opts), 0)
    def testGetFirstSelectedOptionSingle(self):
        self._loadPage("formPage")
        for select in [singleSelectValues1, singleSelectValues2]:
            opt = Select(self.driver.find_element(By.NAME, select['name'])).first_selected_option
            self.assertEqual(opt.text, select['values'][0])
    def testGetFirstSelectedOptionMultiple(self):
        self._loadPage("formPage")
        opt = Select(self.driver.find_element(By.NAME, multiSelectValues1['name'])).first_selected_option
        self.assertEqual(opt.text, multiSelectValues1['values'][0])
        opt = Select(self.driver.find_element(By.NAME, multiSelectValues2['name'])).all_selected_options
        self.assertEqual(len(opt), 0)
    def testRaisesExceptionForInvalidTagName(self):
        self._loadPage("formPage")
        try:
            Select(self.driver.find_element(By.TAG_NAME, "div"))
            raise Exception("Should have gotten an UnexpectedTagNameException to be raised")
        except UnexpectedTagNameException:
            pass
    def _pageURL(self, name):
        # Build the URL of a fixture page served by the harness webserver.
        return self.webserver.where_is(name + '.html')
    def _loadPage(self, name):
        # Navigate the driver to the named fixture page.
        self.driver.get(self._pageURL(name))
|
apache-2.0
|
hsteinhaus/ardupilot
|
mk/PX4/Tools/genmsg/src/genmsg/__init__.py
|
215
|
2116
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from . base import EXT_MSG, EXT_SRV, SEP, log, plog, InvalidMsgSpec, log_verbose, MsgGenerationException
from . gentools import compute_md5, compute_full_text, compute_md5_text
from . names import resource_name_base, package_resource_name, is_legal_resource_base_name, \
resource_name_package, resource_name, is_legal_resource_name
from . msgs import HEADER, TIME, DURATION, MsgSpec, Constant, Field
from . msg_loader import MsgNotFound, MsgContext, load_depends, load_msg_by_type, load_srv_by_type
from . srvs import SrvSpec
|
gpl-3.0
|
mfriesen/tentacle
|
src/tentacle/cthulhu/__init__.py
|
1
|
2464
|
#!python
import json
import os.path
import cherrypy
from cherrypy import tools
from mako.lookup import TemplateLookup
from tentacle.cthulhu.operation import CthulhuData
from tentacle.cthulhu.datastore import *
from tentacle.shared.screed import Screed
from aetypes import end
# Emitted at import time so server logs show the module was loaded.
print '------------- CThulhu is alive'
# Absolute directory of this package; used to locate the webroot templates.
current_dir = os.path.dirname(os.path.abspath(__file__))
class ActionRoot:
    """JSON API endpoints."""
    @cherrypy.expose
    def spawns(self):
        """Return the current spawn list as an application/json payload."""
        cherrypy.response.headers['Content-Type'] = 'application/json'
        return json.dumps(CthulhuData.spawns())
class ScreedRoot:
    """Pages and handlers for listing, editing and deleting screeds."""
    @cherrypy.expose
    def index(self):
        """Render the screed listing page."""
        lookup = TemplateLookup(directories=[current_dir + '/webroot'])
        template = lookup.get_template('screed.html')
        return template.render(screeds=get_screeds())
    @cherrypy.expose
    def edit(self, **kwargs):
        """GET renders the edit form; any other method saves the posted data
        and returns the saved screed's id as a string."""
        if cherrypy.request.method != 'GET':
            base = ScreedBase()
            for key, value in kwargs.iteritems():
                setattr(base, key, value)
            screed = Screed()
            if 'steps' in kwargs:
                steps = kwargs['steps']
                if hasattr(steps, "strip"):
                    # A single form field arrives as a bare string rather
                    # than a list; store it as step 0.
                    screed.add_fn(0, "fn", steps)
                else:
                    for idx, val in enumerate(steps):
                        screed.add_fn(idx, "fn", val)
            base.text = screed.to_json()
            saved = save_screed(base)
            return str(saved.id)
        screed = get_screed(kwargs['id']) if 'id' in kwargs else None
        lookup = TemplateLookup(directories=[current_dir + '/webroot'])
        template = lookup.get_template('screed-edit.html')
        return template.render(screed = screed)
    @cherrypy.expose
    def delete(self, **kwargs):
        """Delete the screed named by 'id' (if given), then go back to the list."""
        if 'id' in kwargs:
            delete_screed(kwargs['id'])
        raise cherrypy.HTTPRedirect("/screed")
class Root:
    """Site root: renders the landing page with the current spawn data."""
    @cherrypy.expose
    def index(self):
        lookup = TemplateLookup(directories=[current_dir + '/webroot'])
        template = lookup.get_template('index.html')
        return template.render(spawns=CthulhuData.spawns())
|
apache-2.0
|
litvinchuck/python-scripts
|
daemon/daemon_builder.py
|
2
|
1062
|
import signal
from .daemon import Daemon
from .signal_handler import SignalHandler
class DaemonBuilder:
    """Factory that wires a Daemon together with its SignalHandler."""
    @staticmethod
    def build(main_function, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
        """Create a Daemon, attach a SignalHandler, register it for SIGTERM
        and return the handler.
        Args:
            main_function (function): function run by the daemon; receives
                stdin, stdout and stderr as arguments
            pidfile (str): file holding the process identification number (pid)
            stdin (str): standard input stream file. Defaults to /dev/null
            stdout (str): standard output stream file. Defaults to /dev/null
            stderr (str): standard error stream file. Defaults to /dev/null
        Returns:
            SignalHandler: instance
        """
        handler = SignalHandler(Daemon(main_function, pidfile, stdin, stdout, stderr))
        signal.signal(signal.SIGTERM, handler.handle)
        return handler
|
mit
|
ouiliame/ps.py
|
xlwt/Row.py
|
30
|
11779
|
# -*- coding: windows-1252 -*-
import BIFFRecords
import Style
from Cell import StrCell, BlankCell, NumberCell, FormulaCell, MulBlankCell, BooleanCell, ErrorCell, \
_get_cells_biff_data_mul
import ExcelFormula
import datetime as dt
from Formatting import Font
try:
    from decimal import Decimal
except ImportError:
    # Python 2.3: decimal not supported; create dummy Decimal class
    class Decimal(object):
        # Stand-in so isinstance(x, Decimal) checks elsewhere are simply
        # False instead of raising NameError on Python 2.3.
        pass
class Row(object):
__slots__ = [# private variables
"__idx",
"__parent",
"__parent_wb",
"__cells",
"__min_col_idx",
"__max_col_idx",
"__xf_index",
"__has_default_xf_index",
"__height_in_pixels",
# public variables
"height",
"has_default_height",
"height_mismatch",
"level",
"collapse",
"hidden",
"space_above",
"space_below"]
    def __init__(self, rowx, parent_sheet):
        # rowx must be a valid BIFF8 row index (0..65535).
        if not (isinstance(rowx, int) and 0 <= rowx <= 65535):
            raise ValueError("row index (%r) not an int in range(65536)" % rowx)
        self.__idx = rowx
        self.__parent = parent_sheet
        self.__parent_wb = parent_sheet.get_parent()
        self.__cells = {}               # colx -> cell object (None marks MULBLANK filler slots)
        self.__min_col_idx = 0          # lowest column index used so far
        self.__max_col_idx = 0          # highest column index used so far
        self.__xf_index = 0x0F          # row-level style (XF) index; 0x0F is the default
        self.__has_default_xf_index = 0
        self.__height_in_pixels = 0x11  # grown as taller fonts are written
        self.height = 0x00FF
        self.has_default_height = 0x00
        self.height_mismatch = 0
        self.level = 0                  # outline level (0..7)
        self.collapse = 0
        self.hidden = 0
        self.space_above = 0
        self.space_below = 0
    def __adjust_height(self, style):
        # Grow the row's pixel height if the given style's font needs it;
        # never shrinks the row.
        twips = style.font.height
        points = float(twips)/20.0
        # Cell height in pixels can be calcuted by following approx. formula:
        # cell height in pixels = font height in points * 83/50 + 2/5
        # It works when screen resolution is 96 dpi
        pix = int(round(points*83.0/50.0 + 2.0/5.0))
        if pix > self.__height_in_pixels:
            self.__height_in_pixels = pix
    def __adjust_bound_col_idx(self, *args):
        # Validate each column index (0..255) and widen both the row's and
        # the parent sheet's used-column bounds to include it.
        for arg in args:
            iarg = int(arg)
            # `arg == iarg` rejects non-integral values like 2.5
            if not ((0 <= iarg <= 255) and arg == iarg):
                raise ValueError("column index (%r) not an int in range(256)" % arg)
            sheet = self.__parent
            if iarg < self.__min_col_idx:
                self.__min_col_idx = iarg
            if iarg > self.__max_col_idx:
                self.__max_col_idx = iarg
            if iarg < sheet.first_used_col:
                sheet.first_used_col = iarg
            if iarg > sheet.last_used_col:
                sheet.last_used_col = iarg
    def __excel_date_dt(self, date):
        # Convert a datetime.date/datetime/time into an Excel serial date
        # number (days since the workbook's epoch, fraction = time of day).
        adj = False
        if isinstance(date, dt.date):
            if self.__parent_wb.dates_1904:
                # Workbook uses the 1904 date system (Mac convention).
                epoch_tuple = (1904, 1, 1)
            else:
                epoch_tuple = (1899, 12, 31)
                adj = True
            if isinstance(date, dt.datetime):
                epoch = dt.datetime(*epoch_tuple)
            else:
                epoch = dt.date(*epoch_tuple)
        else: # it's a datetime.time instance
            # Time-only values are anchored to an arbitrary day so only the
            # fractional part is meaningful.
            date = dt.datetime.combine(dt.datetime(1900, 1, 1), date)
            epoch = dt.datetime(1900, 1, 1)
        delta = date - epoch
        xldate = delta.days + delta.seconds / 86400.0
        # Add a day for Excel's missing leap day in 1900
        if adj and xldate > 59:
            xldate += 1
        return xldate
    def get_height_in_pixels(self):
        # Current row height in pixels (grown by __adjust_height).
        return self.__height_in_pixels
    def set_style(self, style):
        # Apply a row-level style: grow the height for its font and record
        # the workbook XF index so the ROW record carries it.
        self.__adjust_height(style)
        self.__xf_index = self.__parent_wb.add_style(style)
        self.__has_default_xf_index = 1
    def get_xf_index(self):
        # XF (style) index recorded for this row.
        return self.__xf_index
    def get_cells_count(self):
        # Number of cell slots in this row (includes MULBLANK None fillers).
        return len(self.__cells)
    def get_min_col(self):
        # Lowest column index written in this row.
        return self.__min_col_idx
    def get_max_col(self):
        # Highest column index written in this row.
        return self.__max_col_idx
    def get_row_biff_data(self):
        # Serialize this row's metadata as a BIFF ROW record.
        # Height field: bits 0-14 = height, bit 15 = "default height" flag.
        height_options = (self.height & 0x07FFF)
        height_options |= (self.has_default_height & 0x01) << 15
        # Options field: outline level, collapse/hidden flags, style info.
        options = (self.level & 0x07) << 0
        options |= (self.collapse & 0x01) << 4
        options |= (self.hidden & 0x01) << 5
        options |= (self.height_mismatch & 0x01) << 6
        options |= (self.__has_default_xf_index & 0x01) << 7
        options |= (0x01 & 0x01) << 8      # always-set bit in the ROW record
        options |= (self.__xf_index & 0x0FFF) << 16
        options |= (self.space_above & 1) << 28
        options |= (self.space_below & 1) << 29
        return BIFFRecords.RowRecord(self.__idx, self.__min_col_idx,
            self.__max_col_idx, height_options, options).get()
    def insert_cell(self, col_index, cell_obj):
        # Place cell_obj at col_index. Overwriting an existing cell is an
        # error unless the sheet allows it; an overwritten shared string's
        # refcount must be released from the workbook SST.
        if col_index in self.__cells:
            if not self.__parent._cell_overwrite_ok:
                msg = "Attempt to overwrite cell: sheetname=%r rowx=%d colx=%d" \
                    % (self.__parent.name, self.__idx, col_index)
                raise Exception(msg)
            prev_cell_obj = self.__cells[col_index]
            sst_idx = getattr(prev_cell_obj, 'sst_idx', None)
            if sst_idx is not None:
                self.__parent_wb.del_str(sst_idx)
        self.__cells[col_index] = cell_obj
    def insert_mulcells(self, colx1, colx2, cell_obj):
        # Store a multi-column cell at colx1 and mark the remaining columns
        # with None so overwrite checks still fire for them.
        self.insert_cell(colx1, cell_obj)
        for col_index in xrange(colx1+1, colx2+1):
            self.insert_cell(col_index, None)
    def get_cells_biff_data(self):
        # Serialize the row's cells in column order, skipping the None
        # placeholders left by insert_mulcells.
        cell_items = [item for item in self.__cells.iteritems() if item[1] is not None]
        cell_items.sort() # in column order
        return _get_cells_biff_data_mul(self.__idx, cell_items)
        # previously:
        # return ''.join([cell.get_biff_data() for colx, cell in cell_items])
    def get_index(self):
        # Zero-based row index of this row in its sheet.
        return self.__idx
    def set_cell_text(self, colx, value, style=Style.default_style):
        # Write a string cell; the text itself lives in the workbook's
        # shared string table.
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(colx)
        xf_index = self.__parent_wb.add_style(style)
        self.insert_cell(colx, StrCell(self.__idx, colx, xf_index, self.__parent_wb.add_str(value)))
    def set_cell_blank(self, colx, style=Style.default_style):
        # Write a blank (style-only) cell.
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(colx)
        xf_index = self.__parent_wb.add_style(style)
        self.insert_cell(colx, BlankCell(self.__idx, colx, xf_index))
    def set_cell_mulblanks(self, first_colx, last_colx, style=Style.default_style):
        # Write a run of blank cells [first_colx..last_colx] as one
        # MULBLANK record.
        assert 0 <= first_colx <= last_colx <= 255
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(first_colx, last_colx)
        xf_index = self.__parent_wb.add_style(style)
        # ncols = last_colx - first_colx + 1
        self.insert_mulcells(first_colx, last_colx, MulBlankCell(self.__idx, first_colx, last_colx, xf_index))
def set_cell_number(self, colx, number, style=Style.default_style):
self.__adjust_height(style)
self.__adjust_bound_col_idx(colx)
xf_index = self.__parent_wb.add_style(style)
self.insert_cell(colx, NumberCell(self.__idx, colx, xf_index, number))
def set_cell_date(self, colx, datetime_obj, style=Style.default_style):
self.__adjust_height(style)
self.__adjust_bound_col_idx(colx)
xf_index = self.__parent_wb.add_style(style)
self.insert_cell(colx,
NumberCell(self.__idx, colx, xf_index, self.__excel_date_dt(datetime_obj)))
def set_cell_formula(self, colx, formula, style=Style.default_style, calc_flags=0):
self.__adjust_height(style)
self.__adjust_bound_col_idx(colx)
xf_index = self.__parent_wb.add_style(style)
self.__parent_wb.add_sheet_reference(formula)
self.insert_cell(colx, FormulaCell(self.__idx, colx, xf_index, formula, calc_flags=0))
def set_cell_boolean(self, colx, value, style=Style.default_style):
self.__adjust_height(style)
self.__adjust_bound_col_idx(colx)
xf_index = self.__parent_wb.add_style(style)
self.insert_cell(colx, BooleanCell(self.__idx, colx, xf_index, bool(value)))
def set_cell_error(self, colx, error_string_or_code, style=Style.default_style):
self.__adjust_height(style)
self.__adjust_bound_col_idx(colx)
xf_index = self.__parent_wb.add_style(style)
self.insert_cell(colx, ErrorCell(self.__idx, colx, xf_index, error_string_or_code))
    def write(self, col, label, style=Style.default_style):
        """Write *label* at column *col*, dispatching on its Python type.

        Non-empty strings become SST-backed text cells; empty string and
        None become blank cells; bool becomes a boolean cell (checked
        before the numeric branch because bool is an int subclass);
        numbers become number cells; datetime/date/time values are
        converted to Excel serial dates; Formula objects become formula
        cells; list/tuple is treated as rich text.  Anything else raises.
        """
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(col)
        style_index = self.__parent_wb.add_style(style)
        if isinstance(label, basestring):
            if len(label) > 0:
                self.insert_cell(col,
                    StrCell(self.__idx, col, style_index, self.__parent_wb.add_str(label))
                    )
            else:
                # Empty string is stored as a blank cell, not an SST entry.
                self.insert_cell(col, BlankCell(self.__idx, col, style_index))
        elif isinstance(label, bool): # bool is subclass of int; test bool first
            self.insert_cell(col, BooleanCell(self.__idx, col, style_index, label))
        elif isinstance(label, (float, int, long, Decimal)):
            self.insert_cell(col, NumberCell(self.__idx, col, style_index, label))
        elif isinstance(label, (dt.datetime, dt.date, dt.time)):
            date_number = self.__excel_date_dt(label)
            self.insert_cell(col, NumberCell(self.__idx, col, style_index, date_number))
        elif label is None:
            self.insert_cell(col, BlankCell(self.__idx, col, style_index))
        elif isinstance(label, ExcelFormula.Formula):
            # Formulas may reference other sheets; register those first.
            self.__parent_wb.add_sheet_reference(label)
            self.insert_cell(col, FormulaCell(self.__idx, col, style_index, label))
        elif isinstance(label, (list, tuple)):
            self.__rich_text_helper(col, label, style, style_index)
        else:
            raise Exception("Unexpected data type %r" % type(label))
    def set_cell_rich_text(self, col, rich_text_list, style=Style.default_style):
        """Write a rich-text cell at *col*.

        *rich_text_list* must be a list/tuple whose items are plain
        strings or (text, Font) pairs; any other container type raises.
        """
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(col)
        if not isinstance(rich_text_list, (list, tuple)):
            raise Exception("Unexpected data type %r" % type(rich_text_list))
        self.__rich_text_helper(col, rich_text_list, style)
    def __rich_text_helper(self, col, rich_text_list, style, style_index=None):
        """Build a (text, font) run list and insert it as one rich-text cell.

        Items in *rich_text_list* are plain strings (rendered with the
        lazily-registered default font of *style*) or (text, Font) pairs.
        Empty text fragments are dropped; if nothing remains, a blank
        cell is written instead.
        """
        if style_index is None:
            style_index = self.__parent_wb.add_style(style)
        default_font = None
        rt = []
        for data in rich_text_list:
            if isinstance(data, basestring):
                s = data
                font = default_font
            elif isinstance(data, (list, tuple)):
                if not isinstance(data[0], basestring) or not isinstance(data[1], Font):
                    raise Exception ("Unexpected data type %r, %r" % (type(data[0]), type(data[1])))
                s = data[0]
                font = self.__parent_wb.add_font(data[1])
            else:
                raise Exception ("Unexpected data type %r" % type(data))
            if s:
                rt.append((s, font))
            # Register the style's font once, so later plain-string
            # fragments pick up a concrete default font index.
            if default_font is None:
                default_font = self.__parent_wb.add_font(style.font)
        if rt:
            self.insert_cell(col, StrCell(self.__idx, col, style_index, self.__parent_wb.add_rt(rt)))
        else:
            self.insert_cell(col, BlankCell(self.__idx, col, style_index))
write_blanks = set_cell_mulblanks
write_rich_text = set_cell_rich_text
|
mit
|
40223221/w16b_test
|
static/Brython3.1.1-20150328-091302/Lib/xml/dom/xmlbuilder.py
|
873
|
12377
|
"""Implementation of the DOM Level 3 'LS-Load' feature."""
import copy
import xml.dom
from xml.dom.NodeFilter import NodeFilter
__all__ = ["DOMBuilder", "DOMEntityResolver", "DOMInputSource"]
class Options:
    """Features object that has variables set for each DOMBuilder feature.

    The DOMBuilder class uses an instance of this class to pass settings to
    the ExpatBuilder class.
    """

    # Note that the DOMBuilder class in LoadSave constrains which of these
    # values can be set using the DOM Level 3 LoadSave feature.

    namespaces = 1
    namespace_declarations = True
    validation = False
    external_parameter_entities = True
    external_general_entities = True
    external_dtd_subset = True
    validate_if_schema = False
    validate = False
    datatype_normalization = False
    create_entity_ref_nodes = True
    entities = True
    whitespace_in_element_content = True
    cdata_sections = True
    comments = True
    charset_overrides_xml_encoding = True
    infoset = False
    supported_mediatypes_only = False

    # Per-parse callbacks; both default to "none installed".
    errorHandler = None
    filter = None
class DOMBuilder:
    """DOM Level 3 LS document builder.

    Holds an Options instance describing the enabled parser features and
    drives xml.dom.expatbuilder to parse documents from URIs, input
    sources, or byte streams.
    """
    entityResolver = None
    errorHandler = None
    filter = None

    ACTION_REPLACE = 1
    ACTION_APPEND_AS_CHILDREN = 2
    ACTION_INSERT_AFTER = 3
    ACTION_INSERT_BEFORE = 4

    _legal_actions = (ACTION_REPLACE, ACTION_APPEND_AS_CHILDREN,
                      ACTION_INSERT_AFTER, ACTION_INSERT_BEFORE)

    def __init__(self):
        self._options = Options()

    def _get_entityResolver(self):
        return self.entityResolver
    def _set_entityResolver(self, entityResolver):
        self.entityResolver = entityResolver

    def _get_errorHandler(self):
        return self.errorHandler
    def _set_errorHandler(self, errorHandler):
        self.errorHandler = errorHandler

    def _get_filter(self):
        return self.filter
    def _set_filter(self, filter):
        self.filter = filter

    def setFeature(self, name, state):
        """Set feature *name* to boolean *state*.

        :raises xml.dom.NotSupportedErr: feature exists but this
            (feature, state) combination cannot be configured.
        :raises xml.dom.NotFoundErr: unknown feature name.
        """
        if self.supportsFeature(name):
            state = state and 1 or 0
            try:
                settings = self._settings[(_name_xform(name), state)]
            except KeyError:
                raise xml.dom.NotSupportedErr(
                    "unsupported feature: %r" % (name,))
            else:
                # Loop variable renamed so it no longer shadows the 'name'
                # parameter (the original reused 'name' here).
                for option, value in settings:
                    setattr(self._options, option, value)
        else:
            raise xml.dom.NotFoundErr("unknown feature: " + repr(name))

    def supportsFeature(self, name):
        """Return True if *name* maps to a known Options attribute."""
        return hasattr(self._options, _name_xform(name))

    def canSetFeature(self, name, state):
        """Return True if (name, state) is a configurable combination."""
        key = (_name_xform(name), state and 1 or 0)
        return key in self._settings

    # This dictionary maps from (feature,value) to a list of
    # (option,value) pairs that should be set on the Options object.
    # If a (feature,value) setting is not in this dictionary, it is
    # not supported by the DOMBuilder.
    #
    _settings = {
        ("namespace_declarations", 0): [
            ("namespace_declarations", 0)],
        ("namespace_declarations", 1): [
            ("namespace_declarations", 1)],
        ("validation", 0): [
            ("validation", 0)],
        ("external_general_entities", 0): [
            ("external_general_entities", 0)],
        ("external_general_entities", 1): [
            ("external_general_entities", 1)],
        ("external_parameter_entities", 0): [
            ("external_parameter_entities", 0)],
        ("external_parameter_entities", 1): [
            ("external_parameter_entities", 1)],
        ("validate_if_schema", 0): [
            ("validate_if_schema", 0)],
        ("create_entity_ref_nodes", 0): [
            ("create_entity_ref_nodes", 0)],
        ("create_entity_ref_nodes", 1): [
            ("create_entity_ref_nodes", 1)],
        ("entities", 0): [
            ("create_entity_ref_nodes", 0),
            ("entities", 0)],
        ("entities", 1): [
            ("entities", 1)],
        ("whitespace_in_element_content", 0): [
            ("whitespace_in_element_content", 0)],
        ("whitespace_in_element_content", 1): [
            ("whitespace_in_element_content", 1)],
        ("cdata_sections", 0): [
            ("cdata_sections", 0)],
        ("cdata_sections", 1): [
            ("cdata_sections", 1)],
        ("comments", 0): [
            ("comments", 0)],
        ("comments", 1): [
            ("comments", 1)],
        ("charset_overrides_xml_encoding", 0): [
            ("charset_overrides_xml_encoding", 0)],
        ("charset_overrides_xml_encoding", 1): [
            ("charset_overrides_xml_encoding", 1)],
        ("infoset", 0): [],
        ("infoset", 1): [
            ("namespace_declarations", 0),
            ("validate_if_schema", 0),
            ("create_entity_ref_nodes", 0),
            ("entities", 0),
            ("cdata_sections", 0),
            ("datatype_normalization", 1),
            ("whitespace_in_element_content", 1),
            ("comments", 1),
            ("charset_overrides_xml_encoding", 1)],
        ("supported_mediatypes_only", 0): [
            ("supported_mediatypes_only", 0)],
        ("namespaces", 0): [
            ("namespaces", 0)],
        ("namespaces", 1): [
            ("namespaces", 1)],
    }

    def getFeature(self, name):
        """Return the current value of feature *name*.

        "infoset" is a composite feature derived from several options.
        """
        xname = _name_xform(name)
        try:
            return getattr(self._options, xname)
        except AttributeError:
            if name == "infoset":
                options = self._options
                return (options.datatype_normalization
                        and options.whitespace_in_element_content
                        and options.comments
                        and options.charset_overrides_xml_encoding
                        and not (options.namespace_declarations
                                 or options.validate_if_schema
                                 or options.create_entity_ref_nodes
                                 or options.entities
                                 or options.cdata_sections))
            raise xml.dom.NotFoundErr("feature %s not known" % repr(name))

    def parseURI(self, uri):
        """Resolve *uri* to an input source and parse it."""
        if self.entityResolver:
            input = self.entityResolver.resolveEntity(None, uri)
        else:
            input = DOMEntityResolver().resolveEntity(None, uri)
        return self.parse(input)

    def parse(self, input):
        """Parse a DOMInputSource, opening its systemId if no byte stream."""
        options = copy.copy(self._options)
        options.filter = self.filter
        options.errorHandler = self.errorHandler
        fp = input.byteStream
        # Bug fix: the system id lives on the input source, not on the
        # Options object -- 'options.systemId' raised AttributeError
        # whenever byteStream was None.
        if fp is None and input.systemId:
            import urllib.request
            fp = urllib.request.urlopen(input.systemId)
        return self._parse_bytestream(fp, options)

    def parseWithContext(self, input, cnode, action):
        """Unimplemented; validates *action* then raises."""
        if action not in self._legal_actions:
            raise ValueError("not a legal action")
        raise NotImplementedError("Haven't written this yet...")

    def _parse_bytestream(self, stream, options):
        import xml.dom.expatbuilder
        builder = xml.dom.expatbuilder.makeBuilder(options)
        return builder.parseFile(stream)
def _name_xform(name):
return name.lower().replace('-', '_')
class DOMEntityResolver(object):
    """Resolve a system id into a DOMInputSource with an open byte stream."""
    __slots__ = '_opener',

    def resolveEntity(self, publicId, systemId):
        """Open *systemId* and return a populated DOMInputSource."""
        assert systemId is not None
        source = DOMInputSource()
        source.publicId = publicId
        source.systemId = systemId
        source.byteStream = self._get_opener().open(systemId)

        # determine the encoding if the transport provided it
        source.encoding = self._guess_media_encoding(source)

        # determine the base URI if we can
        import posixpath, urllib.parse
        parts = urllib.parse.urlparse(systemId)
        scheme, netloc, path, params, query, fragment = parts
        # XXX should we check the scheme here as well?
        if path and not path.endswith("/"):
            path = posixpath.dirname(path) + "/"
            parts = scheme, netloc, path, params, query, fragment
            source.baseURI = urllib.parse.urlunparse(parts)

        return source

    def _get_opener(self):
        # Lazily build one urllib opener and cache it on the instance.
        try:
            return self._opener
        except AttributeError:
            self._opener = self._create_opener()
            return self._opener

    def _create_opener(self):
        import urllib.request
        return urllib.request.build_opener()

    def _guess_media_encoding(self, source):
        """Return the transport-declared charset (lowercased), or None.

        Bug fix: the original looped over ``info.getplist()``, an API of
        the Python 2 ``rfc822``/``mimetools`` message classes that
        ``email.message.Message`` (returned by ``info()`` in Python 3)
        does not provide, so any response carrying a Content-Type header
        raised AttributeError.  ``get_content_charset()`` performs the
        equivalent lookup: it returns the lowercased ``charset`` parameter
        of the Content-Type header, or None when absent.
        """
        info = source.byteStream.info()
        return info.get_content_charset()
class DOMInputSource(object):
    """Value object describing where a document to parse comes from.

    Mirrors the DOM Level 3 LS DOMInputSource interface: a byte stream,
    character stream or raw string, plus identifier/encoding metadata.
    """
    __slots__ = ('byteStream', 'characterStream', 'stringData',
                 'encoding', 'publicId', 'systemId', 'baseURI')

    def __init__(self):
        # Every attribute starts out unset.
        for slot in self.__slots__:
            setattr(self, slot, None)

    # Accessor pairs kept for the DOM-mandated property protocol.
    def _get_byteStream(self):
        return self.byteStream
    def _set_byteStream(self, byteStream):
        self.byteStream = byteStream

    def _get_characterStream(self):
        return self.characterStream
    def _set_characterStream(self, characterStream):
        self.characterStream = characterStream

    def _get_stringData(self):
        return self.stringData
    def _set_stringData(self, data):
        self.stringData = data

    def _get_encoding(self):
        return self.encoding
    def _set_encoding(self, encoding):
        self.encoding = encoding

    def _get_publicId(self):
        return self.publicId
    def _set_publicId(self, publicId):
        self.publicId = publicId

    def _get_systemId(self):
        return self.systemId
    def _set_systemId(self, systemId):
        self.systemId = systemId

    def _get_baseURI(self):
        return self.baseURI
    def _set_baseURI(self, uri):
        self.baseURI = uri
class DOMBuilderFilter:
    """Element filter which can be used to tailor construction of
    a DOM instance.
    """

    # Concrete implementations only need to override acceptNode() and/or
    # startContainer(); this base class accepts every node.
    FILTER_ACCEPT = 1
    FILTER_REJECT = 2
    FILTER_SKIP = 3
    FILTER_INTERRUPT = 4

    whatToShow = NodeFilter.SHOW_ALL

    def _get_whatToShow(self):
        return self.whatToShow

    def acceptNode(self, element):
        # Default: keep the completed node.
        return self.FILTER_ACCEPT

    def startContainer(self, element):
        # Default: descend into the container.
        return self.FILTER_ACCEPT

del NodeFilter
class DocumentLS:
    """Mixin to create documents that conform to the load/save spec."""

    # Bug fix: 'async' became a reserved word in Python 3.7, so the old
    # 'async = False' attribute was a SyntaxError.  Renamed to 'async_'
    # following the upstream CPython fix (bpo-30406); asynchronous loading
    # remains unsupported.
    async_ = False

    def _get_async(self):
        return False

    def _set_async(self, flag):
        # Only synchronous loading is supported; any truthy value is an error.
        if flag:
            raise xml.dom.NotSupportedErr(
                "asynchronous document loading is not supported")

    def abort(self):
        # What does it mean to "clear" a document?  Does the
        # documentElement disappear?
        raise NotImplementedError(
            "haven't figured out what this means yet")

    def load(self, uri):
        raise NotImplementedError("haven't written this yet")

    def loadXML(self, source):
        raise NotImplementedError("haven't written this yet")

    def saveXML(self, snode):
        """Serialize *snode* (or the whole document when None) to XML text.

        :raises xml.dom.WrongDocumentErr: *snode* belongs to another document.
        """
        if snode is None:
            snode = self
        elif snode.ownerDocument is not self:
            raise xml.dom.WrongDocumentErr()
        return snode.toxml()
class DOMImplementationLS:
    """Factory mixin implementing the DOM Level 3 Load/Save entry points."""
    MODE_SYNCHRONOUS = 1
    MODE_ASYNCHRONOUS = 2

    def createDOMBuilder(self, mode, schemaType):
        """Return a synchronous DOMBuilder; other modes are unsupported."""
        if schemaType is not None:
            raise xml.dom.NotSupportedErr(
                "schemaType not yet supported")
        if mode == self.MODE_SYNCHRONOUS:
            return DOMBuilder()
        elif mode == self.MODE_ASYNCHRONOUS:
            raise xml.dom.NotSupportedErr(
                "asynchronous builders are not supported")
        else:
            raise ValueError("unknown value for mode")

    def createDOMWriter(self):
        raise NotImplementedError(
            "the writer interface hasn't been written yet!")

    def createDOMInputSource(self):
        """Return a fresh, empty DOMInputSource."""
        return DOMInputSource()
|
gpl-3.0
|
rahulunair/nova
|
nova/tests/unit/scheduler/client/test_report.py
|
1
|
174484
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import time
import fixtures
from keystoneauth1 import exceptions as ks_exc
import mock
import os_resource_classes as orc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
import six
from six.moves.urllib import parse
import nova.conf
from nova import context
from nova import exception
from nova import objects
from nova.scheduler.client import report
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_requests
CONF = nova.conf.CONF
class SafeConnectedTestCase(test.NoDBTestCase):
    """Test the safe_connect decorator for the scheduler client."""

    def setUp(self):
        # Build a report client with keystone auth loading stubbed out so
        # no real credentials or config are required.
        super(SafeConnectedTestCase, self).setUp()
        self.context = context.get_admin_context()

        with mock.patch('keystoneauth1.loading.load_auth_from_conf_options'):
            self.client = report.SchedulerReportClient()

    @mock.patch('keystoneauth1.session.Session.request')
    def test_missing_endpoint(self, req):
        """Test EndpointNotFound behavior.

        A missing endpoint entry should not explode.
        """
        req.side_effect = ks_exc.EndpointNotFound()
        self.client._get_resource_provider(self.context, "fake")

        # reset the call count to demonstrate that future calls still
        # work
        req.reset_mock()
        self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_create_client')
    @mock.patch('keystoneauth1.session.Session.request')
    def test_missing_endpoint_create_client(self, req, create_client):
        """Test EndpointNotFound retry behavior.

        A missing endpoint should cause _create_client to be called.
        """
        req.side_effect = ks_exc.EndpointNotFound()
        self.client._get_resource_provider(self.context, "fake")

        # This is the second time _create_client is called, but the first
        # since the mock was created.
        self.assertTrue(create_client.called)

    @mock.patch('keystoneauth1.session.Session.request')
    def test_missing_auth(self, req):
        """Test Missing Auth handled correctly.

        A missing auth configuration should not explode.
        """
        req.side_effect = ks_exc.MissingAuthPlugin()
        self.client._get_resource_provider(self.context, "fake")

        # reset the call count to demonstrate that future calls still
        # work
        req.reset_mock()
        self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)

    @mock.patch('keystoneauth1.session.Session.request')
    def test_unauthorized(self, req):
        """Test Unauthorized handled correctly.

        An unauthorized configuration should not explode.
        """
        req.side_effect = ks_exc.Unauthorized()
        self.client._get_resource_provider(self.context, "fake")

        # reset the call count to demonstrate that future calls still
        # work
        req.reset_mock()
        self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)

    @mock.patch('keystoneauth1.session.Session.request')
    def test_connect_fail(self, req):
        """Test Connect Failure handled correctly.

        If we get a connect failure, this is transient, and we expect
        that this will end up working correctly later.
        """
        req.side_effect = ks_exc.ConnectFailure()
        self.client._get_resource_provider(self.context, "fake")

        # reset the call count to demonstrate that future calls do
        # work
        req.reset_mock()
        self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)

    @mock.patch.object(report, 'LOG')
    def test_warning_limit(self, mock_log):
        """warn_limit only logs every WARN_EVERY invocations."""
        # Assert that __init__ initializes _warn_count as we expect
        self.assertEqual(0, self.client._warn_count)
        mock_self = mock.MagicMock()
        mock_self._warn_count = 0
        for i in range(0, report.WARN_EVERY + 3):
            report.warn_limit(mock_self, 'warning')
        mock_log.warning.assert_has_calls([mock.call('warning'),
                                           mock.call('warning')])

    @mock.patch('keystoneauth1.session.Session.request')
    def test_failed_discovery(self, req):
        """Test DiscoveryFailure behavior.

        Failed discovery should not blow up.
        """
        req.side_effect = ks_exc.DiscoveryFailure()
        self.client._get_resource_provider(self.context, "fake")

        # reset the call count to demonstrate that future calls still
        # work
        req.reset_mock()
        self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)
class TestConstructor(test.NoDBTestCase):
    """Verify SchedulerReportClient construction wires keystoneauth up."""

    def setUp(self):
        super(TestConstructor, self).setUp()
        ksa_fixture = self.useFixture(nova_fixtures.KSAFixture())
        self.load_auth_mock = ksa_fixture.mock_load_auth
        self.load_sess_mock = ksa_fixture.mock_load_sess

    def test_constructor(self):
        # Default config: internal+public interfaces and a JSON accept header.
        client = report.SchedulerReportClient()

        self.load_auth_mock.assert_called_once_with(CONF, 'placement')
        self.load_sess_mock.assert_called_once_with(
            CONF, 'placement', auth=self.load_auth_mock.return_value)
        self.assertEqual(['internal', 'public'], client._client.interface)
        self.assertEqual({'accept': 'application/json'},
                         client._client.additional_headers)

    def test_constructor_admin_interface(self):
        # Overriding valid_interfaces narrows the ksa adapter to 'admin'.
        self.flags(valid_interfaces='admin', group='placement')
        client = report.SchedulerReportClient()

        self.load_auth_mock.assert_called_once_with(CONF, 'placement')
        self.load_sess_mock.assert_called_once_with(
            CONF, 'placement', auth=self.load_auth_mock.return_value)
        self.assertEqual(['admin'], client._client.interface)
        self.assertEqual({'accept': 'application/json'},
                         client._client.additional_headers)
class SchedulerReportClientTestCase(test.NoDBTestCase):
    """Base fixture: a report client wired to a mocked ksa adapter."""

    def setUp(self):
        super(SchedulerReportClientTestCase, self).setUp()
        self.context = context.get_admin_context()
        self.useFixture(nova_fixtures.KSAFixture())
        self.ks_adap_mock = mock.Mock()
        # A representative compute node used by the provider-tree helpers.
        self.compute_node = objects.ComputeNode(
            uuid=uuids.compute_node,
            hypervisor_hostname='foo',
            vcpus=8,
            cpu_allocation_ratio=16.0,
            memory_mb=1024,
            ram_allocation_ratio=1.5,
            local_gb=10,
            disk_allocation_ratio=1.0,
        )

        self.client = report.SchedulerReportClient(self.ks_adap_mock)

    def _init_provider_tree(self, generation_override=None,
                            resources_override=None):
        # Seed the client's provider tree with a root RP for the compute
        # node plus a standard VCPU/MEMORY_MB/DISK_GB inventory derived
        # from it (unless an explicit inventory is supplied).
        cn = self.compute_node
        resources = resources_override
        if resources_override is None:
            resources = {
                'VCPU': {
                    'total': cn.vcpus,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': cn.vcpus,
                    'step_size': 1,
                    'allocation_ratio': cn.cpu_allocation_ratio,
                },
                'MEMORY_MB': {
                    'total': cn.memory_mb,
                    'reserved': 512,
                    'min_unit': 1,
                    'max_unit': cn.memory_mb,
                    'step_size': 1,
                    'allocation_ratio': cn.ram_allocation_ratio,
                },
                'DISK_GB': {
                    'total': cn.local_gb,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': cn.local_gb,
                    'step_size': 1,
                    'allocation_ratio': cn.disk_allocation_ratio,
                },
            }
        generation = generation_override or 1
        rp_uuid = self.client._provider_tree.new_root(
            cn.hypervisor_hostname,
            cn.uuid,
            generation=generation,
        )
        self.client._provider_tree.update_inventory(rp_uuid, resources)

    def _validate_provider(self, name_or_uuid, **kwargs):
        """Validates existence and values of a provider in this client's
        _provider_tree.

        :param name_or_uuid: The name or UUID of the provider to validate.
        :param kwargs: Optional keyword arguments of ProviderData attributes
            whose values are to be validated.
        """
        found = self.client._provider_tree.data(name_or_uuid)
        # If kwargs provided, their names indicate ProviderData attributes
        for attr, expected in kwargs.items():
            try:
                self.assertEqual(getattr(found, attr), expected)
            except AttributeError:
                self.fail("Provider with name or UUID %s doesn't have "
                          "attribute %s (expected value: %s)" %
                          (name_or_uuid, attr, expected))
class TestPutAllocations(SchedulerReportClientTestCase):
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.put')
    def test_put_allocations(self, mock_put):
        """A 204 from placement means put_allocations reports success."""
        mock_put.return_value.status_code = 204
        mock_put.return_value.text = "cool"
        rp_uuid = mock.sentinel.rp
        consumer_uuid = mock.sentinel.consumer
        data = {"MEMORY_MB": 1024}
        expected_url = "/allocations/%s" % consumer_uuid
        payload = {
            "allocations": {
                rp_uuid: {"resources": data}
            },
            "project_id": mock.sentinel.project_id,
            "user_id": mock.sentinel.user_id,
            "consumer_generation": mock.sentinel.consumer_generation
        }
        resp = self.client.put_allocations(
            self.context, consumer_uuid, payload)
        self.assertTrue(resp)
        # Consumer-generation support requires placement microversion 1.28.
        mock_put.assert_called_once_with(
            expected_url, payload, version='1.28',
            global_request_id=self.context.global_id)
    @mock.patch.object(report.LOG, 'warning')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.put')
    def test_put_allocations_fail(self, mock_put, mock_warn):
        """A 400 from placement returns False and logs a warning."""
        mock_put.return_value.status_code = 400
        mock_put.return_value.text = "not cool"
        rp_uuid = mock.sentinel.rp
        consumer_uuid = mock.sentinel.consumer
        data = {"MEMORY_MB": 1024}
        expected_url = "/allocations/%s" % consumer_uuid
        payload = {
            "allocations": {
                rp_uuid: {"resources": data}
            },
            "project_id": mock.sentinel.project_id,
            "user_id": mock.sentinel.user_id,
            "consumer_generation": mock.sentinel.consumer_generation
        }
        resp = self.client.put_allocations(
            self.context, consumer_uuid, payload)
        self.assertFalse(resp)
        mock_put.assert_called_once_with(
            expected_url, payload, version='1.28',
            global_request_id=self.context.global_id)
        # The first positional arg of the warning call is the log message.
        log_msg = mock_warn.call_args[0][0]
        self.assertIn("Failed to save allocation for", log_msg)
    def test_put_allocations_fail_connection_error(self):
        """Keystone endpoint errors surface as PlacementAPIConnectFailure."""
        self.ks_adap_mock.put.side_effect = ks_exc.EndpointNotFound()
        self.assertRaises(
            exception.PlacementAPIConnectFailure, self.client.put_allocations,
            self.context, mock.sentinel.consumer, mock.sentinel.payload)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.put')
    def test_put_allocations_fail_due_to_consumer_generation_conflict(
            self, mock_put):
        """A consumer-generation 409 is not retried; it raises
        AllocationUpdateFailed.
        """
        mock_put.return_value = fake_requests.FakeResponse(
            status_code=409,
            content=jsonutils.dumps(
                {'errors': [{'code': 'placement.concurrent_update',
                             'detail': 'consumer generation conflict'}]}))
        rp_uuid = mock.sentinel.rp
        consumer_uuid = mock.sentinel.consumer
        data = {"MEMORY_MB": 1024}
        expected_url = "/allocations/%s" % consumer_uuid
        payload = {
            "allocations": {
                rp_uuid: {"resources": data}
            },
            "project_id": mock.sentinel.project_id,
            "user_id": mock.sentinel.user_id,
            "consumer_generation": mock.sentinel.consumer_generation
        }
        self.assertRaises(exception.AllocationUpdateFailed,
                          self.client.put_allocations,
                          self.context, consumer_uuid, payload)

        mock_put.assert_called_once_with(
            expected_url, mock.ANY, version='1.28',
            global_request_id=self.context.global_id)
    @mock.patch('time.sleep', new=mock.Mock())
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.put')
    def test_put_allocations_retries_conflict(self, mock_put):
        """A generic concurrent-update 409 is retried and can succeed."""
        failed = fake_requests.FakeResponse(
            status_code=409,
            content=jsonutils.dumps(
                {'errors': [{'code': 'placement.concurrent_update',
                             'detail': ''}]}))

        succeeded = mock.MagicMock()
        succeeded.status_code = 204

        mock_put.side_effect = (failed, succeeded)
        rp_uuid = mock.sentinel.rp
        consumer_uuid = mock.sentinel.consumer
        data = {"MEMORY_MB": 1024}
        expected_url = "/allocations/%s" % consumer_uuid
        payload = {
            "allocations": {
                rp_uuid: {"resources": data}
            },
            "project_id": mock.sentinel.project_id,
            "user_id": mock.sentinel.user_id,
            "consumer_generation": mock.sentinel.consumer_generation
        }
        resp = self.client.put_allocations(
            self.context, consumer_uuid, payload)
        self.assertTrue(resp)
        # One failed attempt plus one successful retry.
        mock_put.assert_has_calls([
            mock.call(expected_url, payload, version='1.28',
                      global_request_id=self.context.global_id)] * 2)
    @mock.patch('time.sleep', new=mock.Mock())
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.put')
    def test_put_allocations_retry_gives_up(self, mock_put):
        """Persistent concurrent-update 409s exhaust the retries and fail."""
        failed = fake_requests.FakeResponse(
            status_code=409,
            content=jsonutils.dumps(
                {'errors': [{'code': 'placement.concurrent_update',
                             'detail': ''}]}))

        mock_put.return_value = failed
        rp_uuid = mock.sentinel.rp
        consumer_uuid = mock.sentinel.consumer
        data = {"MEMORY_MB": 1024}
        expected_url = "/allocations/%s" % consumer_uuid
        payload = {
            "allocations": {
                rp_uuid: {"resources": data}
            },
            "project_id": mock.sentinel.project_id,
            "user_id": mock.sentinel.user_id,
            "consumer_generation": mock.sentinel.consumer_generation
        }
        resp = self.client.put_allocations(
            self.context, consumer_uuid, payload)
        self.assertFalse(resp)
        # Three attempts total before giving up.
        mock_put.assert_has_calls([
            mock.call(expected_url, payload, version='1.28',
                      global_request_id=self.context.global_id)] * 3)
    def test_claim_resources_success(self):
        """Happy path: a fresh consumer's allocation request is PUT as-is."""
        get_resp_mock = mock.Mock(status_code=200)
        get_resp_mock.json.return_value = {
            'allocations': {},  # build instance, not move
        }
        self.ks_adap_mock.get.return_value = get_resp_mock
        resp_mock = mock.Mock(status_code=204)
        self.ks_adap_mock.put.return_value = resp_mock
        consumer_uuid = uuids.consumer_uuid
        alloc_req = {
            'allocations': {
                uuids.cn1: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    }
                },
            },
        }

        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.12')

        expected_url = "/allocations/%s" % consumer_uuid
        # The claimed payload is the allocation request plus project/user.
        expected_payload = {'allocations': {
            rp_uuid: alloc
            for rp_uuid, alloc in alloc_req['allocations'].items()}}
        expected_payload['project_id'] = project_id
        expected_payload['user_id'] = user_id
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.12', json=expected_payload,
            global_request_id=self.context.global_id)

        self.assertTrue(res)
    def test_claim_resources_older_alloc_req(self):
        """Test the case when a stale allocation request is sent to the report
        client to claim
        """
        get_resp_mock = mock.Mock(status_code=200)
        get_resp_mock.json.return_value = {
            'allocations': {},  # build instance, not move
        }
        self.ks_adap_mock.get.return_value = get_resp_mock
        resp_mock = mock.Mock(status_code=204)
        self.ks_adap_mock.put.return_value = resp_mock
        consumer_uuid = uuids.consumer_uuid
        alloc_req = {
            'allocations': {
                uuids.cn1: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    }
                },
            },
        }

        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.12')

        expected_url = "/allocations/%s" % consumer_uuid
        # NOTE: the 'res' in the comprehension below is scoped to the
        # comprehension and does not clobber the claim result above.
        expected_payload = {
            'allocations': {
                rp_uuid: res
                for rp_uuid, res in alloc_req['allocations'].items()},
            # no consumer generation in the payload as the caller requested
            # older microversion to be used
            'project_id': project_id,
            'user_id': user_id}
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.12', json=expected_payload,
            global_request_id=self.context.global_id)

        self.assertTrue(res)
    def test_claim_resources_success_resize_to_same_host_no_shared(self):
        """Tests resize to the same host operation. In this case allocation
        exists against the same host RP but with the migration_uuid.
        """
        get_current_allocations_resp_mock = mock.Mock(status_code=200)
        # source host allocation held by the migration_uuid so it is not
        # returned to the claim code as that asks for the instance_uuid
        # consumer
        get_current_allocations_resp_mock.json.return_value = {
            'allocations': {},
            "consumer_generation": 1,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id
        }
        self.ks_adap_mock.get.return_value = get_current_allocations_resp_mock
        put_allocations_resp_mock = mock.Mock(status_code=204)
        self.ks_adap_mock.put.return_value = put_allocations_resp_mock
        consumer_uuid = uuids.consumer_uuid
        # This is the resize-up allocation where VCPU, MEMORY_MB and DISK_GB
        # are all being increased but on the same host. We also throw a custom
        # resource class in the new allocation to make sure it's not lost
        alloc_req = {
            'allocations': {
                uuids.same_host: {
                    'resources': {
                        'VCPU': 2,
                        'MEMORY_MB': 2048,
                        'DISK_GB': 40,
                        'CUSTOM_FOO': 1
                    }
                },
            },
            # this allocation request comes from the scheduler therefore it
            # does not have consumer_generation in it.
            "project_id": uuids.project_id,
            "user_id": uuids.user_id
        }

        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.28')

        expected_url = "/allocations/%s" % consumer_uuid
        expected_payload = {
            'allocations': {
                uuids.same_host: {
                    'resources': {
                        'VCPU': 2,
                        'MEMORY_MB': 2048,
                        'DISK_GB': 40,
                        'CUSTOM_FOO': 1
                    }
                },
            },
            # report client assumes a new consumer in this case
            'consumer_generation': None,
            'project_id': project_id,
            'user_id': user_id}
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.28', json=mock.ANY,
            global_request_id=self.context.global_id)
        # We have to pull the json body from the mock call_args to validate
        # it separately otherwise hash seed issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        self.assertEqual(expected_payload, actual_payload)

        self.assertTrue(res)
    def test_claim_resources_success_resize_to_same_host_with_shared(self):
        """Tests resize to the same host operation. In this case allocation
        exists against the same host RP and the shared RP but with the
        migration_uuid.
        """
        get_current_allocations_resp_mock = mock.Mock(status_code=200)
        # source host allocation held by the migration_uuid so it is not
        # returned to the claim code as that asks for the instance_uuid
        # consumer
        get_current_allocations_resp_mock.json.return_value = {
            'allocations': {},
            "consumer_generation": 1,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id
        }
        self.ks_adap_mock.get.return_value = get_current_allocations_resp_mock
        put_allocations_resp_mock = mock.Mock(status_code=204)
        self.ks_adap_mock.put.return_value = put_allocations_resp_mock
        consumer_uuid = uuids.consumer_uuid
        # This is the resize-up allocation where VCPU, MEMORY_MB and DISK_GB
        # are all being increased but on the same host. We also throw a custom
        # resource class in the new allocation to make sure it's not lost
        alloc_req = {
            'allocations': {
                uuids.same_host: {
                    'resources': {
                        'VCPU': 2,
                        'MEMORY_MB': 2048,
                        'CUSTOM_FOO': 1
                    }
                },
                uuids.shared_storage: {
                    'resources': {
                        'DISK_GB': 40,
                    }
                },
            },
            # this allocation request comes from the scheduler therefore it
            # does not have consumer_generation in it.
            "project_id": uuids.project_id,
            "user_id": uuids.user_id
        }

        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.28')

        expected_url = "/allocations/%s" % consumer_uuid
        expected_payload = {
            'allocations': {
                uuids.same_host: {
                    'resources': {
                        'VCPU': 2,
                        'MEMORY_MB': 2048,
                        'CUSTOM_FOO': 1
                    }
                },
                uuids.shared_storage: {
                    'resources': {
                        'DISK_GB': 40,
                    }
                },
            },
            # report client assumes a new consumer in this case
            'consumer_generation': None,
            'project_id': project_id,
            'user_id': user_id}
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.28', json=mock.ANY,
            global_request_id=self.context.global_id)
        # We have to pull the json body from the mock call_args to validate
        # it separately otherwise hash seed issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        self.assertEqual(expected_payload, actual_payload)

        self.assertTrue(res)
def test_claim_resources_success_evacuate_no_shared(self):
"""Tests non-forced evacuate. In this case both the source and the
dest allocation are held by the instance_uuid in placement. So the
claim code needs to merge allocations. The second claim comes from the
scheduler and therefore it does not have consumer_generation in it.
"""
# the source allocation is also held by the instance_uuid so report
# client will see it.
current_allocs = {
'allocations': {
uuids.source_host: {
'generation': 42,
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 20
},
},
},
"consumer_generation": 1,
"project_id": uuids.project_id,
"user_id": uuids.user_id
}
self.ks_adap_mock.get.return_value = fake_requests.FakeResponse(
status_code=200,
content=jsonutils.dumps(current_allocs))
put_allocations_resp_mock = fake_requests.FakeResponse(status_code=204)
self.ks_adap_mock.put.return_value = put_allocations_resp_mock
consumer_uuid = uuids.consumer_uuid
# this is an evacuate so we have the same resources request towards the
# dest host
alloc_req = {
'allocations': {
uuids.dest_host: {
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 20,
}
},
},
# this allocation request comes from the scheduler therefore it
# does not have consumer_generation in it.
"project_id": uuids.project_id,
"user_id": uuids.user_id
}
project_id = uuids.project_id
user_id = uuids.user_id
res = self.client.claim_resources(self.context, consumer_uuid,
alloc_req, project_id, user_id,
allocation_request_version='1.28')
expected_url = "/allocations/%s" % consumer_uuid
# we expect that both the source and dest allocations are here
expected_payload = {
'allocations': {
uuids.source_host: {
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 20
},
},
uuids.dest_host: {
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 20,
}
},
},
# report client uses the consumer_generation that it got from
# placement when asked for the existing allocations
'consumer_generation': 1,
'project_id': project_id,
'user_id': user_id}
self.ks_adap_mock.put.assert_called_once_with(
expected_url, microversion='1.28', json=mock.ANY,
global_request_id=self.context.global_id)
# We have to pull the json body from the mock call_args to validate
# it separately otherwise hash seed issues get in the way.
actual_payload = self.ks_adap_mock.put.call_args[1]['json']
self.assertEqual(expected_payload, actual_payload)
self.assertTrue(res)
def test_claim_resources_success_evacuate_with_shared(self):
"""Similar test that test_claim_resources_success_evacuate_no_shared
but adds shared disk into the mix.
"""
# the source allocation is also held by the instance_uuid so report
# client will see it.
current_allocs = {
'allocations': {
uuids.source_host: {
'generation': 42,
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
},
},
uuids.shared_storage: {
'generation': 42,
'resources': {
'DISK_GB': 20,
},
},
},
"consumer_generation": 1,
"project_id": uuids.project_id,
"user_id": uuids.user_id
}
self.ks_adap_mock.get.return_value = fake_requests.FakeResponse(
status_code=200,
content = jsonutils.dumps(current_allocs))
self.ks_adap_mock.put.return_value = fake_requests.FakeResponse(
status_code=204)
consumer_uuid = uuids.consumer_uuid
# this is an evacuate so we have the same resources request towards the
# dest host
alloc_req = {
'allocations': {
uuids.dest_host: {
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
},
},
uuids.shared_storage: {
'generation': 42,
'resources': {
'DISK_GB': 20,
},
},
},
# this allocation request comes from the scheduler therefore it
# does not have consumer_generation in it.
"project_id": uuids.project_id,
"user_id": uuids.user_id
}
project_id = uuids.project_id
user_id = uuids.user_id
res = self.client.claim_resources(self.context, consumer_uuid,
alloc_req, project_id, user_id,
allocation_request_version='1.28')
expected_url = "/allocations/%s" % consumer_uuid
# we expect that both the source and dest allocations are here plus the
# shared storage allocation
expected_payload = {
'allocations': {
uuids.source_host: {
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
},
},
uuids.dest_host: {
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
}
},
uuids.shared_storage: {
'resources': {
'DISK_GB': 20,
},
},
},
# report client uses the consumer_generation that got from
# placement when asked for the existing allocations
'consumer_generation': 1,
'project_id': project_id,
'user_id': user_id}
self.ks_adap_mock.put.assert_called_once_with(
expected_url, microversion='1.28', json=mock.ANY,
global_request_id=self.context.global_id)
# We have to pull the json body from the mock call_args to validate
# it separately otherwise hash seed issues get in the way.
actual_payload = self.ks_adap_mock.put.call_args[1]['json']
self.assertEqual(expected_payload, actual_payload)
self.assertTrue(res)
    def test_claim_resources_success_force_evacuate_no_shared(self):
        """Tests forced evacuate. In this case both the source and the
        dest allocation are held by the instance_uuid in placement. So the
        claim code needs to merge allocations. The second claim comes from the
        conductor and therefore it does have consumer_generation in it.
        """
        # the source allocation is also held by the instance_uuid so report
        # client will see it.
        current_allocs = {
            'allocations': {
                uuids.source_host: {
                    # resource provider generation (distinct from the
                    # consumer_generation below)
                    'generation': 42,
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                        'DISK_GB': 20
                    },
                },
            },
            "consumer_generation": 1,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id
        }
        self.ks_adap_mock.get.return_value = fake_requests.FakeResponse(
            status_code=200,
            content=jsonutils.dumps(current_allocs))
        self.ks_adap_mock.put.return_value = fake_requests.FakeResponse(
            status_code=204)
        consumer_uuid = uuids.consumer_uuid
        # this is an evacuate so we have the same resources request towards
        # the dest host
        alloc_req = {
            'allocations': {
                uuids.dest_host: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                        'DISK_GB': 20,
                    }
                },
            },
            # this allocation request comes from the conductor that read the
            # allocation from placement therefore it has consumer_generation
            # in it.
            "consumer_generation": 1,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id
        }
        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.28')
        expected_url = "/allocations/%s" % consumer_uuid
        # we expect that both the source and dest allocations are here,
        # merged into a single payload for the consumer
        expected_payload = {
            'allocations': {
                uuids.source_host: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                        'DISK_GB': 20
                    },
                },
                uuids.dest_host: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                        'DISK_GB': 20,
                    }
                },
            },
            # report client uses the consumer_generation that it got in the
            # allocation request
            'consumer_generation': 1,
            'project_id': project_id,
            'user_id': user_id}
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.28', json=mock.ANY,
            global_request_id=self.context.global_id)
        # We have to pull the json body from the mock call_args to validate
        # it separately otherwise hash seed issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        self.assertEqual(expected_payload, actual_payload)
        self.assertTrue(res)
    def test_claim_resources_success_force_evacuate_with_shared(self):
        """Similar test that
        test_claim_resources_success_force_evacuate_no_shared but adds shared
        disk into the mix.
        """
        # the source allocation is also held by the instance_uuid so report
        # client will see it.
        current_allocs = {
            'allocations': {
                uuids.source_host: {
                    # resource provider generation (distinct from the
                    # consumer_generation below)
                    'generation': 42,
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
                uuids.shared_storage: {
                    'generation': 42,
                    'resources': {
                        'DISK_GB': 20,
                    },
                },
            },
            "consumer_generation": 1,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id
        }
        self.ks_adap_mock.get.return_value = fake_requests.FakeResponse(
            status_code=200,
            content=jsonutils.dumps(current_allocs))
        self.ks_adap_mock.put.return_value = fake_requests.FakeResponse(
            status_code=204)
        consumer_uuid = uuids.consumer_uuid
        # this is an evacuate so we have the same resources request towards
        # the dest host
        alloc_req = {
            'allocations': {
                uuids.dest_host: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
                uuids.shared_storage: {
                    'generation': 42,
                    'resources': {
                        'DISK_GB': 20,
                    },
                },
            },
            # this allocation request comes from the conductor that read the
            # allocation from placement therefore it has consumer_generation
            # in it.
            "consumer_generation": 1,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id
        }
        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.28')
        expected_url = "/allocations/%s" % consumer_uuid
        # we expect that both the source and dest allocations are here plus
        # the shared storage allocation
        expected_payload = {
            'allocations': {
                uuids.source_host: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
                uuids.dest_host: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    }
                },
                uuids.shared_storage: {
                    'resources': {
                        'DISK_GB': 20,
                    },
                },
            },
            # report client uses the consumer_generation that it got in the
            # allocation request
            'consumer_generation': 1,
            'project_id': project_id,
            'user_id': user_id}
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.28', json=mock.ANY,
            global_request_id=self.context.global_id)
        # We have to pull the json body from the mock call_args to validate
        # it separately otherwise hash seed issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        self.assertEqual(expected_payload, actual_payload)
        self.assertTrue(res)
@mock.patch('time.sleep', new=mock.Mock())
def test_claim_resources_fail_due_to_rp_generation_retry_success(self):
get_resp_mock = mock.Mock(status_code=200)
get_resp_mock.json.return_value = {
'allocations': {}, # build instance, not move
}
self.ks_adap_mock.get.return_value = get_resp_mock
resp_mocks = [
fake_requests.FakeResponse(
409,
jsonutils.dumps(
{'errors': [
{'code': 'placement.concurrent_update',
'detail': ''}]})),
fake_requests.FakeResponse(204)
]
self.ks_adap_mock.put.side_effect = resp_mocks
consumer_uuid = uuids.consumer_uuid
alloc_req = {
'allocations': {
uuids.cn1: {
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
}
},
},
}
project_id = uuids.project_id
user_id = uuids.user_id
res = self.client.claim_resources(self.context, consumer_uuid,
alloc_req, project_id, user_id,
allocation_request_version='1.28')
expected_url = "/allocations/%s" % consumer_uuid
expected_payload = {
'allocations':
{rp_uuid: res
for rp_uuid, res in alloc_req['allocations'].items()}
}
expected_payload['project_id'] = project_id
expected_payload['user_id'] = user_id
expected_payload['consumer_generation'] = None
# We should have exactly two calls to the placement API that look
# identical since we're retrying the same HTTP request
expected_calls = [
mock.call(expected_url, microversion='1.28', json=expected_payload,
global_request_id=self.context.global_id)] * 2
self.assertEqual(len(expected_calls),
self.ks_adap_mock.put.call_count)
self.ks_adap_mock.put.assert_has_calls(expected_calls)
self.assertTrue(res)
@mock.patch.object(report.LOG, 'warning')
def test_claim_resources_failure(self, mock_log):
get_resp_mock = mock.Mock(status_code=200)
get_resp_mock.json.return_value = {
'allocations': {}, # build instance, not move
}
self.ks_adap_mock.get.return_value = get_resp_mock
resp_mock = fake_requests.FakeResponse(
409,
jsonutils.dumps(
{'errors': [
{'code': 'something else',
'detail': 'not cool'}]}))
self.ks_adap_mock.put.return_value = resp_mock
consumer_uuid = uuids.consumer_uuid
alloc_req = {
'allocations': {
uuids.cn1: {
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
}
},
},
}
project_id = uuids.project_id
user_id = uuids.user_id
res = self.client.claim_resources(self.context, consumer_uuid,
alloc_req, project_id, user_id,
allocation_request_version='1.28')
expected_url = "/allocations/%s" % consumer_uuid
expected_payload = {
'allocations':
{rp_uuid: res
for rp_uuid, res in alloc_req['allocations'].items()}
}
expected_payload['project_id'] = project_id
expected_payload['user_id'] = user_id
expected_payload['consumer_generation'] = None
self.ks_adap_mock.put.assert_called_once_with(
expected_url, microversion='1.28', json=expected_payload,
global_request_id=self.context.global_id)
self.assertFalse(res)
self.assertTrue(mock_log.called)
def test_claim_resources_consumer_generation_failure(self):
get_resp_mock = mock.Mock(status_code=200)
get_resp_mock.json.return_value = {
'allocations': {}, # build instance, not move
}
self.ks_adap_mock.get.return_value = get_resp_mock
resp_mock = fake_requests.FakeResponse(
409,
jsonutils.dumps(
{'errors': [
{'code': 'placement.concurrent_update',
'detail': 'consumer generation conflict'}]}))
self.ks_adap_mock.put.return_value = resp_mock
consumer_uuid = uuids.consumer_uuid
alloc_req = {
'allocations': {
uuids.cn1: {
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
}
},
},
}
project_id = uuids.project_id
user_id = uuids.user_id
self.assertRaises(exception.AllocationUpdateFailed,
self.client.claim_resources, self.context,
consumer_uuid, alloc_req, project_id, user_id,
allocation_request_version='1.28')
expected_url = "/allocations/%s" % consumer_uuid
expected_payload = {
'allocations': {
rp_uuid: res
for rp_uuid, res in alloc_req['allocations'].items()},
'project_id': project_id,
'user_id': user_id,
'consumer_generation': None}
self.ks_adap_mock.put.assert_called_once_with(
expected_url, microversion='1.28', json=expected_payload,
global_request_id=self.context.global_id)
    def test_remove_provider_from_inst_alloc_no_shared(self):
        """Tests that the method which manipulates an existing doubled-up
        allocation for a move operation to remove the source host results in
        sending placement the proper payload to PUT
        /allocations/{consumer_uuid} call.
        """
        get_resp_mock = mock.Mock(status_code=200)
        # Two GETs are made: the first returns the consumer's current
        # allocations, the second the resource providers in the source
        # compute tree.
        get_resp_mock.json.side_effect = [
            {
                'allocations': {
                    uuids.source: {
                        'resource_provider_generation': 42,
                        'resources': {
                            'VCPU': 1,
                            'MEMORY_MB': 1024,
                        },
                    },
                    uuids.destination: {
                        'resource_provider_generation': 42,
                        'resources': {
                            'VCPU': 1,
                            'MEMORY_MB': 1024,
                        },
                    },
                },
                'consumer_generation': 1,
                'project_id': uuids.project_id,
                'user_id': uuids.user_id,
            },
            # the second get is for resource providers in the compute tree,
            # return just the compute
            {
                "resource_providers": [
                    {
                        "uuid": uuids.source,
                    },
                ]
            },
        ]
        self.ks_adap_mock.get.return_value = get_resp_mock
        resp_mock = mock.Mock(status_code=204)
        self.ks_adap_mock.put.return_value = resp_mock
        consumer_uuid = uuids.consumer_uuid
        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.remove_provider_tree_from_instance_allocation(
            self.context, consumer_uuid, uuids.source)
        expected_url = "/allocations/%s" % consumer_uuid
        # New allocations should only include the destination...
        expected_payload = {
            'allocations': {
                uuids.destination: {
                    'resource_provider_generation': 42,
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
            },
            'consumer_generation': 1,
            'project_id': project_id,
            'user_id': user_id
        }
        # We have to pull the json body from the mock call_args to validate
        # it separately otherwise hash seed issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        self.assertEqual(expected_payload, actual_payload)
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.28', json=mock.ANY,
            global_request_id=self.context.global_id)
        self.assertTrue(res)
    def test_remove_provider_from_inst_alloc_with_shared(self):
        """Tests that the method which manipulates an existing doubled-up
        allocation with DISK_GB being consumed from a shared storage provider
        for a move operation to remove the source host results in sending
        placement the proper payload to PUT /allocations/{consumer_uuid}
        call.
        """
        get_resp_mock = mock.Mock(status_code=200)
        # Two GETs are made: the first returns the consumer's current
        # allocations (including the shared storage provider), the second
        # the resource providers in the source compute tree.
        get_resp_mock.json.side_effect = [
            {
                'allocations': {
                    uuids.source: {
                        'resource_provider_generation': 42,
                        'resources': {
                            'VCPU': 1,
                            'MEMORY_MB': 1024,
                        },
                    },
                    uuids.shared_storage: {
                        'resource_provider_generation': 42,
                        'resources': {
                            'DISK_GB': 100,
                        },
                    },
                    uuids.destination: {
                        'resource_provider_generation': 42,
                        'resources': {
                            'VCPU': 1,
                            'MEMORY_MB': 1024,
                        },
                    },
                },
                'consumer_generation': 1,
                'project_id': uuids.project_id,
                'user_id': uuids.user_id,
            },
            # the second get is for resource providers in the compute tree,
            # return just the compute
            {
                "resource_providers": [
                    {
                        "uuid": uuids.source,
                    },
                ]
            },
        ]
        self.ks_adap_mock.get.return_value = get_resp_mock
        resp_mock = mock.Mock(status_code=204)
        self.ks_adap_mock.put.return_value = resp_mock
        consumer_uuid = uuids.consumer_uuid
        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.remove_provider_tree_from_instance_allocation(
            self.context, consumer_uuid, uuids.source)
        expected_url = "/allocations/%s" % consumer_uuid
        # New allocations should only include the destination and the shared
        # storage provider; the source compute is dropped.
        expected_payload = {
            'allocations': {
                uuids.shared_storage: {
                    'resource_provider_generation': 42,
                    'resources': {
                        'DISK_GB': 100,
                    },
                },
                uuids.destination: {
                    'resource_provider_generation': 42,
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
            },
            'consumer_generation': 1,
            'project_id': project_id,
            'user_id': user_id
        }
        # We have to pull the json body from the mock call_args to validate
        # it separately otherwise hash seed issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        self.assertEqual(expected_payload, actual_payload)
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.28', json=mock.ANY,
            global_request_id=self.context.global_id)
        self.assertTrue(res)
def test_remove_provider_from_inst_alloc_no_source(self):
"""Tests that if remove_provider_tree_from_instance_allocation() fails
to find any allocations for the source host, it just returns True and
does not attempt to rewrite the allocation for the consumer.
"""
get_resp_mock = mock.Mock(status_code=200)
get_resp_mock.json.side_effect = [
# Act like the allocations already did not include the source host
# for some reason
{
'allocations': {
uuids.shared_storage: {
'resource_provider_generation': 42,
'resources': {
'DISK_GB': 100,
},
},
uuids.destination: {
'resource_provider_generation': 42,
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
},
},
},
'consumer_generation': 1,
'project_id': uuids.project_id,
'user_id': uuids.user_id,
},
# the second get is for resource providers in the compute tree,
# return just the compute
{
"resource_providers": [
{
"uuid": uuids.source,
},
]
},
]
self.ks_adap_mock.get.return_value = get_resp_mock
consumer_uuid = uuids.consumer_uuid
res = self.client.remove_provider_tree_from_instance_allocation(
self.context, consumer_uuid, uuids.source)
self.ks_adap_mock.get.assert_called()
self.ks_adap_mock.put.assert_not_called()
self.assertTrue(res)
def test_remove_provider_from_inst_alloc_fail_get_allocs(self):
self.ks_adap_mock.get.return_value = fake_requests.FakeResponse(
status_code=500)
consumer_uuid = uuids.consumer_uuid
self.assertRaises(
exception.ConsumerAllocationRetrievalFailed,
self.client.remove_provider_tree_from_instance_allocation,
self.context, consumer_uuid, uuids.source)
self.ks_adap_mock.get.assert_called()
self.ks_adap_mock.put.assert_not_called()
def test_remove_provider_from_inst_alloc_consumer_gen_conflict(self):
get_resp_mock = mock.Mock(status_code=200)
get_resp_mock.json.side_effect = [
{
'allocations': {
uuids.source: {
'resource_provider_generation': 42,
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
},
},
uuids.destination: {
'resource_provider_generation': 42,
'resources': {
'VCPU': 1,
'MEMORY_MB': 1024,
},
},
},
'consumer_generation': 1,
'project_id': uuids.project_id,
'user_id': uuids.user_id,
},
# the second get is for resource providers in the compute tree,
# return just the compute
{
"resource_providers": [
{
"uuid": uuids.source,
},
]
},
]
self.ks_adap_mock.get.return_value = get_resp_mock
resp_mock = mock.Mock(status_code=409)
self.ks_adap_mock.put.return_value = resp_mock
consumer_uuid = uuids.consumer_uuid
res = self.client.remove_provider_tree_from_instance_allocation(
self.context, consumer_uuid, uuids.source)
self.assertFalse(res)
    def test_remove_provider_tree_from_inst_alloc_nested(self):
        """Tests that removing the source provider tree drops the allocations
        held against both the source root compute and its nested child
        provider, keeping only the destination allocation.
        """
        self.ks_adap_mock.get.side_effect = [
            # First GET: the consumer's current allocations, spread over the
            # source compute, a nested child of it, and the destination.
            fake_requests.FakeResponse(
                status_code=200,
                content=jsonutils.dumps(
                    {
                        'allocations': {
                            uuids.source_compute: {
                                'resource_provider_generation': 42,
                                'resources': {
                                    'VCPU': 1,
                                    'MEMORY_MB': 1024,
                                },
                            },
                            uuids.source_nested: {
                                'resource_provider_generation': 42,
                                'resources': {
                                    'CUSTOM_MAGIC': 1
                                },
                            },
                            uuids.destination: {
                                'resource_provider_generation': 42,
                                'resources': {
                                    'VCPU': 1,
                                    'MEMORY_MB': 1024,
                                },
                            },
                        },
                        'consumer_generation': 1,
                        'project_id': uuids.project_id,
                        'user_id': uuids.user_id,
                    })),
            # the second get is for resource providers in the compute tree,
            # return both RPs in the source compute tree
            fake_requests.FakeResponse(
                status_code=200,
                content=jsonutils.dumps(
                    {
                        "resource_providers": [
                            {
                                "uuid": uuids.source_compute,
                            },
                            {
                                "uuid": uuids.source_nested,
                            },
                        ]
                    }))
        ]
        self.ks_adap_mock.put.return_value = fake_requests.FakeResponse(
            status_code=204)
        consumer_uuid = uuids.consumer_uuid
        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.remove_provider_tree_from_instance_allocation(
            self.context, consumer_uuid, uuids.source_compute)
        expected_url = "/allocations/%s" % consumer_uuid
        # New allocations should only include the destination...
        expected_payload = {
            'allocations': {
                uuids.destination: {
                    'resource_provider_generation': 42,
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
            },
            'consumer_generation': 1,
            'project_id': project_id,
            'user_id': user_id
        }
        # Verify both GETs: the allocation retrieval and the in_tree listing.
        self.assertEqual(
            [
                mock.call(
                    '/allocations/%s' % consumer_uuid,
                    global_request_id=self.context.global_id,
                    microversion='1.28'
                ),
                mock.call(
                    '/resource_providers?in_tree=%s' % uuids.source_compute,
                    global_request_id=self.context.global_id,
                    microversion='1.14'
                )
            ],
            self.ks_adap_mock.get.mock_calls)
        # We have to pull the json body from the mock call_args to validate
        # it separately otherwise hash seed issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        self.assertEqual(expected_payload, actual_payload)
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.28', json=mock.ANY,
            global_request_id=self.context.global_id)
        self.assertTrue(res)
class TestMoveAllocations(SchedulerReportClientTestCase):
    """Tests SchedulerReportClient.move_allocations(), which POSTs
    /allocations to atomically move allocations from a source consumer to a
    target consumer.
    """

    def setUp(self):
        super(TestMoveAllocations, self).setUp()
        # We want to reuse the mock throughout the class, but with
        # different return values.
        patcher = mock.patch(
            'nova.scheduler.client.report.SchedulerReportClient.post')
        self.mock_post = patcher.start()
        self.addCleanup(patcher.stop)
        # NOTE: the duplicated assignments of status_code and rp_uuid that
        # used to follow the second patcher were removed; these apply once.
        self.mock_post.return_value.status_code = 204
        self.rp_uuid = mock.sentinel.rp
        self.consumer_uuid = mock.sentinel.consumer
        self.data = {"MEMORY_MB": 1024}
        patcher = mock.patch(
            'nova.scheduler.client.report.SchedulerReportClient.get')
        self.mock_get = patcher.start()
        self.addCleanup(patcher.stop)
        self.project_id = mock.sentinel.project_id
        self.user_id = mock.sentinel.user_id
        self.source_consumer_uuid = mock.sentinel.source_consumer
        self.target_consumer_uuid = mock.sentinel.target_consumer
        self.source_consumer_data = {
            "allocations": {
                self.rp_uuid: {
                    "generation": 1,
                    "resources": {
                        "MEMORY_MB": 1024
                    }
                }
            },
            "consumer_generation": 2,
            "project_id": self.project_id,
            "user_id": self.user_id
        }
        self.source_rsp = mock.Mock()
        self.source_rsp.json.return_value = self.source_consumer_data
        self.target_consumer_data = {
            "allocations": {
                self.rp_uuid: {
                    "generation": 1,
                    "resources": {
                        "MEMORY_MB": 2048
                    }
                }
            },
            "consumer_generation": 1,
            "project_id": self.project_id,
            "user_id": self.user_id
        }
        self.target_rsp = mock.Mock()
        self.target_rsp.json.return_value = self.target_consumer_data
        self.mock_get.side_effect = [self.source_rsp, self.target_rsp]
        self.expected_url = '/allocations'
        self.expected_microversion = '1.28'

    def test_url_microversion(self):
        """Tests that the POST goes to /allocations with the expected
        microversion.
        """
        resp = self.client.move_allocations(
            self.context, self.source_consumer_uuid, self.target_consumer_uuid)
        self.assertTrue(resp)
        self.mock_post.assert_called_once_with(
            self.expected_url, mock.ANY,
            version=self.expected_microversion,
            global_request_id=self.context.global_id)

    def test_move_to_empty_target(self):
        """Tests moving allocations to a target consumer that has none; the
        target is treated as a new consumer (consumer_generation None).
        """
        self.target_consumer_data = {"allocations": {}}
        target_rsp = mock.Mock()
        target_rsp.json.return_value = self.target_consumer_data
        self.mock_get.side_effect = [self.source_rsp, target_rsp]
        expected_payload = {
            self.target_consumer_uuid: {
                "allocations": {
                    self.rp_uuid: {
                        "resources": {
                            "MEMORY_MB": 1024
                        },
                        "generation": 1
                    }
                },
                "consumer_generation": None,
                "project_id": self.project_id,
                "user_id": self.user_id,
            },
            self.source_consumer_uuid: {
                "allocations": {},
                "consumer_generation": 2,
                "project_id": self.project_id,
                "user_id": self.user_id,
            }
        }
        resp = self.client.move_allocations(
            self.context, self.source_consumer_uuid, self.target_consumer_uuid)
        self.assertTrue(resp)
        self.mock_post.assert_called_once_with(
            self.expected_url, expected_payload,
            version=self.expected_microversion,
            global_request_id=self.context.global_id)

    @mock.patch('nova.scheduler.client.report.LOG.info')
    def test_move_from_empty_source(self, mock_info):
        """Tests the case that the target has allocations but the source does
        not so the move_allocations method assumes the allocations were
        already moved and returns True without trying to POST /allocations.
        """
        source_consumer_data = {"allocations": {}}
        source_rsp = mock.Mock()
        source_rsp.json.return_value = source_consumer_data
        self.mock_get.side_effect = [source_rsp, self.target_rsp]
        resp = self.client.move_allocations(
            self.context, self.source_consumer_uuid, self.target_consumer_uuid)
        self.assertTrue(resp)
        self.mock_post.assert_not_called()
        mock_info.assert_called_once()
        self.assertIn('Allocations not found for consumer',
                      mock_info.call_args[0][0])

    def test_move_to_non_empty_target(self):
        """Tests that moving onto a target that already has allocations
        overwrites them (and logs that fact at debug level).
        """
        self.mock_get.side_effect = [self.source_rsp, self.target_rsp]
        expected_payload = {
            self.target_consumer_uuid: {
                "allocations": {
                    self.rp_uuid: {
                        "resources": {
                            "MEMORY_MB": 1024
                        },
                        "generation": 1
                    }
                },
                "consumer_generation": 1,
                "project_id": self.project_id,
                "user_id": self.user_id,
            },
            self.source_consumer_uuid: {
                "allocations": {},
                "consumer_generation": 2,
                "project_id": self.project_id,
                "user_id": self.user_id,
            }
        }
        with fixtures.EnvironmentVariable('OS_DEBUG', '1'):
            with nova_fixtures.StandardLogging() as stdlog:
                resp = self.client.move_allocations(
                    self.context, self.source_consumer_uuid,
                    self.target_consumer_uuid)
        self.assertTrue(resp)
        self.mock_post.assert_called_once_with(
            self.expected_url, expected_payload,
            version=self.expected_microversion,
            global_request_id=self.context.global_id)
        self.assertIn('Overwriting current allocation',
                      stdlog.logger.output)

    @mock.patch('time.sleep')
    def test_409_concurrent_provider_update(self, mock_sleep):
        """Tests that a provider-generation 409 is retried a fixed number of
        times before move_allocations gives up and returns False.
        """
        # there will be 1 normal call and 3 retries
        self.mock_get.side_effect = [self.source_rsp, self.target_rsp,
                                     self.source_rsp, self.target_rsp,
                                     self.source_rsp, self.target_rsp,
                                     self.source_rsp, self.target_rsp]
        rsp = fake_requests.FakeResponse(
            409,
            jsonutils.dumps(
                {'errors': [
                    {'code': 'placement.concurrent_update',
                     'detail': ''}]}))
        self.mock_post.return_value = rsp
        resp = self.client.move_allocations(
            self.context, self.source_consumer_uuid, self.target_consumer_uuid)
        self.assertFalse(resp)
        # Post was attempted four times.
        self.assertEqual(4, self.mock_post.call_count)

    @mock.patch('nova.scheduler.client.report.LOG.warning')
    def test_not_409_failure(self, mock_log):
        """Tests that a non-409 failure is not retried and is logged as a
        warning.
        """
        error_message = 'placement not there'
        self.mock_post.return_value.status_code = 503
        self.mock_post.return_value.text = error_message
        resp = self.client.move_allocations(
            self.context, self.source_consumer_uuid, self.target_consumer_uuid)
        self.assertFalse(resp)
        args, kwargs = mock_log.call_args
        log_message = args[0]
        log_args = args[1]
        self.assertIn('Unable to post allocations', log_message)
        self.assertEqual(error_message, log_args['text'])

    def test_409_concurrent_consumer_update(self):
        """Tests that a consumer generation conflict raises
        AllocationMoveFailed instead of being retried.
        """
        self.mock_post.return_value = fake_requests.FakeResponse(
            status_code=409,
            content=jsonutils.dumps(
                {'errors': [{'code': 'placement.concurrent_update',
                             'detail': 'consumer generation conflict'}]}))
        self.assertRaises(exception.AllocationMoveFailed,
                          self.client.move_allocations, self.context,
                          self.source_consumer_uuid, self.target_consumer_uuid)
class TestProviderOperations(SchedulerReportClientTestCase):
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_create_resource_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_inventory')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_provider_aggregates')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get_provider_traits')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_sharing_providers')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get_providers_in_tree')
    # NOTE: mock.patch decorators are applied bottom-up, so the bottom-most
    # patch (get_providers_in_tree) maps to the first mock argument.
    def test_ensure_resource_provider_get(self, get_rpt_mock, get_shr_mock,
            get_trait_mock, get_agg_mock, get_inv_mock, create_rp_mock):
        """Tests that when placement already knows the provider, the cache is
        populated from the GET responses and no create is attempted, and that
        a second _ensure_resource_provider call is a no-op.
        """
        # No resource provider exists in the client's cache, so validate that
        # if we get the resource provider from the placement API that we don't
        # try to create the resource provider.
        get_rpt_mock.return_value = [{
            'uuid': uuids.compute_node,
            'name': mock.sentinel.name,
            'generation': 1,
        }]
        get_inv_mock.return_value = None
        get_agg_mock.return_value = report.AggInfo(
            aggregates=set([uuids.agg1]), generation=42)
        get_trait_mock.return_value = report.TraitInfo(
            traits=set(['CUSTOM_GOLD']), generation=43)
        get_shr_mock.return_value = []

        def assert_cache_contents():
            # Helper used twice: checks the provider tree cache reflects
            # exactly the mocked aggregates/traits/generation above.
            self.assertTrue(
                self.client._provider_tree.exists(uuids.compute_node))
            self.assertTrue(
                self.client._provider_tree.in_aggregates(uuids.compute_node,
                                                         [uuids.agg1]))
            self.assertFalse(
                self.client._provider_tree.in_aggregates(uuids.compute_node,
                                                         [uuids.agg2]))
            self.assertTrue(
                self.client._provider_tree.has_traits(uuids.compute_node,
                                                      ['CUSTOM_GOLD']))
            self.assertFalse(
                self.client._provider_tree.has_traits(uuids.compute_node,
                                                      ['CUSTOM_SILVER']))
            data = self.client._provider_tree.data(uuids.compute_node)
            self.assertEqual(43, data.generation)

        self.client._ensure_resource_provider(self.context, uuids.compute_node)
        assert_cache_contents()
        get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node)
        get_agg_mock.assert_called_once_with(self.context, uuids.compute_node)
        get_trait_mock.assert_called_once_with(self.context,
                                               uuids.compute_node)
        get_shr_mock.assert_called_once_with(self.context, set([uuids.agg1]))
        self.assertFalse(create_rp_mock.called)
        # Now that the cache is populated, a subsequent call should be a
        # no-op.
        get_rpt_mock.reset_mock()
        get_agg_mock.reset_mock()
        get_trait_mock.reset_mock()
        get_shr_mock.reset_mock()
        self.client._ensure_resource_provider(self.context, uuids.compute_node)
        assert_cache_contents()
        get_rpt_mock.assert_not_called()
        get_agg_mock.assert_not_called()
        get_trait_mock.assert_not_called()
        get_shr_mock.assert_not_called()
        create_rp_mock.assert_not_called()
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_create_resource_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_associations')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get_providers_in_tree')
    def test_ensure_resource_provider_create_fail(self, get_rpt_mock,
            refresh_mock, create_rp_mock):
        """Tests that a failing provider create propagates the exception and
        leaves the provider tree cache untouched.
        """
        # No resource provider exists in the client's cache, and
        # _create_provider raises, indicating there was an error with the
        # create call. Ensure we don't populate the resource provider cache
        get_rpt_mock.return_value = []
        create_rp_mock.side_effect = exception.ResourceProviderCreationFailed(
            name=uuids.compute_node)
        self.assertRaises(
            exception.ResourceProviderCreationFailed,
            self.client._ensure_resource_provider, self.context,
            uuids.compute_node)
        get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node)
        create_rp_mock.assert_called_once_with(
            self.context, uuids.compute_node, uuids.compute_node,
            parent_provider_uuid=None)
        self.assertFalse(self.client._provider_tree.exists(uuids.compute_node))
        self.assertFalse(refresh_mock.called)
        # in_aggregates/has_traits raising ValueError for this uuid proves
        # the provider never made it into the cache.
        self.assertRaises(
            ValueError,
            self.client._provider_tree.in_aggregates, uuids.compute_node, [])
        self.assertRaises(
            ValueError,
            self.client._provider_tree.has_traits, uuids.compute_node, [])
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_create_resource_provider', return_value=None)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_associations')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get_providers_in_tree')
    def test_ensure_resource_provider_create_no_placement(self, get_rpt_mock,
                                                          refresh_mock,
                                                          create_rp_mock):
        """A None result from the (safe_connect-decorated) create call is
        treated as a creation failure and raises.
        """
        # No resource provider exists in the client's cache, and
        # @safe_connect on _create_resource_provider returns None because
        # Placement isn't running yet. Ensure we don't populate the resource
        # provider cache.
        get_rpt_mock.return_value = []
        self.assertRaises(
            exception.ResourceProviderCreationFailed,
            self.client._ensure_resource_provider, self.context,
            uuids.compute_node)
        get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node)
        create_rp_mock.assert_called_once_with(
            self.context, uuids.compute_node, uuids.compute_node,
            parent_provider_uuid=None)
        self.assertFalse(self.client._provider_tree.exists(uuids.compute_node))
        refresh_mock.assert_not_called()
        # Never cached, so aggregate/trait queries raise ValueError.
        self.assertRaises(
            ValueError,
            self.client._provider_tree.in_aggregates, uuids.compute_node, [])
        self.assertRaises(
            ValueError,
            self.client._provider_tree.has_traits, uuids.compute_node, [])
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_create_resource_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_and_get_inventory')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_associations')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get_providers_in_tree')
    def test_ensure_resource_provider_create(self, get_rpt_mock,
                                             refresh_inv_mock,
                                             refresh_assoc_mock,
                                             create_rp_mock):
        """When the provider is neither cached nor in placement, it gets
        created via the placement API and cached without any refresh.
        """
        # No resource provider exists in the client's cache and no resource
        # provider was returned from the placement API, so verify that in this
        # case we try to create the resource provider via the placement API.
        get_rpt_mock.return_value = []
        create_rp_mock.return_value = {
            'uuid': uuids.compute_node,
            'name': 'compute-name',
            'generation': 1,
        }
        self.assertEqual(
            uuids.compute_node,
            self.client._ensure_resource_provider(self.context,
                                                  uuids.compute_node))
        self._validate_provider(uuids.compute_node, name='compute-name',
                                generation=1, parent_uuid=None,
                                aggregates=set(), traits=set())
        # We don't refresh for a just-created provider
        refresh_inv_mock.assert_not_called()
        refresh_assoc_mock.assert_not_called()
        get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node)
        create_rp_mock.assert_called_once_with(
            self.context,
            uuids.compute_node,
            uuids.compute_node,  # name param defaults to UUID if None
            parent_provider_uuid=None,
        )
        self.assertTrue(self.client._provider_tree.exists(uuids.compute_node))
        create_rp_mock.reset_mock()
        # Validate the path where we specify a name (don't default to the UUID)
        self.client._ensure_resource_provider(
            self.context, uuids.cn2, 'a-name')
        create_rp_mock.assert_called_once_with(
            self.context, uuids.cn2, 'a-name', parent_provider_uuid=None)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_associations')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_create_resource_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get_providers_in_tree')
    def test_ensure_resource_provider_tree(self, get_rpt_mock, create_rp_mock,
                                           refresh_mock):
        """Test _ensure_resource_provider with a tree of providers.

        Creates a root, two children and a grandchild, verifying parentage,
        cache contents, and that re-ensuring cached providers is a no-op.
        """
        def _create_resource_provider(context, uuid, name,
                                      parent_provider_uuid=None):
            """Mock side effect for creating the RP with the specified args."""
            return {
                'uuid': uuid,
                'name': name,
                'generation': 0,
                'parent_provider_uuid': parent_provider_uuid
            }
        create_rp_mock.side_effect = _create_resource_provider
        # We at least have to simulate the part of _refresh_associations that
        # marks a provider as 'seen'
        def mocked_refresh(context, rp_uuid, **kwargs):
            self.client._association_refresh_time[rp_uuid] = time.time()
        refresh_mock.side_effect = mocked_refresh
        # Not initially in the placement database, so we have to create it.
        get_rpt_mock.return_value = []
        # Create the root
        root = self.client._ensure_resource_provider(self.context, uuids.root)
        self.assertEqual(uuids.root, root)
        # Now create a child
        child1 = self.client._ensure_resource_provider(
            self.context, uuids.child1, name='junior',
            parent_provider_uuid=uuids.root)
        self.assertEqual(uuids.child1, child1)
        # If we re-ensure the child, we get the object from the tree, not a
        # newly-created one - i.e. the early .find() works like it should.
        self.assertIs(child1,
                      self.client._ensure_resource_provider(self.context,
                                                            uuids.child1))
        # Make sure we can create a grandchild
        grandchild = self.client._ensure_resource_provider(
            self.context, uuids.grandchild,
            parent_provider_uuid=uuids.child1)
        self.assertEqual(uuids.grandchild, grandchild)
        # Now create a second child of the root and make sure it doesn't wind
        # up in some crazy wrong place like under child1 or grandchild
        child2 = self.client._ensure_resource_provider(
            self.context, uuids.child2, parent_provider_uuid=uuids.root)
        self.assertEqual(uuids.child2, child2)
        all_rp_uuids = [uuids.root, uuids.child1, uuids.child2,
                        uuids.grandchild]
        # At this point we should get all the providers.
        self.assertEqual(
            set(all_rp_uuids),
            set(self.client._provider_tree.get_provider_uuids()))
        # And now _ensure is a no-op because everything is cached
        get_rpt_mock.reset_mock()
        create_rp_mock.reset_mock()
        refresh_mock.reset_mock()
        for rp_uuid in all_rp_uuids:
            self.client._ensure_resource_provider(self.context, rp_uuid)
        get_rpt_mock.assert_not_called()
        create_rp_mock.assert_not_called()
        refresh_mock.assert_not_called()
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get_providers_in_tree')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_associations')
    def test_ensure_resource_provider_refresh_fetch(self, mock_ref_assoc,
                                                    mock_gpit):
        """Make sure refreshes are called with the appropriate UUIDs and flags
        when we fetch the provider tree from placement.
        """
        tree_uuids = set([uuids.root, uuids.one, uuids.two])
        mock_gpit.return_value = [{'uuid': u, 'name': u, 'generation': 42}
                                  for u in tree_uuids]
        self.assertEqual(uuids.root,
                         self.client._ensure_resource_provider(self.context,
                                                               uuids.root))
        mock_gpit.assert_called_once_with(self.context, uuids.root)
        # Every provider in the freshly fetched tree is refreshed with
        # force=True.
        mock_ref_assoc.assert_has_calls(
            [mock.call(self.context, uuid, force=True)
             for uuid in tree_uuids])
        self.assertEqual(tree_uuids,
                         set(self.client._provider_tree.get_provider_uuids()))
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get_providers_in_tree')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_create_resource_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_associations')
    def test_ensure_resource_provider_refresh_create(self, mock_refresh,
            mock_create, mock_gpit):
        """Make sure refresh is not called when we create the RP."""
        mock_gpit.return_value = []
        mock_create.return_value = {'name': 'cn', 'uuid': uuids.cn,
                                    'generation': 42}
        self.assertEqual(uuids.root,
                         self.client._ensure_resource_provider(self.context,
                                                               uuids.root))
        mock_gpit.assert_called_once_with(self.context, uuids.root)
        mock_create.assert_called_once_with(self.context, uuids.root,
                                            uuids.root,
                                            parent_provider_uuid=None)
        mock_refresh.assert_not_called()
        # NOTE: the tree is keyed by the uuid in the record returned from the
        # create call (uuids.cn), even though uuids.root was requested.
        self.assertEqual([uuids.cn],
                         self.client._provider_tree.get_provider_uuids())
    def test_get_allocation_candidates(self):
        """Flavor extra_specs (resources/trait/group_policy, numbered and
        named groups, forbidden traits) plus aggregate constraints are
        translated into the expected GET /allocation_candidates querystring.
        Malformed extra_spec keys/values are ignored.
        """
        resp_mock = mock.Mock(status_code=200)
        json_data = {
            'allocation_requests': mock.sentinel.alloc_reqs,
            'provider_summaries': mock.sentinel.p_sums,
        }
        flavor = objects.Flavor(
            vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
            extra_specs={
                'resources:VCPU': '1',
                'resources:MEMORY_MB': '1024',
                'trait:HW_CPU_X86_AVX': 'required',
                'trait:CUSTOM_TRAIT1': 'required',
                'trait:CUSTOM_TRAIT2': 'preferred',
                'trait:CUSTOM_TRAIT3': 'forbidden',
                'trait:CUSTOM_TRAIT4': 'forbidden',
                'resources_DISK:DISK_GB': '30',
                'trait_DISK:STORAGE_DISK_SSD': 'required',
                'resources2:VGPU': '2',
                'trait2:HW_GPU_RESOLUTION_W2560H1600': 'required',
                'trait2:HW_GPU_API_VULKAN': 'required',
                'resources_NET:SRIOV_NET_VF': '1',
                'resources_NET:CUSTOM_NET_EGRESS_BYTES_SEC': '125000',
                'group_policy': 'isolate',
                # These are ignored because misspelled, bad value, etc.
                'resources*2:CUSTOM_WIDGET': '123',
                'trait:HW_NIC_OFFLOAD_LRO': 'preferred',
                'group_policy3': 'none',
            })
        req_spec = objects.RequestSpec(flavor=flavor, is_bfv=False)
        resources = scheduler_utils.ResourceRequest(req_spec)
        resources.get_request_group(None).aggregates = [
            ['agg1', 'agg2', 'agg3'], ['agg1', 'agg2']]
        forbidden_aggs = set(['agg1', 'agg5', 'agg6'])
        resources.get_request_group(None).forbidden_aggregates = forbidden_aggs
        expected_path = '/allocation_candidates'
        expected_query = [
            ('group_policy', 'isolate'),
            ('limit', '1000'),
            ('member_of', '!in:agg1,agg5,agg6'),
            ('member_of', 'in:agg1,agg2'),
            ('member_of', 'in:agg1,agg2,agg3'),
            ('required', 'CUSTOM_TRAIT1,HW_CPU_X86_AVX,!CUSTOM_TRAIT3,'
                         '!CUSTOM_TRAIT4'),
            ('required2', 'HW_GPU_API_VULKAN,HW_GPU_RESOLUTION_W2560H1600'),
            ('required_DISK', 'STORAGE_DISK_SSD'),
            ('resources', 'MEMORY_MB:1024,VCPU:1'),
            ('resources2', 'VGPU:2'),
            ('resources_DISK', 'DISK_GB:30'),
            ('resources_NET',
             'CUSTOM_NET_EGRESS_BYTES_SEC:125000,SRIOV_NET_VF:1')
        ]
        resp_mock.json.return_value = json_data
        self.ks_adap_mock.get.return_value = resp_mock
        alloc_reqs, p_sums, allocation_request_version = (
            self.client.get_allocation_candidates(self.context, resources))
        url = self.ks_adap_mock.get.call_args[0][0]
        split_url = parse.urlsplit(url)
        query = parse.parse_qsl(split_url.query)
        self.assertEqual(expected_path, split_url.path)
        self.assertEqual(expected_query, query)
        expected_url = '/allocation_candidates?%s' % parse.urlencode(
            expected_query)
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url, microversion='1.35',
            global_request_id=self.context.global_id)
        self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs)
        self.assertEqual(mock.sentinel.p_sums, p_sums)
    def test_get_ac_no_trait_bogus_group_policy_custom_limit(self):
        """With no traits, a bogus group_policy value, and a custom
        max_placement_results, the querystring carries only the configured
        limit and the resources groups.
        """
        self.flags(max_placement_results=42, group='scheduler')
        resp_mock = mock.Mock(status_code=200)
        json_data = {
            'allocation_requests': mock.sentinel.alloc_reqs,
            'provider_summaries': mock.sentinel.p_sums,
        }
        flavor = objects.Flavor(
            vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
            extra_specs={
                'resources:VCPU': '1',
                'resources:MEMORY_MB': '1024',
                'resources1:DISK_GB': '30',
                'group_policy': 'bogus',
            })
        req_spec = objects.RequestSpec(flavor=flavor, is_bfv=False)
        resources = scheduler_utils.ResourceRequest(req_spec)
        expected_path = '/allocation_candidates'
        expected_query = [
            ('limit', '42'),
            ('resources', 'MEMORY_MB:1024,VCPU:1'),
            ('resources1', 'DISK_GB:30'),
        ]
        resp_mock.json.return_value = json_data
        self.ks_adap_mock.get.return_value = resp_mock
        alloc_reqs, p_sums, allocation_request_version = (
            self.client.get_allocation_candidates(self.context, resources))
        url = self.ks_adap_mock.get.call_args[0][0]
        split_url = parse.urlsplit(url)
        query = parse.parse_qsl(split_url.query)
        self.assertEqual(expected_path, split_url.path)
        self.assertEqual(expected_query, query)
        expected_url = '/allocation_candidates?%s' % parse.urlencode(
            expected_query)
        self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs)
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url, microversion='1.35',
            global_request_id=self.context.global_id)
        self.assertEqual(mock.sentinel.p_sums, p_sums)
    def test_get_allocation_candidates_not_found(self):
        """A 404 from GET /allocation_candidates yields None allocation
        requests, while the request URL still honors the configured limit.
        """
        # Ensure _get_resource_provider() just returns None when the placement
        # API doesn't find a resource provider matching a UUID
        resp_mock = mock.Mock(status_code=404)
        self.ks_adap_mock.get.return_value = resp_mock
        expected_path = '/allocation_candidates'
        expected_query = {
            'resources': ['DISK_GB:15,MEMORY_MB:1024,VCPU:1'],
            'limit': ['100']
        }
        # Make sure we're also honoring the configured limit
        self.flags(max_placement_results=100, group='scheduler')
        flavor = objects.Flavor(
            vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0)
        req_spec = objects.RequestSpec(flavor=flavor, is_bfv=False)
        resources = scheduler_utils.ResourceRequest(req_spec)
        res = self.client.get_allocation_candidates(self.context, resources)
        self.ks_adap_mock.get.assert_called_once_with(
            mock.ANY, microversion='1.35',
            global_request_id=self.context.global_id)
        url = self.ks_adap_mock.get.call_args[0][0]
        split_url = parse.urlsplit(url)
        query = parse.parse_qs(split_url.query)
        self.assertEqual(expected_path, split_url.path)
        self.assertEqual(expected_query, query)
        self.assertIsNone(res[0])
    def test_get_resource_provider_found(self):
        """A 200 response from GET /resource_providers/{uuid} is returned
        as the provider dict.
        """
        # Ensure _get_resource_provider() returns a dict of resource provider
        # if it finds a resource provider record from the placement API
        uuid = uuids.compute_node
        resp_mock = mock.Mock(status_code=200)
        json_data = {
            'uuid': uuid,
            'name': uuid,
            'generation': 42,
            'parent_provider_uuid': None,
        }
        resp_mock.json.return_value = json_data
        self.ks_adap_mock.get.return_value = resp_mock
        result = self.client._get_resource_provider(self.context, uuid)
        expected_provider_dict = dict(
            uuid=uuid,
            name=uuid,
            generation=42,
            parent_provider_uuid=None,
        )
        expected_url = '/resource_providers/' + uuid
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url, microversion='1.14',
            global_request_id=self.context.global_id)
        self.assertEqual(expected_provider_dict, result)
def test_get_resource_provider_not_found(self):
# Ensure _get_resource_provider() just returns None when the placement
# API doesn't find a resource provider matching a UUID
resp_mock = mock.Mock(status_code=404)
self.ks_adap_mock.get.return_value = resp_mock
uuid = uuids.compute_node
result = self.client._get_resource_provider(self.context, uuid)
expected_url = '/resource_providers/' + uuid
self.ks_adap_mock.get.assert_called_once_with(
expected_url, microversion='1.14',
global_request_id=self.context.global_id)
self.assertIsNone(result)
    @mock.patch.object(report.LOG, 'error')
    def test_get_resource_provider_error(self, logging_mock):
        """An unexpected placement error (503) raises
        ResourceProviderRetrievalFailed and logs the placement request id.
        """
        # Ensure _get_resource_provider() sets the error flag when trying to
        # communicate with the placement API and not getting an error we can
        # deal with
        resp_mock = mock.Mock(status_code=503)
        self.ks_adap_mock.get.return_value = resp_mock
        self.ks_adap_mock.get.return_value.headers = {
            'x-openstack-request-id': uuids.request_id}
        uuid = uuids.compute_node
        self.assertRaises(
            exception.ResourceProviderRetrievalFailed,
            self.client._get_resource_provider, self.context, uuid)
        expected_url = '/resource_providers/' + uuid
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url, microversion='1.14',
            global_request_id=self.context.global_id)
        # A 503 Service Unavailable should trigger an error log that
        # includes the placement request id; the call above raised
        # ResourceProviderRetrievalFailed rather than returning a value.
        self.assertTrue(logging_mock.called)
        self.assertEqual(uuids.request_id,
                         logging_mock.call_args[0][1]['placement_req_id'])
    def test_get_sharing_providers(self):
        """Providers with MISC_SHARES_VIA_AGGREGATE in the given aggregates
        are fetched via one GET /resource_providers call.
        """
        resp_mock = mock.Mock(status_code=200)
        rpjson = [
            {
                'uuid': uuids.sharing1,
                'name': 'bandwidth_provider',
                'generation': 42,
                'parent_provider_uuid': None,
                'root_provider_uuid': None,
                'links': [],
            },
            {
                'uuid': uuids.sharing2,
                'name': 'storage_provider',
                'generation': 42,
                'parent_provider_uuid': None,
                'root_provider_uuid': None,
                'links': [],
            },
        ]
        resp_mock.json.return_value = {'resource_providers': rpjson}
        self.ks_adap_mock.get.return_value = resp_mock
        result = self.client._get_sharing_providers(
            self.context, [uuids.agg1, uuids.agg2])
        expected_url = ('/resource_providers?member_of=in:' +
                        ','.join((uuids.agg1, uuids.agg2)) +
                        '&required=MISC_SHARES_VIA_AGGREGATE')
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url, microversion='1.18',
            global_request_id=self.context.global_id)
        self.assertEqual(rpjson, result)
def test_get_sharing_providers_emptylist(self):
self.assertEqual(
[], self.client._get_sharing_providers(self.context, []))
self.ks_adap_mock.get.assert_not_called()
    @mock.patch.object(report.LOG, 'error')
    def test_get_sharing_providers_error(self, logging_mock):
        """A non-200 placement response raises and logs the request id."""
        # Ensure _get_sharing_providers() logs an error and raises if the
        # placement API call doesn't respond 200
        resp_mock = mock.Mock(status_code=503)
        self.ks_adap_mock.get.return_value = resp_mock
        self.ks_adap_mock.get.return_value.headers = {
            'x-openstack-request-id': uuids.request_id}
        uuid = uuids.agg
        self.assertRaises(exception.ResourceProviderRetrievalFailed,
                          self.client._get_sharing_providers,
                          self.context, [uuid])
        expected_url = ('/resource_providers?member_of=in:' + uuid +
                        '&required=MISC_SHARES_VIA_AGGREGATE')
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url, microversion='1.18',
            global_request_id=self.context.global_id)
        # A 503 Service Unavailable should trigger an error log that
        # includes the placement request id
        self.assertTrue(logging_mock.called)
        self.assertEqual(uuids.request_id,
                         logging_mock.call_args[0][1]['placement_req_id'])
    def test_get_providers_in_tree(self):
        """GET /resource_providers?in_tree=<root> returns the raw list of
        provider dicts (root and child here).
        """
        # Ensure get_providers_in_tree() returns a list of resource
        # provider dicts if it finds a resource provider record from the
        # placement API
        root = uuids.compute_node
        child = uuids.child
        resp_mock = mock.Mock(status_code=200)
        rpjson = [
            {
                'uuid': root,
                'name': 'daddy', 'generation': 42,
                'parent_provider_uuid': None,
            },
            {
                'uuid': child,
                'name': 'junior',
                'generation': 42,
                'parent_provider_uuid': root,
            },
        ]
        resp_mock.json.return_value = {'resource_providers': rpjson}
        self.ks_adap_mock.get.return_value = resp_mock
        result = self.client.get_providers_in_tree(self.context, root)
        expected_url = '/resource_providers?in_tree=' + root
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url, microversion='1.14',
            global_request_id=self.context.global_id)
        self.assertEqual(rpjson, result)
    @mock.patch.object(report.LOG, 'error')
    def test_get_providers_in_tree_error(self, logging_mock):
        """A non-200 placement response raises and logs the request id."""
        # Ensure get_providers_in_tree() logs an error and raises if the
        # placement API call doesn't respond 200
        resp_mock = mock.Mock(status_code=503)
        self.ks_adap_mock.get.return_value = resp_mock
        self.ks_adap_mock.get.return_value.headers = {
            'x-openstack-request-id': 'req-' + uuids.request_id}
        uuid = uuids.compute_node
        self.assertRaises(exception.ResourceProviderRetrievalFailed,
                          self.client.get_providers_in_tree, self.context,
                          uuid)
        expected_url = '/resource_providers?in_tree=' + uuid
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url, microversion='1.14',
            global_request_id=self.context.global_id)
        # A 503 Service Unavailable should trigger an error log that includes
        # the placement request id
        self.assertTrue(logging_mock.called)
        self.assertEqual('req-' + uuids.request_id,
                         logging_mock.call_args[0][1]['placement_req_id'])
def test_get_providers_in_tree_ksa_exc(self):
self.ks_adap_mock.get.side_effect = ks_exc.EndpointNotFound()
self.assertRaises(
ks_exc.ClientException,
self.client.get_providers_in_tree, self.context, uuids.whatever)
    def test_create_resource_provider(self):
        """Test that _create_resource_provider() sends a dict of resource
        provider information without a parent provider UUID.
        """
        uuid = uuids.compute_node
        name = 'computehost'
        resp_mock = mock.Mock(status_code=200)
        self.ks_adap_mock.post.return_value = resp_mock
        self.assertEqual(
            resp_mock.json.return_value,
            self.client._create_resource_provider(self.context, uuid, name))
        # No parent specified, so the payload carries only uuid and name.
        expected_payload = {
            'uuid': uuid,
            'name': name,
        }
        expected_url = '/resource_providers'
        self.ks_adap_mock.post.assert_called_once_with(
            expected_url, json=expected_payload, microversion='1.20',
            global_request_id=self.context.global_id)
    def test_create_resource_provider_with_parent(self):
        """Test that when specifying a parent provider UUID, that the
        parent_provider_uuid part of the payload is properly specified.
        """
        parent_uuid = uuids.parent
        uuid = uuids.compute_node
        name = 'computehost'
        resp_mock = mock.Mock(status_code=200)
        self.ks_adap_mock.post.return_value = resp_mock
        self.assertEqual(
            resp_mock.json.return_value,
            self.client._create_resource_provider(
                self.context,
                uuid,
                name,
                parent_provider_uuid=parent_uuid,
            )
        )
        # The payload must include the parent_provider_uuid key.
        expected_payload = {
            'uuid': uuid,
            'name': name,
            'parent_provider_uuid': parent_uuid,
        }
        expected_url = '/resource_providers'
        self.ks_adap_mock.post.assert_called_once_with(
            expected_url, json=expected_payload, microversion='1.20',
            global_request_id=self.context.global_id)
    @mock.patch.object(report.LOG, 'info')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_resource_provider')
    def test_create_resource_provider_concurrent_create(self, get_rp_mock,
                                                        logging_mock):
        """A non-name-conflict 409 is treated as a concurrent create: the
        existing provider is fetched and returned, with an info log.
        """
        # Ensure _create_resource_provider() returns a dict of resource
        # provider gotten from _get_resource_provider() if the call to create
        # the resource provider in the placement API returned a 409 Conflict,
        # indicating another thread concurrently created the resource provider
        # record.
        uuid = uuids.compute_node
        name = 'computehost'
        self.ks_adap_mock.post.return_value = fake_requests.FakeResponse(
            409, content='not a name conflict',
            headers={'x-openstack-request-id': uuids.request_id})
        get_rp_mock.return_value = mock.sentinel.get_rp
        result = self.client._create_resource_provider(self.context, uuid,
                                                       name)
        expected_payload = {
            'uuid': uuid,
            'name': name,
        }
        expected_url = '/resource_providers'
        self.ks_adap_mock.post.assert_called_once_with(
            expected_url, json=expected_payload, microversion='1.20',
            global_request_id=self.context.global_id)
        self.assertEqual(mock.sentinel.get_rp, result)
        # The 409 response will produce a message to the info log.
        self.assertTrue(logging_mock.called)
        self.assertEqual(uuids.request_id,
                         logging_mock.call_args[0][1]['placement_req_id'])
def test_create_resource_provider_name_conflict(self):
# When the API call to create the resource provider fails 409 with a
# name conflict, we raise an exception.
self.ks_adap_mock.post.return_value = fake_requests.FakeResponse(
409, content='<stuff>Conflicting resource provider name: foo '
'already exists.</stuff>')
self.assertRaises(
exception.ResourceProviderCreationFailed,
self.client._create_resource_provider, self.context,
uuids.compute_node, 'foo')
    @mock.patch.object(report.LOG, 'error')
    def test_create_resource_provider_error(self, logging_mock):
        """An unexpected placement error (503) on create raises
        ResourceProviderCreationFailed and logs the request id.
        """
        # Ensure _create_resource_provider() sets the error flag when trying to
        # communicate with the placement API and not getting an error we can
        # deal with
        uuid = uuids.compute_node
        name = 'computehost'
        self.ks_adap_mock.post.return_value = fake_requests.FakeResponse(
            503, headers={'x-openstack-request-id': uuids.request_id})
        self.assertRaises(
            exception.ResourceProviderCreationFailed,
            self.client._create_resource_provider, self.context, uuid, name)
        expected_payload = {
            'uuid': uuid,
            'name': name,
        }
        expected_url = '/resource_providers'
        self.ks_adap_mock.post.assert_called_once_with(
            expected_url, json=expected_payload, microversion='1.20',
            global_request_id=self.context.global_id)
        # A 503 Service Unavailable should log an error that includes the
        # placement request id; the call above raised rather than returning.
        self.assertTrue(logging_mock.called)
        self.assertEqual(uuids.request_id,
                         logging_mock.call_args[0][1]['placement_req_id'])
def test_put_empty(self):
# A simple put with an empty (not None) payload should send the empty
# payload through.
# Bug #1744786
url = '/resource_providers/%s/aggregates' % uuids.foo
self.client.put(url, [])
self.ks_adap_mock.put.assert_called_once_with(
url, json=[], microversion=None, global_request_id=None)
    def test_delete_provider(self):
        """Both 204 (deleted) and 404 (already gone) count as success; the
        provider is evicted from the tree and refresh-time caches.
        """
        delete_mock = fake_requests.FakeResponse(None)
        self.ks_adap_mock.delete.return_value = delete_mock
        for status_code in (204, 404):
            delete_mock.status_code = status_code
            # Seed the caches
            self.client._provider_tree.new_root('compute', uuids.root,
                                                generation=0)
            self.client._association_refresh_time[uuids.root] = 1234
            self.client._delete_provider(uuids.root, global_request_id='gri')
            self.ks_adap_mock.delete.assert_called_once_with(
                '/resource_providers/' + uuids.root,
                global_request_id='gri', microversion=None)
            self.assertFalse(self.client._provider_tree.exists(uuids.root))
            self.assertNotIn(uuids.root, self.client._association_refresh_time)
            # Reset so assert_called_once_with holds on the next iteration.
            self.ks_adap_mock.delete.reset_mock()
    def test_delete_provider_fail(self):
        """409 maps to ResourceProviderInUse; 503 maps to
        ResourceProviderDeletionFailed.
        """
        delete_mock = fake_requests.FakeResponse(None)
        self.ks_adap_mock.delete.return_value = delete_mock
        resp_exc_map = {409: exception.ResourceProviderInUse,
                        503: exception.ResourceProviderDeletionFailed}
        for status_code, exc in resp_exc_map.items():
            delete_mock.status_code = status_code
            self.assertRaises(exc, self.client._delete_provider, uuids.root)
            self.ks_adap_mock.delete.assert_called_once_with(
                '/resource_providers/' + uuids.root, microversion=None,
                global_request_id=None)
            # Reset so assert_called_once_with holds on the next iteration.
            self.ks_adap_mock.delete.reset_mock()
    def test_set_aggregates_for_provider(self):
        """A successful PUT sends the cached generation and updates the
        provider tree's aggregates and generation from the response.
        """
        aggs = [uuids.agg1, uuids.agg2]
        self.ks_adap_mock.put.return_value = fake_requests.FakeResponse(
            200, content=jsonutils.dumps({
                'aggregates': aggs,
                'resource_provider_generation': 1}))
        # Prime the provider tree cache
        self.client._provider_tree.new_root('rp', uuids.rp, generation=0)
        self.assertEqual(set(),
                         self.client._provider_tree.data(uuids.rp).aggregates)
        self.client.set_aggregates_for_provider(self.context, uuids.rp, aggs)
        # The PUT carries the generation from the cache (0), not the
        # post-update generation.
        exp_payload = {'aggregates': aggs,
                       'resource_provider_generation': 0}
        self.ks_adap_mock.put.assert_called_once_with(
            '/resource_providers/%s/aggregates' % uuids.rp, json=exp_payload,
            microversion='1.19',
            global_request_id=self.context.global_id)
        # Cache was updated
        ptree_data = self.client._provider_tree.data(uuids.rp)
        self.assertEqual(set(aggs), ptree_data.aggregates)
        self.assertEqual(1, ptree_data.generation)
def test_set_aggregates_for_provider_bad_args(self):
self.assertRaises(ValueError, self.client.set_aggregates_for_provider,
self.context, uuids.rp, {}, use_cache=False)
self.assertRaises(ValueError, self.client.set_aggregates_for_provider,
self.context, uuids.rp, {}, use_cache=False,
generation=None)
    def test_set_aggregates_for_provider_fail(self):
        """A 503 on the PUT raises ResourceProviderUpdateFailed and leaves
        the cached aggregates untouched.
        """
        self.ks_adap_mock.put.return_value = fake_requests.FakeResponse(503)
        # Prime the provider tree cache
        self.client._provider_tree.new_root('rp', uuids.rp, generation=0)
        self.assertRaises(
            exception.ResourceProviderUpdateFailed,
            self.client.set_aggregates_for_provider,
            self.context, uuids.rp, [uuids.agg])
        # The cache wasn't updated
        self.assertEqual(set(),
                         self.client._provider_tree.data(uuids.rp).aggregates)
    def test_set_aggregates_for_provider_conflict(self):
        """A 409 (generation conflict) raises ResourceProviderUpdateConflict
        and evicts the provider from the local caches.
        """
        # Prime the provider tree cache
        self.client._provider_tree.new_root('rp', uuids.rp, generation=0)
        self.ks_adap_mock.put.return_value = fake_requests.FakeResponse(409)
        self.assertRaises(
            exception.ResourceProviderUpdateConflict,
            self.client.set_aggregates_for_provider,
            self.context, uuids.rp, [uuids.agg])
        # The cache was invalidated
        self.assertNotIn(uuids.rp,
                         self.client._provider_tree.get_provider_uuids())
        self.assertNotIn(uuids.rp, self.client._association_refresh_time)
    def test_set_aggregates_for_provider_short_circuit(self):
        """No-op when aggregates have not changed."""
        # Prime the provider tree cache
        self.client._provider_tree.new_root('rp', uuids.rp, generation=7)
        # Requested aggregates ([]) match the cached (empty) set, so no PUT
        # is issued.
        self.client.set_aggregates_for_provider(self.context, uuids.rp, [])
        self.ks_adap_mock.put.assert_not_called()
    def test_set_aggregates_for_provider_no_short_circuit(self):
        """Don't short-circuit if generation doesn't match, even if aggs have
        not changed.
        """
        # Prime the provider tree cache
        self.client._provider_tree.new_root('rp', uuids.rp, generation=2)
        self.ks_adap_mock.put.return_value = fake_requests.FakeResponse(
            200, content=jsonutils.dumps({
                'aggregates': [],
                'resource_provider_generation': 5}))
        # The explicit generation=4 overrides the cached generation (2).
        self.client.set_aggregates_for_provider(self.context, uuids.rp, [],
                                                generation=4)
        exp_payload = {'aggregates': [],
                       'resource_provider_generation': 4}
        self.ks_adap_mock.put.assert_called_once_with(
            '/resource_providers/%s/aggregates' % uuids.rp, json=exp_payload,
            microversion='1.19',
            global_request_id=self.context.global_id)
        # Cache was updated
        ptree_data = self.client._provider_tree.data(uuids.rp)
        self.assertEqual(set(), ptree_data.aggregates)
        self.assertEqual(5, ptree_data.generation)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_resource_provider', return_value=mock.NonCallableMock)
    def test_get_resource_provider_name_from_cache(self, mock_placement_get):
        """When the provider is in the tree cache, its name is served from
        there without a placement lookup.
        """
        expected_name = 'rp'
        self.client._provider_tree.new_root(
            expected_name, uuids.rp, generation=0)
        actual_name = self.client.get_resource_provider_name(
            self.context, uuids.rp)
        self.assertEqual(expected_name, actual_name)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_resource_provider')
    def test_get_resource_provider_name_from_placement(
            self, mock_placement_get):
        """On a cache miss, the name is fetched from placement."""
        expected_name = 'rp'
        mock_placement_get.return_value = {
            'uuid': uuids.rp,
            'name': expected_name
        }
        actual_name = self.client.get_resource_provider_name(
            self.context, uuids.rp)
        self.assertEqual(expected_name, actual_name)
        mock_placement_get.assert_called_once_with(self.context, uuids.rp)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_resource_provider')
    def test_get_resource_provider_name_rp_not_found_in_placement(
            self, mock_placement_get):
        """ResourceProviderNotFound from the placement lookup propagates."""
        mock_placement_get.side_effect = \
            exception.ResourceProviderNotFound(uuids.rp)
        self.assertRaises(
            exception.ResourceProviderNotFound,
            self.client.get_resource_provider_name,
            self.context, uuids.rp)
        mock_placement_get.assert_called_once_with(self.context, uuids.rp)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_resource_provider')
def test_get_resource_provider_name_placement_unavailable(
self, mock_placement_get):
mock_placement_get.side_effect = \
exception.ResourceProviderRetrievalFailed(uuid=uuids.rp)
self.assertRaises(
exception.ResourceProviderRetrievalFailed,
self.client.get_resource_provider_name,
self.context, uuids.rp)
class TestAggregates(SchedulerReportClientTestCase):
    """Tests for SchedulerReportClient._get_provider_aggregates()."""

    def test_get_provider_aggregates_found(self):
        """A 200 response yields (set of aggregates, provider generation)."""
        uuid = uuids.compute_node
        resp_mock = mock.Mock(status_code=200)
        aggs = [
            uuids.agg1,
            uuids.agg2,
        ]
        resp_mock.json.return_value = {'aggregates': aggs,
                                       'resource_provider_generation': 42}
        self.ks_adap_mock.get.return_value = resp_mock
        result, gen = self.client._get_provider_aggregates(self.context, uuid)
        expected_url = '/resource_providers/' + uuid + '/aggregates'
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url, microversion='1.19',
            global_request_id=self.context.global_id)
        self.assertEqual(set(aggs), result)
        self.assertEqual(42, gen)

    @mock.patch.object(report.LOG, 'error')
    def test_get_provider_aggregates_error(self, log_mock):
        """Test that when the placement API returns any error when looking up a
        provider's aggregates, we raise an exception.
        """
        uuid = uuids.compute_node
        resp_mock = mock.Mock(headers={
            'x-openstack-request-id': uuids.request_id})
        self.ks_adap_mock.get.return_value = resp_mock
        for status_code in (400, 404, 503):
            resp_mock.status_code = status_code
            self.assertRaises(
                exception.ResourceProviderAggregateRetrievalFailed,
                self.client._get_provider_aggregates, self.context, uuid)
            expected_url = '/resource_providers/' + uuid + '/aggregates'
            self.ks_adap_mock.get.assert_called_once_with(
                expected_url, microversion='1.19',
                global_request_id=self.context.global_id)
            self.assertTrue(log_mock.called)
            self.assertEqual(uuids.request_id,
                             log_mock.call_args[0][1]['placement_req_id'])
            # Reset so assert_called_once_with holds per status code.
            self.ks_adap_mock.get.reset_mock()
            log_mock.reset_mock()
class TestTraits(SchedulerReportClientTestCase):
trait_api_kwargs = {'microversion': '1.6'}
    def test_get_provider_traits_found(self):
        """A 200 response yields (set of traits, provider generation)."""
        uuid = uuids.compute_node
        resp_mock = mock.Mock(status_code=200)
        traits = [
            'CUSTOM_GOLD',
            'CUSTOM_SILVER',
        ]
        resp_mock.json.return_value = {'traits': traits,
                                       'resource_provider_generation': 42}
        self.ks_adap_mock.get.return_value = resp_mock
        result, gen = self.client.get_provider_traits(self.context, uuid)
        expected_url = '/resource_providers/' + uuid + '/traits'
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url,
            global_request_id=self.context.global_id,
            **self.trait_api_kwargs)
        self.assertEqual(set(traits), result)
        self.assertEqual(42, gen)
@mock.patch.object(report.LOG, 'error')
def test_get_provider_traits_error(self, log_mock):
"""Test that when the placement API returns any error when looking up a
provider's traits, we raise an exception.
"""
uuid = uuids.compute_node
resp_mock = mock.Mock(headers={
'x-openstack-request-id': uuids.request_id})
self.ks_adap_mock.get.return_value = resp_mock
for status_code in (400, 404, 503):
resp_mock.status_code = status_code
self.assertRaises(
exception.ResourceProviderTraitRetrievalFailed,
self.client.get_provider_traits, self.context, uuid)
expected_url = '/resource_providers/' + uuid + '/traits'
self.ks_adap_mock.get.assert_called_once_with(
expected_url,
global_request_id=self.context.global_id,
**self.trait_api_kwargs)
self.assertTrue(log_mock.called)
self.assertEqual(uuids.request_id,
log_mock.call_args[0][1]['placement_req_id'])
self.ks_adap_mock.get.reset_mock()
log_mock.reset_mock()
def test_get_provider_traits_placement_comm_error(self):
"""ksa ClientException raises through."""
uuid = uuids.compute_node
self.ks_adap_mock.get.side_effect = ks_exc.EndpointNotFound()
self.assertRaises(ks_exc.ClientException,
self.client.get_provider_traits, self.context, uuid)
expected_url = '/resource_providers/' + uuid + '/traits'
self.ks_adap_mock.get.assert_called_once_with(
expected_url,
global_request_id=self.context.global_id,
**self.trait_api_kwargs)
def test_ensure_traits(self):
"""Successful paths, various permutations of traits existing or needing
to be created.
"""
standard_traits = ['HW_NIC_OFFLOAD_UCS', 'HW_NIC_OFFLOAD_RDMA']
custom_traits = ['CUSTOM_GOLD', 'CUSTOM_SILVER']
all_traits = standard_traits + custom_traits
get_mock = mock.Mock(status_code=200)
self.ks_adap_mock.get.return_value = get_mock
# Request all traits; custom traits need to be created
get_mock.json.return_value = {'traits': standard_traits}
self.client._ensure_traits(self.context, all_traits)
self.ks_adap_mock.get.assert_called_once_with(
'/traits?name=in:' + ','.join(all_traits),
global_request_id=self.context.global_id,
**self.trait_api_kwargs)
self.ks_adap_mock.put.assert_has_calls(
[mock.call('/traits/' + trait,
global_request_id=self.context.global_id, json=None,
**self.trait_api_kwargs)
for trait in custom_traits], any_order=True)
self.ks_adap_mock.reset_mock()
# Request standard traits; no traits need to be created
get_mock.json.return_value = {'traits': standard_traits}
self.client._ensure_traits(self.context, standard_traits)
self.ks_adap_mock.get.assert_called_once_with(
'/traits?name=in:' + ','.join(standard_traits),
global_request_id=self.context.global_id,
**self.trait_api_kwargs)
self.ks_adap_mock.put.assert_not_called()
self.ks_adap_mock.reset_mock()
# Request no traits - short circuit
self.client._ensure_traits(self.context, None)
self.client._ensure_traits(self.context, [])
self.ks_adap_mock.get.assert_not_called()
self.ks_adap_mock.put.assert_not_called()
def test_ensure_traits_fail_retrieval(self):
self.ks_adap_mock.get.return_value = mock.Mock(status_code=400)
self.assertRaises(exception.TraitRetrievalFailed,
self.client._ensure_traits,
self.context, ['FOO'])
self.ks_adap_mock.get.assert_called_once_with(
'/traits?name=in:FOO',
global_request_id=self.context.global_id,
**self.trait_api_kwargs)
self.ks_adap_mock.put.assert_not_called()
def test_ensure_traits_fail_creation(self):
get_mock = mock.Mock(status_code=200)
get_mock.json.return_value = {'traits': []}
self.ks_adap_mock.get.return_value = get_mock
self.ks_adap_mock.put.return_value = fake_requests.FakeResponse(400)
self.assertRaises(exception.TraitCreationFailed,
self.client._ensure_traits,
self.context, ['FOO'])
self.ks_adap_mock.get.assert_called_once_with(
'/traits?name=in:FOO',
global_request_id=self.context.global_id,
**self.trait_api_kwargs)
self.ks_adap_mock.put.assert_called_once_with(
'/traits/FOO',
global_request_id=self.context.global_id, json=None,
**self.trait_api_kwargs)
def test_set_traits_for_provider(self):
traits = ['HW_NIC_OFFLOAD_UCS', 'HW_NIC_OFFLOAD_RDMA']
# Make _ensure_traits succeed without PUTting
get_mock = mock.Mock(status_code=200)
get_mock.json.return_value = {'traits': traits}
self.ks_adap_mock.get.return_value = get_mock
# Prime the provider tree cache
self.client._provider_tree.new_root('rp', uuids.rp, generation=0)
# Mock the /rp/{u}/traits PUT to succeed
put_mock = mock.Mock(status_code=200)
put_mock.json.return_value = {'traits': traits,
'resource_provider_generation': 1}
self.ks_adap_mock.put.return_value = put_mock
# Invoke
self.client.set_traits_for_provider(self.context, uuids.rp, traits)
# Verify API calls
self.ks_adap_mock.get.assert_called_once_with(
'/traits?name=in:' + ','.join(traits),
global_request_id=self.context.global_id,
**self.trait_api_kwargs)
self.ks_adap_mock.put.assert_called_once_with(
'/resource_providers/%s/traits' % uuids.rp,
json={'traits': traits, 'resource_provider_generation': 0},
global_request_id=self.context.global_id,
**self.trait_api_kwargs)
# And ensure the provider tree cache was updated appropriately
self.assertFalse(
self.client._provider_tree.have_traits_changed(uuids.rp, traits))
# Validate the generation
self.assertEqual(
1, self.client._provider_tree.data(uuids.rp).generation)
def test_set_traits_for_provider_fail(self):
traits = ['HW_NIC_OFFLOAD_UCS', 'HW_NIC_OFFLOAD_RDMA']
get_mock = mock.Mock()
self.ks_adap_mock.get.return_value = get_mock
# Prime the provider tree cache
self.client._provider_tree.new_root('rp', uuids.rp, generation=0)
# _ensure_traits exception bubbles up
get_mock.status_code = 400
self.assertRaises(
exception.TraitRetrievalFailed,
self.client.set_traits_for_provider,
self.context, uuids.rp, traits)
self.ks_adap_mock.put.assert_not_called()
get_mock.status_code = 200
get_mock.json.return_value = {'traits': traits}
# Conflict
self.ks_adap_mock.put.return_value = mock.Mock(status_code=409)
self.assertRaises(
exception.ResourceProviderUpdateConflict,
self.client.set_traits_for_provider,
self.context, uuids.rp, traits)
# Other error
self.ks_adap_mock.put.return_value = mock.Mock(status_code=503)
self.assertRaises(
exception.ResourceProviderUpdateFailed,
self.client.set_traits_for_provider,
self.context, uuids.rp, traits)
class TestAssociations(SchedulerReportClientTestCase):
    """Tests for _refresh_associations, which (re)fetches a provider's
    inventory, aggregates, traits and (optionally) sharing providers and
    records a refresh timestamp per provider.
    """
    def setUp(self):
        super(TestAssociations, self).setUp()
        # Stub out each of the getters _refresh_associations fans out to.
        self.mock_get_inv = self.useFixture(fixtures.MockPatch(
            'nova.scheduler.client.report.SchedulerReportClient.'
            '_get_inventory')).mock
        self.inv = {
            'VCPU': {'total': 16},
            'MEMORY_MB': {'total': 1024},
            'DISK_GB': {'total': 10},
        }
        self.mock_get_inv.return_value = {
            'resource_provider_generation': 41,
            'inventories': self.inv,
        }
        self.mock_get_aggs = self.useFixture(fixtures.MockPatch(
            'nova.scheduler.client.report.SchedulerReportClient.'
            '_get_provider_aggregates')).mock
        self.mock_get_aggs.return_value = report.AggInfo(
            aggregates=set([uuids.agg1]), generation=42)
        self.mock_get_traits = self.useFixture(fixtures.MockPatch(
            'nova.scheduler.client.report.SchedulerReportClient.'
            'get_provider_traits')).mock
        self.mock_get_traits.return_value = report.TraitInfo(
            traits=set(['CUSTOM_GOLD']), generation=43)
        self.mock_get_sharing = self.useFixture(fixtures.MockPatch(
            'nova.scheduler.client.report.SchedulerReportClient.'
            '_get_sharing_providers')).mock
    def assert_getters_were_called(self, uuid, sharing=True):
        """Assert each getter ran exactly once and the provider tree cache
        reflects the stubbed inventory, aggregates, traits and generation.
        """
        self.mock_get_inv.assert_called_once_with(self.context, uuid)
        self.mock_get_aggs.assert_called_once_with(self.context, uuid)
        self.mock_get_traits.assert_called_once_with(self.context, uuid)
        if sharing:
            # Sharing lookup is keyed off the aggregates just fetched.
            self.mock_get_sharing.assert_called_once_with(
                self.context, self.mock_get_aggs.return_value[0])
        self.assertIn(uuid, self.client._association_refresh_time)
        self.assertFalse(
            self.client._provider_tree.has_inventory_changed(uuid, self.inv))
        self.assertTrue(
            self.client._provider_tree.in_aggregates(uuid, [uuids.agg1]))
        self.assertFalse(
            self.client._provider_tree.in_aggregates(uuid, [uuids.agg2]))
        self.assertTrue(
            self.client._provider_tree.has_traits(uuid, ['CUSTOM_GOLD']))
        self.assertFalse(
            self.client._provider_tree.has_traits(uuid, ['CUSTOM_SILVER']))
        # 43 is the generation stubbed on get_provider_traits in setUp.
        self.assertEqual(43, self.client._provider_tree.data(uuid).generation)
    def assert_getters_not_called(self, timer_entry=None):
        """Assert none of the getters ran; optionally assert that a refresh
        timestamp for *timer_entry* is (still) present.
        """
        self.mock_get_inv.assert_not_called()
        self.mock_get_aggs.assert_not_called()
        self.mock_get_traits.assert_not_called()
        self.mock_get_sharing.assert_not_called()
        if timer_entry is None:
            self.assertFalse(self.client._association_refresh_time)
        else:
            self.assertIn(timer_entry, self.client._association_refresh_time)
    def reset_getter_mocks(self):
        """Clear call counts on all four getter mocks between scenarios."""
        self.mock_get_inv.reset_mock()
        self.mock_get_aggs.reset_mock()
        self.mock_get_traits.reset_mock()
        self.mock_get_sharing.reset_mock()
    def test_refresh_associations_no_last(self):
        """Test that associations are refreshed when stale."""
        uuid = uuids.compute_node
        # Seed the provider tree so _refresh_associations finds the provider
        self.client._provider_tree.new_root('compute', uuid, generation=1)
        self.client._refresh_associations(self.context, uuid)
        self.assert_getters_were_called(uuid)
    def test_refresh_associations_no_refresh_sharing(self):
        """Test refresh_sharing=False."""
        uuid = uuids.compute_node
        # Seed the provider tree so _refresh_associations finds the provider
        self.client._provider_tree.new_root('compute', uuid, generation=1)
        self.client._refresh_associations(self.context, uuid,
                                          refresh_sharing=False)
        self.assert_getters_were_called(uuid, sharing=False)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_associations_stale')
    def test_refresh_associations_not_stale(self, mock_stale):
        """Test that refresh associations is not called when the map is
        not stale.
        """
        mock_stale.return_value = False
        uuid = uuids.compute_node
        self.client._refresh_associations(self.context, uuid)
        self.assert_getters_not_called()
    @mock.patch.object(report.LOG, 'debug')
    def test_refresh_associations_time(self, log_mock):
        """Test that refresh associations is called when the map is stale."""
        uuid = uuids.compute_node
        # Seed the provider tree so _refresh_associations finds the provider
        self.client._provider_tree.new_root('compute', uuid, generation=1)
        # Called a first time because association_refresh_time is empty.
        now = time.time()
        self.client._refresh_associations(self.context, uuid)
        self.assert_getters_were_called(uuid)
        log_mock.assert_has_calls([
            mock.call('Refreshing inventories for resource provider %s', uuid),
            mock.call('Updating ProviderTree inventory for provider %s from '
                      '_refresh_and_get_inventory using data: %s',
                      uuid, self.inv),
            mock.call('Refreshing aggregate associations for resource '
                      'provider %s, aggregates: %s', uuid, uuids.agg1),
            mock.call('Refreshing trait associations for resource '
                      'provider %s, traits: %s', uuid, 'CUSTOM_GOLD')
        ])
        # Clear call count.
        self.reset_getter_mocks()
        with mock.patch('time.time') as mock_future:
            # Not called a second time because not enough time has passed.
            mock_future.return_value = (now +
                CONF.compute.resource_provider_association_refresh / 2)
            self.client._refresh_associations(self.context, uuid)
            self.assert_getters_not_called(timer_entry=uuid)
            # Called because time has passed.
            mock_future.return_value = (now +
                CONF.compute.resource_provider_association_refresh + 1)
            self.client._refresh_associations(self.context, uuid)
            self.assert_getters_were_called(uuid)
    def test_refresh_associations_disabled(self):
        """Test that refresh associations can be disabled."""
        self.flags(resource_provider_association_refresh=0, group='compute')
        uuid = uuids.compute_node
        # Seed the provider tree so _refresh_associations finds the provider
        self.client._provider_tree.new_root('compute', uuid, generation=1)
        # Called a first time because association_refresh_time is empty.
        now = time.time()
        self.client._refresh_associations(self.context, uuid)
        self.assert_getters_were_called(uuid)
        # Clear call count.
        self.reset_getter_mocks()
        with mock.patch('time.time') as mock_future:
            # A lot of time passes
            mock_future.return_value = now + 10000000000000
            self.client._refresh_associations(self.context, uuid)
            self.assert_getters_not_called(timer_entry=uuid)
            self.reset_getter_mocks()
            # Forever passes
            mock_future.return_value = float('inf')
            self.client._refresh_associations(self.context, uuid)
            self.assert_getters_not_called(timer_entry=uuid)
            self.reset_getter_mocks()
            # Even if no time passes, clearing the counter triggers refresh
            mock_future.return_value = now
            del self.client._association_refresh_time[uuid]
            self.client._refresh_associations(self.context, uuid)
            self.assert_getters_were_called(uuid)
class TestAllocations(SchedulerReportClientTestCase):
    """Tests for the allocation and resource-provider deletion methods of
    SchedulerReportClient.
    """
    @mock.patch("nova.scheduler.client.report.SchedulerReportClient."
                "delete")
    @mock.patch("nova.scheduler.client.report.SchedulerReportClient."
                "delete_allocation_for_instance")
    @mock.patch("nova.objects.InstanceList.get_uuids_by_host_and_node")
    def test_delete_resource_provider_cascade(self, mock_by_host,
            mock_del_alloc, mock_delete):
        """cascade=True deletes the allocations of every instance on the node
        before deleting the provider and dropping it from the cache.
        """
        self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1)
        cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host",
                                 hypervisor_hostname="fake_hostname", )
        mock_by_host.return_value = [uuids.inst1, uuids.inst2]
        resp_mock = mock.Mock(status_code=204)
        mock_delete.return_value = resp_mock
        self.client.delete_resource_provider(self.context, cn, cascade=True)
        mock_by_host.assert_called_once_with(
            self.context, cn.host, cn.hypervisor_hostname)
        # One allocation delete per instance found on the host/node.
        self.assertEqual(2, mock_del_alloc.call_count)
        exp_url = "/resource_providers/%s" % uuids.cn
        mock_delete.assert_called_once_with(
            exp_url, global_request_id=self.context.global_id)
        self.assertFalse(self.client._provider_tree.exists(uuids.cn))
    @mock.patch("nova.scheduler.client.report.SchedulerReportClient."
                "delete")
    @mock.patch("nova.scheduler.client.report.SchedulerReportClient."
                "delete_allocation_for_instance")
    @mock.patch("nova.objects.InstanceList.get_uuids_by_host_and_node")
    def test_delete_resource_provider_no_cascade(self, mock_by_host,
            mock_del_alloc, mock_delete):
        """Without cascade, instance allocations are left alone; the refresh
        timestamp for the provider is dropped.
        """
        self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1)
        self.client._association_refresh_time[uuids.cn] = mock.Mock()
        cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host",
                                 hypervisor_hostname="fake_hostname", )
        mock_by_host.return_value = [uuids.inst1, uuids.inst2]
        resp_mock = mock.Mock(status_code=204)
        mock_delete.return_value = resp_mock
        self.client.delete_resource_provider(self.context, cn)
        mock_del_alloc.assert_not_called()
        exp_url = "/resource_providers/%s" % uuids.cn
        mock_delete.assert_called_once_with(
            exp_url, global_request_id=self.context.global_id)
        self.assertNotIn(uuids.cn, self.client._association_refresh_time)
    @mock.patch("nova.scheduler.client.report.SchedulerReportClient."
                "delete")
    @mock.patch('nova.scheduler.client.report.LOG')
    def test_delete_resource_provider_log_calls(self, mock_log, mock_delete):
        """Logging behavior per response status: 204 logs info, 404 logs
        nothing, 409 logs an error.
        """
        # First, check a successful call
        self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1)
        cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host",
                                 hypervisor_hostname="fake_hostname", )
        resp_mock = fake_requests.FakeResponse(204)
        mock_delete.return_value = resp_mock
        self.client.delete_resource_provider(self.context, cn)
        # With a 204, only the info should be called
        self.assertEqual(1, mock_log.info.call_count)
        self.assertEqual(0, mock_log.warning.call_count)
        # Now check a 404 response
        mock_log.reset_mock()
        resp_mock.status_code = 404
        self.client.delete_resource_provider(self.context, cn)
        # With a 404, neither log message should be called
        self.assertEqual(0, mock_log.info.call_count)
        self.assertEqual(0, mock_log.warning.call_count)
        # Finally, check a 409 response
        mock_log.reset_mock()
        resp_mock.status_code = 409
        self.client.delete_resource_provider(self.context, cn)
        # With a 409, only the error should be called
        self.assertEqual(0, mock_log.info.call_count)
        self.assertEqual(1, mock_log.error.call_count)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.delete',
                new=mock.Mock(side_effect=ks_exc.EndpointNotFound()))
    def test_delete_resource_provider_placement_exception(self):
        """Ensure that a ksa exception in delete_resource_provider raises
        through.
        """
        self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1)
        cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host",
                                 hypervisor_hostname="fake_hostname", )
        self.assertRaises(
            ks_exc.ClientException,
            self.client.delete_resource_provider, self.context, cn)
    @mock.patch("nova.scheduler.client.report.SchedulerReportClient.get")
    def test_get_allocations_for_resource_provider(self, mock_get):
        """A 200 response yields the allocations payload."""
        mock_get.return_value = fake_requests.FakeResponse(
            200, content=jsonutils.dumps(
                {'allocations': 'fake', 'resource_provider_generation': 42}))
        ret = self.client.get_allocations_for_resource_provider(
            self.context, 'rpuuid')
        self.assertEqual('fake', ret.allocations)
        mock_get.assert_called_once_with(
            '/resource_providers/rpuuid/allocations',
            global_request_id=self.context.global_id)
    @mock.patch("nova.scheduler.client.report.SchedulerReportClient.get")
    def test_get_allocations_for_resource_provider_fail(self, mock_get):
        """A non-200 raises ResourceProviderAllocationRetrievalFailed."""
        mock_get.return_value = fake_requests.FakeResponse(400, content="ouch")
        self.assertRaises(exception.ResourceProviderAllocationRetrievalFailed,
                          self.client.get_allocations_for_resource_provider,
                          self.context, 'rpuuid')
        mock_get.assert_called_once_with(
            '/resource_providers/rpuuid/allocations',
            global_request_id=self.context.global_id)
    @mock.patch("nova.scheduler.client.report.SchedulerReportClient.get")
    def test_get_allocs_for_consumer(self, mock_get):
        """A 200 response yields the consumer's allocations dict."""
        mock_get.return_value = fake_requests.FakeResponse(
            200, content=jsonutils.dumps({'foo': 'bar'}))
        ret = self.client.get_allocs_for_consumer(self.context, 'consumer')
        self.assertEqual({'foo': 'bar'}, ret)
        mock_get.assert_called_once_with(
            '/allocations/consumer', version='1.28',
            global_request_id=self.context.global_id)
    @mock.patch("nova.scheduler.client.report.SchedulerReportClient.get")
    def test_get_allocs_for_consumer_fail(self, mock_get):
        """A non-200 raises ConsumerAllocationRetrievalFailed."""
        mock_get.return_value = fake_requests.FakeResponse(400, content='err')
        self.assertRaises(exception.ConsumerAllocationRetrievalFailed,
                          self.client.get_allocs_for_consumer,
                          self.context, 'consumer')
        mock_get.assert_called_once_with(
            '/allocations/consumer', version='1.28',
            global_request_id=self.context.global_id)
    @mock.patch("nova.scheduler.client.report.SchedulerReportClient.get")
    def test_get_allocs_for_consumer_safe_connect_fail(self, mock_get):
        """A ksa communication failure raises through unchanged."""
        mock_get.side_effect = ks_exc.EndpointNotFound()
        self.assertRaises(ks_exc.ClientException,
                          self.client.get_allocs_for_consumer,
                          self.context, 'consumer')
        mock_get.assert_called_once_with(
            '/allocations/consumer', version='1.28',
            global_request_id=self.context.global_id)
    def _test_remove_res_from_alloc(
            self, current_allocations, resources_to_remove,
            updated_allocations):
        """Drive remove_resources_from_instance_allocation with a stubbed GET
        returning *current_allocations* and assert the PUT body equals
        *updated_allocations*.
        """
        with test.nested(
            mock.patch(
                "nova.scheduler.client.report.SchedulerReportClient.get"),
            mock.patch(
                "nova.scheduler.client.report.SchedulerReportClient.put")
        ) as (mock_get, mock_put):
            mock_get.return_value = fake_requests.FakeResponse(
                200, content=jsonutils.dumps(current_allocations))
            self.client.remove_resources_from_instance_allocation(
                self.context, uuids.consumer_uuid, resources_to_remove)
            mock_get.assert_called_once_with(
                '/allocations/%s' % uuids.consumer_uuid, version='1.28',
                global_request_id=self.context.global_id)
            mock_put.assert_called_once_with(
                '/allocations/%s' % uuids.consumer_uuid, updated_allocations,
                version='1.28', global_request_id=self.context.global_id)
    def test_remove_res_from_alloc(self):
        """Amounts are subtracted per provider per resource class."""
        current_allocations = {
            "allocations": {
                uuids.rp1: {
                    "generation": 13,
                    "resources": {
                        'VCPU': 10,
                        'MEMORY_MB': 4096,
                    },
                },
                uuids.rp2: {
                    "generation": 42,
                    "resources": {
                        'NET_BW_EGR_KILOBIT_PER_SEC': 200,
                        'NET_BW_IGR_KILOBIT_PER_SEC': 300,
                    },
                },
            },
            "consumer_generation": 2,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id,
        }
        resources_to_remove = {
            uuids.rp1: {
                'VCPU': 1
            },
            uuids.rp2: {
                'NET_BW_EGR_KILOBIT_PER_SEC': 100,
                'NET_BW_IGR_KILOBIT_PER_SEC': 200,
            }
        }
        updated_allocations = {
            "allocations": {
                uuids.rp1: {
                    "generation": 13,
                    "resources": {
                        'VCPU': 9,
                        'MEMORY_MB': 4096,
                    },
                },
                uuids.rp2: {
                    "generation": 42,
                    "resources": {
                        'NET_BW_EGR_KILOBIT_PER_SEC': 100,
                        'NET_BW_IGR_KILOBIT_PER_SEC': 100,
                    },
                },
            },
            "consumer_generation": 2,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id,
        }
        self._test_remove_res_from_alloc(
            current_allocations, resources_to_remove, updated_allocations)
    def test_remove_res_from_alloc_remove_rc_when_value_dropped_to_zero(self):
        """A resource class whose amount drops to zero disappears from the
        updated allocation.
        """
        current_allocations = {
            "allocations": {
                uuids.rp1: {
                    "generation": 42,
                    "resources": {
                        'NET_BW_EGR_KILOBIT_PER_SEC': 200,
                        'NET_BW_IGR_KILOBIT_PER_SEC': 300,
                    },
                },
            },
            "consumer_generation": 2,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id,
        }
        # this will remove all of NET_BW_EGR_KILOBIT_PER_SEC resources from
        # the allocation so the whole resource class will be removed
        resources_to_remove = {
            uuids.rp1: {
                'NET_BW_EGR_KILOBIT_PER_SEC': 200,
                'NET_BW_IGR_KILOBIT_PER_SEC': 200,
            }
        }
        updated_allocations = {
            "allocations": {
                uuids.rp1: {
                    "generation": 42,
                    "resources": {
                        'NET_BW_IGR_KILOBIT_PER_SEC': 100,
                    },
                },
            },
            "consumer_generation": 2,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id,
        }
        self._test_remove_res_from_alloc(
            current_allocations, resources_to_remove, updated_allocations)
    def test_remove_res_from_alloc_remove_rp_when_all_rc_removed(self):
        """A provider with no resource classes left disappears from the
        updated allocation entirely.
        """
        current_allocations = {
            "allocations": {
                uuids.rp1: {
                    "generation": 42,
                    "resources": {
                        'NET_BW_EGR_KILOBIT_PER_SEC': 200,
                        'NET_BW_IGR_KILOBIT_PER_SEC': 300,
                    },
                },
            },
            "consumer_generation": 2,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id,
        }
        resources_to_remove = {
            uuids.rp1: {
                'NET_BW_EGR_KILOBIT_PER_SEC': 200,
                'NET_BW_IGR_KILOBIT_PER_SEC': 300,
            }
        }
        updated_allocations = {
            "allocations": {},
            "consumer_generation": 2,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id,
        }
        self._test_remove_res_from_alloc(
            current_allocations, resources_to_remove, updated_allocations)
    @mock.patch("nova.scheduler.client.report.SchedulerReportClient.get")
    def test_remove_res_from_alloc_failed_to_get_alloc(
            self, mock_get):
        """A ksa failure while fetching the allocation raises through."""
        mock_get.side_effect = ks_exc.EndpointNotFound()
        resources_to_remove = {
            uuids.rp1: {
                'NET_BW_EGR_KILOBIT_PER_SEC': 200,
                'NET_BW_IGR_KILOBIT_PER_SEC': 200,
            }
        }
        self.assertRaises(
            ks_exc.ClientException,
            self.client.remove_resources_from_instance_allocation,
            self.context, uuids.consumer_uuid, resources_to_remove)
    def test_remove_res_from_alloc_empty_alloc(self):
        """Removing from an empty allocation raises AllocationUpdateFailed."""
        resources_to_remove = {
            uuids.rp1: {
                'NET_BW_EGR_KILOBIT_PER_SEC': 200,
                'NET_BW_IGR_KILOBIT_PER_SEC': 200,
            }
        }
        current_allocations = {
            "allocations": {},
            "consumer_generation": 0,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id,
        }
        ex = self.assertRaises(
            exception.AllocationUpdateFailed,
            self._test_remove_res_from_alloc, current_allocations,
            resources_to_remove, None)
        self.assertIn('The allocation is empty', six.text_type(ex))
    @mock.patch("nova.scheduler.client.report.SchedulerReportClient.put")
    @mock.patch("nova.scheduler.client.report.SchedulerReportClient.get")
    def test_remove_res_from_alloc_no_resource_to_remove(
            self, mock_get, mock_put):
        """An empty removal request short-circuits without any API call."""
        self.client.remove_resources_from_instance_allocation(
            self.context, uuids.consumer_uuid, {})
        mock_get.assert_not_called()
        mock_put.assert_not_called()
    def test_remove_res_from_alloc_missing_rc(self):
        """Requesting removal of a resource class not in the allocation
        raises AllocationUpdateFailed.
        """
        current_allocations = {
            "allocations": {
                uuids.rp1: {
                    "generation": 42,
                    "resources": {
                        'NET_BW_EGR_KILOBIT_PER_SEC': 200,
                    },
                },
            },
            "consumer_generation": 2,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id,
        }
        resources_to_remove = {
            uuids.rp1: {
                'VCPU': 1,
            }
        }
        ex = self.assertRaises(
            exception.AllocationUpdateFailed, self._test_remove_res_from_alloc,
            current_allocations, resources_to_remove, None)
        self.assertIn(
            "Key 'VCPU' is missing from the allocation",
            six.text_type(ex))
    def test_remove_res_from_alloc_missing_rp(self):
        """Requesting removal from a provider not in the allocation raises
        AllocationUpdateFailed.
        """
        current_allocations = {
            "allocations": {
                uuids.rp1: {
                    "generation": 42,
                    "resources": {
                        'NET_BW_EGR_KILOBIT_PER_SEC': 200,
                    },
                },
            },
            "consumer_generation": 2,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id,
        }
        resources_to_remove = {
            uuids.other_rp: {
                'NET_BW_EGR_KILOBIT_PER_SEC': 200,
            }
        }
        ex = self.assertRaises(
            exception.AllocationUpdateFailed, self._test_remove_res_from_alloc,
            current_allocations, resources_to_remove, None)
        self.assertIn(
            "Key '%s' is missing from the allocation" % uuids.other_rp,
            six.text_type(ex))
    def test_remove_res_from_alloc_not_enough_resource_to_remove(self):
        """Requesting removal of more than is allocated raises
        AllocationUpdateFailed.
        """
        current_allocations = {
            "allocations": {
                uuids.rp1: {
                    "generation": 42,
                    "resources": {
                        'NET_BW_EGR_KILOBIT_PER_SEC': 200,
                    },
                },
            },
            "consumer_generation": 2,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id,
        }
        resources_to_remove = {
            uuids.rp1: {
                'NET_BW_EGR_KILOBIT_PER_SEC': 400,
            }
        }
        ex = self.assertRaises(
            exception.AllocationUpdateFailed, self._test_remove_res_from_alloc,
            current_allocations, resources_to_remove, None)
        self.assertIn(
            'There are not enough allocated resources left on %s resource '
            'provider to remove 400 amount of NET_BW_EGR_KILOBIT_PER_SEC '
            'resources' %
            uuids.rp1,
            six.text_type(ex))
    @mock.patch('time.sleep', new=mock.Mock())
    @mock.patch("nova.scheduler.client.report.SchedulerReportClient.put")
    @mock.patch("nova.scheduler.client.report.SchedulerReportClient.get")
    def test_remove_res_from_alloc_retry_succeed(
            self, mock_get, mock_put):
        """A placement.concurrent_update conflict triggers a re-GET and retry
        of the PUT with the refreshed consumer generation.
        """
        current_allocations = {
            "allocations": {
                uuids.rp1: {
                    "generation": 42,
                    "resources": {
                        'NET_BW_EGR_KILOBIT_PER_SEC': 200,
                    },
                },
            },
            "consumer_generation": 2,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id,
        }
        current_allocations_2 = copy.deepcopy(current_allocations)
        current_allocations_2['consumer_generation'] = 3
        resources_to_remove = {
            uuids.rp1: {
                'NET_BW_EGR_KILOBIT_PER_SEC': 200,
            }
        }
        updated_allocations = {
            "allocations": {},
            "consumer_generation": 2,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id,
        }
        updated_allocations_2 = copy.deepcopy(updated_allocations)
        updated_allocations_2['consumer_generation'] = 3
        # First GET/PUT pair conflicts; the second pair succeeds.
        mock_get.side_effect = [
            fake_requests.FakeResponse(
                200, content=jsonutils.dumps(current_allocations)),
            fake_requests.FakeResponse(
                200, content=jsonutils.dumps(current_allocations_2))
        ]
        mock_put.side_effect = [
            fake_requests.FakeResponse(
                status_code=409,
                content=jsonutils.dumps(
                    {'errors': [{'code': 'placement.concurrent_update',
                                 'detail': ''}]})),
            fake_requests.FakeResponse(
                status_code=204)
        ]
        self.client.remove_resources_from_instance_allocation(
            self.context, uuids.consumer_uuid, resources_to_remove)
        self.assertEqual(
            [
                mock.call(
                    '/allocations/%s' % uuids.consumer_uuid, version='1.28',
                    global_request_id=self.context.global_id),
                mock.call(
                    '/allocations/%s' % uuids.consumer_uuid, version='1.28',
                    global_request_id=self.context.global_id)
            ],
            mock_get.mock_calls)
        self.assertEqual(
            [
                mock.call(
                    '/allocations/%s' % uuids.consumer_uuid,
                    updated_allocations, version='1.28',
                    global_request_id=self.context.global_id),
                mock.call(
                    '/allocations/%s' % uuids.consumer_uuid,
                    updated_allocations_2, version='1.28',
                    global_request_id=self.context.global_id),
            ],
            mock_put.mock_calls)
    @mock.patch('time.sleep', new=mock.Mock())
    @mock.patch("nova.scheduler.client.report.SchedulerReportClient.put")
    @mock.patch("nova.scheduler.client.report.SchedulerReportClient.get")
    def test_remove_res_from_alloc_run_out_of_retries(
            self, mock_get, mock_put):
        """Persistent generation conflicts exhaust the retries (4 attempts)
        and raise AllocationUpdateFailed.
        """
        current_allocations = {
            "allocations": {
                uuids.rp1: {
                    "generation": 42,
                    "resources": {
                        'NET_BW_EGR_KILOBIT_PER_SEC': 200,
                    },
                },
            },
            "consumer_generation": 2,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id,
        }
        resources_to_remove = {
            uuids.rp1: {
                'NET_BW_EGR_KILOBIT_PER_SEC': 200,
            }
        }
        updated_allocations = {
            "allocations": {},
            "consumer_generation": 2,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id,
        }
        get_rsp = fake_requests.FakeResponse(
            200, content=jsonutils.dumps(current_allocations))
        mock_get.side_effect = [get_rsp] * 4
        # Every PUT reports a concurrent update, so every retry conflicts.
        put_rsp = fake_requests.FakeResponse(
            status_code=409,
            content=jsonutils.dumps(
                {'errors': [{'code': 'placement.concurrent_update',
                             'detail': ''}]}))
        mock_put.side_effect = [put_rsp] * 4
        ex = self.assertRaises(
            exception.AllocationUpdateFailed,
            self.client.remove_resources_from_instance_allocation,
            self.context, uuids.consumer_uuid, resources_to_remove)
        self.assertIn(
            'due to multiple successive generation conflicts',
            six.text_type(ex))
        get_call = mock.call(
            '/allocations/%s' % uuids.consumer_uuid, version='1.28',
            global_request_id=self.context.global_id)
        mock_get.assert_has_calls([get_call] * 4)
        put_call = mock.call(
            '/allocations/%s' % uuids.consumer_uuid, updated_allocations,
            version='1.28', global_request_id=self.context.global_id)
        mock_put.assert_has_calls([put_call] * 4)
class TestResourceClass(SchedulerReportClientTestCase):
    """Tests for SchedulerReportClient._ensure_resource_classes."""

    def setUp(self):
        super(TestResourceClass, self).setUp()
        # Patch the low-level PUT so no real placement call is made.
        put_patcher = mock.patch(
            "nova.scheduler.client.report.SchedulerReportClient.put")
        self.addCleanup(put_patcher.stop)
        self.mock_put = put_patcher.start()

    def test_ensure_resource_classes(self):
        """Only the CUSTOM_* classes are PUT to placement."""
        requested = ['VCPU', 'CUSTOM_FOO', 'MEMORY_MB', 'CUSTOM_BAR']
        self.client._ensure_resource_classes(self.context, requested)
        expected_calls = []
        for custom in ('CUSTOM_FOO', 'CUSTOM_BAR'):
            expected_calls.append(
                mock.call('/resource_classes/%s' % custom, None,
                          version='1.7',
                          global_request_id=self.context.global_id))
        self.mock_put.assert_has_calls(expected_calls, any_order=True)

    def test_ensure_resource_classes_none(self):
        """Empty input of any container type short-circuits with no PUT."""
        for no_rcs in ([], (), set(), {}):
            self.client._ensure_resource_classes(self.context, no_rcs)
        self.mock_put.assert_not_called()

    def test_ensure_resource_classes_put_fail(self):
        """A failed PUT surfaces as InvalidResourceClass."""
        self.mock_put.return_value = fake_requests.FakeResponse(503)
        self.assertRaises(
            exception.InvalidResourceClass,
            self.client._ensure_resource_classes, self.context,
            ['VCPU', 'MEMORY_MB', 'CUSTOM_BAD'])
        # Only called with the "bad" one
        self.mock_put.assert_called_once_with(
            '/resource_classes/CUSTOM_BAD', None, version='1.7',
            global_request_id=self.context.global_id)
class TestAggregateAddRemoveHost(SchedulerReportClientTestCase):
"""Unit tests for the methods of the report client which look up providers
by name and add/remove host aggregates to providers. These methods do not
access the SchedulerReportClient provider_tree attribute and are called
from the nova API, not the nova compute manager/resource tracker.
"""
def setUp(self):
super(TestAggregateAddRemoveHost, self).setUp()
self.mock_get = self.useFixture(
fixtures.MockPatch('nova.scheduler.client.report.'
'SchedulerReportClient.get')).mock
self.mock_put = self.useFixture(
fixtures.MockPatch('nova.scheduler.client.report.'
'SchedulerReportClient.put')).mock
def test_get_provider_by_name_success(self):
get_resp = mock.Mock()
get_resp.status_code = 200
get_resp.json.return_value = {
"resource_providers": [
mock.sentinel.expected,
]
}
self.mock_get.return_value = get_resp
name = 'cn1'
res = self.client.get_provider_by_name(self.context, name)
exp_url = "/resource_providers?name=%s" % name
self.mock_get.assert_called_once_with(
exp_url, global_request_id=self.context.global_id)
self.assertEqual(mock.sentinel.expected, res)
@mock.patch.object(report.LOG, 'warning')
def test_get_provider_by_name_multiple_results(self, mock_log):
"""Test that if we find multiple resource providers with the same name,
that a ResourceProviderNotFound is raised (the reason being that >1
resource provider with a name should never happen...)
"""
get_resp = mock.Mock()
get_resp.status_code = 200
get_resp.json.return_value = {
"resource_providers": [
{'uuid': uuids.cn1a},
{'uuid': uuids.cn1b},
]
}
self.mock_get.return_value = get_resp
name = 'cn1'
self.assertRaises(
exception.ResourceProviderNotFound,
self.client.get_provider_by_name, self.context, name)
mock_log.assert_called_once()
@mock.patch.object(report.LOG, 'warning')
def test_get_provider_by_name_500(self, mock_log):
get_resp = mock.Mock()
get_resp.status_code = 500
self.mock_get.return_value = get_resp
name = 'cn1'
self.assertRaises(
exception.ResourceProviderNotFound,
self.client.get_provider_by_name, self.context, name)
mock_log.assert_called_once()
    @mock.patch.object(report.LOG, 'warning')
    def test_get_provider_by_name_404(self, mock_log):
        """A 404 raises ResourceProviderNotFound without logging a warning."""
        get_resp = mock.Mock()
        get_resp.status_code = 404
        self.mock_get.return_value = get_resp
        name = 'cn1'
        self.assertRaises(
            exception.ResourceProviderNotFound,
            self.client.get_provider_by_name, self.context, name)
        # 404 is an expected "not found" condition, not an anomaly.
        mock_log.assert_not_called()
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'set_aggregates_for_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_provider_aggregates')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get_provider_by_name')
    def test_aggregate_add_host_success_no_existing(
            self, mock_get_by_name, mock_get_aggs, mock_set_aggs):
        """Adding a host whose provider has no aggregates associates exactly
        the new aggregate at the generation placement reported.
        """
        mock_get_by_name.return_value = {
            'uuid': uuids.cn1,
            'generation': 1,
        }
        agg_uuid = uuids.agg1
        mock_get_aggs.return_value = report.AggInfo(aggregates=set([]),
                                                    generation=42)
        name = 'cn1'
        self.client.aggregate_add_host(self.context, agg_uuid, host_name=name)
        # The write must bypass the cache and use the freshly-read generation.
        mock_set_aggs.assert_called_once_with(
            self.context, uuids.cn1, set([agg_uuid]), use_cache=False,
            generation=42)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'set_aggregates_for_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_provider_aggregates')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get_provider_by_name', new=mock.NonCallableMock())
    def test_aggregate_add_host_rp_uuid(self, mock_get_aggs, mock_set_aggs):
        """When rp_uuid is supplied, the name lookup must be skipped entirely
        (enforced by the NonCallableMock patch above).
        """
        mock_get_aggs.return_value = report.AggInfo(
            aggregates=set([]), generation=42)
        self.client.aggregate_add_host(
            self.context, uuids.agg1, rp_uuid=uuids.cn1)
        mock_set_aggs.assert_called_once_with(
            self.context, uuids.cn1, set([uuids.agg1]), use_cache=False,
            generation=42)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'set_aggregates_for_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_provider_aggregates')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get_provider_by_name')
    def test_aggregate_add_host_success_already_existing(
            self, mock_get_by_name, mock_get_aggs, mock_set_aggs):
        """Adding an already-associated aggregate is a no-op; adding a new
        one merges it with the provider's existing aggregates.
        """
        mock_get_by_name.return_value = {
            'uuid': uuids.cn1,
            'generation': 1,
        }
        agg1_uuid = uuids.agg1
        agg2_uuid = uuids.agg2
        agg3_uuid = uuids.agg3
        # Stage 1: agg1 already present -> no write should happen.
        mock_get_aggs.return_value = report.AggInfo(
            aggregates=set([agg1_uuid]), generation=42)
        name = 'cn1'
        self.client.aggregate_add_host(self.context, agg1_uuid, host_name=name)
        mock_set_aggs.assert_not_called()
        mock_get_aggs.reset_mock()
        mock_set_aggs.reset_mock()
        # Stage 2: agg2 is new -> written as the union of old + new.
        mock_get_aggs.return_value = report.AggInfo(
            aggregates=set([agg1_uuid, agg3_uuid]), generation=43)
        self.client.aggregate_add_host(self.context, agg2_uuid, host_name=name)
        mock_set_aggs.assert_called_once_with(
            self.context, uuids.cn1, set([agg1_uuid, agg2_uuid, agg3_uuid]),
            use_cache=False, generation=43)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get_provider_by_name',
                side_effect=exception.PlacementAPIConnectFailure)
    def test_aggregate_add_host_no_placement(self, mock_get_by_name):
        """Tests that PlacementAPIConnectFailure will be raised up from
        aggregate_add_host if get_provider_by_name raises that error.
        """
        name = 'cn1'
        agg_uuid = uuids.agg1
        self.assertRaises(
            exception.PlacementAPIConnectFailure,
            self.client.aggregate_add_host, self.context, agg_uuid,
            host_name=name)
        # No further HTTP traffic should happen once the lookup failed.
        self.mock_get.assert_not_called()
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'set_aggregates_for_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_provider_aggregates')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get_provider_by_name')
    def test_aggregate_add_host_retry_success(
            self, mock_get_by_name, mock_get_aggs, mock_set_aggs):
        """Two generation conflicts are retried; the third attempt succeeds,
        each retry re-reading the aggregates (and a fresh generation).
        """
        mock_get_by_name.return_value = {
            'uuid': uuids.cn1,
            'generation': 1,
        }
        gens = (42, 43, 44)
        mock_get_aggs.side_effect = (
            report.AggInfo(aggregates=set([]), generation=gen) for gen in gens)
        # First two writes conflict; the third returns None (success).
        mock_set_aggs.side_effect = (
            exception.ResourceProviderUpdateConflict(
                uuid='uuid', generation=42, error='error'),
            exception.ResourceProviderUpdateConflict(
                uuid='uuid', generation=43, error='error'),
            None,
        )
        self.client.aggregate_add_host(self.context, uuids.agg1,
                                       host_name='cn1')
        mock_set_aggs.assert_has_calls([mock.call(
            self.context, uuids.cn1, set([uuids.agg1]), use_cache=False,
            generation=gen) for gen in gens])
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'set_aggregates_for_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_provider_aggregates')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get_provider_by_name')
    def test_aggregate_add_host_retry_raises(
            self, mock_get_by_name, mock_get_aggs, mock_set_aggs):
        """If every retry attempt hits a generation conflict, the conflict
        is ultimately raised to the caller.
        """
        mock_get_by_name.return_value = {
            'uuid': uuids.cn1,
            'generation': 1,
        }
        gens = (42, 43, 44, 45)
        mock_get_aggs.side_effect = (
            report.AggInfo(aggregates=set([]), generation=gen) for gen in gens)
        # Conflict on every attempt -> retries exhausted.
        mock_set_aggs.side_effect = (
            exception.ResourceProviderUpdateConflict(
                uuid='uuid', generation=gen, error='error') for gen in gens)
        self.assertRaises(
            exception.ResourceProviderUpdateConflict,
            self.client.aggregate_add_host, self.context, uuids.agg1,
            host_name='cn1')
        mock_set_aggs.assert_has_calls([mock.call(
            self.context, uuids.cn1, set([uuids.agg1]), use_cache=False,
            generation=gen) for gen in gens])
def test_aggregate_add_host_no_host_name_or_rp_uuid(self):
self.assertRaises(
ValueError,
self.client.aggregate_add_host, self.context, uuids.agg1)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get_provider_by_name',
                side_effect=exception.PlacementAPIConnectFailure)
    def test_aggregate_remove_host_no_placement(self, mock_get_by_name):
        """Tests that PlacementAPIConnectFailure will be raised up from
        aggregate_remove_host if get_provider_by_name raises that error.
        """
        name = 'cn1'
        agg_uuid = uuids.agg1
        self.assertRaises(
            exception.PlacementAPIConnectFailure,
            self.client.aggregate_remove_host, self.context, agg_uuid, name)
        # No further HTTP traffic should happen once the lookup failed.
        self.mock_get.assert_not_called()
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'set_aggregates_for_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_provider_aggregates')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get_provider_by_name')
    def test_aggregate_remove_host_success_already_existing(
            self, mock_get_by_name, mock_get_aggs, mock_set_aggs):
        """Removing a host's only aggregate writes back the empty set."""
        mock_get_by_name.return_value = {
            'uuid': uuids.cn1,
            'generation': 1,
        }
        agg_uuid = uuids.agg1
        mock_get_aggs.return_value = report.AggInfo(aggregates=set([agg_uuid]),
                                                    generation=42)
        name = 'cn1'
        self.client.aggregate_remove_host(self.context, agg_uuid, name)
        mock_set_aggs.assert_called_once_with(
            self.context, uuids.cn1, set([]), use_cache=False, generation=42)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'set_aggregates_for_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_provider_aggregates')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get_provider_by_name')
    def test_aggregate_remove_host_success_no_existing(
            self, mock_get_by_name, mock_get_aggs, mock_set_aggs):
        """Removing an unassociated aggregate is a no-op; removing one of
        several leaves the rest associated.
        """
        mock_get_by_name.return_value = {
            'uuid': uuids.cn1,
            'generation': 1,
        }
        agg1_uuid = uuids.agg1
        agg2_uuid = uuids.agg2
        agg3_uuid = uuids.agg3
        # Stage 1: provider has no aggregates -> nothing to remove.
        mock_get_aggs.return_value = report.AggInfo(aggregates=set([]),
                                                    generation=42)
        name = 'cn1'
        self.client.aggregate_remove_host(self.context, agg2_uuid, name)
        mock_set_aggs.assert_not_called()
        mock_get_aggs.reset_mock()
        mock_set_aggs.reset_mock()
        # Stage 2: agg2 is present -> written set excludes only agg2.
        mock_get_aggs.return_value = report.AggInfo(
            aggregates=set([agg1_uuid, agg2_uuid, agg3_uuid]), generation=43)
        self.client.aggregate_remove_host(self.context, agg2_uuid, name)
        mock_set_aggs.assert_called_once_with(
            self.context, uuids.cn1, set([agg1_uuid, agg3_uuid]),
            use_cache=False, generation=43)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'set_aggregates_for_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_provider_aggregates')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get_provider_by_name')
    def test_aggregate_remove_host_retry_success(
            self, mock_get_by_name, mock_get_aggs, mock_set_aggs):
        """Two generation conflicts are retried; the third attempt succeeds,
        each retry using the freshly-read generation.
        """
        mock_get_by_name.return_value = {
            'uuid': uuids.cn1,
            'generation': 1,
        }
        gens = (42, 43, 44)
        mock_get_aggs.side_effect = (
            report.AggInfo(aggregates=set([uuids.agg1]), generation=gen)
            for gen in gens)
        # First two writes conflict; the third returns None (success).
        mock_set_aggs.side_effect = (
            exception.ResourceProviderUpdateConflict(
                uuid='uuid', generation=42, error='error'),
            exception.ResourceProviderUpdateConflict(
                uuid='uuid', generation=43, error='error'),
            None,
        )
        self.client.aggregate_remove_host(self.context, uuids.agg1, 'cn1')
        mock_set_aggs.assert_has_calls([mock.call(
            self.context, uuids.cn1, set([]), use_cache=False,
            generation=gen) for gen in gens])
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'set_aggregates_for_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_provider_aggregates')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                'get_provider_by_name')
    def test_aggregate_remove_host_retry_raises(
            self, mock_get_by_name, mock_get_aggs, mock_set_aggs):
        """If every retry attempt hits a generation conflict, the conflict
        is ultimately raised to the caller.
        """
        mock_get_by_name.return_value = {
            'uuid': uuids.cn1,
            'generation': 1,
        }
        gens = (42, 43, 44, 45)
        mock_get_aggs.side_effect = (
            report.AggInfo(aggregates=set([uuids.agg1]), generation=gen)
            for gen in gens)
        # Conflict on every attempt -> retries exhausted.
        mock_set_aggs.side_effect = (
            exception.ResourceProviderUpdateConflict(
                uuid='uuid', generation=gen, error='error') for gen in gens)
        self.assertRaises(
            exception.ResourceProviderUpdateConflict,
            self.client.aggregate_remove_host, self.context, uuids.agg1, 'cn1')
        mock_set_aggs.assert_has_calls([mock.call(
            self.context, uuids.cn1, set([]), use_cache=False,
            generation=gen) for gen in gens])
class TestUsages(SchedulerReportClientTestCase):
    """Tests for the report client's quota usage-count retrieval."""
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.get')
    def test_get_usages_counts_for_quota_fail(self, mock_get):
        """A non-200 response raises UsagesRetrievalFailed for both the
        project-only query and the project+user query.
        """
        # First call with project fails
        mock_get.return_value = fake_requests.FakeResponse(500, content='err')
        self.assertRaises(exception.UsagesRetrievalFailed,
                          self.client.get_usages_counts_for_quota,
                          self.context, 'fake-project')
        mock_get.assert_called_once_with(
            '/usages?project_id=fake-project', version='1.9',
            global_request_id=self.context.global_id)
        # Second call with project + user fails
        mock_get.reset_mock()
        fake_good_response = fake_requests.FakeResponse(
            200, content=jsonutils.dumps(
                {'usages': {orc.VCPU: 2,
                            orc.MEMORY_MB: 512}}))
        mock_get.side_effect = [fake_good_response,
                                fake_requests.FakeResponse(500, content='err')]
        self.assertRaises(exception.UsagesRetrievalFailed,
                          self.client.get_usages_counts_for_quota,
                          self.context, 'fake-project', user_id='fake-user')
        self.assertEqual(2, mock_get.call_count)
        call1 = mock.call(
            '/usages?project_id=fake-project', version='1.9',
            global_request_id=self.context.global_id)
        call2 = mock.call(
            '/usages?project_id=fake-project&user_id=fake-user', version='1.9',
            global_request_id=self.context.global_id)
        mock_get.assert_has_calls([call1, call2])
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.get')
    def test_get_usages_counts_for_quota_retries(self, mock_get):
        """ConnectFailure to placement is retried a limited number of times
        per query, then raised if it persists.
        """
        # Two attempts have a ConnectFailure and the third succeeds
        fake_project_response = fake_requests.FakeResponse(
            200, content=jsonutils.dumps(
                {'usages': {orc.VCPU: 2,
                            orc.MEMORY_MB: 512}}))
        mock_get.side_effect = [ks_exc.ConnectFailure,
                                ks_exc.ConnectFailure,
                                fake_project_response]
        counts = self.client.get_usages_counts_for_quota(self.context,
                                                         'fake-project')
        self.assertEqual(3, mock_get.call_count)
        expected = {'project': {'cores': 2, 'ram': 512}}
        self.assertDictEqual(expected, counts)
        # Project query succeeds, first project + user query has a
        # ConnectFailure, second project + user query succeeds
        mock_get.reset_mock()
        fake_user_response = fake_requests.FakeResponse(
            200, content=jsonutils.dumps(
                {'usages': {orc.VCPU: 1,
                            orc.MEMORY_MB: 256}}))
        mock_get.side_effect = [fake_project_response,
                                ks_exc.ConnectFailure,
                                fake_user_response]
        counts = self.client.get_usages_counts_for_quota(
            self.context, 'fake-project', user_id='fake-user')
        self.assertEqual(3, mock_get.call_count)
        expected['user'] = {'cores': 1, 'ram': 256}
        self.assertDictEqual(expected, counts)
        # Three attempts in a row have a ConnectFailure
        mock_get.reset_mock()
        mock_get.side_effect = [ks_exc.ConnectFailure] * 4
        self.assertRaises(ks_exc.ConnectFailure,
                          self.client.get_usages_counts_for_quota,
                          self.context, 'fake-project')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.get')
    def test_get_usages_counts_default_zero(self, mock_get):
        """Empty usage responses yield zero cores/ram counts."""
        # A project and user are not yet consuming any resources.
        fake_response = fake_requests.FakeResponse(
            200, content=jsonutils.dumps({'usages': {}}))
        mock_get.side_effect = [fake_response, fake_response]
        counts = self.client.get_usages_counts_for_quota(
            self.context, 'fake-project', user_id='fake-user')
        self.assertEqual(2, mock_get.call_count)
        expected = {'project': {'cores': 0, 'ram': 0},
                    'user': {'cores': 0, 'ram': 0}}
        self.assertDictEqual(expected, counts)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.get')
    def test_get_usages_count_with_pcpu(self, mock_get):
        """VCPU and PCPU usages are summed into the 'cores' count."""
        fake_responses = fake_requests.FakeResponse(
            200,
            content=jsonutils.dumps({'usages': {orc.VCPU: 2, orc.PCPU: 2}}))
        mock_get.return_value = fake_responses
        counts = self.client.get_usages_counts_for_quota(
            self.context, 'fake-project', user_id='fake-user')
        self.assertEqual(2, mock_get.call_count)
        expected = {'project': {'cores': 4, 'ram': 0},
                    'user': {'cores': 4, 'ram': 0}}
        self.assertDictEqual(expected, counts)
|
apache-2.0
|
gram526/VTK
|
ThirdParty/Twisted/twisted/conch/test/test_scripts.py
|
41
|
1874
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the command-line interfaces to conch.
"""
# Probe for optional dependencies; each *Skip variable is either None
# (dependency available) or a human-readable reason used to skip tests.
try:
    import pyasn1
except ImportError:
    pyasn1Skip = "Cannot run without PyASN1"
else:
    pyasn1Skip = None

try:
    import Crypto
except ImportError:
    cryptoSkip = "can't run w/o PyCrypto"
else:
    cryptoSkip = None

try:
    import tty
except ImportError:
    ttySkip = "can't run w/o tty"
else:
    ttySkip = None

try:
    import Tkinter
except ImportError:
    tkskip = "can't run w/o Tkinter"
else:
    # Tkinter may import fine but still be unusable (e.g. no display);
    # try to create and destroy a root window to find out.
    try:
        Tkinter.Tk().destroy()
    # "except X as e" replaces the legacy "except X, e" comma form, which
    # was removed in Python 3; "as" is valid on all supported versions.
    except Tkinter.TclError as e:
        tkskip = "Can't test Tkinter: " + str(e)
    else:
        tkskip = None
from twisted.trial.unittest import TestCase
from twisted.scripts.test.test_scripts import ScriptTestsMixin
from twisted.python.test.test_shellcomp import ZshScriptTestMixin
class ScriptTests(TestCase, ScriptTestsMixin):
    """
    Tests for the Conch scripts.
    """
    # Class-wide skip when PyASN1 or PyCrypto is unavailable; individual
    # tests combine this with their own extra requirements below.
    skip = pyasn1Skip or cryptoSkip
    def test_conch(self):
        # Verify the conch client script imports/runs cleanly.
        self.scriptTest("conch/conch")
    test_conch.skip = ttySkip or skip
    def test_cftp(self):
        # Verify the cftp (SFTP client) script.
        self.scriptTest("conch/cftp")
    test_cftp.skip = ttySkip or skip
    def test_ckeygen(self):
        # Verify the ckeygen (key generation) script; needs no tty.
        self.scriptTest("conch/ckeygen")
    def test_tkconch(self):
        # Verify the Tk GUI client script; requires a working Tkinter.
        self.scriptTest("conch/tkconch")
    test_tkconch.skip = tkskip or skip
class ZshIntegrationTestCase(TestCase, ZshScriptTestMixin):
    """
    Test that zsh completion functions are generated without error
    """
    # (script name, fully-qualified Options class) pairs consumed by
    # ZshScriptTestMixin to generate completion functions.
    generateFor = [('conch', 'twisted.conch.scripts.conch.ClientOptions'),
                   ('cftp', 'twisted.conch.scripts.cftp.ClientOptions'),
                   ('ckeygen', 'twisted.conch.scripts.ckeygen.GeneralOptions'),
                   ('tkconch', 'twisted.conch.scripts.tkconch.GeneralOptions'),
                   ]
|
bsd-3-clause
|
mskrzypkows/servo
|
tests/wpt/css-tests/tools/wptserve/wptserve/handlers.py
|
86
|
12804
|
import cgi
import json
import os
import traceback
import urllib
import urlparse
from constants import content_types
from pipes import Pipeline, template
from ranges import RangeParser
from request import Authentication
from response import MultipartContent
from utils import HTTPException
__all__ = ["file_handler", "python_script_handler",
"FunctionHandler", "handler", "json_handler",
"as_is_handler", "ErrorHandler", "BasicAuthHandler"]
def guess_content_type(path):
    """Return the MIME type for *path* based on its file extension.

    Unknown extensions fall back to application/octet-stream.
    """
    extension = os.path.splitext(path)[1].lstrip(".")
    if extension not in content_types:
        return "application/octet-stream"
    return content_types[extension]
def filesystem_path(base_path, request, url_base="/"):
    """Map a request's URL path onto a filesystem path under base_path.

    Raises HTTPException(404) for paths containing ".." or resolving
    outside base_path.
    """
    if base_path is None:
        base_path = request.doc_root
    path = request.url_parts.path
    if path.startswith(url_base):
        path = path[len(url_base):]
    if ".." in path:
        raise HTTPException(404)
    new_path = os.path.join(base_path, path)
    # Otherwise setting path to / allows access outside the root directory
    if not new_path.startswith(base_path):
        raise HTTPException(404)
    # NOTE(review): a plain prefix check also matches sibling paths such as
    # base_path + "-other"; comparing against base_path + os.sep would be
    # stricter — TODO confirm intended behavior.
    return new_path
class DirectoryHandler(object):
    """Serve an HTML listing for a directory under base_path."""
    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base
    def __repr__(self):
        return "<%s base_path:%s url_base:%s>" % (self.__class__.__name__, self.base_path, self.url_base)
    def __call__(self, request, response):
        # Directory URLs must end in "/"; anything else is not found.
        if not request.url_parts.path.endswith("/"):
            raise HTTPException(404)
        path = filesystem_path(self.base_path, request, self.url_base)
        if not os.path.isdir(path):
            raise HTTPException(404, "%s is not a directory" % path)
        response.headers = [("Content-Type", "text/html")]
        # BUG FIX: the list opened with <ul> was closed with </li>;
        # close it with </ul> so the generated markup is well-formed.
        response.content = """<!doctype html>
<meta name="viewport" content="width=device-width">
<title>Directory listing for %(path)s</title>
<h1>Directory listing for %(path)s</h1>
<ul>
%(items)s
</ul>
""" % {"path": cgi.escape(request.url_parts.path),
       "items": "\n".join(self.list_items(request, path))}
    def list_items(self, request, path):
        """Yield <li> HTML fragments: a ".." parent link (when not at the
        server root) followed by one entry per directory member."""
        # TODO: this won't actually list all routes, only the
        # ones that correspond to a real filesystem path. It's
        # not possible to list every route that will match
        # something, but it should be possible to at least list the
        # statically defined ones
        base_path = request.url_parts.path
        if not base_path.endswith("/"):
            base_path += "/"
        if base_path != "/":
            link = urlparse.urljoin(base_path, "..")
            yield ("""<li class="dir"><a href="%(link)s">%(name)s</a>""" %
                   {"link": link, "name": ".."})
        for item in sorted(os.listdir(path)):
            link = cgi.escape(urllib.quote(item))
            if os.path.isdir(os.path.join(path, item)):
                link += "/"
                class_ = "dir"
            else:
                class_ = "file"
            yield ("""<li class="%(class)s"><a href="%(link)s">%(name)s</a>""" %
                   {"link": link, "name": cgi.escape(item), "class": class_})
directory_handler = DirectoryHandler()
class FileHandler(object):
    """Serve static files, honouring Range requests, sidecar ".headers"
    files, and response-rewriting pipelines ("pipe" query / ".sub" files).
    Directories are delegated to DirectoryHandler."""
    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base
        self.directory_handler = DirectoryHandler(self.base_path, self.url_base)
    def __repr__(self):
        return "<%s base_path:%s url_base:%s>" % (self.__class__.__name__, self.base_path, self.url_base)
    def __call__(self, request, response):
        path = filesystem_path(self.base_path, request, self.url_base)
        if os.path.isdir(path):
            return self.directory_handler(request, response)
        try:
            #This is probably racy with some other process trying to change the file
            file_size = os.stat(path).st_size
            response.headers.update(self.get_headers(request, path))
            if "Range" in request.headers:
                try:
                    byte_ranges = RangeParser()(request.headers['Range'], file_size)
                except HTTPException as e:
                    # 416 Range Not Satisfiable must advertise the full size.
                    if e.code == 416:
                        response.headers.set("Content-Range", "bytes */%i" % file_size)
                    raise
            else:
                byte_ranges = None
            data = self.get_data(response, path, byte_ranges)
            response.content = data
            query = urlparse.parse_qs(request.url_parts.query)
            pipeline = None
            # An explicit "pipe" query parameter wins over the implicit
            # "sub" pipeline triggered by a ".sub" infix in the filename
            # (e.g. foo.sub.html).
            if "pipe" in query:
                pipeline = Pipeline(query["pipe"][-1])
            elif os.path.splitext(path)[0].endswith(".sub"):
                pipeline = Pipeline("sub")
            if pipeline is not None:
                response = pipeline(request, response)
            return response
        except (OSError, IOError):
            raise HTTPException(404)
    def get_headers(self, request, path):
        """Combine default, per-directory (__dir__) and per-file headers."""
        rv = self.default_headers(path)
        rv.extend(self.load_headers(request, os.path.join(os.path.split(path)[0], "__dir__")))
        rv.extend(self.load_headers(request, path))
        return rv
    def load_headers(self, request, path):
        """Read sidecar headers for *path*; a ".sub.headers" file is run
        through the template substitution first."""
        headers_path = path + ".sub.headers"
        if os.path.exists(headers_path):
            use_sub = True
        else:
            headers_path = path + ".headers"
            use_sub = False
        try:
            with open(headers_path) as headers_file:
                data = headers_file.read()
        except IOError:
            return []
        else:
            if use_sub:
                data = template(request, data)
            return [tuple(item.strip() for item in line.split(":", 1))
                    for line in data.splitlines() if line]
    def get_data(self, response, path, byte_ranges):
        """Return the file body: full content, a single range (206), or a
        multipart/byteranges body when several ranges were requested."""
        with open(path, 'rb') as f:
            if byte_ranges is None:
                return f.read()
            else:
                response.status = 206
                if len(byte_ranges) > 1:
                    parts_content_type, content = self.set_response_multipart(response,
                                                                              byte_ranges,
                                                                              f)
                    for byte_range in byte_ranges:
                        content.append_part(self.get_range_data(f, byte_range),
                                            parts_content_type,
                                            [("Content-Range", byte_range.header_value())])
                    return content
                else:
                    response.headers.set("Content-Range", byte_ranges[0].header_value())
                    return self.get_range_data(f, byte_ranges[0])
    def set_response_multipart(self, response, ranges, f):
        """Switch the response to multipart/byteranges; return the original
        Content-Type (for the parts) and the MultipartContent container."""
        parts_content_type = response.headers.get("Content-Type")
        if parts_content_type:
            parts_content_type = parts_content_type[-1]
        else:
            parts_content_type = None
        content = MultipartContent()
        response.headers.set("Content-Type", "multipart/byteranges; boundary=%s" % content.boundary)
        return parts_content_type, content
    def get_range_data(self, f, byte_range):
        """Read the [lower, upper) slice of the open file object."""
        f.seek(byte_range.lower)
        return f.read(byte_range.upper - byte_range.lower)
    def default_headers(self, path):
        # Content type guessed from the file extension.
        return [("Content-Type", guess_content_type(path))]
file_handler = FileHandler()
class PythonScriptHandler(object):
    """Execute a Python file on disk and dispatch to its main() function."""
    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base
    def __repr__(self):
        return "<%s base_path:%s url_base:%s>" % (self.__class__.__name__, self.base_path, self.url_base)
    def __call__(self, request, response):
        path = filesystem_path(self.base_path, request, self.url_base)
        try:
            environ = {"__file__": path}
            # Python 2 execfile: run the script in its own namespace.
            execfile(path, environ, environ)
            if "main" in environ:
                handler = FunctionHandler(environ["main"])
                handler(request, response)
            else:
                raise HTTPException(500, "No main function in script %s" % path)
        except IOError:
            raise HTTPException(404)
python_script_handler = PythonScriptHandler()
class FunctionHandler(object):
    """Adapt a plain function to the handler interface.

    The wrapped function may return None (response filled in directly),
    bare content, (headers, content), or (status, headers, content).
    """
    def __init__(self, func):
        self.func = func
    def __call__(self, request, response):
        try:
            rv = self.func(request, response)
        except Exception:
            # Any error in the user function becomes a 500 carrying the
            # traceback as its message.
            msg = traceback.format_exc()
            raise HTTPException(500, message=msg)
        if rv is not None:
            if isinstance(rv, tuple):
                if len(rv) == 3:
                    status, headers, content = rv
                    response.status = status
                elif len(rv) == 2:
                    headers, content = rv
                else:
                    raise HTTPException(500)
                response.headers.update(headers)
            else:
                content = rv
            response.content = content
#The generic name here is so that this can be used as a decorator
def handler(func):
    """Decorator form of FunctionHandler."""
    return FunctionHandler(func)
class JsonHandler(object):
    """Wrap a handler function, JSON-encoding its returned body."""
    def __init__(self, func):
        self.func = func
    def __call__(self, request, response):
        return FunctionHandler(self.handle_request)(request, response)
    def handle_request(self, request, response):
        rv = self.func(request, response)
        response.headers.set("Content-Type", "application/json")
        enc = json.dumps
        if isinstance(rv, tuple):
            # Only the last element (the body) is encoded; any leading
            # status/headers elements pass through untouched.
            rv = list(rv)
            value = tuple(rv[:-1] + [enc(rv[-1])])
            length = len(value[-1])
        else:
            value = enc(rv)
            length = len(value)
        response.headers.set("Content-Length", length)
        return value
def json_handler(func):
    """Decorator form of JsonHandler."""
    return JsonHandler(func)
class AsIsHandler(object):
    """Write a file's bytes to the wire verbatim; the file is expected to
    contain the complete response (status line, headers and body)."""
    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base
    def __call__(self, request, response):
        path = filesystem_path(self.base_path, request, self.url_base)
        try:
            with open(path) as f:
                response.writer.write_content(f.read())
            # Raw responses carry no framing we can reuse; close after.
            response.close_connection = True
        except IOError:
            raise HTTPException(404)
as_is_handler = AsIsHandler()
class BasicAuthHandler(object):
    """Require HTTP Basic authentication before delegating to a handler."""
    def __init__(self, handler, user, password):
        """
        A Basic Auth handler
        :Args:
        - handler: a secondary handler for the request after authentication is successful (example file_handler)
        - user: string of the valid user name or None if any / all credentials are allowed
        - password: string of the password required
        """
        self.user = user
        self.password = password
        self.handler = handler
    def __call__(self, request, response):
        if "authorization" not in request.headers:
            # No credentials supplied: challenge the client.
            response.status = 401
            response.headers.set("WWW-Authenticate", "Basic")
            return response
        else:
            auth = Authentication(request.headers)
            # user is None means "accept any credentials".
            if self.user is not None and (self.user != auth.username or self.password != auth.password):
                response.set_error(403, "Invalid username or password")
                return response
            return self.handler(request, response)
basic_auth_handler = BasicAuthHandler(file_handler, None, None)
class ErrorHandler(object):
    """Handler that unconditionally responds with a fixed error status."""
    def __init__(self, status):
        self.status = status
    def __call__(self, request, response):
        response.set_error(self.status)
class StaticHandler(object):
    def __init__(self, path, format_args, content_type, **headers):
        """Handler that reads a file from a path and substitutes some fixed data

        :param path: Path to the template file to use
        :param format_args: Dictionary of values to substitute into the template file
        :param content_type: Content type header to serve the response with
        :param headers: Extra headers to send with responses (keyword names
                        have "_" replaced with "-")"""
        with open(path) as f:
            self.data = f.read() % format_args
        self.resp_headers = [("Content-Type", content_type)]
        # BUG FIX: this previously appended to an undefined local name
        # `resp_headers`, raising NameError; append to the instance
        # attribute. `.items()` (not py2-only `iteritems()`) works on
        # both Python 2 and 3.
        for k, v in headers.items():
            self.resp_headers.append((k.replace("_", "-"), v))
        self.handler = handler(self.handle_request)
    def handle_request(self, request, response):
        # Constant headers/body computed once in __init__.
        return self.resp_headers, self.data
    def __call__(self, request, response):
        rv = self.handler(request, response)
        return rv
|
mpl-2.0
|
dalegregory/odoo
|
addons/account/wizard/account_open_closed_fiscalyear.py
|
237
|
2537
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_open_closed_fiscalyear(osv.osv_memory):
    """Wizard that removes the end-of-year closing entries of a fiscal year,
    effectively re-opening it."""
    _name = "account.open.closed.fiscalyear"
    _description = "Choose Fiscal Year"
    _columns = {
        'fyear_id': fields.many2one('account.fiscalyear', \
            'Fiscal Year', required=True, help='Select Fiscal Year which you want to remove entries for its End of year entries journal'),
    }
    def remove_entries(self, cr, uid, ids, context=None):
        """Delete all account moves in the selected fiscal year's
        'End of Year Entries Journal' period.

        Raises an error if that journal/period is not configured or if the
        period is already closed.
        """
        move_obj = self.pool.get('account.move')
        data = self.browse(cr, uid, ids, context=context)[0]
        period_journal = data.fyear_id.end_journal_period_id or False
        if not period_journal:
            raise osv.except_osv(_('Error!'), _("You have to set the 'End of Year Entries Journal' for this Fiscal Year which is set after generating opening entries from 'Generate Opening Entries'."))
        if period_journal.period_id.state == 'done':
            raise osv.except_osv(_('Error!'), _("You can not cancel closing entries if the 'End of Year Entries Journal' period is closed."))
        ids_move = move_obj.search(cr, uid, [('journal_id','=',period_journal.journal_id.id),('period_id','=',period_journal.period_id.id)])
        if ids_move:
            # Raw SQL delete for speed; invalidate the ORM cache afterwards
            # so stale records are not served.
            cr.execute('delete from account_move where id IN %s', (tuple(ids_move),))
            self.invalidate_cache(cr, uid, context=context)
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
firstval/micropython
|
examples/SDdatalogger/datalogger.py
|
98
|
1168
|
# datalogger.py
# Logs the data from the acceleromter to a file on the SD-card
import pyb
# creating objects
accel = pyb.Accel()
blue = pyb.LED(4)
switch = pyb.Switch()

# loop
while True:
    # wait for interrupt
    # this reduces power consumption while waiting for switch press
    pyb.wfi()
    # start if switch is pressed
    if switch():
        pyb.delay(200)  # delay avoids detection of multiple presses
        blue.on()       # blue LED indicates file open
        # open file on SD (SD: '/sd/', flash: '/flash/'); the context
        # manager guarantees the file is closed even if a write fails,
        # so the log is not corrupted by an unclosed handle
        with open('/sd/log.csv', 'w') as log:
            # log until switch is pressed again
            while not switch():
                t = pyb.millis()                # timestamp in ms
                x, y, z = accel.filtered_xyz()  # acceleration data
                log.write('{},{},{},{}\n'.format(t, x, y, z))
        blue.off()      # blue LED indicates file closed
        pyb.delay(200)  # delay avoids detection of multiple presses
|
mit
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.