prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
# -*- coding:utf-8 -*-
import sys
from datetime import datetime
from django.template.loader import get_template
from django.shortcuts import render,redirect
from django.http import HttpResponse
from .models import Post
# NOTE(review): Python 2-only hack -- sys.setdefaultencoding is removed by
# site.py after startup, so reload(sys) restores it; this breaks on Python 3.
reload(sys)
sys.setdefaultencoding('utf-8')
# Create your views here.
def homepage(request):
    """Render the site index page.

    Exposes ``posts`` (all Post rows) and ``now`` (server time) to the
    template through locals().
    """
    template = get_template('index.html')
    posts = Post.objects.all()
    now = datetime.now()
    # locals() hands `posts` and `now` to the template context; the old
    # debug print and commented-out list building were removed.
    html = template.render(locals())
    return HttpResponse(html)
def showpost(request, slug):
    """Render a single post identified by ``slug``.

    Redirects to /homepage/ when no Post with that slug exists.  The old
    bare ``except:`` also swallowed template errors; only the missing-post
    case is treated as "not found" now.
    """
    template = get_template('post.html')
    try:
        post = Post.objects.get(slug=slug)
    except Post.DoesNotExist:
        return redirect('/homepage/')
    html = template.render(locals())
    return HttpResponse(html)
|
encoded as a UTF-8
bytestring. On Python 3, __str__ returns the output of __unicode__.
Useful as a mix-in. If you support Python 2 and 3 with a single code base,
you can inherit this mix-in and just define __unicode__.
"""
def __init__(self, *args, **kwargs):
warnings.warn("StrAndUnicode is deprecated. Define a | __str__ method "
"and apply the @python_2_unicode_compatible decorator "
"instead.", PendingDeprecationWarning, stacklevel=2)
super(StrAndUnicode, self).__init__(*args, **kwargs)
if six.PY3:
def __str__(self):
return self._ | _unicode__()
else:
def __str__(self):
return self.__unicode__().encode('utf-8')
def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.
    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    if six.PY3:
        # Nothing to adapt on Python 3: __str__ already returns text.
        return klass
    klass.__unicode__ = klass.__str__
    klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Returns a text object representing 's' -- unicode on Python 2 and str on
    Python 3. Treats bytestrings using the 'encoding' codec.
    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # A Promise is the result of a gettext_lazy() call; keep it lazy.
    return s if isinstance(s, Promise) else force_text(s, encoding, strings_only, errors)
def is_protected_type(obj):
    """Determine if the object instance is of a protected type.
    Objects of protected types are preserved as-is when passed to
    force_text(strings_only=True).
    """
    protected_types = six.integer_types + (
        type(None), float, Decimal,
        datetime.datetime, datetime.date, datetime.time,
    )
    return isinstance(obj, protected_types)
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_text, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.
    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # Handle the common case first, saves 30-40% when s is an instance of
    # six.text_type. This function gets called often in that setting.
    if isinstance(s, six.text_type):
        return s
    if strings_only and is_protected_type(s):
        return s
    try:
        if not isinstance(s, six.string_types):
            # Not text and not bytes: coerce the object itself.
            if hasattr(s, '__unicode__'):
                s = s.__unicode__()
            else:
                if six.PY3:
                    if isinstance(s, bytes):
                        s = six.text_type(s, encoding, errors)
                    else:
                        s = six.text_type(s)
                else:
                    # Python 2: go through bytes() so __str__ output is
                    # decoded with the requested encoding.
                    s = six.text_type(bytes(s), encoding, errors)
        else:
            # Note: We use .decode() here, instead of six.text_type(s, encoding,
            # errors), so that if s is a SafeBytes, it ends up being a
            # SafeText at the end.
            s = s.decode(encoding, errors)
    except UnicodeDecodeError as e:
        if not isinstance(s, Exception):
            raise DjangoUnicodeDecodeError(s, *e.args)
        else:
            # If we get to here, the caller has passed in an Exception
            # subclass populated with non-ASCII bytestring data without a
            # working unicode method. Try to handle this without raising a
            # further exception by individually forcing the exception args
            # to unicode.
            s = ' '.join([force_text(arg, encoding, strings_only,
                          errors) for arg in s])
    return s
def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Returns a bytestring version of 's', encoded as specified in 'encoding'.
    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # A Promise is the result of a gettext_lazy() call; keep it lazy.
    return s if isinstance(s, Promise) else force_bytes(s, encoding, strings_only, errors)
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_bytes, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.
    If strings_only is True, don't convert (some) non-string-like objects.
    """
    if isinstance(s, bytes):
        if encoding == 'utf-8':
            return s
        else:
            # Transcode: assumes the incoming bytestring is UTF-8.
            return s.decode('utf-8', errors).encode(encoding, errors)
    if strings_only and (s is None or isinstance(s, int)):
        return s
    if isinstance(s, Promise):
        # Lazy translation object: render it to text, then encode.
        return six.text_type(s).encode(encoding, errors)
    if not isinstance(s, six.string_types):
        try:
            if six.PY3:
                return six.text_type(s).encode(encoding)
            else:
                return bytes(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return b' '.join([force_bytes(arg, encoding, strings_only,
                                 errors) for arg in s])
            return six.text_type(s).encode(encoding, errors)
    else:
        return s.encode(encoding, errors)
# Platform-appropriate aliases: the *_str helpers produce the native `str`
# type of the running interpreter (text on Python 3, bytes on Python 2).
if six.PY3:
    smart_str = smart_text
    force_str = force_text
else:
    smart_str = smart_bytes
    force_str = force_bytes
    # backwards compatibility for Python 2
    smart_unicode = smart_text
    force_unicode = force_text
smart_str.__doc__ = """\
Apply smart_text in Python 3 and smart_bytes in Python 2.
This is suitable for writing to sys.stdout (for instance).
"""
force_str.__doc__ = """\
Apply force_text in Python 3 and force_bytes in Python 2.
"""
def iri_to_uri(iri):
    """
    Convert an Internationalized Resource Identifier (IRI) portion to a URI
    portion that is suitable for inclusion in a URL.
    This is the algorithm from section 3.1 of RFC 3987. However, since we are
    assuming input is either UTF-8 or unicode already, we can simplify things a
    little from the full method.
    Returns an ASCII string containing the encoded result.
    """
    if iri is None:
        return None
    # The safe set is the union of RFC 3986's "reserved" (gen-delims +
    # sub-delims, sections 2.2/2.3) and "unreserved" characters; urllib.quote
    # already treats the unreserved set minus '~' as safe.  '%' is included
    # because the end of RFC 3987 section 3.1 says it must not be re-encoded.
    return quote(force_bytes(iri), safe=b"/#%[]=:;$&()+,!?*@'~")
def filepath_to_uri(path):
    """Convert an file system path to a URI portion that is suitable for
    inclusion in a URL.
    We are assuming input is either UTF-8 or unicode already.
    This method will encode certain chars that would normally be recognized as
    special chars for URIs. Note that this method does not encode the '
    character, as it is a valid character within URIs. See
    encodeURIComponent() JavaScript function for more details.
    Returns an ASCII string containing the encoded result.
    """
    if path is None:
        return None
    # Normalize Windows-style separators.  `os.sep`/`os.altsep` are avoided
    # deliberately to keep hardcoded separators flexible.
    normalized = path.replace("\\", "/")
    return quote(force_bytes(normalized), safe=b"/~!*()'")
# The encoding of the default system locale but falls back to the
# given fallback encoding if the encoding is unsupported by python or could
# not be determined. Se |
def foo(a_new, b_new):
    """Print a_new + b_new * 123 (multiplication binds first)."""
    print(a_new + b_new * 123)


def f():
    """Demo caller: invokes foo with a == b == 1, printing 124."""
    a = 1
    b = 1
    foo(a, b)
# vim: tabstop=4 shiftwidth=4 softtabstop=4
from django.conf.urls.defaults import *
from django.conf import settings
# URL regex templates: each %s slot is filled with an action suffix below.
INSTANCES = r'^(?P<tenant_id>[^/]+)/instances/(?P<instance_id>[^/]+)/%s$'
IMAGES = r'^(?P<tenant_id>[^/]+)/images/(?P<image_id>[^/]+)/%s$'
KEYPAIRS = r'^(?P<tenant_id>[^/]+)/keypairs/%s$'
urlpatterns = patterns('django_openstack.dash.views.instances',
    url(r'^(?P<tenant_id>[^/]+)/$', 'usage', name='dash_usage'),
    url(r'^(?P<tenant_id>[^/]+)/instances/$', 'index', name='dash_instances'),
    url(INSTANCES % 'console', 'console', name='dash_instances_console'),
    url(INSTANCES % 'vnc', 'vnc', name='dash_instances_vnc'),
)
urlpatterns += patterns('django_openstack.dash.views.images',
    url(r'^(?P<tenant_id>[^/]+)/images/$', 'index', name='dash_images'),
    url(IMAGES % 'launch', 'launch', name='dash_images_launch'),
)
urlpatterns += patterns('django_openstack.dash.views.keypairs',
    url(r'^(?P<tenant_id>[^/]+)/keypairs/$', 'index', name='dash_keypairs'),
    url(KEYPAIRS % 'create', 'create', name='dash_keypairs_create'),
)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import time
from django.db import connection
from django_logutils.conf import settings
logger = logging.getLogger(__name__)
def create_log_dict(request, response):
    """
    Create a dictionary with logging data.
    """
    remote_addr = request.META.get('REMOTE_ADDR')
    # Only trust the proxy header when the direct peer is one of our own.
    if remote_addr in getattr(settings, 'INTERNAL_IPS', []):
        remote_addr = request.META.get('HTTP_X_FORWARDED_FOR') or remote_addr
    user_email = '-'
    if hasattr(request, 'user'):
        user_email = getattr(request.user, 'email', '-')
    content_length = ('streaming' if response.streaming
                      else len(response.content))
    return {
        # 'event' makes event-based filtering possible in logging backends
        # like logstash
        'event': settings.LOGUTILS_LOGGING_MIDDLEWARE_EVENT,
        'remote_address': remote_addr,
        'user_email': user_email,
        'method': request.method,
        'url': request.get_full_path(),
        'status': response.status_code,
        'content_length': content_length,
        'request_time': -1,  # NA value: real value added by LoggingMiddleware
    }
def create_log_message(log_dict, use_sql_info=False, fmt=True):
    """
    Create the logging message string.

    log_dict     -- mapping with the %(...)s keys used below.
    use_sql_info -- also append query count and total SQL time (reads
                    django.db.connection.queries and mutates log_dict).
    fmt          -- return the interpolated string; otherwise the template.
    """
    log_msg = (
        "%(remote_address)s %(user_email)s %(method)s %(url)s %(status)d "
        "%(content_length)d (%(request_time).2f seconds)"
    )
    if use_sql_info:
        # connection.queries times are strings of seconds; report in ms.
        sql_time = sum(
            float(q['time']) for q in connection.queries) * 1000
        log_dict.update({
            'nr_queries': len(connection.queries),
            'sql_time': sql_time,
        })
        log_msg += " (%(nr_queries)d SQL queries, %(sql_time)f ms)"
    return log_msg % log_dict if fmt else log_msg
class LoggingMiddleware(object):
    """
    Capture request info and logs it.
    Logs all requests with log level info. If request take longer than
    REQUEST_TIME_THRESHOLD, log level warning is used.
    Logging middleware that captures the following:
    * logging event.
    * remote address (whether proxied or direct).
    * if authenticated, then user email address.
    * request method (GET/POST etc).
    * request full path.
    * response status code (200, 404 etc).
    * content length.
    * request process time.
    * if DEBUG=True or REQUEST_TIME_THRESHOLD is exceeded, also logs SQL
      query information - number of queries and how long they took.
    Based on: https://djangosnippets.org/snippets/2624/
    """

    def __init__(self, *args, **kwargs):
        """
        Add initial empty start_time.
        """
        self.start_time = None

    def process_request(self, request):
        """
        Add start time to request.
        """
        self.start_time = time.time()

    def process_response(self, request, response):
        """
        Create the logging message..
        """
        try:
            log_dict = create_log_dict(request, response)
            # Fall back to -1 (the NA marker) when no start time was recorded.
            start = getattr(self, 'start_time', None)
            request_time = time.time() - start if start else -1
            log_dict.update({'request_time': request_time})
            too_slow = (
                request_time > float(settings.LOGUTILS_REQUEST_TIME_THRESHOLD))
            # Include SQL statistics when debugging or when the request is slow.
            message = create_log_message(
                log_dict, settings.DEBUG or too_slow, fmt=False)
            emit = logger.warning if too_slow else logger.info
            emit(message, log_dict, extra=log_dict)
        except Exception as e:
            # Logging must never break the response cycle.
            logger.exception(e)
        return response
|
# Map '{flavor}.models.{name}' -> imported model module, one entry per spec
# returned by list_models().  NOTE(review): list_models()/import_model() are
# defined outside this chunk -- spec keys inferred from the format string.
MODULES = {
    '{flavor}.models.{name}'.format(**spec): import_model(spec)
    for spec in list_models()
}
# Per-flavor speed flag keyed by the middle component of the module name;
# presumably dbg=debug build (slow), hp/lp fast builds -- TODO confirm.
IS_FAST = {'dbg': False, 'hp': True, 'lp': True}
def model_is_fast(model):
    """Look up the IS_FAST flag for a model module via its flavor component
    (the middle piece of a '{flavor}.models.{name}' module name)."""
    return IS_FAST[model.__name__.split('.')[1]]
def iter_examples(module):
    """Yield each EXAMPLE dict from module.EXAMPLES after sanity checks.

    Each EXAMPLE must carry 'shared' and 'values' keys, with at least 7
    values so the statistical tests downstream have enough data.
    """
    assert_hasattr(module, 'EXAMPLES')
    EXAMPLES = module.EXAMPLES
    assert_is_instance(EXAMPLES, list)
    assert_true(EXAMPLES, 'no examples provided')
    for i, EXAMPLE in enumerate(EXAMPLES):
        # Python 2 print statement (this module also uses xrange below).
        print 'example {}/{}'.format(1 + i, len(EXAMPLES))
        assert_in('shared', EXAMPLE)
        assert_in('values', EXAMPLE)
        values = EXAMPLE['values']
        assert_is_instance(values, list)
        count = len(values)
        assert_true(
            count >= 7,
            'Add more example values (expected >= 7, found {})'.format(count))
        yield EXAMPLE
def for_each_model(*filters):
    '''
    Run one test per Model, filtering out inappropriate Models for test.
    '''
    def filtered(test_fun):
        # Runs `test_fun` once per EXAMPLE of a single named model.
        @functools.wraps(test_fun)
        def test_one_model(name):
            module = MODULES[name]
            assert_hasattr(module, 'Shared')
            for EXAMPLE in iter_examples(module):
                test_fun(module, EXAMPLE)
        # Nose-style generator test: yields (callable, arg) pairs for each
        # module accepted by every filter.
        @functools.wraps(test_fun)
        def test_all_models():
            for name in MODULES:
                module = MODULES[name]
                if all(f(module) for f in filters):
                    yield test_one_model, name
        return test_all_models
    return filtered
@for_each_model()
def test_value(module, EXAMPLE):
    # Every example value must be an instance of the model's Value type.
    assert_hasattr(module, 'Value')
    assert_is_instance(module.Value, type)
    for example_value in EXAMPLE['values']:
        assert_is_instance(example_value, module.Value)
@for_each_model()
def test_shared(module, EXAMPLE):
    # Adding the same values under the same RNG seed must give identical
    # shared state; removing them must restore the original dump.
    assert_hasattr(module, 'Shared')
    assert_is_instance(module.Shared, type)
    shared1 = module.Shared.from_dict(EXAMPLE['shared'])
    shared2 = module.Shared.from_dict(EXAMPLE['shared'])
    assert_close(shared1.dump(), EXAMPLE['shared'])
    values = EXAMPLE['values']
    # Reseed before each pass so both objects see identical randomness.
    seed_all(0)
    for value in values:
        shared1.add_value(value)
    seed_all(0)
    for value in values:
        shared2.add_value(value)
    assert_close(shared1.dump(), shared2.dump())
    for value in values:
        shared1.remove_value(value)
    assert_close(shared1.dump(), EXAMPLE['shared'])
@for_each_model()
def test_group(module, EXAMPLE):
    # A group built incrementally must equal Group.from_values, survive a
    # dump/load round trip, and support remove/merge/score operations.
    assert_hasattr(module, 'Group')
    assert_is_instance(module.Group, type)
    shared = module.Shared.from_dict(EXAMPLE['shared'])
    values = EXAMPLE['values']
    for value in values:
        shared.add_value(value)
    group1 = module.Group()
    group1.init(shared)
    for value in values:
        group1.add_value(shared, value)
    group2 = module.Group.from_values(shared, values)
    assert_close(group1.dump(), group2.dump())
    # dump -> init -> load must reproduce the dump exactly.
    group = module.Group.from_values(shared, values)
    dumped = group.dump()
    group.init(shared)
    group.load(dumped)
    assert_close(group.dump(), dumped)
    for value in values:
        group2.remove_value(shared, value)
    assert_not_equal(group1, group2)
    group2.merge(shared, group1)
    for value in values:
        group1.score_value(shared, value)
    # xrange: this module targets Python 2.
    for _ in xrange(10):
        value = group1.sample_value(shared)
        group1.score_value(shared, value)
    module.sample_group(shared, 10)
    group1.score_data(shared)
    group2.score_data(shared)
@for_each_model(lambda module: hasattr(module.Shared, 'protobuf_load'))
def test_protobuf(module, EXAMPLE):
    # Round-trip Shared (and Group, when supported) through protobuf
    # messages and dict dumps, checking nothing is lost either way.
    if not has_protobuf:
        raise SkipTest('protobuf not available')
    shared = module.Shared.from_dict(EXAMPLE['shared'])
    values = EXAMPLE['values']
    Message = getattr(distributions.io.schema_pb2, module.NAME)
    message = Message.Shared()
    shared.protobuf_dump(message)
    shared2 = module.Shared()
    shared2.protobuf_load(message)
    assert_close(shared2.dump(), shared.dump())
    message.Clear()
    dumped = shared.dump()
    module.Shared.to_protobuf(dumped, message)
    assert_close(module.Shared.from_protobuf(message), dumped)
    if hasattr(module.Group, 'protobuf_load'):
        for value in values:
            shared.add_value(value)
        group = module.Group.from_values(shared, values)
        message = Message.Group()
        group.protobuf_dump(message)
        group2 = module.Group()
        group2.protobuf_load(message)
        assert_close(group2.dump(), group.dump())
        message.Clear()
        dumped = group.dump()
        module.Group.to_protobuf(dumped, message)
        assert_close(module.Group.from_protobuf(message), dumped)
@for_each_model()
def test_add_remove(module, EXAMPLE):
    # Test group_add_value, group_remove_value, score_data, score_value
    shared = module.Shared.from_dict(EXAMPLE['shared'])
    shared.realize()
    values = []
    group = module.Group.from_values(shared)
    score = 0.0
    assert_close(group.score_data(shared), score, err_msg='p(empty) != 1')
    for _ in range(DATA_COUNT):
        value = group.sample_value(shared)
        values.append(value)
        # Score before adding: chain rule p(x1..xn) = prod_i p(xi | x<i).
        score += group.score_value(shared, value)
        group.add_value(shared, value)
    group_all = module.Group.from_dict(group.dump())
    assert_close(
        score,
        group.score_data(shared),
        err_msg='p(x1,...,xn) != p(x1) p(x2|x1) p(xn|...)')
    # Removal must be order-independent: shuffle before removing.
    numpy.random.shuffle(values)
    for value in values:
        group.remove_value(shared, value)
    group_empty = module.Group.from_values(shared)
    assert_close(
        group.dump(),
        group_empty.dump(),
        err_msg='group + values - values != group')
    numpy.random.shuffle(values)
    for value in values:
        group.add_value(shared, value)
    assert_close(
        group.dump(),
        group_all.dump(),
        err_msg='group - values + values != group')
@for_each_model()
def test_add_repeated(module, EXAMPLE):
    # add_repeated_value(v, n) must equal n individual add_value(v) calls.
    shared = module.Shared.from_dict(EXAMPLE['shared'])
    shared.realize()
    for example_value in EXAMPLE['values']:
        expected = module.Group.from_values(shared)
        for _ in range(DATA_COUNT):
            expected.add_value(shared, example_value)
        actual = module.Group.from_values(shared)
        actual.add_repeated_value(shared, example_value, count=DATA_COUNT)
        assert_close(
            expected.dump(),
            actual.dump(),
            err_msg='n * add_value != add_repeated_value n')
@for_each_model()
def test_add_merge(module, EXAMPLE):
    # Test group_add_value, group_merge
    shared = module.Shared.from_dict(EXAMPLE['shared'])
    values = EXAMPLE['values'][:]
    for value in values:
        shared.add_value(value)
    numpy.random.shuffle(values)
    group = module.Group.from_values(shared, values)
    # Every split of the values into two groups must merge back to the same
    # group state (xrange: Python 2 module).
    for i in xrange(len(values) + 1):
        numpy.random.shuffle(values)
        group1 = module.Group.from_values(shared, values[:i])
        group2 = module.Group.from_values(shared, values[i:])
        group1.merge(shared, group2)
        assert_close(group.dump(), group1.dump())
@for_each_model()
def test_group_merge(module, EXAMPLE):
    # Interleave sampled values into two groups; merging them must equal the
    # group that observed every value, at every step.
    shared = module.Shared.from_dict(EXAMPLE['shared'])
    shared.realize()
    group1 = module.Group.from_values(shared)
    group2 = module.Group.from_values(shared)
    expected = module.Group.from_values(shared)
    actual = module.Group.from_values(shared)
    for _ in xrange(100):
        value = expected.sample_value(shared)
        expected.add_value(shared, value)
        group1.add_value(shared, value)
        value = expected.sample_value(shared)
        expected.add_value(shared, value)
        group2.add_value(shared, value)
        # Rebuild `actual` from group1 + group2 on every iteration.
        actual.load(group1.dump())
        actual.merge(shared, group2)
        assert_close(actual.dump(), expected.dump())
@for_each_model(lambda module: module.Value in [bool, int])
def test_group_allows_debt(module, EXAMPLE):
# Test that group.add_value can safely go into data debt
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
values = []
gro |
"""Tests for the Volumi | o integration. | """
|
# --- running
self.isInRunningLst = parent.isInRunningLst
self.appendToRunning = parent.appendToRunning
self.removeFromRunning = parent.removeFromRunning
# --- fault
self.isInFaultLst = parent.isInFaultLst
self.appendToFault = parent.appendToFault
self.removeFromFault = parent.removeFromFault
# --- hang
self.isInHangLst = parent.isInHangLst
self.appendToHang = parent.appendToHang
self.removeFromHang = parent.removeFromHang
# --- mailto
self.mailto = parent.mailto
def fireEvent(self, attrName, value, timestamp=None, quality=None):
attrFullName = "%s%s%s"\
% (self.devName.replace("/", SEPARATOR), SEPARATOR, attrName)
try:
if timestamp and quality:
self.fireEventsList([[attrFullName, value, timestamp,
quality]])
else:
self.fireEventsList([[attrFullName, value]])
except Exception as e:
self.error_stream("Cannot fire event for %s/%s: %s"
% (self.devName, attrName, e | ))
traceback.print_exc()
class Dog(Logger):
def __init__(self, devN | ame, joinerEvent=None, startDelay=None,
extraAttrs=None, *args, **kwargs):
super(Dog, self).__init__(*args, **kwargs)
self._devName = devName
self._devProxy = None
self._eventId = None
self._devState = None
# --- fault vbles
self._tryFaultRecovery = False
self._faultRecoveryCtr = 0
self._devStatus = None
# --- hangVbles
self._tryHangRecovery = False
self._hangRecoveryCtr = 0
# --- Thread for hang monitoring
self._joinerEvent = joinerEvent
self._thread = None
self._recheckPeriod = DEFAULT_RECHECK_TIME
self._overlaps = 0
self._overlapsAlert = DEFAULT_nOVERLAPS_ALERT
# --- extra attributes
self._extraAttributes = []
self._extraEventIds = {}
self._extraAttrValues = {}
for attrName in extraAttrs:
attrName = attrName.lower()
self._extraAttributes.append(attrName)
self._extraEventIds[attrName] = None
self._extraAttrValues[attrName] = None
# --- build proxy and event subscriptions
self.__buildProxy()
self.__createThread(startDelay)
    def __str__(self):
        # Short form: device name and last-known state.
        return "Dog(%s, state=%s)" % (self.devName, self.devState)
    def __repr__(self):
        # Verbose form including the recovery configuration flags.
        return "Dog(%s, state=%s, faultRecovery=%s, hangRecovery=%s)"\
            % (self.devName, self.devState, self.tryFaultRecovery,
               self.tryHangRecovery)
    # --- object properties
    @property
    def devName(self):
        # Name of the watched device (read-only).
        return self._devName
    @property
    def devProxy(self):
        # DeviceProxy created in __buildProxy(); None if that failed.
        return self._devProxy
    @property
    def devState(self):
        # Last state seen for the device (read-only).
        return self._devState
def hasExtraAttr(self, attrName):
return self._extraAttributes.count(attrName.lower()) > 0
    def getExtraAttr(self, attrName):
        """Read one monitored extra attribute; fire a change event when its
        value differs from the cached one, and maintain the hang list.

        Returns the attribute value, or None for DevFailed with an
        'unavailable'-style reason; raises a generic Exception otherwise.
        """
        try:
            value = self._devProxy[attrName].value
            timestamp = self._devProxy[attrName].time.totime()
            quality = self._devProxy[attrName].quality
            if value != self._extraAttrValues[attrName]:
                self.debug_stream("%s/%s has changed from %s to %s"
                                  % (self.devName, attrName,
                                     self._extraAttrValues[attrName], value))
                self._extraAttrValues[attrName] = value
                self.fireEvent(attrName, value, timestamp, quality)
            # A successful read proves the device answers again.
            if self.isInHangLst(self.devName):
                self.removeFromHang(self.devName)
            return value
        except DevFailed as e:
            # Confirm the hang with a direct State() call before flagging.
            if not self.isInHangLst(self.devName):
                try:
                    self.devProxy.State()
                except:
                    self.appendToHang(self.devName)
            # NOTE(review): e[0] is Python 2-style DevFailed indexing.
            if e[0].reason in ['ATTRIBUTE_UNAVAILABLE',
                               'SOFTWARE_FAILURE']:
                return
            self.warn_stream("%s/%s read exception: %r %s"
                             % (self.devName, attrName,
                                e[0].reason, e[0].desc))
        except Exception as e:
            self.error_stream("%s/%s read exception: %s"
                              % (self.devName, attrName, e))
        # Reached when either except branch fell through without returning.
        raise Exception("%s/%s cannot be read" % (self.devName, attrName))
    def setExtraAttr(self, attrName, value):
        """Write one extra attribute, maintaining the hang list like
        getExtraAttr() does on the read side."""
        try:
            self.info_stream("Writing %s/%s with %s"
                             % (self.devName, attrName, str(value)))
            self._devProxy[attrName] = value
            # A successful write proves the device answers again.
            if self.isInHangLst(self.devName):
                self.removeFromHang(self.devName)
        except DevFailed as e:
            if not self.isInHangLst(self.devName):
                try:
                    self.devProxy.State()
                except:
                    self.appendToHang(self.devName)
            self.warn_stream("%s/%s write exception: %r %s"
                             % (self.devName, attrName,
                                e[0].reason, e[0].desc))
        except Exception as e:
            self.error_stream("%s/%s write exception: %s"
                              % (self.devName, attrName, e))
        # NOTE(review): at this indent the raise also runs after a successful
        # write, which looks unintended -- confirm indentation upstream.
        raise Exception("%s/%s cannot be write" % (self.devName, attrName))
@property
def tryFaultRecovery(self):
return self._tryFaultRecovery
@tryFaultRecovery.setter
def tryFaultRecovery(self, value):
if type(value) == bool:
self._tryFaultRecovery = value
else:
self.error_stream("Only boolean assignment")
@property
def tryHangRecovery(self):
return self._tryHangRecovery
@tryFaultRecovery.setter
def tryHangRecovery(self, value):
if type(value) == bool:
if value and not Astor:
self.error_stream("This feature is only available with "
"fandango's Astor present")
return
self._tryHangRecovery = value
else:
self.error_stream("Only boolean assignment")
    @property
    def recheckPeriod(self):
        # Interval between hang-monitor rechecks (read-only); initialized
        # from DEFAULT_RECHECK_TIME in __init__.
        return self._recheckPeriod
    @property
    def overlapsAlert(self):
        # Overlap count threshold (read-only); initialized from
        # DEFAULT_nOVERLAPS_ALERT -- semantics inferred from the name, confirm.
        return self._overlapsAlert
    # --- Constructor methods
    def __buildProxy(self):
        """Create the DeviceProxy and subscribe to its events; on failure
        the device is flagged as hung instead of raising."""
        try:
            self._devProxy = DeviceProxy(self._devName)
            self.__subscribe_event()
        except Exception as e:
            self.error_stream("%s proxy not available: %s"
                              % (self._devName, e))
            self.appendToHang(self.devName)
    def __subscribe_event(self):
        """Subscribe to State change events (self acts as the callback),
        then to every configured extra attribute."""
        self._eventId = \
            self._devProxy.subscribe_event('State',
                                           EventType.CHANGE_EVENT,
                                           self)
        self.debug_stream("Subscribed to %s/State (id=%d)"
                          % (self.devName, self._eventId))
        self.__subscribe_extraAttrs()
    def __unsubscribe_event(self):
        """Drop the State event subscription (if any), then the
        extra-attribute subscriptions."""
        if self._eventId:
            try:
                self._devProxy.unsubscribe_event(self._eventId)
            except Exception as e:
                self.error_stream("%s failed to unsubscribe event: %s"
                                  % (self.devName, e))
            self._eventId = None
        else:
            self.warn_stream("%s no event id to unsubscribe." % (self.devName))
        self.__unsubscribe_extraAttrs()
def __subscribe_extraAttrs(self):
for attrName in self._extraAttributes:
try:
self._extraEventIds[attrName] = \
self._devProxy.subscribe_event(attrName,
EventType.CHANGE_EVENT,
self)
self.debug_stream("Subscribed to %s/%s (id=%d)"
% (self.devName, attrName,
|
-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))
def notice(msg):
    """Record *msg* in the system log at NOTICE priority."""
    syslog.syslog(syslog.LOG_NOTICE, msg)
def daemonize_self():
    """Detach from the controlling terminal via the classic double fork and
    redirect stdio to /dev/null.  Both parent processes sys.exit(0)."""
    # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
    try:
        pid = os.fork()
        if pid > 0:
            # exit first parent
            sys.exit(0)
    except OSError:
        e = sys.exc_info()[1]
        sys.exit("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
    # decouple from parent environment (does not chdir / to keep the directory context the same as for non async tasks)
    os.setsid()
    os.umask(int('022', 8))
    # do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # print "Daemon PID %d" % pid
            sys.exit(0)
    except OSError:
        e = sys.exc_info()[1]
        sys.exit("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
    dev_null = open('/dev/null', 'w')
    os.dup2(dev_null.fileno(), sys.stdin.fileno())
    os.dup2(dev_null.fileno(), sys.stdout.fileno())
    os.dup2(dev_null.fileno(), sys.stderr.fileno())
# NB: this function copied from module_utils/json_utils.py. Ensure any changes are propagated there.
# FUTURE: AnsibleModule-ify this module so it's Ansiballz-compatible and can use the module_utils copy of this function.
def _filter_non_json_lines(data):
'''
Used to filter unrelated output around module JSON output, like messages from
tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
Filters leading lines before first line-starting occurrence of '{' or '[', and filter all
trailing lines after matching close character (working from the bottom of output).
'''
warnings = []
# Filter initial junk
lines = data.splitlines()
for start, line in enumerate(lines):
line = line.strip()
if line.startswith(u'{'):
endchar = u'}'
break
elif line.startswith(u'['):
endchar = u']'
break
else:
raise ValueError('No start of json char found')
# Filter trailing junk
lines = lines[start:]
for reverse_end_offset, line in enumerate(reversed(lines)):
if line.strip().endswith(endchar):
break
else:
raise ValueError('No end of json char found')
if reverse_end_offset > 0:
# Trailing junk is uncommon and can point to things the user might
# want to change. So print a warning if we find any
trailing_junk = lines[len(lines) - reverse_end_offset:]
warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
lines = lines[:(len(lines) - reverse_end_offset)]
return ('\n'.join(lines), warnings)
def _get_interpreter(module_path):
module_fd = open(module_path, 'rb')
try:
head = module_fd.read(1024)
if head[0:2] != '#!':
return None
return head[2:head.index('\n')].strip().split(' ')
finally:
module_fd.close()
def _run_module(wrapped_cmd, jid, job_path):
    """Execute the wrapped module command and record the outcome in job_path.

    A {"started": 1} marker is written first (atomically via os.rename) so
    pollers see progress immediately; the final JSON result -- or failure
    details -- then replaces it the same way.
    """
    tmp_job_path = job_path + ".tmp"
    jobfile = open(tmp_job_path, "w")
    jobfile.write(json.dumps({"started": 1, "finished": 0, "ansible_job_id": jid}))
    jobfile.close()
    os.rename(tmp_job_path, job_path)
    jobfile = open(tmp_job_path, "w")
    result = {}
    outdata = ''
    filtered_outdata = ''
    stderr = ''
    try:
        cmd = shlex.split(wrapped_cmd)
        # call the module interpreter directly (for non-binary modules)
        # this permits use of a script for an interpreter on non-Linux platforms
        interpreter = _get_interpreter(cmd[0])
        if interpreter:
            cmd = interpreter + cmd
        script = subprocess.Popen(cmd, shell=False, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (outdata, stderr) = script.communicate()
        if PY3:
            outdata = outdata.decode('utf-8', 'surrogateescape')
            stderr = stderr.decode('utf-8', 'surrogateescape')
        (filtered_outdata, json_warnings) = _filter_non_json_lines(outdata)
        result = json.loads(filtered_outdata)
        if json_warnings:
            # merge JSON junk warnings with any existing module warnings
            module_warnings = result.get('warnings', [])
            if not isinstance(module_warnings, list):
                module_warnings = [module_warnings]
            module_warnings.extend(json_warnings)
            result['warnings'] = module_warnings
        if stderr:
            result['stderr'] = stderr
        jobfile.write(json.dumps(result))
    except (OSError, IOError):
        # Launch failure (missing/unreadable module file etc.).
        e = sys.exc_info()[1]
        result = {
            "failed": 1,
            "cmd": wrapped_cmd,
            "msg": str(e),
            "outdata": outdata,  # temporary notice only
            "stderr": stderr
        }
        result['ansible_job_id'] = jid
        jobfile.write(json.dumps(result))
    except (ValueError, Exception):
        # ValueError: module produced non-JSON output; the broad Exception
        # ensures any failure still lands in the job file.
        result = {
            "failed": 1,
            "cmd": wrapped_cmd,
            "data": outdata,  # temporary notice only
            "stderr": stderr,
            "msg": traceback.format_exc()
        }
        result['ansible_job_id'] = jid
        jobfile.write(json.dumps(result))
    jobfile.close()
    os.rename(tmp_job_path, job_path)
if __name__ == '__main__':
if len(sys.argv) < 5:
print(json.dumps({
"failed": True,
"msg": "usage: async_wrapper <jid> <time_limit> <modulescript> <argsfile> [-preserve_tmp] "
"Humans, do not call directly!"
}))
sys.exit(1)
jid = "%s.%d" % (sys.argv[1], os.getpid())
time_limit = sys.argv[2]
wrapped_module = sys.argv[3]
argsfile = sys.argv[4]
if '-tmp-' not in os.path.dirname(wrapped_module):
preserve_tmp = True
elif len(sys.argv) > 5:
preserve_tmp = sys.argv[5] == '-preserve_tmp'
else:
preserve_tmp = False
# consider underscore as no argsfile so we can support passing of additional positional parameters
if argsfile != '_':
cmd = "%s %s" % (wrapped_module, argsfile)
else:
cmd = wrapped_module
step = 5
async_dir = os.environ.get('ANSIBLE_ASYNC_DIR', '~/.ansible_async')
# setup job output directory
jobdir = os.path.expanduser(async_dir)
job_path = os.path.join(jobdir, jid)
if not os.path.exists(jobdir):
try:
os.makedirs(jobdir)
except:
print(json.dumps({
"failed": 1,
"msg": "could not create: %s" % jobdir
}))
# immediately exit this process, leaving an orphaned process
# running which immediately forks a supervisory timing process
try:
pid = os.fork()
if pid:
# Notify the overlord that the async process started
# we need to not return immediately such that the launched command has an attempt
# to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile)
# this probably could be done with some IPC later. Modules should always read
# the argsfile at the very first start of their execution anyway
notice("Return async_wrapper task started.")
print(json.dumps({"started": 1, "finished": 0, "ansible_job_id": jid, "results_file": job_path,
"_ansible_suppress_tmpdir_delete": not preserve_tmp}))
sys.stdout.flush()
time.sleep(1)
sys.exit(0)
else:
# The actual wrapper process
# Daemonize, so we keep on running
daemonize_self()
# we are now daemonized, create a supervisory process
notice("Starting module and watcher")
sub_pid = os.fork()
if sub_pid:
# the parent stops the process after the time limit
remaining = int(time_limit)
# set the child process group id to kill all children
os.setpgid(sub_pid, sub_pid)
notice("Star |
to member list when a new group has been created
create_next = URL(f="group",
args=["[id]", "group_membership"])
teams_orgs = settings.get_hrm_teams_orgs()
if teams_orgs:
if teams_orgs == 1:
multiple = False
else:
multiple = True
ottable = s3db.org_organisation_team
label = ottable.organisation_id.label
ottable.organisation_id.label = ""
crud_form = S3SQLCustomForm("name",
"description",
S3SQLInlineComponent("organisation_team",
label = label,
fields = ["organisation_id"],
multiple = multiple,
),
"comments",
)
filter_widgets = [
S3TextFilter(["name",
"description",
"comments | ",
"organisation_team.organisation_id$name",
"organisation_team.organisation_id$acronym",
],
label = T("Search"),
comment = T("You can search by by group name, description or comments and by organization name or acronym. You may use % as | wildcard. Press 'Search' without input to list all."),
#_class="filter-search",
),
S3OptionsFilter("organisation_team.organisation_id",
label = T("Organization"),
#hidden=True,
),
]
list_fields = ["organisation_team.organisation_id",
"name",
"description",
"comments",
]
s3db.configure("pr_group",
create_next = create_next,
crud_form = crud_form,
filter_widgets = filter_widgets,
list_fields = list_fields,
)
else:
s3db.configure("pr_group",
create_next = create_next,
)
if r.interactive or r.representation in ("aadata", "xls", "pdf"):
if r.component_name == "group_membership":
hrm_configure_pr_group_membership()
if r.representation == "xls":
# Modify Title of Report to show Team Name
s3.crud_strings.pr_group_membership.title_list = r.record.name
# Make it match Import sheets
tablename = "pr_group_membership"
list_fields = s3db.get_config(tablename, "list_fields")
# Remove "id" as XLS exporter doesn't like this not being first & has complicated skipping routines
try:
list_fields.remove("id")
except ValueError:
pass
# Separate Facility Type from Facility Name
s3db.hrm_human_resource.site_id.represent = s3db.org_SiteRepresent(show_type = False)
i = 0
for f in list_fields:
i += 1
if f == "site_id":
break
list_fields.insert(i,
(T("Facility Type"),
"person_id$human_resource.site_id$instance_type"))
# Split person_id into first/middle/last
try:
list_fields.remove("person_id")
except ValueError:
pass
list_fields = ["person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
] + list_fields
s3db.configure(tablename,
list_fields = list_fields,
)
return True
s3.prep = prep
# Post-process
    def postp(r, output):
        """
        Post-process the group request: decorate the interactive list
        view with action buttons. ``output`` is passed through unchanged
        apart from these UI additions.
        """
        if r.interactive:
            if not r.component:
                # Main record list: the "open" button jumps straight to
                # the group's membership tab.
                update_url = URL(args=["[id]", "group_membership"])
                S3CRUD.action_buttons(r, update_url=update_url)
                # Add a "Send Message" action when the messaging module is
                # enabled and the user may update hrm/compose.
                if current.deployment_settings.has_module("msg") and \
                   current.auth.permission.has_permission("update", c="hrm",
                                                          f="compose"):
                    s3.actions.append({
                        "url": URL(f="compose",
                                   vars = {"group_id": "[id]"}),
                        "_class": "action-btn send",
                        "label": s3_str(T("Send Message"))})
        return output
s3.postp = postp
if team_name == "Team":
label = T("Team Details")
elif team_name == "Group":
label = T("Group Details")
else:
label = T("Basic Details")
tabs = [(label, None),
# Team should be contacted either via the Leader or
# simply by sending a message to the group as a whole.
#(T("Contact Data"), "contact"),
(T("Members"), "group_membership"),
(T("Documents"), "document"),
]
return current.rest_controller("pr", "group",
csv_stylesheet = ("hrm", "group.xsl"),
csv_template = "group",
rheader = lambda r: \
s3db.pr_rheader(r, tabs=tabs),
)
# =============================================================================
def hrm_human_resource_controller(extra_filter = None):
"""
Human Resources Controller, defined in the model for use from
multiple controllers for unified menus
- used for Summary & Profile views, Imports and S3AddPersonWidget
"""
T = current.T
db = current.db
s3db = current.s3db
s3 = current.response.s3
settings = current.deployment_settings
def prep(r):
# Apply extra filter from controller
if extra_filter is not None:
r.resource.add_filter(extra_filter)
c = r.controller
deploy = c == "deploy"
vol = c == "vol"
if deploy:
# Apply availability filter
s3db.deploy_availability_filter(r)
elif settings.get_hrm_unavailability():
# Apply availability filter
s3db.pr_availability_filter(r)
if s3.rtl:
# Ensure that + appears at the beginning of the number
# - using table alias to only apply to filtered component
f = s3db.get_aliased(s3db.pr_contact, "pr_phone_contact").value
f.represent = s3_phone_represent
f.widget = S3PhoneWidget()
method = r.method
if method in ("form", "lookup"):
return True
elif method == "profile":
# Adapt list_fields for pr_address
s3db.table("pr_address") # must load model before get_config
list_fields = s3db.get_config("pr_address", "list_fields")
list_fields.append("comments")
# Show training date without time
s3db.hrm_training.date.represent = lambda d: \
S3DateTime.date_represent(d, utc=True)
# Adapt list_fields for hrm_training
list_fields = ["course_id",
"training_event_id$site_id",
"date",
|
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
from os.path import dirname, abspath, join | , curdir
from nose.tools import assert_equals, with_setup
from tests.asserts import prepare_stdout
def test_imports_terrain_under_path_that_is_run():
    """Running from a feature dir must auto-import that dir's terrain.py.

    Spawns a fresh interpreter inside ``1st_feature_dir`` and asserts the
    ``world`` object picked up the attribute set by the local terrain file.
    NOTE(review): the spawned command uses a Python 2 ``print`` statement —
    this only passes when ``python`` on PATH is Python 2; confirm.
    """
    old_path = abspath(curdir)
    # chdir so the child interpreter resolves terrain.py from this dir;
    # restored at the end to avoid leaking state into other tests.
    os.chdir(join(abspath(dirname(__file__)), 'simple_features', '1st_feature_dir'))
    status, output = subprocess.getstatusoutput('python -c "from lettuce import world;assert hasattr(world, \'works_fine\'); print \'it passed!\'"')
    assert_equals(status, 0)
    assert_equals(output, "it passed!")
    os.chdir(old_path)
@with_setup(prepare_stdout)
def test_after_each_all_is_executed_before_each_all():
    "terrain.before.each_all and terrain.after.each_all decorators"
    # Imported lazily so decorator registration happens inside this test.
    from lettuce import step
    from lettuce import Runner
    from lettuce.terrain import before, after, world
    # Shared accumulator: each hook/step appends a marker so we can assert
    # the overall execution order afterwards.
    world.all_steps = []
    @before.all
    def set_state_to_before():
        world.all_steps.append('before')
    @step('append 1 in world all steps')
    def append_1_in_world_all_steps(step):
        world.all_steps.append("1")
    @step('append 2 more')
    def append_2_more(step):
        world.all_steps.append("2")
    @step('append 3 in world all steps')
    def append_during_to_all_steps(step):
        world.all_steps.append("3")
    @after.all
    def set_state_to_after(total):
        world.all_steps.append('after')
    # Run the canned feature dir; its steps match the patterns above.
    runner = Runner(join(abspath(dirname(__file__)), 'simple_features', '2nd_feature_dir'))
    runner.run()
    # before.all fires first, then the steps in order, then after.all.
    assert_equals(
        world.all_steps,
        ['before', '1', '2', '3', 'after']
    )
|
# Prefer setuptools; fall back to the stdlib distutils when unavailable.
try:
    from setuptools import setup, Extension
except ImportError:
    from distutils.core import setup, Extension

# Build configuration for the C-extension-backed bloom filter package.
setup(
    name='peloton_bloomfilters',
    author='Adam DePrince',
    author_email='adam@pelotoncycle.com',
    url='https://github.com/pelotoncycle/peloton_bloomfilters',
    version='0.0.1',
    description="Peloton Cycle's Bloomin fast Bloomfilters",
    ext_modules=[
        Extension(name='peloton_bloomfilters',
                  sources=['peloton_bloomfiltersmodule.c']),
    ],
)
|
from django.forms.fields import *
from corehq.apps.sms.forms import BackendForm
from dimagi.utils.django.fields import TrimmedCharField
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
class TelerivetBackendForm(BackendForm):
    """Admin form for configuring a Telerivet SMS backend."""

    api_key = TrimmedCharField()
    project_id = TrimmedCharField()
    phone_id = TrimmedCharField()
    webhook_secret = TrimmedCharField()

    def clean_webhook_secret(self):
        """Reject a webhook secret already used by a different backend."""
        # Imported here to break a circular import at module load time.
        from corehq.apps.telerivet.models import TelerivetBackend
        secret = self.cleaned_data.get("webhook_secret", None)
        existing = TelerivetBackend.by_webhook_secret(secret)
        if existing is not None and existing._id != self._cchq_backend_id:
            raise ValidationError(_("Already in use."))
        return secret
|
"""Render meshes using OpenDR.
Code is from:
https://github.com/akanazawa/hmr/blob/master/src/util/renderer.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import cv2
import numpy as np
from opendr.camera import ProjectPoints
from opendr.renderer import ColoredRenderer
from opendr.lighting import LambertianPointLight
# Palette of float color triples in [0, 1] used to tint rendered meshes.
# (Channel order depends on how OpenDR/callers interpret them — confirm.)
colors = {
    # colorbline/print/copy safe:
    'light_blue': [0.65098039, 0.74117647, 0.85882353],
    'light_pink': [.9, .7, .7], # This is used to do no-3d
}
class SMPLRenderer(object):
    """Render SMPL meshes with an OpenDR pinhole camera.

    Holds a default square image size, focal length and the SMPL face
    topology; both resolution and camera can be overridden per call.
    """

    def __init__(self, img_size=224, flength=500., face_path='smpl_faces.npy'):
        # Triangle indices of the SMPL template mesh, loaded once.
        self.faces = np.load(face_path)
        self.w = img_size
        self.h = img_size
        self.flength = flength

    def __call__(self,
                 verts,
                 cam=None,
                 img=None,
                 do_alpha=False,
                 far=None,
                 near=None,
                 color_id=0,
                 img_size=None):
        """Render ``verts`` and return a uint8 image.

        ``cam`` is a 3-vector ``[f, px, py]`` (focal length and principal
        point); when None it defaults to the image center.
        """
        # Resolve resolution: background image > explicit size > defaults.
        if img is not None:
            h, w = img.shape[:2]
        elif img_size is not None:
            h, w = img_size[0], img_size[1]
        else:
            h, w = self.h, self.w

        if cam is None:
            cam = [self.flength, w / 2., h / 2.]

        use_cam = ProjectPoints(f=cam[0] * np.ones(2),
                                rt=np.zeros(3),
                                t=np.zeros(3),
                                k=np.zeros(5),
                                c=cam[1:3])

        # Clip planes bracket the mesh depth range with a 25-unit margin.
        if near is None:
            near = np.maximum(np.min(verts[:, 2]) - 25, 0.1)
        if far is None:
            far = np.maximum(np.max(verts[:, 2]) + 25, 25)

        imtmp = render_model(verts,
                             self.faces,
                             w,
                             h,
                             use_cam,
                             do_alpha=do_alpha,
                             img=img,
                             far=far,
                             near=near,
                             color_id=color_id)
        return (imtmp * 255).astype('uint8')

    def rotated(self,
                verts,
                deg,
                cam=None,
                axis='y',
                img=None,
                do_alpha=True,
                far=None,
                near=None,
                color_id=0,
                img_size=None):
        """Render ``verts`` rotated ``deg`` degrees about ``axis`` ('x'/'y'/other=z)."""
        rad = math.radians(deg)
        if axis == 'y':
            rot_vec = np.array([0, rad, 0])
        elif axis == 'x':
            rot_vec = np.array([rad, 0, 0])
        else:
            rot_vec = np.array([0, 0, rad])
        around = cv2.Rodrigues(rot_vec)[0]
        # Rotate about the mesh centroid so the model stays in frame.
        center = verts.mean(axis=0)
        new_v = np.dot((verts - center), around) + center
        return self.__call__(new_v,
                             cam,
                             img=img,
                             do_alpha=do_alpha,
                             far=far,
                             near=near,
                             img_size=img_size,
                             color_id=color_id)
def _create_renderer(w=640,
                     h=480,
                     rt=np.zeros(3),
                     t=np.zeros(3),
                     f=None,
                     c=None,
                     k=None,
                     near=.5,
                     far=10.):
    """Build a ColoredRenderer with a pinhole camera and view frustum."""
    # Defaults: half-width focal length, image-center principal point,
    # zero distortion coefficients.
    if f is None:
        f = np.array([w, w]) / 2.
    if c is None:
        c = np.array([w, h]) / 2.
    if k is None:
        k = np.zeros(5)
    renderer = ColoredRenderer()
    renderer.camera = ProjectPoints(rt=rt, t=t, f=f, c=c, k=k)
    renderer.frustum = {'near': near, 'far': far, 'height': h, 'width': w}
    return renderer
def _rotateY(points, angle):
"""Rotate the points by a specified angle."""
ry = np.array([[np.cos(angle), 0., np.sin(angle)], [0., 1., 0.],
[-np.sin(angle), 0., np.cos(angle)]])
return np.dot(points, ry)
def simple_renderer(rn,
                    verts,
                    faces,
                    yrot=np.radians(120),
                    color=colors['light_pink']):
    """Set the mesh and three Lambertian point lights on ``rn`` and render.

    Mutates ``rn`` in place (vertices, faces, vertex colors) and returns
    ``rn.r`` — the rendered image produced by OpenDR. ``yrot`` rotates all
    light positions about the y-axis.
    """
    # Rendered model color
    rn.set(v=verts, f=faces, vc=color, bgcolor=np.ones(3))
    albedo = rn.vc
    # Construct Back Light (on back right corner)
    # First light overwrites vc; the next two are summed onto it.
    rn.vc = LambertianPointLight(
        f=rn.f,
        v=rn.v,
        num_verts=len(rn.v),
        light_pos=_rotateY(np.array([-200, -100, -100]), yrot),
        vc=albedo,
        light_color=np.array([1, 1, 1]))
    # Construct Left Light
    rn.vc += LambertianPointLight(
        f=rn.f,
        v=rn.v,
        num_verts=len(rn.v),
        light_pos=_rotateY(np.array([800, 10, 300]), yrot),
        vc=albedo,
        light_color=np.array([1, 1, 1]))
    # Construct Right Light
    rn.vc += LambertianPointLight(
        f=rn.f,
        v=rn.v,
        num_verts=len(rn.v),
        light_pos=_rotateY(np.array([-500, 500, 1000]), yrot),
        vc=albedo,
        light_color=np.array([.7, .7, .7]))
    return rn.r
def get_alpha(imtmp, bgval=1.):
    """Return ``imtmp`` with an alpha channel appended.

    Alpha is 1 where any channel differs from the background value
    ``bgval`` and 0 for pure-background pixels, in the image's own dtype.
    """
    alpha = (~np.all(imtmp == bgval, axis=2)).astype(imtmp.dtype)
    # np.dstack appends the channel directly; the original cv2.split /
    # cv2.merge round-trip (and its unused h, w locals) was equivalent
    # but copied every channel.
    return np.dstack((imtmp, alpha))
def append_alpha(imtmp):
    """Return ``imtmp`` with a fully-opaque alpha channel appended.

    Opaque is 255 for uint8 images, 1 otherwise (matching the dtype).
    """
    alpha = np.ones_like(imtmp[:, :, 0]).astype(imtmp.dtype)
    if np.issubdtype(imtmp.dtype, np.uint8):
        alpha = alpha * 255
    # np.dstack replaces the cv2.split/cv2.merge round-trip with one
    # vectorized concatenation; output values are identical.
    return np.dstack((imtmp, alpha))
def render_model(verts,
                 faces,
                 w,
                 h,
                 cam,
                 near=0.5,
                 far=25,
                 img=None,
                 do_alpha=False,
                 color_id=None):
    """Render ``verts``/``faces`` through ``cam`` onto a w x h image.

    ``img`` (if given) becomes the background; otherwise the background is
    white. ``color_id`` indexes into the module-level ``colors`` palette;
    None selects 'light_blue'. With ``do_alpha`` an alpha channel is added
    (transparent background when no ``img``).
    """
    rn = _create_renderer(
        w=w, h=h, near=near, far=far, rt=cam.rt, t=cam.t, f=cam.f, c=cam.c)

    # Uses img as background, otherwise white background.
    if img is not None:
        rn.background_image = img / 255. if img.max() > 1 else img

    if color_id is None:
        color = colors['light_blue']
    else:
        # list() is required on Python 3: dict.values() returns a
        # non-indexable view object.
        color_list = list(colors.values())
        color = color_list[color_id % len(color_list)]

    imtmp = simple_renderer(rn, verts, faces, color=color)

    # If white bg, make transparent.
    if img is None and do_alpha:
        imtmp = get_alpha(imtmp)
    elif img is not None and do_alpha:
        imtmp = append_alpha(imtmp)
    return imtmp
|
# --coding: utf8--
import requests
from django.contrib.gis.db import models
from django.contrib.gis.geos import GEOSGeometry
class Country(models.Model):
    """Country reference model (original docstring: "Модель страны.")."""
    # NOTE: verbose_name strings are user-facing Russian labels and are
    # deliberately left untranslated.
    title = models.CharField(
        u'название', max_length=255)
    class Meta:
        verbose_name = u'страна'
        verbose_name_plural = u'страны'
        ordering = ['title']
    def __unicode__(self):
        # Admin/display representation is just the country title.
        return self.title
class BaseAddress(models.Model):
    """
    Base address model with GEO data
    (original docstring: "Базовый класс адреса с ГЕО данными.").
    """
    # NOTE: verbose_name strings are user-facing Russian labels and are
    # deliberately left untranslated.
    country = models.ForeignKey(
        Country,
        verbose_name=u'страна')
    area = models.CharField(
        u'область', max_length=255, blank=True)
    subarea = models.CharField(
        u'район', max_length=255, blank=True)
    locality = models.CharField(
        u'населенный пункт', max_length=255)
    street = models.CharField(
        u'улица', max_length=255, blank=True)
    house = models.CharField(
        u'дом', max_length=50, blank=True)
    apartment = models.CharField(
        u'офис', max_length=10, blank=True)
    zip = models.CharField(
        u'почтовый индекс', max_length=10, blank=True)
    coordinates = models.PointField(
        u'координаты', blank=True, null=True) # lat/long (original comment: "широта долгота"; fetch_coordinates actually stores POINT(lon lat) — verify)
    # Use GeoManager so GEO queries are available on this model.
    objects = models.GeoManager()
    class Meta:
        verbose_name = u'адрес'
        verbose_name_plural = u'адреса'
    def __unicode__(self):
        # Comma-joined full address, skipping empty parts.
        return ', '.join(part for part in [self.zip, self.country.title,
                                           self.area, self.subarea,
                                           self.locality, self.street,
                                           self.house] if part)
    def fetch_coordinates(self):
        """
        Query the object's coordinates from the Yandex geocoder
        (original docstring: "Запрос координатов объекта с Яндекса.").
        Returns a GEOSGeometry point, or None on network/parse failure.
        """
        query = ',+'.join(
            part for part in [self.country.title, self.area, self.subarea,
                              self.locality, self.street, self.house] if part)
        url = u'http://geocode-maps.yandex.ru/1.x/?geocode=%s&format=json' % (
            query)
        try:
            r = requests.get(url).json()
        except requests.exceptions.RequestException:
            # Best-effort lookup: network problems yield no coordinates.
            return None
        try:
            # Yandex returns "longitude latitude" in the 'pos' field.
            longitude, latitude = (r['response']['GeoObjectCollection']
                                   ['featureMember'][0]['GeoObject']['Point']
                                   ['pos']).split(' ')
            return GEOSGeometry(U'POINT(%s %s)' % (longitude, latitude))
        except (KeyError, IndexError):
            # No match found / unexpected response shape.
            return None
    def get_short_address(self):
        # Abbreviated "area, locality" form for compact display.
        return ', '.join(part for part in [self.area, self.locality] if part)
class Region(models.Model):
    """
    Geographic region as a named polygon
    (original docstring: "Класс для географического региона.").
    """
    # NOTE: verbose_name strings are user-facing Russian labels and are
    # deliberately left untranslated.
    name = models.CharField(u'название', max_length=255)
    coordinates = models.PolygonField(u'координаты')
    # Use GeoManager so GEO queries are available on this model.
    objects = models.GeoManager()
    class Meta:
        verbose_name = u'регион'
        verbose_name_plural = u'регионы'
        ordering = ['name']
    def __unicode__(self):
        return self.name
|
from distutils.core import setup
from distutils.extension import Extension
|
# Package metadata and the C extension for fast integral-image queries.
setup(
    name='wordcloud',
    version='1.1.3',
    url='https://github.com/amueller/word_cloud',
    description='A little word cloud generator',
    license='MIT',
    packages=['wordcloud'],
    # Bundled stopword list and default font.
    package_data={'wordcloud': ['stopwords', 'DroidSansMono.ttf']},
    ext_modules=[
        Extension("wordcloud.query_integral_image",
                  ["wordcloud/query_integral_image.c"]),
    ],
)
|
# Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
import os.path
import sys
import tempfile
import itertools
import unittest
try:
import flask.ext.autodoc
except ImportError as e:
raise unittest.SkipTest('Flask-Autodoc not installed')
try:
import digits
except ImportError:
# Add path for DIGITS package
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import digits.config; digits.config.load_config()
from digits.webapp import app, _doc as doc
from . import generate_docs as _
def check_doc_file(generator, doc_filename):
    """
    Checks that the output generated by generator matches the contents of doc_filename

    Regenerates the docs into a temp file and compares it line-by-line with
    the checked-in file; the '*Generated' timestamp line is allowed to
    differ. Raises RuntimeError on any other mismatch.
    NOTE(review): uses itertools.izip and print statements — Python 2 only.
    """
    with tempfile.NamedTemporaryFile(suffix='.md') as tmp_file:
        generator.generate(tmp_file.name)
        tmp_file.seek(0)
        with open(doc_filename) as doc_file:
            # memory friendly
            for doc_line, tmp_line in itertools.izip(doc_file, tmp_file):
                doc_line = doc_line.strip()
                tmp_line = tmp_line.strip()
                if doc_line.startswith('*Generated') and \
                        tmp_line.startswith('*Generated'):
                    # If the date is different, that's not a problem
                    pass
                elif doc_line != tmp_line:
                    print '(Previous)', doc_line
                    print '(New)     ', tmp_line
                    raise RuntimeError('%s needs to be regenerated. Use scripts/generate_docs.py' % doc_filename)
def test_api_md():
    """API.md out-of-date"""
    # Regenerate the API docs inside an app context and diff against the
    # checked-in docs/API.md.
    docs_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'docs')
    with app.app_context():
        check_doc_file(_.ApiDocGenerator(doc),
                       os.path.join(docs_dir, 'API.md'))
def test_flask_routes_md():
    """FlaskRoutes.md out-of-date"""
    # Same check as test_api_md, but for the Flask route listing.
    docs_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'docs')
    with app.app_context():
        check_doc_file(_.FlaskRoutesDocGenerator(doc),
                       os.path.join(docs_dir, 'FlaskRoutes.md'))
|
# -*- coding:utf8 -*-
# Tiny demo: add two integers and print the sum.
a = 3
b = 4
# Parenthesized call form works on both Python 2 (prints the expression)
# and Python 3 (builtin print function); the bare statement form was
# Python-2-only syntax.
print(a + b)
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import eng_notation
import math
class abstract_converter(object):
    """Interface for converting between user-facing ("external") values and
    the representation an underlying GUI primitive stores ("internal",
    usually a string). Subclasses override both directions."""

    def external_to_internal(self, v):
        """Convert a user-specified value into the primitive's representation."""
        raise NotImplementedError

    def internal_to_external(self, s):
        """Convert the primitive's representation back into a user value."""
        raise NotImplementedError

    def help(self):
        # Default guidance shown to the user; subclasses narrow this.
        return "Any string is acceptable"
class identity_converter(abstract_converter):
    """Pass values through unchanged in both directions."""

    def external_to_internal(self, v):
        return v

    def internal_to_external(self, s):
        return s
########################################################################
# Commonly used converters
########################################################################
class chooser_converter(abstract_converter):
    """Map between a fixed set of choices and their list index.

    Used by the chooser base class and all of its sub-classes.
    """

    def __init__(self, choices):
        # Materialize as a list: tuples have no .index() on python2.5.
        self._choices = list(choices)

    def external_to_internal(self, choice):
        # Position of the selected choice within the choice list.
        return self._choices.index(choice)

    def internal_to_external(self, index):
        return self._choices[index]

    def help(self):
        return 'Enter a possible value in choices: "%s"'%str(self._choices)
class bool_converter(abstract_converter):
    """Map between a boolean internal state and two configured external
    values (used by the check box form)."""

    def __init__(self, true, false):
        # External values to emit for the two boolean states.
        self._true = true
        self._false = false

    def external_to_internal(self, v):
        return bool(v)

    def internal_to_external(self, v):
        # Truthiness selects which configured external value to report.
        return self._true if v else self._false

    def help(self):
        return "Value must be cast-able to type bool."
class eval_converter(abstract_converter) | :
"""
A catchall converter when int and float are not enough.
Evaluate the internal representation with python's eval().
Possible uses, set a complex number, constellation points.
Used in text box.
"""
def __init__(self, formatter=lambda x: '%s'%(x)):
self._formatter = formatter
def external_to_internal(self, v):
return self._formatter(v)
def internal_to_external(self, s):
return eval(s)
def help(self):
return "Value must be evaluatable by python's eval."
class str_converter(abstract_converter):
    """Format outbound values via the configured formatter; cast inbound
    values with str()."""

    def __init__(self, formatter=lambda x: '%s'%(x)):
        self._formatter = formatter

    def external_to_internal(self, v):
        return self._formatter(v)

    def internal_to_external(self, s):
        return str(s)
class int_converter(abstract_converter):
    """Round-trip integers through the primitive's string representation."""

    def __init__(self, formatter=lambda x: '%d'%round(x)):
        self._formatter = formatter

    def external_to_internal(self, v):
        return self._formatter(v)

    def internal_to_external(self, s):
        # Base 0: honors 0x/0o/0b prefixes in the entered string.
        return int(s, 0)

    def help(self):
        return "Enter an integer. Leading 0x indicates hex"
class float_converter(abstract_converter):
    """Round-trip floats through engineering-notation strings
    (e.g. "100.1M")."""

    def __init__(self, formatter=eng_notation.num_to_str):
        self._formatter = formatter

    def external_to_internal(self, v):
        return self._formatter(v)

    def internal_to_external(self, s):
        # Parses an optional SI scale suffix back into a float.
        return eng_notation.str_to_num(s)

    def help(self):
        return "Enter a float with optional scale suffix. E.g., 100.1M"
class slider_converter(abstract_converter):
    """Affinely scale values between the external range
    [minimum, maximum] and the slider's integer step index."""

    def __init__(self, minimum, maximum, num_steps, cast):
        assert minimum < maximum
        assert num_steps > 0
        # external = internal * scaler + offset
        self._offset = minimum
        self._scaler = float(maximum - minimum)/num_steps
        self._cast = cast

    def external_to_internal(self, v):
        #slider's internal representation is an integer
        return int(round((v - self._offset)/self._scaler))

    def internal_to_external(self, v):
        # Cast restores the caller's numeric type (e.g. int or float).
        return self._cast(v*self._scaler + self._offset)

    def help(self):
        return "Value should be within slider range"
class log_slider_converter(slider_converter):
    """Slider converter operating on a logarithmic scale: the slider moves
    linearly in the exponent while external values are base**exponent."""

    def __init__(self, min_exp, max_exp, num_steps, base):
        assert min_exp < max_exp
        assert num_steps > 0
        self._base = base
        # The linear parent converter works in exponent space.
        slider_converter.__init__(self, minimum=min_exp, maximum=max_exp, num_steps=num_steps, cast=float)

    def external_to_internal(self, v):
        # Take the log first, then map linearly onto slider steps.
        return slider_converter.external_to_internal(self, math.log(v, self._base))

    def internal_to_external(self, v):
        # Inverse: linear step -> exponent -> base**exponent.
        return self._base**slider_converter.internal_to_external(self, v)
|
lf._owner_level = 0
self._prefetch = prefetch
self._track_last_enqueued_event_properties = (
track_last_enqueued_event_properties
)
self._id = str(uuid.uuid4())
self._internal_kwargs = get_dict_with_loop_if_needed(loop)
self._running = False
self._consumers = {} # type: Dict[str, EventHubConsumer]
self._ownership_manager = OwnershipManager(
cast("EventHubConsumerClient", self._eventhub_client),
self._consumer_group,
self._id,
self._checkpoint_store,
self._ownership_timeout,
self._load_balancing_strategy,
self._partition_id,
)
def __repr__(self) -> str:
return "EventProcessor: id {}".format(self._id)
    async def _cancel_tasks_for_partitions(
        self, to_cancel_partitions: Iterable[str]
    ) -> None:
        """Cancel the receive task of each partition in ``to_cancel_partitions``.

        If a task is cancelled before its consumer was ever created, the
        task entry is dropped immediately; otherwise cleanup happens when
        the consumer closes.
        """
        _LOGGER.debug(
            "EventProcessor %r tries to cancel partitions %r",
            self._id,
            to_cancel_partitions
        )
        for partition_id in to_cancel_partitions:
            task = self._tasks.get(partition_id)
            if task:
                task.cancel()
                _LOGGER.info(
                    "EventProcessor %r has cancelled partition %r",
                    self._id,
                    partition_id
                )
                if partition_id not in self._consumers:  # task is cancelled before the consumer is created
                    del self._tasks[partition_id]
    def _create_tasks_for_claimed_ownership(
        self,
        claimed_partitions: Iterable[str],
        checkpoints: Optional[Dict[str, Dict[str, Any]]] = None,
    ) -> None:
        """Start a receive task for each newly claimed partition.

        Skips partitions that already have a live task; only schedules new
        tasks while the processor is running. ``checkpoints`` (if given)
        supply each partition's starting position.
        """
        _LOGGER.debug(
            "EventProcessor %r tries to claim partition %r",
            self._id,
            claimed_partitions
        )
        for partition_id in claimed_partitions:
            if partition_id not in self._tasks or self._tasks[partition_id].done():
                checkpoint = checkpoints.get(partition_id) if checkpoints else None
                if self._running:
                    self._tasks[partition_id] = get_running_loop().create_task(
                        self._receive(partition_id, checkpoint)
                    )
                    _LOGGER.info(
                        "EventProcessor %r has claimed partition %r",
                        self._id,
                        partition_id
                    )
    async def _process_error(
        self, partition_context: PartitionContext, err: Exception
    ) -> None:
        """Forward ``err`` to the user's on_error handler, if one is set.

        Exceptions raised by the handler itself are logged and swallowed
        so an error callback can never crash the processor.
        """
        if self._error_handler:
            try:
                await self._error_handler(partition_context, err)
            except Exception as err_again:  # pylint:disable=broad-except
                _LOGGER.warning(
                    "EventProcessor instance %r of eventhub %r partition %r consumer group %r. "
                    "An error occurred while running on_error. The exception is %r.",
                    self._id,
                    partition_context.eventhub_name,
                    partition_context.partition_id,
                    partition_context.consumer_group,
                    err_again,
                )
    async def _close_partition(
        self, partition_context: PartitionContext, reason: CloseReason
    ) -> None:
        """Invoke the user's on_partition_close handler, if one is set.

        Handler exceptions are logged and routed to ``_process_error``
        rather than propagated, so closing always completes.
        """
        _LOGGER.info(
            "EventProcessor instance %r of eventhub %r partition %r consumer group %r"
            " is being closed. Reason is: %r",
            self._id,
            partition_context.eventhub_name,
            partition_context.partition_id,
            partition_context.consumer_group,
            reason,
        )
        if self._partition_close_handler:
            try:
                await self._partition_close_handler(partition_context, reason)
            except Exception as err:  # pylint:disable=broad-except
                _LOGGER.warning(
                    "EventProcessor instance %r of eventhub %r partition %r consumer group %r. "
                    "An error occurred while running on_partition_close. The exception is %r.",
                    self._id,
                    partition_context.eventhub_name,
                    partition_context.partition_id,
                    partition_context.consumer_group,
                    err,
                )
                await self._process_error(partition_context, err)
    async def _on_event_received(
        self, partition_context: PartitionContext, event: Union[Optional[EventData], List[EventData]]
    ) -> None:
        """Record the last received event and dispatch to the user handler.

        ``event`` may be a single EventData, a list of them (batch mode),
        or a falsy value (e.g. max_wait_time expired with nothing to
        deliver) — the handler is still called in that case, without a
        tracing context.
        """
        if event:
            try:
                # Batch case: remember the newest event in the list.
                partition_context._last_received_event = event[-1]  # type: ignore #pylint:disable=protected-access
            except TypeError:
                # Single-event case: EventData is not subscriptable.
                partition_context._last_received_event = event  # type: ignore # pylint:disable=protected-access
            links = get_event_links(event)
            with self._context(links=links):
                await self._event_handler(partition_context, event)
        else:
            await self._event_handler(partition_context, event)
    async def _close_consumer(self, partition_context):
        """Close a partition's consumer and release its ownership.

        Fires the user's close callback with OWNERSHIP_LOST while still
        running, SHUTDOWN otherwise; the task entry is removed even if
        closing raises.
        """
        partition_id = partition_context.partition_id
        try:
            await self._consumers[partition_id].close()
            del self._consumers[partition_id]
            await self._close_partition(
                partition_context,
                CloseReason.OWNERSHIP_LOST if self._running else CloseReason.SHUTDOWN,
            )
            await self._ownership_manager.release_ownership(partition_id)
        finally:
            if partition_id in self._tasks:
                del self._tasks[partition_id]
async def _receive(
self, partition_id: str, checkpoint: Optional[Dict[str, Any]] = None
) -> None: # pylint: disable=too-many-statements
try: # pylint:disable=too-many-nested-blocks
_LOGGER.info("start ownership %r, checkpoint %r", partition_id, checkpoint)
(
initial_event_position,
event_position_inclusive,
) = self.get_init_event_position(partition_id, checkpoint)
if partition_id in self._partition_contexts:
partition_context = self._partition_contexts[partition_id]
partition_context._last_received_event = None # pylint:disable=protected-access
else:
partition_context = PartitionContext(
self._namespace,
self._eventhub_name,
self._consumer_group,
partition_id,
self._checkpoint_store,
)
self._partition_contexts[partition_id] = partition_context
event_received_callback = partial(
self._on_event_received, partition_context
)
self._consumers[partition_id] = self.create_consumer( # type: ignore
partition_id,
initial_event_position,
event_position_inclusive,
event_received_callback, # type: ignore
)
if self._partition_initialize_handler:
try:
await self._partition_initialize_handler(partition_context)
except Exception as err: # pylint:disable=broad-except
_LOGGER.warning(
"EventProcessor instance %r of eventhub %r partition %r consumer group %r. "
"An error occurred while running on_partition_initialize. The exception is %r.",
self._id,
self._eventhub_name,
partition_id,
self._consumer_group,
err,
)
await self._process_error(partition_context, err)
while self._running:
try:
await self._consumers[partition_id].receive(
self._batch, self._max_batch_size, self._max_wait_time
)
except asyncio.CancelledError:
_LO |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Error(msrest.serialization.Model):
    """Error payload of the management partner operations.

    :param error: The ExtendedErrorInfo property.
    :type error: ~azure.mgmt.managementpartner.models.ExtendedErrorInfo
    """

    _attribute_map = {
        'error': {'key': 'error', 'type': 'ExtendedErrorInfo'},
    }

    def __init__(self, **kwargs):
        super(Error, self).__init__(**kwargs)
        self.error = kwargs.get('error')
class ExtendedErrorInfo(msrest.serialization.Model):
    """Extended error information.

    :param code: The error response code. Possible values include:
     "NotFound", "Conflict", "BadRequest".
    :type code: str or ~azure.mgmt.managementpartner.models.ErrorResponseCode
    :param message: The extended error info message.
    :type message: str
    """

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ExtendedErrorInfo, self).__init__(**kwargs)
        self.code = kwargs.get('code')
        self.message = kwargs.get('message')
class OperationDisplay(msrest.serialization.Model):
    """Display information for a management partner operation.

    :param provider: The management partner provider.
    :type provider: str
    :param resource: The management partner resource.
    :type resource: str
    :param operation: The management partner operation.
    :type operation: str
    :param description: The management partner operation description.
    :type description: str
    """

    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(OperationDisplay, self).__init__(**kwargs)
        # All display fields are optional keyword arguments.
        self.provider = kwargs.get('provider')
        self.resource = kwargs.get('resource')
        self.operation = kwargs.get('operation')
        self.description = kwargs.get('description')
class OperationList(msrest.serialization.Model):
    """Paged list of management partner operations.

    :param value: The operation response list.
    :type value: list[~azure.mgmt.managementpartner.models.OperationResponse]
    :param next_link: Url to get the next page of items.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[OperationResponse]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(OperationList, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class OperationResponse(msrest.serialization.Model):
    """A single management partner operation entry.

    :param name: The operation response name.
    :type name: str
    :param display: The operation display information.
    :type display: ~azure.mgmt.managementpartner.models.OperationDisplay
    :param origin: The operation response origin information.
    :type origin: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display': {'key': 'display', 'type': 'OperationDisplay'},
        'origin': {'key': 'origin', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(OperationResponse, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.display = kwargs.get('display')
        self.origin = kwargs.get('origin')
class PartnerResponse(msrest.serialization.Model):
    """this is the management partner operations response.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param etag: Type of the partner.
    :type etag: int
    :ivar id: Identifier of the partner.
    :vartype id: str
    :ivar name: Name of the partner.
    :vartype name: str
    :ivar type: Type of resource. "Microsoft.ManagementPartner/partners".
    :vartype type: str
    :param partner_id: This is the partner id.
    :type partner_id: str
    :param partner_name: This is the partner name.
    :type partner_name: str
    :param tenant_id: This is the tenant id.
    :type tenant_id: str
    :param object_id: This is the object id.
    :type object_id: str
    :param version: This is the version.
    :type version: int
    :param updated_time: This is the DateTime when the partner was updated.
    :type updated_time: ~datetime.datetime
    :param created_time: This is the DateTime when the partner was created.
    :type created_time: ~datetime.datetime
    :param state: This is the partner state. Possible values include: "Active", "Deleted".
    :type state: str or ~azure.mgmt.managementpartner.models.ManagementPartnerState
    """

    # Read-only attributes are populated by the service and never sent.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'etag': {'key': 'etag', 'type': 'int'},
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'partner_id': {'key': 'properties.partnerId', 'type': 'str'},
        'partner_name': {'key': 'properties.partnerName', 'type': 'str'},
        'tenant_id': {'key': 'properties.tenantId', 'type': 'str'},
        'object_id': {'key': 'properties.objectId', 'type': 'str'},
        'version': {'key': 'properties.version', 'type': 'int'},
        'updated_time': {'key': 'properties.updatedTime', 'type': 'iso-8601'},
        'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601'},
        'state': {'key': 'properties.state', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PartnerResponse, self).__init__(**kwargs)
        self.etag = kwargs.get('etag')
        # Server-populated, read-only attributes start out empty.
        self.id = None
        self.name = None
        self.type = None
        for field in ('partner_id', 'partner_name', 'tenant_id', 'object_id',
                      'version', 'updated_time', 'created_time', 'state'):
            setattr(self, field, kwargs.get(field))
|
from django.http.response i | mport HttpResponse
from django.shortcuts import rende | r_to_response, render
from Browser.models import UserInfo
from Browser.views import cellar, administrator
def simple_response(request, *args, **kwargs):
    """Render a static template, optionally prefixed by a type subdirectory.

    ``kwargs['path']`` names the template; a truthy ``kwargs['type']`` is used
    as a subdirectory prefix. The rendering context exposes the requesting
    user's role flags for the template to branch on.
    """
    template_name = kwargs["path"]
    if kwargs["type"]:
        template_name = kwargs["type"] + "/" + template_name
    user_info = UserInfo.getUserInfo(request)
    context = {
        "isMetic": user_info.isMetic(),
        "isYeoman": user_info.isYeoman(),
        "isAdmin": user_info.isAdmin(),
        "isSuper": user_info.isSuper(),
    }
    # render() already returns an HttpResponse; wrapping it in a second
    # HttpResponse re-serialized the body and discarded the original
    # response's status code and headers, so return it directly.
    return render(request, template_name, context)
import logging
import shlex
import subprocess
import json
from airflow.hooks.aws_emr import EMRHook
from airflow.hooks.S3_hook import S3Hook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.exceptions import AirflowException
from slackclient import SlackClient
from time import sleep
import os
class AwsEMROperator(BaseOperator):
    """Create, monitor and/or terminate an AWS EMR cluster.

    The operator optionally shells out to ``aws emr create-cluster`` (the
    argv is built from ``command_args``), pushes the resulting cluster id
    over XCom, polls the cluster state through :class:`EMRHook`, and posts
    progress updates to Slack (token read from the ``SLACK_API_TOKEN``
    environment variable).
    """

    ui_color = '#00BFFF'
    sc = None  # SlackClient, created lazily by slack_connect()

    @apply_defaults
    def __init__(
            self,
            event_xcoms=None,
            aws_emr_conn_id='aws_default',
            xcom_push=True,
            command_args=None,
            channel="#airflow_notifications",
            download_these_files=None,
            start_cluster=False,
            terminate_cluster=False,
            xcom_task_id="job_runner",
            dn_dir="./tmp",
            username='airflow',
            method='chat.postMessage',
            icon_url='https://raw.githubusercontent.com/airbnb/airflow/master/airflow/www/static/pin_100.png',
            *args,
            **kwargs):
        """
        Start by just invoking something.

        :param command_args: list of (key, value) pairs appended to
            ``aws emr create-cluster``.
        :param download_these_files: list of (bucket, key) S3 objects to
            download into ``dn_dir`` before doing anything else.
        """
        super(AwsEMROperator, self).__init__(*args, **kwargs)
        self.channel = channel
        self.username = username
        self.icon_url = icon_url
        # Use None sentinels instead of mutable default arguments so the
        # defaults are not shared between operator instances.
        self.download_these_files = (
            download_these_files if download_these_files is not None else [])
        self.conn_id = aws_emr_conn_id
        # Honor the caller-supplied Slack API method (it was previously
        # overwritten unconditionally with the default value).
        self.method = method
        self.command_args = command_args if command_args is not None else [[]]
        self.start_cluster = start_cluster
        self.terminate_cluster = terminate_cluster
        self.dn_dir = dn_dir
        self.xcom_task_id = xcom_task_id

    def slack_connect(self):
        """Create the SlackClient from the token cached on the instance."""
        self.sc = SlackClient(self.token)

    def slack_message(self, text):
        """Post ``text`` to the configured Slack channel."""
        self.token = os.environ["SLACK_API_TOKEN"]
        if not self.sc:
            self.slack_connect()
        api_params = {
            'channel': self.channel,
            'username': self.username,
            'text': text,
            'icon_url': self.icon_url,
            'link_names': 1
        }
        self.sc.api_call(self.method, **api_params)

    def construct_command(self):
        """Build the ``aws emr create-cluster`` argv from command_args."""
        command = "aws emr create-cluster"
        for key, value in self.command_args:
            command = command + " " + key + " " + value
        logging.info("Command is: " + command)
        return shlex.split(command)

    def exec_command(self, command):
        """Run ``command`` and return the ``ClusterId`` from its JSON stdout.

        :raises AirflowException: if the command wrote anything to stderr.
        """
        p = subprocess.Popen(
            command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        if stderr != b'':
            logging.info("Non zero exit code.")
            logging.info(stderr)
            # Decode first: concatenating str + bytes raised a TypeError on
            # Python 3 and masked the real error message.
            raise AirflowException("The return code is non zero: " +
                                   stderr.decode("utf-8", "replace"))
        # communicate() returns bytes on Python 3 (str on Python 2);
        # normalize to text before parsing the JSON payload.
        if isinstance(stdout, bytes):
            stdout = stdout.decode("utf-8")
        output = json.loads(stdout.replace("\n", ""))["ClusterId"]
        logging.info("output_id: " + output)
        return output

    def execute(self, context):
        """Download support files, start/terminate the cluster as configured,
        then poll its state and notify Slack on every transition.
        """
        s3_hook = S3Hook()
        for bucket, key in self.download_these_files:
            basename = os.path.basename(key)
            local_path = os.path.join(self.dn_dir, basename)
            logging.info("Downloading s3://%s/%s to %s", bucket, key, local_path)
            s3_hook.download_file(bucket, key, local_path)
        job_monitor = EMRHook(emr_conn_id="S3_default")
        if self.start_cluster:
            output_id = self.exec_command(self.construct_command())
            context['ti'].xcom_push(key="code", value=output_id)
        if self.terminate_cluster:
            output_id = context['ti'].xcom_pull(
                task_id=self.xcom_task_id, key="code")
            self.slack_message("""
            @channel\n ----------------------------------------\nThe Cluster is being terminated for this job. \n ----------------------------------------\nProcess id = %s
            """ % output_id)
            job_monitor.terminate_job(output_id)
        # NOTE(review): output_id is only bound when start_cluster or
        # terminate_cluster is set; with both False the code below raises
        # NameError — confirm that configuration is never used.
        self.slack_message("""
        @channel
        The task Id of the new job is: %s
        """ %
                           output_id)
        while True:
            if job_monitor.isRunning(output_id):
                # Cluster still busy: poll again in 15 minutes.
                sleep(900)
            elif job_monitor.isSuccessfull(output_id):
                self.slack_message("""
                @channel\n ----------------------------------------\nThe process is Successful.\n Manual check is always a good thing. \n ----------------------------------------\nProcess id = %s
                """ % output_id)
                break
            elif job_monitor.isTerminated(output_id):
                self.slack_message("""
                @channel\n ----------------------------------------\nThe process has been terminated\n ----------------------------------------\nProcess id = %s
                """ % output_id)
                raise AirflowException("The process is terminated")
            elif job_monitor.isWaiting(output_id):
                self.slack_message("""
                @channel\n ----------------------------------------\nThe process is WAITING\n ----------------------------------------\nProcess id = %s
                """ % output_id)
                raise AirflowException(
                    "Somebody needs to go see whats up. Spark Job is in Waiting State for id: %s" % output_id)
            else:
                # Unknown state: back off briefly and re-check.
                sleep(300)
# def slack_message():
# token = os.environ["SLACK_API_TOKEN"]
# sc = SlackClient(token)
# api_params = {
# 'channel': "airflow_notifications",
# 'username': "airflow",
# 'text': "ssup @channel",
# 'icon_url': 'https://raw.githubusercontent.com/airbnb/airflow/master/airflow/www/static/pin_100.png',
# 'link_names': 1
# }
# sc.api_call("chat.postMessage", **api_params)
|
import unittest
import chainer
from chainer import testing
from chainer.testing import attr
from chainercv.links.model.deeplab import SeparableASPP
class TestSeparableASPP(unittest.TestCase):
    """Shape/type checks for the SeparableASPP link."""

    def setUp(self):
        self.in_channels = 128
        self.out_channels = 32
        self.link = SeparableASPP(
            self.in_channels, self.out_channels)

    def check_call(self):
        # Push a random minibatch through the link and validate the output.
        xp = self.link.xp
        batch = xp.random.uniform(
            low=-1, high=1, size=(2, self.in_channels, 64, 64)
        ).astype(xp.float32)
        x = chainer.Variable(batch)
        y = self.link(x)
        self.assertIsInstance(y, chainer.Variable)
        self.assertIsInstance(y.data, xp.ndarray)
        self.assertEqual(y.shape, (2, self.out_channels, 64, 64))

    @attr.slow
    def test_call_cpu(self):
        self.check_call()

    @attr.gpu
    @attr.slow
    def test_call_gpu(self):
        self.link.to_gpu()
        self.check_call()
# Allow running this test file directly via chainer's test runner.
testing.run_module(__name__, __file__)
|
o the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import logging

from prettytable import PrettyTable
import six
from six.moves import zip

from st2client import formatters
from st2client.utils import strutil
from st2client.utils.terminal import get_terminal_size
LOG = logging.getLogger(__name__)

# Minimum width for the ID to make sure the ID column doesn't wrap across
# multiple lines
MIN_ID_COL_WIDTH = 26

# Attributes hoisted to the top of property tables, in this order.
DEFAULT_ATTRIBUTE_DISPLAY_ORDER = ['id', 'name', 'pack', 'description']
class MultiColumnTable(formatters.Formatter):
    """Formatter which renders a collection of model objects as a table."""

    @classmethod
    def format(cls, entries, *args, **kwargs):
        """Render ``entries`` (one per row) as a PrettyTable.

        :param attributes: (kwarg) attribute names used as columns.
        :param widths: (kwarg) explicit column widths; computed from the
            terminal size when omitted.
        :param attribute_transform_functions: (kwarg) map of attribute name
            to a callable used to post-process that column's values.
        """
        attributes = kwargs.get('attributes', [])
        attribute_transform_functions = kwargs.get('attribute_transform_functions', {})
        widths = kwargs.get('widths', [])
        widths = widths or []
        if not widths and attributes:
            # Dynamically calculate column size based on the terminal size
            lines, cols = get_terminal_size()
            if attributes[0] == 'id':
                # consume iterator and save as entries so collection is accessible later.
                entries = [e for e in entries]
                # first column contains id, make sure it's not broken up
                first_col_width = cls._get_required_column_width(values=[e.id for e in entries],
                                                                 minimum_width=MIN_ID_COL_WIDTH)
                cols = (cols - first_col_width)
                col_width = int(math.floor((cols / len(attributes))))
            else:
                col_width = int(math.floor((cols / len(attributes))))
                first_col_width = col_width
            widths = []
            for index in range(0, len(attributes)):
                if index == 0:
                    widths.append(first_col_width)
                else:
                    widths.append(col_width)
        if not attributes or 'all' in attributes:
            attributes = sorted([attr for attr in entries[0].__dict__
                                 if not attr.startswith('_')])
        # Determine table format.
        if len(attributes) == len(widths):
            # Customize width for each column.
            columns = zip(attributes, widths)
        else:
            # If only 1 width value is provided then
            # apply it to all columns else fix at 28.
            width = widths[0] if len(widths) == 1 else 28
            columns = zip(attributes,
                          [width for i in range(0, len(attributes))])
        # Format result to table.
        table = PrettyTable()
        for column in columns:
            table.field_names.append(column[0])
            table.max_width[column[0]] = column[1]
        table.padding_width = 1
        table.align = 'l'
        table.valign = 't'
        for entry in entries:
            # TODO: Improve getting values of nested dict.
            values = []
            for field_name in table.field_names:
                if '.' in field_name:
                    field_names = field_name.split('.')
                    value = getattr(entry, field_names.pop(0), {})
                    for name in field_names:
                        value = cls._get_field_value(value, name)
                        if type(value) is str:
                            break
                    value = strutil.unescape(value)
                    values.append(value)
                else:
                    value = cls._get_simple_field_value(entry, field_name)
                    transform_function = attribute_transform_functions.get(field_name,
                                                                           lambda value: value)
                    value = transform_function(value=value)
                    value = strutil.unescape(value)
                    values.append(value)
            table.add_row(values)
        return table

    @staticmethod
    def _get_simple_field_value(entry, field_name):
        """
        Format a value for a simple field.
        """
        value = getattr(entry, field_name, '')
        if isinstance(value, (list, tuple)):
            if len(value) == 0:
                value = ''
            elif isinstance(value[0], six.string_types):
                # List contains simple string values, format it as comma
                # separated string. six.string_types replaces the bare
                # ``unicode`` name which raised NameError on Python 3.
                value = ', '.join(value)
        return value

    @staticmethod
    def _get_field_value(value, field_name):
        """Look up ``field_name`` in dict ``value``; empty values become ''."""
        r_val = value.get(field_name, None)
        if r_val is None:
            return ''
        if isinstance(r_val, list) or isinstance(r_val, dict):
            return r_val if len(r_val) > 0 else ''
        return r_val

    @staticmethod
    def _get_friendly_column_name(name):
        """Turn an attribute name into a human-readable column title."""
        if not name:
            return None
        friendly_name = name.replace('_', ' ').replace('.', ' ').capitalize()
        return friendly_name

    @staticmethod
    def _get_required_column_width(values, minimum_width=0):
        """Width needed for the longest value, but at least ``minimum_width``."""
        max_width = len(max(values, key=len)) if values else minimum_width
        return max_width if max_width > minimum_width else minimum_width
class PropertyValueTable(formatters.Formatter):
    @classmethod
    def format(cls, subject, *args, **kwargs):
        """Render ``subject`` as a two-column (Property, Value) PrettyTable.

        :param subject: object (or nested dicts) whose attributes are listed.
        :param attributes: (kwarg) attribute names to include; ``None`` or
            ``'all'`` lists every public attribute of ``subject``.
        :param attribute_display_order: (kwarg) attribute names to hoist to
            the top of the table, in order.
        :param attribute_transform_functions: (kwarg) map of attribute name
            to a callable used to post-process that attribute's value.
        """
        attributes = kwargs.get('attributes', None)
        attribute_display_order = kwargs.get('attribute_display_order',
                                             DEFAULT_ATTRIBUTE_DISPLAY_ORDER)
        attribute_transform_functions = kwargs.get('attribute_transform_functions', {})
        if not attributes or 'all' in attributes:
            attributes = sorted([attr for attr in subject.__dict__
                                 if not attr.startswith('_')])
        # Walk the display-order list backwards, moving each present name to
        # the front, so the final list starts with attribute_display_order.
        for attr in attribute_display_order[::-1]:
            if attr in attributes:
                attributes.remove(attr)
                attributes = [attr] + attributes
        table = PrettyTable()
        table.field_names = ['Property', 'Value']
        table.max_width['Property'] = 20
        table.max_width['Value'] = 60
        table.padding_width = 1
        table.align = 'l'
        table.valign = 't'
        for attribute in attributes:
            if '.' in attribute:
                # Dotted names drill into nested attributes/dicts; stop early
                # once a plain string is reached.
                field_names = attribute.split('.')
                value = cls._get_attribute_value(subject, field_names.pop(0))
                for name in field_names:
                    value = cls._get_attribute_value(value, name)
                    if type(value) is str:
                        break
            else:
                value = cls._get_attribute_value(subject, attribute)
            transform_function = attribute_transform_functions.get(attribute,
                                                                   lambda value: value)
            value = transform_function(value=value)
            # Containers are pretty-printed as indented JSON.
            if type(value) is dict or type(value) is list:
                value = json.dumps(value, indent=4)
            value = strutil.unescape(value)
            table.add_row([attribute, value])
        return table
@staticmethod
def _get_attribute_value(subject, attribute):
if isinstance(subject, dict):
r_val = subject.get(attribute, None)
else:
r_val = getattr(subject, attribute, None)
if r_val is None:
return ''
if isinstance(r_val, list) or isinstance(r_val, dict):
return r_val if len(r_val) > 0 el |
or c in cycles)
big = self.size - 1
if not any(i == big for c in cycles for i in c):
s += f'({big})'
return f'Cycle{s}'
    def __init__(self, *args):
        """Load up a Cycle instance with the values for the cycle.

        Accepts either a Permutation, another Cycle, or the integers of a
        single cycle. Raises ValueError on negative or duplicate integers.

        Examples
        ========

        >>> Cycle(1, 2, 6)
        Cycle(1, 2, 6)
        """
        super().__init__()
        if not args:
            return
        if len(args) == 1:
            if isinstance(args[0], Permutation):
                # Fold every cycle of the permutation into this mapping;
                # self(*c) builds the single-cycle mapping to merge in.
                for c in args[0].cyclic_form:
                    self.update(self(*c))
                return
            elif isinstance(args[0], Cycle):
                # Copy constructor: duplicate the underlying dict mapping.
                for k, v in args[0].items():
                    self[k] = v
                return
        args = [as_int(a) for a in args]
        if any(i < 0 for i in args):
            raise ValueError('negative integers are not allowed in a cycle.')
        if has_dups(args):
            raise ValueError('All elements must be unique in a cycle.')
        # Store the cycle as mapping i -> successor(i); iterating from
        # -len(args) wraps so the last element maps back to the first.
        for i in range(-len(args), 0):
            self[args[i]] = args[i + 1]
@property
def size(self):
if not self:
return 0
return max(self.keys()) + 1
    def copy(self):
        # Return an independent Cycle carrying the same mapping.
        return Cycle(self)
class Permutation(Basic):
"""
A permutation, alternatively known as an 'arrangement number' or 'ordering'
is an arrangement of the elements of an ordered list into a one-to-one
mapping with itself. The permutation of a given arrangement is given by
indicating the positions of the elements after re-arrangement. For
example, if one started with elements [x, y, a, b] (in that order) and
they were reordered as [x, y, b, a] then the permutation would be
[0, 1, 3, 2]. Notice that (in Diofant) the first element is always referred
to as 0 and the permutation uses the indices of the elements in the
original ordering, not the elements (a, b, etc...) themselves.
>>> Permutation.print_cyclic = False
Notes
=====
*Permutations Notation*
Permutations are commonly represented in disjoint cycle or array forms.
*Array Notation and 2-line Form*
In the 2-line form, the elements and their final positions are shown
as a matrix with 2 rows:
[0 1 2 ... n-1]
[p(0) p(1) p(2) ... p(n-1)]
Since the first line is always range(n), where n is the size of p,
it is sufficient to represent the permutation by the second line,
referred to as the "array form" of the permutation. This is entered
in brackets as the argument to the Permutation class:
>>> p = Permutation([0, 2, 1])
>>> p
Permutation([0, 2, 1])
Given i in range(p.size), the permutation maps i to i^p
>>> [i ^ p for i in range(p.size)]
[0, 2, 1]
The composite of two permutations p*q means first apply p, then q, so
i^(p*q) = (i^p)^q which is i^p^q according to Python precedence rules:
>>> q = Permutation([2, 1, 0])
>>> [i ^ p ^ q for i in range(3)]
[2, 0, 1]
>>> [i ^ (p*q) for i in range(3)]
[2, 0, 1]
One can use also the notation p(i) = i^p, but then the composition
rule is (p*q)(i) = q(p(i)), not p(q(i)):
>>> [(p*q)(i) for i in range(p.size)]
[2, 0, 1]
>>> [q(p(i)) for i in range(p.size)]
[2, 0, 1]
>>> [p(q(i)) for i in range(p.size)]
[1, 2, 0]
*Disjoint Cycle Notation*
In disjoint cycle notation, only the elements that have shifted are
indicated. In the above case, the 2 and 1 switched places. This can
be entered in two ways:
>>> Permutation(1, 2) == Permutation([[1, 2]]) == p
True
Only the relative ordering of elements in a cycle matter:
>>> Permutation(1, 2, 3) == Permutation(2, 3, 1) == Permutation(3, 1, 2)
True
The disjoint cycle notation is convenient when representing permutations
that have several cycles in them:
>>> Permutation(1, 2)(3, 5) == Permutation([[1, 2], [3, 5]])
True
It also provides some economy in entry when computing products of
permutations that are written in disjoint cycle notation:
>>> Permutation(1, 2)(1, 3)(2, 3)
Permutation([0, 3, 2, 1])
>>> _ == Permutation([[1, 2]])*Permutation([[1, 3]])*Permutation([[2, 3]])
True
Entering a singleton in a permutation is a way to indicate the size of the
permutation. The ``size`` keyword can also be used.
Array-form entry:
>>> Permutation([[1, 2], [9]])
Permutation([0, 2, 1], size=10)
>>> Permutation([[1, 2]], size=10)
Permutation([0, 2, 1], size=10)
Cyclic-form entry:
>>> Permutation(1, 2, size=10)
Permutation([0, 2, 1], size=10)
>>> Permutation(9)(1, 2)
Permutation([0, 2, 1], size=10)
Caution: no singleton containing an element larger than the largest
in any previous cycle can be entered. This is an important differ | ence
in how Permutation and Cycle handle the __call__ syntax. A singleton
argument at the start of a Permutation performs instantiation of the
Permutation and is permitted:
>>> Permutation(5)
Permutation([], size=6)
A singleton entered after instantiation is a call to the permutation
-- a function call -- and if the argument is out of range it will
trigger an error. Fo | r this reason, it is better to start the cycle
with the singleton:
The following fails because there is is no element 3:
>>> Permutation(1, 2)(3)
Traceback (most recent call last):
...
IndexError: list index out of range
This is ok: only the call to an out of range singleton is prohibited;
otherwise the permutation autosizes:
>>> Permutation(3)(1, 2)
Permutation([0, 2, 1, 3])
>>> Permutation(1, 2)(3, 4) == Permutation(3, 4)(1, 2)
True
*Equality testing*
The array forms must be the same in order for permutations to be equal:
>>> Permutation([1, 0, 2, 3]) == Permutation([1, 0])
False
*Identity Permutation*
The identity permutation is a permutation in which no element is out of
place. It can be entered in a variety of ways. All the following create
an identity permutation of size 4:
>>> I = Permutation([0, 1, 2, 3])
>>> all(p == I for p in [Permutation(3), Permutation(range(4)),
... Permutation([], size=4), Permutation(size=4)])
True
Watch out for entering the range *inside* a set of brackets (which is
cycle notation):
>>> I == Permutation([range(4)])
False
*Permutation Printing*
There are a few things to note about how Permutations are printed.
1) If you prefer one form (array or cycle) over another, you can set that
with the print_cyclic flag.
>>> Permutation(1, 2)(4, 5)(3, 4)
Permutation([0, 2, 1, 4, 5, 3])
>>> p = _
>>> Permutation.print_cyclic = True
>>> p
Permutation(1, 2)(3, 4, 5)
>>> Permutation.print_cyclic = False
2) Regardless of the setting, a list of elements in the array for cyclic
form can be obtained and either of those can be copied and supplied as
the argument to Permutation:
>>> p.array_form
[0, 2, 1, 4, 5, 3]
>>> p.cyclic_form
[[1, 2], [3, 4, 5]]
>>> Permutation(_) == p
True
3) Printing is economical in that as little as possible is printed while
retaining all information about the size of the permutation:
>>> Permutation([1, 0, 2, 3])
Permutation([1, 0, 2, 3])
>>> Permutation([1, 0, 2, 3], size=20)
Permutation([1, 0], size=20)
>>> Permutation([1, 0, 2, 4, 3, 5, 6], size=20)
Permutation([1, 0, 2, 4, 3], size=20)
>>> p = Permutation([1, 0, 2, 3])
>>> Permutation.print_cyclic = True
>>> p
Permutation(3)(0, 1)
>>> Permutation.print_cyclic = False
The 2 was not printed but it is still there as can be seen with the
array_form and size methods:
>>> p.array_form
[1, 0, 2, 3]
>>> p.size
4
*Short introduction to other methods*
The permutation can act as a bijective function, telling what element is
located at a given position
>>> q = Permutation([5, 2, 3, 4, 1, 0])
>>> q.array_form[1] # the hard way
2
>>> q |
"""
    def __init__(self, name, size, border_layers):
        """Create a named frame cell of ``size`` (x, y) with a rectangular
        border drawn on each layer in ``border_layers`` (a scalar layer is
        accepted too).
        """
        if not (type(border_layers) == list):
            border_layers = [border_layers]
        Cell.__init__(self, name)
        self.size_x, self.size_y = size
        # Create the border of the cell
        for l in border_layers:
            self.border = Box(
                (-self.size_x / 2., -self.size_y / 2.),
                (self.size_x / 2., self.size_y / 2.),
                1,
                layer=l)
            self.add(self.border)  # Add border to the frame
        self.align_markers = None
    def make_align_markers(self, t, w, position, layers, joy_markers=False, camps_markers=False):
        """Place alignment markers at the four corners given by ``position``.

        :param t: stroke thickness used for the JOY cross markers.
        :param w: overall marker width.
        :param position: (x, y) offset; markers are mirrored into all four
            quadrants.
        :param layers: layer or list of layers to draw on.
        :param joy_markers: draw cross-shaped JOY markers instead of squares.
        :param camps_markers: additionally place small e-beam markers around
            each corner marker.
        """
        if not (type(layers) == list):
            layers = [layers]
        top_mk_cell = Cell('AlignmentMark')
        for l in layers:
            if not joy_markers:
                # Simple filled square marker.
                am0 = Rectangle((-w / 2., -w / 2.), (w / 2., w / 2.), layer=l)
                rect_mk_cell = Cell("RectMarker")
                rect_mk_cell.add(am0)
                top_mk_cell.add(rect_mk_cell)
            elif joy_markers:
                # Cross-shaped marker built from one quadrant outline plus its
                # point reflection through the origin.
                crosspts = [(0, 0), (w / 2., 0), (w / 2., t), (t, t), (t, w / 2), (0, w / 2), (0, 0)]
                crosspts.extend(tuple(map(tuple, (-np.array(crosspts)).tolist())))
                am0 = Boundary(crosspts, layer=l)  # Create gdsCAD shape
                joy_mk_cell = Cell("JOYMarker")
                joy_mk_cell.add(am0)
                top_mk_cell.add(joy_mk_cell)
            if camps_markers:
                emw = 20.  # 20 um e-beam marker width
                camps_mk = Rectangle((-emw / 2., -emw / 2.), (emw / 2., emw / 2.), layer=l)
                camps_mk_cell = Cell("CAMPSMarker")
                camps_mk_cell.add(camps_mk)
                # One small marker at each corner of a 200 um square around
                # the main marker.
                top_mk_cell.add(camps_mk_cell, origin=[100., 100.])
                top_mk_cell.add(camps_mk_cell, origin=[100., -100.])
                top_mk_cell.add(camps_mk_cell, origin=[-100., 100.])
                top_mk_cell.add(camps_mk_cell, origin=[-100., -100.])
        self.align_markers = Cell("AlignMarkers")
        # Mirror the marker cell into all four quadrants.
        self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([1, -1]))
        self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([-1, -1]))
        self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([1, 1]))
        self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([-1, 1]))
        self.add(self.align_markers)
    def make_slit_array(self, _pitches, spacing, _widths, _lengths, rot_angle,
                        array_height, array_width, array_spacing, layers):
        """Tile labelled arrays of rotated slits into this frame.

        One sub-array is generated per (length, width) combination, laid out
        three across per row. NOTE(review): only ``_pitches[0]`` is used even
        when several pitches are passed — confirm that is intended.
        """
        if not (type(layers) == list):
            layers = [layers]
        if not (type(_pitches) == list):
            _pitches = [_pitches]
        if not (type(_lengths) == list):
            _lengths = [_lengths]
        if not (type(_widths) == list):
            _widths = [_widths]
        manyslits = i = j = None
        for l in layers:
            i = -1
            j = -1
            manyslits = Cell("SlitArray")
            pitch = _pitches[0]
            for length in _lengths:
                j += 1
                i = -1
                for width in _widths:
                    # for pitch in pitches:
                    i += 1
                    if i % 3 == 0:
                        j += 1  # Move to array to next line
                        i = 0  # Restart at left
                    # Vertical pitch grows with rotation so the projected
                    # pitch stays constant.
                    pitch_v = pitch / np.cos(np.deg2rad(rot_angle))
                    # widthV = width / np.cos(np.deg2rad(rotAngle))
                    nx = int(array_width / (length + spacing))
                    ny = int(array_height / pitch_v)
                    # Define the slits
                    slit = Cell("Slits")
                    rect = Rectangle((-length / 2., -width / 2.), (length / 2., width / 2.), layer=l)
                    rect = rect.copy().rotate(rot_angle)
                    slit.add(rect)
                    slits = CellArray(slit, nx, ny, (length + spacing, pitch_v))
                    slits.translate((-(nx - 1) * (length + spacing) / 2., -(ny - 1) * pitch_v / 2.))
                    slit_array = Cell("SlitArray")
                    slit_array.add(slits)
                    text = Label('w/p/l\n%i/%i/%i' % (width * 1000, pitch, length), 5, layer=l)
                    lbl_vertical_offset = 1.35
                    # Alternate the label above/below the array on odd/even rows.
                    if j % 2 == 0:
                        text.translate(
                            tuple(np.array(-text.bounding_box.mean(0)) + np.array((
                                0, -array_height / lbl_vertical_offset))))  # Center justify label
                    else:
                        text.translate(
                            tuple(np.array(-text.bounding_box.mean(0)) + np.array((
                                0, array_height / lbl_vertical_offset))))  # Center justify label
                    slit_array.add(text)
                    manyslits.add(slit_array,
                                  origin=((array_width + array_spacing) * i, (
                                      array_height + 2. * array_spacing) * j - array_spacing / 2.))
        self.add(manyslits,
                 origin=(-i * (array_width + array_spacing) / 2, -(j + 1.5) * (
                     array_height + array_spacing) / 2))
# %%Create the pattern that we want to write
lgField = Frame("LargeField", (2000., 2000.), [])  # Create the large write field
lgField.make_align_markers(20., 200., (850., 850.), l_lgBeam, joy_markers=True, camps_markers=True)
# Define parameters that we will use for the slits
widths = [0.004, 0.008, 0.012, 0.016, 0.028, 0.044]
pitches = [1.0, 2.0]
lengths = [10., 20.]
smFrameSize = 400
slitColumnSpacing = 3.
# Create the smaller write fields and corresponding markers; the four fields
# cover every combination of the two pitches and the two lengths.
smField1 = Frame("SmallField1", (smFrameSize, smFrameSize), [])
smField1.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
smField1.make_slit_array(pitches[0], slitColumnSpacing, widths, lengths[0], rotAngle, 100, 100, 30, l_smBeam)
smField2 = Frame("SmallField2", (smFrameSize, smFrameSize), [])
smField2.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
smField2.make_slit_array(pitches[0], slitColumnSpacing, widths, lengths[1], rotAngle, 100, 100, 30, l_smBeam)
smField3 = Frame("SmallField3", (smFrameSize, smFrameSize), [])
smField3.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
smField3.make_slit_array(pitches[1], slitColumnSpacing, widths, lengths[0], rotAngle, 100, 100, 30, l_smBeam)
smField4 = Frame("SmallField4", (smFrameSize, smFrameSize), [])
smField4.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
smField4.make_slit_array(pitches[1], slitColumnSpacing, widths, lengths[1], rotAngle, 100, 100, 30, l_smBeam)
centerAlignField = Frame("CenterAlignField", (smFrameSize, smFrameSize), [])
centerAlignField.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
# Add everything together to a top cell
topCell = Cell("TopCell")
topCell.add(lgField)
smFrameSpacing = 400  # Spacing between the three small frames
dx = smFrameSpacing + smFrameSize
dy = smFrameSpacing + smFrameSize
topCell.add(smField1, origin=(-dx / 2., dy / 2.))
topCell.add(smField2, origin=(dx / 2., dy / 2.))
topCell.add(smField3, origin=(-dx / 2., -dy / 2.))
topCell.add(smField4, origin=(dx / 2., -dy / 2.))
topCell.add(centerAlignField, origin=(0., 0.))
topCell.spacing = np.array([4000., 4000.])
# %%Create the layout and output GDS file
layout = Layout('LIBRARY')
if putOnWafer:  # Fit as many patterns on a 2inch wafer as possible
    wafer = MBE100Wafer('MembranesWafer', cells=[topCell])
    layout.add(wafer)
# layout.show()
else:  # Only output a single copy of the pattern (not on a wafer)
    layout.add(topCell)
layout.show()
filestring = str(waferVer) + '_' + WAFER_ID + '_' + date.today().strftime("%d%m%Y") + ' dMark' + str(tDicingMarks)
filename = filestring.replace(' ', '_') + '.gds'
layout.save(filename)
cell_layout = Layout('LIBRARY')
# NOTE(review): 'wafer' is only bound in the putOnWafer branch above, so the
# next line raises NameError when putOnWafer is False — confirm intended use.
cell_layout.add(wafer.blocks[0])
cell_layout.save(filestring.replace(' ', '_') + '_ |
"""
Persistence configuration
"""
PERSISTENCE_BACKEND = 'pyp | eman.persistence.SqliteBackend'
PERSISTENCE_CONFIG = {"path":'/tmp/to_be_removed_849827198746.sqlite | '}
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Publ | ic License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have | received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
__all__ = ['Minutes20Test']
class Minutes20Test(BackendTest):
    """Smoke test for the minutes20 backend."""
    BACKEND = 'minutes20'

    def test_new_messages(self):
        # Draining the iterator must not raise; contents are not inspected.
        for _message in self.backend.iter_unread_messages():
            pass
|
# -*- coding: utf-8 -*-
# Copyright (C) 2011-2012 Vodafone España, S.A.
# Author: Andrew Bird
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is | distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free | Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from wader.common.consts import WADER_CONNTYPE_USB
from core.hardware.zte import (ZTEWCDMADevicePlugin,
ZTEWCDMACustomizer,
ZTEWrapper)
class ZTEMF180Wrapper(ZTEWrapper):
    """Wrapper with MF180-specific USSD handling."""
    def send_ussd(self, ussd):
        """Sends the ussd command ``ussd``"""
        # XXX: assumes it's the same as 637U
        # MF180 wants request in ascii chars even though current
        # set might be ucs2
        return super(ZTEMF180Wrapper, self).send_ussd(ussd, force_ascii=True)
class ZTEMF180Customizer(ZTEWCDMACustomizer):
    """Customizer that installs the MF180-specific wrapper."""
    wrapper_klass = ZTEMF180Wrapper
class ZTEMF180(ZTEWCDMADevicePlugin):
    """:class:`~core.plugin.DevicePlugin` for ZTE's MF180"""
    name = "ZTE MF180"
    version = "0.1"
    author = u"Andrew Bird"
    custom = ZTEMF180Customizer()
    __remote_name__ = "MF180"
    # USB vendor/product ids this plugin matches against.
    __properties__ = {
        'ID_VENDOR_ID': [0x19d2],
        'ID_MODEL_ID': [0x2003],
    }
    conntype = WADER_CONNTYPE_USB

# Module-level plugin instance picked up by the plugin loader.
zte_mf180 = ZTEMF180()
|
from __future__ import absolute_import, unicode_literals
import mock
import pytest
from ddns_zones_updater.configreader import ConfigReader
from ddns_zones_updater.core import DDNSZoneUpdater
@pytest.fixture
def fake_config_reader_with_two_hosts():
    """Return ([host_1, host_2], fake config) whose ``hosts`` yields both.

    NOTE(review): ``__iter__`` is backed by a generator expression, so the
    fake host manager can only be iterated ONCE per test.
    """
    host_1 = mock.Mock(do_update=mock.Mock())
    host_2 = mock.Mock(do_update=mock.Mock())
    class FakeHostManager(mock.Mock):
        __iter__ = mock.Mock(return_value=(h for h in [host_1, host_2]))
    class FakeConfigReader(mock.Mock):
        hosts = FakeHostManager()
    return [host_1, host_2], FakeConfigReader()
@pytest.fixture
def updater_without_calling_init(request):
    """Return a DDNSZoneUpdater whose ``__init__`` is patched to a no-op."""
    patcher = mock.patch.object(DDNSZoneUpdater, "__init__", return_value=None)
    patcher.start()
    # Undo the patch when the requesting test finishes.
    request.addfinalizer(patcher.stop)
    return DDNSZoneUpdater("path/to/config.ini")
@mock.patch.object(ConfigReader, "read")
@mock.patch.object(ConfigReader, "__init__", return_value=None)
def test_initializer(mock_init, mock_read):
    """Constructing the updater must create and read a ConfigReader."""
    DDNSZoneUpdater("/tmp/foo.ini")
    mock_init.assert_called_once_with("/tmp/foo.ini")
    mock_read.assert_called_once_with()
def test_get_current_wan_ip(updater_without_calling_init):
    """current_wan_ip() simply relays whatever ipgetter.myip() reports."""
    updater = updater_without_calling_init
    with mock.patch("ipgetter.myip", return_value="149.0.0.31") as fake_myip:
        result = updater.current_wan_ip()
        fake_myip.assert_called_once_with()
    assert result == "149.0.0.31"
def test_run(updater_without_calling_init, fake_config_reader_with_two_hosts):
    """run() pushes the current WAN IP to every configured host once."""
    updater = updater_without_calling_init
    hosts, updater.config = fake_config_reader_with_two_hosts
    with mock.patch("ipgetter.myip", return_value="1.1.1.1") as fake_myip:
        updater.run()
        fake_myip.assert_called_once_with()
    for expected_host in hosts:
        expected_host.do_update.assert_called_once_with("1.1.1.1")
|
#!/usr/bin/env python
import unittest
from test import support
import socket
import urllib.request
import sys
import os
import email.message
def _open_with_retry(fun | c, host, *args, **kwargs):
# Connecting to remote hosts is flaky. Make it more robust
# by retrying the connection several times.
last_exc = None
for i in range(3):
try:
return func(host, *args, **kwargs)
except IOError as err:
last_exc = err
continue
except:
raise
raise last_exc
class URLTimeoutTest(unittest.TestCase):
    """Check that urlopen() completes within a process-wide socket timeout."""

    TIMEOUT = 10.0  # seconds, installed via socket.setdefaulttimeout

    def setUp(self):
        socket.setdefaulttimeout(self.TIMEOUT)

    def tearDown(self):
        # Restore "no timeout" so later tests are unaffected.
        socket.setdefaulttimeout(None)

    def testURLread(self):
        # Real network round-trip: fetch python.org and read the body.
        f = _open_with_retry(urllib.request.urlopen, "http://www.python.org/")
        x = f.read()
class urlopenNetworkTests(unittest.TestCase):
    """Tests urllib.request.urlopen using the network.

    These tests are not exhaustive. Assuming that testing using files does a
    good job overall of some of the basic interface features. There are no
    tests exercising the optional 'data' and 'proxies' arguments. No tests
    for transparent redirection have been written.

    setUp is not used for always constructing a connection to
    http://www.python.org/ since there a few tests that don't use that address
    and making a connection is expensive enough to warrant minimizing unneeded
    connections.

    Fix: replaced the deprecated ``TestCase.assert_`` alias (removed in
    Python 3.12) with ``assertTrue`` throughout.
    """

    def urlopen(self, *args):
        # Every open goes through the retry helper to tolerate flakiness.
        return _open_with_retry(urllib.request.urlopen, *args)

    def test_basic(self):
        # Simple test expected to pass.
        open_url = self.urlopen("http://www.python.org/")
        for attr in ("read", "readline", "readlines", "fileno", "close",
                     "info", "geturl"):
            self.assertTrue(hasattr(open_url, attr), "object returned from "
                            "urlopen lacks the %s attribute" % attr)
        try:
            self.assertTrue(open_url.read(), "calling 'read' failed")
        finally:
            open_url.close()

    def test_readlines(self):
        # Test both readline and readlines.
        open_url = self.urlopen("http://www.python.org/")
        try:
            self.assertTrue(isinstance(open_url.readline(), bytes),
                            "readline did not return bytes")
            self.assertTrue(isinstance(open_url.readlines(), list),
                            "readlines did not return a list")
        finally:
            open_url.close()

    def test_info(self):
        # 'info' must expose response headers as an email.message.Message.
        open_url = self.urlopen("http://www.python.org/")
        try:
            info_obj = open_url.info()
        finally:
            open_url.close()
        self.assertTrue(isinstance(info_obj, email.message.Message),
                        "object returned by 'info' is not an instance of "
                        "email.message.Message")
        self.assertEqual(info_obj.get_content_subtype(), "html")

    def test_geturl(self):
        # Make sure same URL as opened is returned by geturl.
        URL = "http://www.python.org/"
        open_url = self.urlopen(URL)
        try:
            gotten_url = open_url.geturl()
        finally:
            open_url.close()
        self.assertEqual(gotten_url, URL)

    def test_getcode(self):
        # test getcode() with the fancy opener to get 404 error codes
        URL = "http://www.python.org/XXXinvalidXXX"
        open_url = urllib.request.FancyURLopener().open(URL)
        try:
            code = open_url.getcode()
        finally:
            open_url.close()
        self.assertEqual(code, 404)

    def test_fileno(self):
        if sys.platform in ('win32',):
            # On Windows, socket handles are not file descriptors; this
            # test can't pass on Windows.
            return
        # Make sure fd returned by fileno is valid.
        open_url = self.urlopen("http://www.python.org/")
        fd = open_url.fileno()
        FILE = os.fdopen(fd, encoding='utf-8')
        try:
            self.assertTrue(FILE.read(), "reading from file created using fd "
                            "returned by fileno failed")
        finally:
            FILE.close()

    def test_bad_address(self):
        # Make sure proper exception is raised when connecting to a bogus
        # address.
        self.assertRaises(IOError,
                          # SF patch 809915: In Sep 2003, VeriSign started
                          # highjacking invalid .com and .net addresses to
                          # boost traffic to their own site. This test
                          # started failing then. One hopes the .invalid
                          # domain will be spared to serve its defined
                          # purpose.
                          # urllib.urlopen, "http://www.sadflkjsasadf.com/")
                          urllib.request.urlopen,
                          "http://www.python.invalid./")
class urlretrieveNetworkTests(unittest.TestCase):
    """Tests urllib.request.urlretrieve using the network.

    Fix: replaced the deprecated ``TestCase.assert_`` alias (removed in
    Python 3.12) with ``assertTrue``.
    """

    def urlretrieve(self, *args):
        # Route through the retry helper to tolerate flaky networks.
        return _open_with_retry(urllib.request.urlretrieve, *args)

    def test_basic(self):
        # Test basic functionality.
        file_location,info = self.urlretrieve("http://www.python.org/")
        self.assertTrue(os.path.exists(file_location), "file location returned by"
                        " urlretrieve is not a valid path")
        FILE = open(file_location, encoding='utf-8')
        try:
            self.assertTrue(FILE.read(), "reading from the file location returned"
                            " by urlretrieve failed")
        finally:
            FILE.close()
        os.unlink(file_location)

    def test_specified_path(self):
        # Make sure that specifying the location of the file to write to works.
        file_location,info = self.urlretrieve("http://www.python.org/",
                                              support.TESTFN)
        self.assertEqual(file_location, support.TESTFN)
        self.assertTrue(os.path.exists(file_location))
        FILE = open(file_location, encoding='utf-8')
        try:
            self.assertTrue(FILE.read(), "reading from temporary file failed")
        finally:
            FILE.close()
        os.unlink(file_location)

    def test_header(self):
        # Make sure header returned as 2nd value from urlretrieve is good.
        file_location, header = self.urlretrieve("http://www.python.org/")
        os.unlink(file_location)
        self.assertTrue(isinstance(header, email.message.Message),
                        "header is not an instance of email.message.Message")
def test_main():
    """Run all network-dependent cases; skipped unless the 'network' resource is enabled."""
    support.requires('network')
    support.run_unittest(URLTimeoutTest,
                         urlopenNetworkTests,
                         urlretrieveNetworkTests)

if __name__ == "__main__":
    test_main()
|
import json

# Load three polygon sets from GeoJSON files.  Each file holds a single
# feature whose geometry is a GeometryCollection; we keep the list of
# member geometries.  The 78/100/130 suffixes are presumably mm
# thresholds (see check() below) -- confirm against the data source.
with open('data/78mm.json', 'r') as _78mm:
    polygons78 = json.load(_78mm)["features"][0]["geometry"]["geometries"]
with open('data/100mm.json', 'r') as _100mm:
    polygons100 = json.load(_100mm)["features"][0]["geometry"]["geometries"]
with open('data/130mm.json', 'r') as _130mm:
    polygons130 = json.load(_130mm)["features"][0]["geometry"]["geometries"]
def dot(x1, y1, x2, y2):
    """Return the dot product of vectors (x1, y1) and (x2, y2).

    Fix: the original computed ``x1*y1 + x2*y2``, which is not a dot
    product under any pairing of the arguments.  ``det()`` below pairs
    the arguments as vectors (x1, y1) and (x2, y2), so ``dot()`` now
    follows the same convention.  The function is not referenced by the
    live code in this module, so the fix cannot change behaviour.
    """
    return x1 * x2 + y1 * y2
def det(x1, y1, x2, y2):
    """2-D cross product (signed area) of vectors (x1, y1) and (x2, y2)."""
    return x1*y2-x2*y1

def dett(x0, y0, x1, y1, x2, y2):
    """Orientation of point (x2, y2) relative to ray (x0, y0)->(x1, y1).

    Returns -1 (clockwise), 1 (counter-clockwise) or 0 (collinear).

    Fix: the original returned a bool (``z > 0``) on the non-negative
    branch and an int (-1) otherwise; it now always returns an int.
    Numerically identical (True == 1, False == 0), so the arithmetic in
    intersect() is unaffected.
    """
    z = det(x1 - x0, y1 - y0, x2 - x0, y2 - y0)
    if z < 0:
        return -1
    return 1 if z > 0 else 0
# NOTE(review): dead code -- the block below is a leftover C-to-Python
# translation sketch kept as a bare string literal; nothing references
# it and evaluating it has no effect.  Candidate for deletion.
'''
inline DB ang(cPo p0,cPo p1){return acos(dot(p0,p1)/p0.len()/p1.len());}
def ang(x1, y1, x2, y2):
    return
def arg(x1, y1, x2, y2):
    DB a=ang(x,y);return~dett(x,y)?a:2*PI-a;}
    return
'''
def intersect(lx1, ly1, lx2, ly2, rx1, ry1, rx2, ry2):
    """Return 1 if segment L=(l1,l2) and segment R=(r1,r2) touch/cross, else 0."""
    r1_side = dett(lx1, ly1, lx2, ly2, rx1, ry1)
    r2_side = dett(lx1, ly1, lx2, ly2, rx2, ry2)
    l1_side = dett(rx1, ry1, rx2, ry2, lx1, ly1)
    l2_side = dett(rx1, ry1, rx2, ry2, lx2, ly2)
    # Segments intersect iff each one straddles (or touches) the line
    # carrying the other: the two orientation signs must not agree.
    if r1_side * r2_side <= 0 and l1_side * l2_side <= 0:
        return 1
    return 0
def within(p, x, y):
    """Even-odd test: 1 if (x, y) lies inside the closed polyline ``p``, else 0.

    Casts a segment from (x, y) to a fixed far-away point and counts how
    many polygon edges it crosses; vertices equal to the query point are
    skipped, exactly as in the original implementation.
    """
    crossings = 0
    for (ax, ay), (bx, by) in zip(p, p[1:]):
        if (x == ax and y == ay) or (x == bx and y == by):
            continue
        crossings += intersect(x, y, -3232, -4344, ax, ay, bx, by)
    return 1 if crossings % 2 == 1 else 0
def _check(p, d, x, y):
    """Return [d, index] for the first polygon in ``p`` containing (x, y), else []."""
    for index, polygon in enumerate(p):
        if within(polygon["coordinates"][0], x, y):
            return [d, index]
    return []
def check(x, y):
    """Map point (x, y) to a weight according to which polygon set contains it."""
    tiers = (
        (polygons78, 78, 0.2),    # 0.078
        (polygons100, 100, 0.5),  # 0.1
        (polygons130, 130, 0.8),  # 0.13
    )
    for polygons, label, weight in tiers:
        if _check(polygons, label, x, y):
            return weight
    # Outside every polygon set.
    return 1.0
# init()
# #display()
# #x, y = 121.555764, 24.9833
#
# x, y = 121.565764, 24.9830
# res = check(x, y)
# print res
# if (len(res) > 0):
# if (res[0] == 78):
# print_polygon(polygons78[res[1]]["coordinates"][0], 'Red')
# if (res[0] == 100):
# print_polygon(polygon | s78[res[1]]["coordinates"][0], 'Orange')
# if (res[0] == 130):
# print_polygon(polygons78[res[1]]["coordinates"][0], 'Yellow | ')
# plt.plot(x, y, marker='*')
# ax.grid()
# ax.axis('equal')
# plt.show()
|
# Fix: the first line below had lost its leading '#', leaving a bare SQL
# fragment at module level (a SyntaxError).  The whole historical query
# is now commented out like its neighbours.
# where latitud < -7.177 and latitud > -8.9722
# /* Ancash*/
# select count(*) from t_boya_medicion_minpres
# where latitud < -8.9722 and latitud > -10.593
import glob, os
import psycopg2
import datetime
# Connection settings -- placeholders; real credentials are substituted
# before running.  NOTE(review): prefer environment variables over
# hard-coded credentials in source.
db_user = "USER"
db_host = "IP_ADDRESS"
db_password = "PASSWORD"

# Destination CSV for the merged buoy/station/flow dataset.
output = "./Output/datos_total_boya3_est7_ca1.csv"
class Departamento (object):
    """Value object: a coastal department and its latitude band."""

    def __init__(self, nombre, latitud_min, latitud_max):
        # Latitude bounds stay as strings; they are interpolated
        # verbatim into SQL queries further down.
        self.latitud_min = latitud_min
        self.latitud_max = latitud_max
        self.nombre = nombre
class Zona (object):
    """Aggregated buoy readings for one zone over a date window."""

    def __init__(self, start_date, end_date, nombre, latitud_min, latitud_max, temperatura, presion, salinidad):
        self.nombre = nombre
        self.latitud_min = latitud_min
        self.latitud_max = latitud_max
        self.start_date = start_date
        self.end_date = end_date
        # Averaged sensor readings for the window.
        self.temperatura = temperatura
        self.presion = presion
        self.salinidad = salinidad
class boya_data (object):
    """Average buoy readings: temperature, pressure, salinity."""

    def __init__(self, temperatura, presion, salinidad):
        self.salinidad = salinidad
        self.presion = presion
        self.temperatura = temperatura
class estacion_data (object):
    """Average weather-station readings over a date window.

    Attributes mirror the constructor arguments with an ``est_`` prefix.
    (An older three-argument constructor used to live here; it was
    superseded by this seven-argument version.)
    """

    def __init__(self, temperatura_m, punto_rocio_m, presion_nivel_mar,
                 presion_est_media, velocidad_viento_media, temperatura_maxima,
                 temperatura_minima):
        self.est_temperatura_m = temperatura_m
        self.est_punto_rocio_m = punto_rocio_m
        self.est_presion_nivel_mar = presion_nivel_mar
        self.est_presion_est_media = presion_est_media
        self.est_velocidad_viento_media = velocidad_viento_media
        self.est_temperatura_maxima = temperatura_maxima
        self.est_temperatura_minima = temperatura_minima
class caudal_data (object):
    """Average river-flow (caudal) reading."""

    def __init__(self, caudal):
        self.caudal = caudal
def database_select_date_between(start_date, end_date):
    """Print the count of Tumbes-band buoy measurements between two dates.

    Fixes: use the Python 2.6+/3-compatible ``except ... as e`` syntax;
    stop formatting ``e.pgerror`` (None for connection errors, which made
    the handler itself crash with a TypeError); re-raise after a failed
    connect instead of falling through to an undefined ``conn``.
    """
    try:
        conn = psycopg2.connect("dbname='elnino' user='%s' host='%s' password='%s'"%(db_user, db_host, db_password))
    except Exception as e:
        print("I am unable to connect to the database %s" % e)
        raise  # without a connection the rest of this function cannot run
    cur = conn.cursor()
    try:
        # NOTE(review): values are string-interpolated into the SQL; fine
        # for trusted literals, but parameterized queries would be safer.
        query = "select count(*) from t_boya_medicion_minpres where latitud < -3.392 and latitud > -4.078 AND (" \
                " concat_ws('-',ano,mes,dia)::date >= '%s'::date" \
                " AND" \
                " concat_ws('-',ano,mes,dia)::date <= '%s'::date);"%(start_date, end_date)
        cur.execute(query)
    except Exception as e:
        print("I can't SELECT from bar %s" % e)
        raise
    rows = cur.fetchall()
    for row in rows:
        print("  %s" % (row,))
def database_select_date_between_lat(start_latitud, end_latitud, start_date, end_date):
    """Count buoy measurements inside a latitude band and date window.

    Fixes: the connect call referenced the undefined globals ``host`` and
    ``password`` (NameError at runtime) -- it now uses ``db_host`` and
    ``db_password`` like the constants defined above; modern ``except ...
    as e`` syntax; re-raise on failure instead of continuing with
    undefined state; avoid ``e.pgerror`` (None on connect failures).
    """
    try:
        conn = psycopg2.connect("dbname='elnino' user='%s' host='%s' password='%s'"%(db_user, db_host, db_password))
    except Exception as e:
        print("I am unable to connect to the database %s" % e)
        raise
    cur = conn.cursor()
    try:
        query = "select count(*) from t_boya_medicion_minpres where latitud < %s AND latitud > %s AND (" \
                " concat_ws('-',ano,mes,dia)::date >= '%s'::date" \
                " AND" \
                " concat_ws('-',ano,mes,dia)::date <= '%s'::date);"%(start_latitud, end_latitud, start_date, end_date)
        cur.execute(query)
    except Exception as e:
        print("I can't SELECT from bar %s" % e)
        raise
    # The query returns a single row; keep the last value as the count.
    count = 0
    for row in cur.fetchall():
        count = row[0]
    return count
def database_select_date_between_lat_avg(start_latitud, end_latitud, start_date, end_date):
    """Average buoy temp/pressure/salinity for a latitude band and window.

    Returns a ``boya_data`` built from the single aggregate row, or None
    if the query yields nothing.

    Fixes: undefined globals ``host``/``password`` replaced with
    ``db_host``/``db_password``; modern ``except ... as e`` syntax;
    re-raise on failure; removed the unused ``count`` local.
    """
    try:
        conn = psycopg2.connect("dbname='elnino' user='%s' host='%s' password='%s'"%(db_user, db_host, db_password))
    except Exception as e:
        print("I am unable to connect to the database %s" % e)
        raise
    cur = conn.cursor()
    try:
        query = "select avg(temp), avg(pres), avg(psal) from t_boya_medicion_minpres " \
                " where latitud < %s AND latitud > %s AND (" \
                " concat_ws('-',ano,mes,dia)::date >= '%s'::date" \
                " AND" \
                " concat_ws('-',ano,mes,dia)::date <= '%s'::date);"%(start_latitud, end_latitud, start_date, end_date)
        cur.execute(query)
    except Exception as e:
        print("I can't SELECT from bar %s" % e)
        raise
    b_data = None
    for row in cur.fetchall():
        b_data = boya_data(row[0], row[1], row[2])
    return b_data
def database_select_date_between_lat_avg_estacion(region, start_date, end_date):
    """Average weather-station readings for a region over a date window.

    Returns an ``estacion_data`` built from the single aggregate row, or
    None if the query yields nothing.

    Fixes: the connect call read ``(db_user, host. password)`` -- an
    attribute access where a comma was intended, leaving two values for
    three %s placeholders and referencing the undefined ``host`` --
    corrected to ``(db_user, db_host, db_password)``; modern ``except ...
    as e`` syntax; re-raise on failure.
    """
    try:
        conn = psycopg2.connect("dbname='elnino' user='%s' host='%s' password='%s'"%(db_user, db_host, db_password))
    except Exception as e:
        print("I am unable to connect to the database %s" % e)
        raise
    cur = conn.cursor()
    try:
        query = "Select avg(em.temp_m), avg(em.punto_rocio_m), avg(em.presion_nivel_mar), " \
                "avg(em.presion_est_m), avg(em.veloc_viento_m), avg(em.temp_max), avg(em.temp_min) " \
                " From t_region r, t_estacion e, t_estacion_medicion em " \
                " Where e.id_region = r.id_region AND r.nombre like '%s' " \
                " AND em.id_estacion = e.id_estacion " \
                " AND concat_ws('-',ano,mes,dia)::date >= '%s'::date " \
                " AND concat_ws('-',ano,mes,dia)::date <= '%s'::date;"%(region, start_date, end_date)
        cur.execute(query)
    except Exception as e:
        print("I can't SELECT from bar %s" % e)
        raise
    b_data = None
    for row in cur.fetchall():
        b_data = estacion_data(row[0], row[1], row[2], row[3], row[4], row[5], row[6])
    return b_data
def database_select_date_between_lat_avg_caudal(region, start_date, end_date):
    """Average river-flow (excluding the 9999 sentinel) for a region/window.

    Returns a ``caudal_data`` built from the single aggregate row, or
    None if the query yields nothing.

    Fixes: undefined globals ``host``/``password`` replaced with
    ``db_host``/``db_password``; modern ``except ... as e`` syntax;
    re-raise on failure; removed the unused ``count`` local.
    """
    try:
        conn = psycopg2.connect("dbname='elnino' user='%s' host='%s' password='%s'"%(db_user, db_host, db_password))
    except Exception as e:
        print("I am unable to connect to the database %s" % e)
        raise
    cur = conn.cursor()
    try:
        query = " Select avg(c.caudal) From t_caudal_medicion c " \
                " Where c.region like '%s' AND c.caudal != 9999 " \
                " AND concat_ws('-',ano,mes,dia)::date >= '%s'::date " \
                " AND concat_ws('-',ano,mes,dia)::date <= '%s'::date;"%(region, start_date, end_date)
        cur.execute(query)
    except Exception as e:
        print("I can't SELECT from bar %s" % e)
        raise
    c_data = None
    for row in cur.fetchall():
        c_data = caudal_data(row[0])
    return c_data
# def count_boyas_range_space_and_time(i, start_date_unix, step_date, latitude, longitude):
# t_start = start_date_unix + i * step_date
# t_end = start_date_unix + (i + 1) * step_date
# start_date = datetime.datetime.fromtimestamp(t_start).strftime("%Y-%m-%d")
# end_date = datetime.datetime.fromtimestamp(t_end).strftime("%Y-%m-%d")
# count = database_select_date_between_lat(latitude, longitude, start_date, end_date)
# print "%s -- %s -> %s" % (start_date, end_date, count)
# return count
if __name__ == '__main__':
# datebase = 1422766800
maximo = 1467522000
periodo = 18
delta = 0
toDate = 24*3600*periodo
#n = 27
#27
# 26, 16 = 8 8
# 26, 18 = 8 10
# 26, 20 = 10 10
# 24, 20 = 9 10
# 22, 22 = 12, 12 2015-03-18
# 20, 24 = 13, 11
# 14, 34 = 21, 13
departamentos = []
departamentos.append(Departamento("Tumbes", "-3.392", "-4.078"))
departamentos.append(Departamento("Piura", "-4.078", "-6.382"))
departamentos.append(Departamento("Lambayeque", "-6.382", "-7.177"))
departamentos.append(Departamento("La Libertad", "-7.177", "-8.9722"))
departamentos.append(Departamento("Ancash", "-8.9722", "-10.593"))
rango_fechas = []
rango_fechas_status = []
start_date_unix = int(datetime.datetime.strptime("2015-03-05","% |
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
from __future__ import absolute_import, division, unicode_literals
from jx_base.queries import get_property_name
from jx_sqlite.utils import GUID, untyped_column
from mo_dots import concat_field, relative_field, set_default, startswith_field
from mo_json import EXISTS, OBJECT, STRUCT
from mo_logs import Log
class Schema(object):
    """
    A Schema MAPS ALL COLUMNS IN SNOWFLAKE FROM THE PERSPECTIVE OF A SINGLE TABLE (a nested_path)
    """

    def __init__(self, nested_path, snowflake):
        # nested_path must end with '.' (the fact table itself);
        # anything else is a caller error.
        if nested_path[-1] != '.':
            Log.error("Expecting full nested path")
        self.path = concat_field(snowflake.fact_name, nested_path[0])
        self.nested_path = nested_path
        self.snowflake = snowflake

    # def add(self, column_name, column):
    #     if column_name != column.names[self.nested_path[0]]:
    #         Log.error("Logic error")
    #
    #     self.columns.append(column)
    #
    #     for np in self.nested_path:
    #         rel_name = column.names[np]
    #         container = self.namespace.setdefault(rel_name, set())
    #         hidden = [
    #             c
    #             for c in container
    #             if len(c.nested_path[0]) < len(np)
    #         ]
    #         for h in hidden:
    #             container.remove(h)
    #
    #         container.add(column)
    #
    #     container = self.namespace.setdefault(column.es_column, set())
    #     container.add(column)

    # def remove(self, column_name, column):
    #     if column_name != column.names[self.nested_path[0]]:
    #         Log.error("Logic error")
    #
    #     self.namespace[column_name] = [c for c in self.namespace[column_name] if c != column]

    def __getitem__(self, item):
        # Delegate column lookup to the snowflake-wide namespace,
        # scoped to this table's absolute path.
        output = self.snowflake.namespace.columns.find(self.path, item)
        return output

    # def __copy__(self):
    #     output = Schema(self.nested_path)
    #     for k, v in self.namespace.items():
    #         output.namespace[k] = copy(v)
    #     return output

    def get_column_name(self, column):
        """
        RETURN THE COLUMN NAME, FROM THE PERSPECTIVE OF THIS SCHEMA
        :param column:
        :return: NAME OF column
        """
        relative_name = relative_field(column.name, self.nested_path[0])
        return get_property_name(relative_name)

    @property
    def namespace(self):
        # Snowflake-wide namespace shared by all schemas of this fact.
        return self.snowflake.namespace

    def keys(self):
        """
        :return: ALL COLUMN NAMES
        """
        return set(c.name for c in self.columns)

    @property
    def columns(self):
        # Every column of the fact table, across all nested paths.
        return self.snowflake.namespace.columns.find(self.snowflake.fact_name)

    def column(self, prefix):
        # Columns whose untyped name equals ``prefix`` exactly (GUID and
        # structural OBJECT/EXISTS columns excluded).
        # NOTE(review): concat_field is given self.nested_path (a list)
        # here, while get_column_name uses nested_path[0] -- confirm the
        # inconsistency is intentional before changing either.
        full_name = untyped_column(concat_field(self.nested_path, prefix))
        return set(
            c
            for c in self.snowflake.namespace.columns.find(self.snowflake.fact_name)
            for k, t in [untyped_column(c.name)]
            if k == full_name and k != GUID
            if c.jx_type not in [OBJECT, EXISTS]
        )

    def leaves(self, prefix):
        # Columns at-or-below ``prefix``; GUID is excluded unless it is
        # the exact match.
        full_name = concat_field(self.nested_path, prefix)
        return set(
            c
            for c in self.snowflake.namespace.columns.find(self.snowflake.fact_name)
            for k in [c.name]
            if startswith_field(k, full_name) and k != GUID or k == full_name
            if c.jx_type not in [OBJECT, EXISTS]
        )

    def map_to_sql(self, var=""):
        """
        RETURN A MAP FROM THE RELATIVE AND ABSOLUTE NAME SPACE TO COLUMNS
        """
        origin = self.nested_path[0]
        # Make ``var`` relative to this table when it is an absolute path.
        if startswith_field(var, origin) and origin != var:
            var = relative_field(var, origin)
        fact_dict = {}
        origin_dict = {}
        for k, cs in self.namespace.items():
            for c in cs:
                if c.jx_type in STRUCT:
                    continue

                if startswith_field(get_property_name(k), var):
                    origin_dict.setdefault(c.names[origin], []).append(c)

                    # Columns living in another nested table also appear
                    # under their absolute (fact) name.
                    if origin != c.nested_path[0]:
                        fact_dict.setdefault(c.name, []).append(c)
                elif origin == var:
                    origin_dict.setdefault(concat_field(var, c.names[origin]), []).append(c)

                    if origin != c.nested_path[0]:
                        fact_dict.setdefault(concat_field(var, c.name), []).append(c)

        # Relative names win over absolute ones on collision.
        return set_default(origin_dict, fact_dict)
|
__problem_title__ = "Co | mfortable distance"
__problem_url___ = "https://projecteuler.net/problem=364"
__problem_description__ = "There are seats in a row. people come after each other to fill the " \
"seats according to the following rules: We can verify that T(10) = " \
"61632 and T(1 000) mod 100 000 007 = 47255094. Find T(1 000 000) mod " \
"100 000 007."
import timeit
class Solution():
    """Solution stub for the problem above; no algorithm implemented yet."""

    @staticmethod
    def solution1():
        # Placeholder -- returns None until an algorithm is written.
        pass

    @staticmethod
    def time_solutions():
        """Time each candidate solution once and print the elapsed seconds."""
        setup_stmt = 'from __main__ import Solution'
        elapsed = timeit.timeit('Solution.solution1()', setup=setup_stmt, number=1)
        print('Solution 1:', elapsed)
if __name__ == '__main__':
    # Run the (stub) solution and report timings when executed directly.
    s = Solution()
    print(s.solution1())
    s.time_solutions()
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Kylin OS, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
class LiveMigrateForm(forms.SelfHandlingForm):
    """Admin form to live-migrate a running instance to another host."""

    current_host = forms.CharField(label=_("Current Host"),
                                   required=False,
                                   widget=forms.TextInput(
                                       attrs={'readonly': 'readonly'}))
    host = forms.ChoiceField(label=_("New Host"),
                             required=True,
                             help_text=_("Choose a Host to migrate to."))
    disk_over_commit = forms.BooleanField(label=_("Disk Over Commit"),
                                          initial=False, required=False)
    block_migration = forms.BooleanField(label=_("Block Migration"),
                                         initial=False, required=False)

    def __init__(self, request, *args, **kwargs):
        super(LiveMigrateForm, self).__init__(request, *args, **kwargs)
        initial = kwargs.get('initial', {})
        instance_id = initial.get('instance_id')
        # The instance id travels in a hidden field so handle() can read
        # it back from the submitted data.
        self.fields['instance_id'] = forms.CharField(widget=forms.HiddenInput,
                                                     initial=instance_id)
        self.fields['host'].choices = self.populate_host_choices(request,
                                                                 initial)

    def populate_host_choices(self, request, initial):
        """Build the (value, label) choice list of candidate target hosts.

        The current host is excluded; the blank first entry carries either
        a prompt or a "no hosts available" notice.
        """
        hosts = initial.get('hosts')
        current_host = initial.get('current_host')
        host_list = [(host.hypervisor_hostname,
                      host.hypervisor_hostname)
                     for host in hosts
                     if host.hypervisor_hostname != current_host]
        if host_list:
            host_list.insert(0, ("", _("Select a new host")))
        else:
            host_list.insert(0, ("", _("No other hosts available.")))
        return sorted(host_list)

    def handle(self, request, data):
        """Trigger the live migration; report success or redirect on error."""
        try:
            block_migration = data['block_migration']
            disk_over_commit = data['disk_over_commit']
            api.nova.server_live_migrate(request,
                                         data['instance_id'],
                                         data['host'],
                                         block_migration=block_migration,
                                         disk_over_commit=disk_over_commit)
            msg = _('The instance is preparing the live migration '
                    'to host "%s".') % data['host']
            messages.success(request, msg)
            return True
        except Exception:
            # Horizon convention: report and redirect rather than raise.
            msg = _('Failed to live migrate instance to '
                    'host "%s".') % data['host']
            redirect = reverse('horizon:admin:instances:index')
            exceptions.handle(request, msg, redirect=redirect)
|
, "\nPat", F_["BIN"]),
("Bpc", "\nBpc", F_["BIN"]),
("Bpt", "\nBpt", F_["BIN"]),
("HwBP", "\nHbr", F_["BIN"]),
("Save", "\nSva", F_["BIN"]), # sometimes 4, sometimes 5 ?
("AnalyseHint", "\nAht", F_["BIN"]),
("CMD_PLUGINS", "\nUs0", F_["DDSTRING"]), # multiline, needs escaping
("U_LABEL", "\nUs1", F_["DDSTRING"]),
("A_LABEL", "\nUs4", F_["DDSTRING"]),
("U_COMMENT", "\nUs6", F_["DDSTRING"]),
("BPCOND", "\nUs8", F_["DDSTRING"]),
("ApiArg", "\nUs9", F_["DDSTRING"]),
("USERLABEL", "\nUs1", F_["DDSTRING"]),
("Watch", "\nUsA", F_["DDSTRING"]),
("US2", "\nUs2", F_["BIN"]),
("US3", "\nUs3", F_["BIN"]),
("_CONST", "\nUs5", F_["BIN"]),
("A_COMMENT", "\nUs7", F_["BIN"]),
("FIND?", "\nUsC", F_["BIN"]),
("SOURCE?", "\nUsI", F_["BIN"]),
("MRU_Inspect","\nUs@", F_["MRUSTRING"]),
("MRU_Asm", "\nUsB", F_["MRUSTRING"]),
("MRU_Goto", "\nUsK", F_["MRUSTRING"]), #?
("MRU_Explanation", "\nUs|", F_["MRUSTRING"]), # logging bp explanation
("MRU_Expression", "\nUs{", F_["MRUSTRING"]), # logging bp expression
("MRU_Watch", "\nUsH", F_["MRUSTRING"]),
("MRU_Label", "\nUsq", F_["MRUSTRING"]), #?
("MRU_Comment", "\nUsv", F_["MRUSTRING"]), #?
("MRU_Condition", "\nUsx", F_["MRUSTRING"]), #?
("MRU_CMDLine", "\nCml", F_["STRING"]), #?
("LogExpression", "\nUs;", F_["DDSTRING"]), # logging bp expression
("ANALY_COMM", "\nUs:", F_["DDSTRING"]), #
("US?", "\nUs?", F_["DDSTRING"]), #?
("TracCond", "\nUsM", F_["DDSTRING"]), # tracing condition
("LogExplanation", "\nUs<", F_["DDSTRING"]), # logging bp explanation
("AssumedArgs", "\nUs=", F_["DDSTRING"]), # Assumed arguments
("CFA", "\nCfa", F_["DD2"]), #?
("CFM", "\nCfm", F_["DD2STRING"]), #?
("CFI", "\nCfi", F_["DD2"]), #?
("US>", "\nUs>", F_["BIN"]), #?
("ANC", "\nAnc", F_["BIN"]), #?
("JDT", "\nJdt", F_["BIN"]), #?
("PRC", "\nPrc", F_["BIN"]), #?
("SWI", "\nSwi", F_["BIN"]), #?
]
#OllyDbg 2
chunk_types20 = [
("Header", HDR_STRING, F_["STRING"]),
("Footer", FTR_STRING, F_["EMPTY"]),
("Filename", "\nFil", F_["STRING"]),
("Infos", "\nFcr", F_["CRC2"]), #?
("Name", "\nNam", F_["NAME"]), #?
("Data", "\nDat", F_["NAME"]), #?
("MemMap", "\nMba", F_["DDSTRING"]), #?
("LSA", "\nLsa", F_["NAME"]), # MRU entries
("JDT", "\nJdt", F_["BIN"]), #?
("PRC", "\nPrc", F_["BIN"]), #?
("SWI", "\nSwi", F_["BIN"]), #?
("CBR", "\nCbr", F_["BIN"]), #?
("LBR", "\nLbr", F_["BIN"]), #?
("ANA", "\nAna", F_["BIN"]), #?
("CAS", "\nCas", F_["BIN"]), #?
("PRD", "\nPrd", F_["BIN"]), #?
("Save", "\nSav", F_["BIN"]), #?
("RTC", "\nRtc", F_["BIN"]), #?
("RTP", "\nRtp", F_["BIN"]), #?
("Int3", "\nIn3", F_["BIN"]), #?
("MemBP", "\nBpm", F_["BIN"]), #?
("HWBP", "\nBph", F_["BIN"]), #?
]
Chunk_Types11 = dict(
[(e[1], e[0]) for e in chunk_types11] +
[(e[0], e[1]) for e in chunk_types11]
)
Chunk_Types20 = dict(
[(e[1], e[0]) for e in chunk_types20] +
[(e[0], e[1]) for e in chunk_types20]
)
Chunk_Types = {
11: Chunk_Types11,
20: Chunk_Types20
}
# no overlapping of formats yet so they're still merged
#
Chunk_Formats = dict(
[(e[2], e[0]) for e in chunk_types11] +
[(e[0], e[2]) for e in chunk_types11] +
[(e[2], e[0]) for e in chunk_types20] +
[(e[0], e[2]) for e in chunk_types20]
)
olly2cats = [
# used in DATA and NAME
#
('!', "UserLabel"),
('0', "UserComment"),
('1', "Import"),
('2', "APIArg"),
('3', "APICall"),
('4', "Member"),
('6', "Unk6"),
('*', "Struct"),
# only used in LSA ?
#
('`', 'mru_label'),
('a', 'mru_asm'),
('c', 'mru_comment'),
('d', 'watch'),
('e', 'mru_goto'),
|
('p', 'trace_condition1'),
('q', 'trace_condition2'),
('r', 'trace_condition3'),
('s', 'trace_condition4'),
('t', 'trace_command1'),
('u', 'trace_command2'),
('v', 'protocol_start'),
('w', 'proto | col_end'),
('Q', 'log_explanation'),
('R', 'log_condition'),
('S', 'log_expression'),
('U', 'mem_explanation'),
('V', 'mem_condition'),
('W', 'mem_expression'),
('Y', 'hbplog_explanation'),
('Z', 'hbplog_condition'),
('[', 'hbplog_expression'),
]
Olly2Cats = dict(
[(e[1], e[0]) for e in olly2cats] +
olly2cats)
return Udd_Formats, F_, Chunk_Types, Chunk_Formats, Olly2Cats
# Materialize the lookup tables once at import time.
UDD_FORMATS, F_, CHUNK_TYPES, CHUNK_FORMATS, OLLY2CATS = init_mappings()
def binstr(data):
    """Render a byte stream as space-separated uppercase hex pairs."""
    return " ".join("%02X" % ord(byte) for byte in data)
def elbinstr(data):
    """Hex-dump ``data``; long streams show the length plus head/tail excerpts."""
    if len(data) >= 10:
        return "(%i) %s ... %s" % (len(data), binstr(data[:10]), binstr(data[-10:]))
    return binstr(data)
class Error(Exception):
    """Module-specific error raised for malformed UDD chunks."""
def crc32mpeg(buffer_):
    """Compute the CRC-32/MPEG-2 checksum of a byte stream (bitwise)."""
    crc = 0xFFFFFFFF
    for ch in buffer_:
        byte = ord(ch)
        for bit in range(8):
            # XOR of the CRC's MSB with the current message bit decides
            # whether the polynomial is applied after the shift.
            feedback = bool(crc & 0x80000000)
            if byte & (0x80 >> bit):
                feedback = not feedback
            crc = (crc << 1) & 0xFFFFFFFF
            if feedback:
                crc ^= 0x04C11DB7
    return crc
def getcrc(filename):
    """returns the UDD crc of a file, by its filename"""
    # probably not always correct
    import pefile
    pe = pefile.PE(filename)
    sec = pe.sections[0]
    align = pe.OPTIONAL_HEADER.SectionAlignment
    # Raw bytes of the first section, NUL-padded to the virtual size and
    # then to the section alignment -- presumably mirroring the region
    # OllyDbg checksums; confirm against OllyDbg's behaviour.
    data = sec.get_data(sec.VirtualAddress)
    ActualSize = max(sec.Misc_VirtualSize, sec.SizeOfRawData)
    data += "\0" * (ActualSize - len(data))
    rem = ActualSize % align
    if rem:
        data += "\0" * (align - rem)
    return crc32mpeg(data)
def getTimestamp(filename):
    """read LastModified timestamp and return as a binary buffer"""
    # Windows-only: goes through kernel32 directly.
    import ctypes
    mtime = ctypes.c_ulonglong(0)
    # NOTE(review): numeric flags appear to be 0 = no access requested,
    # 3 = FILE_SHARE_READ|WRITE, 3 = OPEN_EXISTING, 0x80 =
    # FILE_ATTRIBUTE_NORMAL -- inferred from values; confirm.
    h = ctypes.windll.kernel32.CreateFileA(
        ctypes.c_char_p(filename),
        0, 3, 0, 3, 0x80, 0)
    # Last pointer argument receives lpLastWriteTime; creation and
    # access times are not requested.
    ctypes.windll.kernel32.GetFileTime(h, 0,0, ctypes.pointer(mtime))
    ctypes.windll.kernel32.CloseHandle(h)
    # Pack the 64-bit FILETIME value little-endian.
    return struct.pack("<Q", mtime.value)
def getFileInfo(filename):
    """Return (timestamp, crc, size) for ``filename``."""
    import os
    import stat
    return (
        getTimestamp(filename),
        getcrc(filename),
        os.stat(filename)[stat.ST_SIZE],
    )
def read_next_chunk(f):
    """Read one UDD chunk: 4-byte tag, little-endian u32 length, payload."""
    tag = f.read(4)
    (length,) = struct.unpack("<I", f.read(4))
    payload = f.read(length)
    return tag, payload
def write_chunk(f, ct, cd):
    """Write one UDD chunk: tag, little-endian u32 length, then the payload."""
    f.write(ct)
    f.write(struct.pack("<I", len(cd)) + cd)
def make_chunk(ct, cd):
    """Pair a chunk tag with its payload after validating both lengths.

    The tag must be exactly 4 bytes and the payload at most 255 bytes;
    anything else raises Error.
    """
    if len(ct) != 4:
        raise Error("invalid chunk name length")
    if len(cd) > 255:
        raise Error("invalid chunk data length")
    return [ct, cd]
def build_data(format_, info):
"""generate a chunk data depending on the format"""
if format_ == F_["DWORD"]:
return "%s" % (struct.pack("<I", info["dword"]))
if format_ in [F_["DDSTRING"], F_["MRUSTRING"]]:
return "%s%s\x00" % (struct.pack("<I", info["dword"]), info["text"])
else:
raise Error("format not supported for b |
#!/usr/bin/python
import sys, os
import select, socket
import usbcomm
import usb
# Defaults for the TCP server side of the mux.
_default_host = 'localhost'
_default_port = 23200

# poll() event mask for "readable" (normal or priority data).
_READ_ONLY = select.POLLIN | select.POLLPRI
class Stream(object):
    """Multiplex a Bayer Contour USB meter to multiple TCP clients.

    Data read from the USB device is broadcast to every connected client;
    bytes received from any client are written back to the device.
    (Python 2 code: print statements and comma-style except clauses.)
    """

    def __init__(self,
                 host=_default_host,
                 port=_default_port):
        self.host = host
        self.port = port
        self.usb = usbcomm.USBComm(idVendor=usbcomm.ids.Bayer, idProduct=usbcomm.ids.Bayer.Contour)
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.setblocking(0)  # non-blocking accept loop
        self.poller = select.poll()
        self.fd_to_socket = {}  # fd -> socket, for poll() event dispatch
        self.clients = []

    def close(self):
        # Tear down every client, the USB handle and the listening socket.
        print >>sys.stderr, '\nMUX > Closing...'
        for client in self.clients:
            client.close()
        self.usb.close()
        self.server.close()
        print >>sys.stderr, 'MUX > Done! =)'

    def add_client(self, client):
        # Register a freshly accepted connection with the poller.
        print >>sys.stderr, 'MUX > New connection from', client.getpeername()
        client.setblocking(0)
        self.fd_to_socket[client.fileno()] = client
        self.clients.append(client)
        self.poller.register(client, _READ_ONLY)

    def remove_client(self, client, why='?'):
        # Unregister and close a client; ``why`` is used only for logging.
        try:
            name = client.getpeername()
        except:
            # getpeername() fails on a half-closed socket; fall back to fd.
            name = 'client %d' % client.fileno()
        print >>sys.stderr, 'MUX > Closing %s: %s' % (name, why)
        self.poller.unregister(client)
        self.clients.remove(client)
        client.close()

    def read(self):
        # Poll the USB device once, buffering any data in self.sink.
        # Returns True when data was read.
        self.sink = None
        try:
            data = self.usb.read( )
            self.sink = data
        except usb.core.USBError, e:
            # errno 110 looks like a read timeout (expected when the
            # meter is idle) -- anything else is re-raised.
            if e.errno != 110:
                print e, dir(e), e.backend_error_code, e.errno
                raise
        return self.sink is not None

    def flush(self):
        # Broadcast buffered USB data to every client, then clear it.
        if self.sink is not None:
            for client in self.clients:
                client.send(self.sink)
            self.sink = None

    def run(self):
        """Main loop: accept clients and shuttle bytes between USB and TCP."""
        try:
            # self.tty.setTimeout(0) # Non-blocking
            # self.tty.flushInput()
            # self.tty.flushOutput()
            # self.poller.register(self.usb.epout.bEndpointAddress, _READ_ONLY)
            # self.fd_to_socket[self.usb.epout.bEndpointAddress] = self.usb
            # print >>sys.stderr, 'MUX > Serial port: %s @ %s' % (self.device, self.baudrate)
            print >>sys.stderr, 'MUX > usb port: %s' % (self.usb)
            self.server.bind((self.host, self.port))
            self.server.listen(5)
            self.poller.register(self.server, _READ_ONLY)
            self.fd_to_socket[self.server.fileno()] = self.server
            print >>sys.stderr, 'MUX > Server: %s:%d' % self.server.getsockname()
            print >>sys.stderr, 'MUX > Use ctrl+c to stop...\n'

            while True:
                # 500 ms poll so the USB side is serviced regularly even
                # when no socket events arrive.
                events = self.poller.poll(500)
                if self.read( ):
                    self.flush( )
                for fd, flag in events:
                    # Get socket from fd
                    s = self.fd_to_socket[fd]
                    print fd, flag, s
                    if flag & select.POLLHUP:
                        self.remove_client(s, 'HUP')
                    elif flag & select.POLLERR:
                        self.remove_client(s, 'Received error')
                    elif flag & (_READ_ONLY):
                        # A readable server socket is ready to accept a connection
                        if s is self.server:
                            connection, client_address = s.accept()
                            self.add_client(connection)
                        # Data from serial port
                        elif s is self.usb:
                            data = s.read( )
                            for client in self.clients:
                                client.send(data)
                        # Data from client
                        else:
                            data = s.recv(80)
                            # Client has data
                            print "send to usb"
                            if data: self.usb.write(data)
                            # Interpret empty result as closed connection
                            else: self.remove_client(s, 'Got no data')
        except usb.core.USBError, e:
            print >>sys.stderr, '\nMUX > USB error: "%s". Closing...' % e
        except socket.error, e:
            print >>sys.stderr, '\nMUX > Socket error: %s' % e.strerror
        except (KeyboardInterrupt, SystemExit):
            pass
        finally:
            self.close()
if __name__ == '__main__':
    # Run the mux until interrupted (ctrl+c) or a fatal USB/socket error.
    s = Stream( )
    s.run( )
|
# -*- coding: utf-8 -*-
# Generated by Django 1 | .10.2 on 2017-02-09 17:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Replaces Query.technology: the old field is removed and re-added as a
    # nullable ForeignKey to scoping.Technology.
    # NOTE(review): RemoveField followed by AddField discards any data that
    # was stored in the previous 'technology' column.

    dependencies = [
        ('scoping', '0047_auto_20170209_1626'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='query',
            name='technology',
        ),
        migrations.AddField(
            model_name='query',
            name='technology',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='scoping.Technology'),
        ),
    ]
|
+ 1))
return read_code
naics_codes = naics_codes.dropna()
naics_codes['NAICSCode'] = naics_codes['NAICSCode'].apply(range_to_array)
naics_codes = naics_codes.explode('NAICSCode')
# Add unclassified code which is used in some statistical variables.
naics_codes = naics_codes.append(
{
"NAICSCode": 99,
"Title": "Nonclassifiable"
}, ignore_index=True)
# Query for only two digit codes.
short_codes = naics_codes[naics_codes['NAICSCode'] < 100]
short_codes = short_codes.set_index("NAICSCode")
short_codes = short_codes['Title'].to_dict()
# Read in overview codes.
overview_codes = pd.read_csv(
"https://data.bls.gov/cew/doc/titles/industry/high_level_industries.csv"
)
overview_codes.columns = ["NAICSCode", "Title"]
overview_codes = overview_codes.set_index("NAICSCode")
overview_codes = overview_codes['Title'].to_dict()
# Combine the two sources of codes.
NAICS_MAP = {}
combined_codes = short_codes
combined_codes.update(overview_codes)
# Rename industries into Pascal case.
for code, orig_name in combined_codes.items():
NAICS_MAP[str(code)] = standard_name_remapper(orig_name)
# Other edge cases.
NAICS_MAP['00'] = 'Unclassified'
return NAICS_MAP
# TODO(iancostello): Consider adding function memoization.
# Built once at import time; maps NAICS code strings to Pascal-case titles.
NAICS_MAP = _create_naics_map()

### True Constants

# Template of Stat Var MCF.  Placeholders are filled per statistical variable;
# {CONSTRAINTS} expands to zero or more "property: value" lines.
TEMPLATE_STAT_VAR = """
Node: dcid:{human_readable_dcid}
typeOf: dcs:StatisticalVariable
populationType: dcs:{populationType}
statType: dcs:{statType}
measuredProperty: dcs:{measuredProperty}
{CONSTRAINTS}"""

# Main query for stat vars. Combines across populations and observations
# to create statistical variables.
# {CONSTRAINTS} / {POPULATIONS} are column lists substituted by the caller;
# statType is derived from whichever *_value column is populated.
QUERY_FOR_ALL_STAT_VARS = """
SELECT DISTINCT
SP.population_type as populationType,
{CONSTRAINTS}
{POPULATIONS}
O.measurement_qualifier AS measurementQualifier,
O.measurement_denominator as measurementDenominator,
O.measured_prop as measuredProp,
O.unit as unit,
O.scaling_factor as scalingFactor,
O.measurement_method as measurementMethod,
SP.num_constraints as numConstraints,
CASE
WHEN O.measured_value IS NOT NULL THEN "measuredValue"
WHEN O.sum_value IS NOT NULL THEN "sumValue"
WHEN O.mean_value IS NOT NULL THEN "meanValue"
WHEN O.min_value IS NOT NULL THEN "minValue"
WHEN O.max_value IS NOT NULL THEN "maxValue"
WHEN O.std_deviation_value IS NOT NULL THEN "stdDeviationValue"
WHEN O.growth_rate IS NOT NULL THEN "growthRate"
WHEN O.median_value IS NOT NULL THEN "medianValue"
ELSE "Unknown"
END AS statType
FROM
`google.com:datcom-store-dev.dc_v3_clustered.StatisticalPopulation`
AS SP JOIN
`google.com:datcom-store-dev.dc_v3_clustered.Observation`
AS O
ON (SP.id = O.observed_node_key)
WHERE
O.type <> "ComparativeObservation"
AND SP.is_public
AND SP.prov_id NOT IN ({comma_sep_prov_blacklist})
"""

# Dataset blacklist.
# Provenance ids for biological datasets excluded from stat-var generation.
_BIO_DATASETS = frozenset([
    'dc/p47rsv3',  # UniProt
    'dc/0cwj4g1',  # FDA_Pharmacologic_Class
    'dc/5vxrbh3',  # SIDER
    'dc/ff08ks',  # Gene_NCBI
    'dc/rhjyj31',  # MedicalSubjectHeadings
    'dc/jd648v2',  # GeneticVariantClinVar
    'dc/x8m41b1',  # ChEMBL
    'dc/vbyjkh3',  # SPOKESymptoms
    'dc/gpv9pl2',  # DiseaseOntology
    'dc/8nwtbj2',  # GTExSample0
    'dc/t5lx1e2',  # GTExSample2
    'dc/kz0q1c2',  # GTExSample1
    'dc/8xcvhx',  # GenomeAssemblies
    'dc/hgp9hn1',  # Species
    'dc/9llzsx1',  # GeneticVariantUCSC
    'dc/f1fxve1',  # Gene_RNATranscript_UCSC
    'dc/mjgrfc',  # Chromosome
    'dc/h2lkz1',  # ENCODEProjectSample
])
# Provenance ids for miscellaneous excluded datasets.
_MISC_DATASETS = frozenset([
    'dc/93qydx3',  # NYBG
    'dc/g3rq1f1',  # DeepSolar
    'dc/22t2hr3',  # EIA_860
    'dc/zkhvp12',  # OpportunityInsightsOutcomes
    'dc/89fk9x3',  # CollegeScorecard
])
# List of constraint prefixes to remove from certain properties.
# Maps property name -> prefix (or list of prefixes) to strip from its values.
# Bug fix: 'nativity' was listed twice; in a dict literal the second entry
# silently overwrites the first, so the duplicate is removed.
CONSTRAINT_PREFIXES_TO_STRIP = {
    'nativity': 'USC',
    'age': 'USC',
    'institutionalization': 'USC',
    'educationStatus': 'USC',
    'povertyStatus': 'USC',
    'workExperience': 'USC',
    'race': ['USC', 'CDC', 'DAD'],
    'employment': ['USC', 'BLS'],
    'employmentStatus': ['USC', 'BLS'],
    'schoolGradeLevel': 'NCES',
    'patientRace': 'DAD'
}
# List of drug renamings. Note that some drugs are intentionally excluded.
DRUG_REMAPPINGS = {
'drug/dea/1100': 'Amphetamine',
'drug/dea/1105B': 'DlMethamphetamine',
'drug/dea/1105D': 'DMethamphetamine',
'drug/dea/1205': 'Lisdexamfetamine',
'drug/dea/1248': 'Mephedrone',
'drug/dea/1615': 'Phendimetrazine',
'drug/dea/1724': 'Methylphenidate',
'drug/dea/2010': 'GammaHydroxybutyricAcid',
'drug/dea/2012': 'FDAApprovedGammaHydroxybutyricAcidPreparations',
'drug/dea/2100': 'BarbituricAcidDerivativeOrSalt',
'drug/dea/2125': 'Amobarbital',
'drug/dea/2165': 'Butalbital',
'drug/dea/2270': 'Pentobarbital', # Intentionally duplicated
'drug/dea/2285': 'Phenobarbital', #
'drug/dea/2315': 'Secobarbital',
'drug/dea/2765': 'Diazepam',
'drug/dea/2783': 'Zolpidem',
'drug/dea/2885': 'Lorazepam',
'drug/dea/4000': 'AnabolicSteroids',
'drug/dea/4187': 'Testosterone',
'drug/dea/7285': 'Ketamine',
'drug/dea/7315D': 'Lysergide',
'drug/dea/7365': 'MarketableOr | alDronabinol',
'drug/dea/7369': 'DronabinolGelCapsule',
'drug/dea/7370': 'Tetrahydrocannabinol',
'drug/dea/7377': 'Cannabicyclol',
'drug/dea/7379': 'Nabilone',
'drug/dea/7381': 'M | escaline',
'drug/dea/7400': '34Methylenedioxyamphetamine',
'drug/dea/7431': '5MethoxyNNDimethyltryptamine',
'drug/dea/7433': 'Bufotenine',
'drug/dea/7437': 'Psilocybin',
'drug/dea/7438': 'Psilocin',
'drug/dea/7455': 'PCE',
'drug/dea/7471': 'Phencyclidine',
'drug/dea/7540': 'Methylone',
'drug/dea/9010': 'Alphaprodine',
'drug/dea/9020': 'Anileridine',
'drug/dea/9041L': 'Cocaine',
'drug/dea/9046': 'Norcocaine',
'drug/dea/9050': 'Codeine',
'drug/dea/9056': 'EtorphineExceptHCl',
'drug/dea/9064': 'Buprenorphine',
'drug/dea/9120': 'Dihydrocodeine',
'drug/dea/9143': 'Oxycodone',
'drug/dea/9150': 'Hydromorphone',
'drug/dea/9168': 'Difenoxin',
'drug/dea/9170': 'Diphenoxylate',
'drug/dea/9180L': 'Ecgonine',
'drug/dea/9190': 'Ethylmorphine',
'drug/dea/9193': 'Hydrocodone',
'drug/dea/9200': 'Heroin',
'drug/dea/9220L': 'Levorphanol',
'drug/dea/9230': 'Pethidine',
'drug/dea/9250B': 'Methadone',
'drug/dea/9273D': 'BulkDextropropoxyphene',
'drug/dea/9300': 'Morphine',
'drug/dea/9313': 'Normorphine',
'drug/dea/9333': 'Thebaine',
'drug/dea/9411': 'Naloxone',
'drug/dea/9600': 'RawOpium',
'drug/dea/9630': 'TincuredOpium',
'drug/dea/9639': 'PowderedOpium',
'drug/dea/9652': 'Oxymorphone',
'drug/dea/9655': 'Paregoric',
'drug/dea/9665': '14Hydroxycodeinone',
'drug/dea/9668': 'Noroxymorphone',
'drug/dea/9670': 'PoppyStrawConcentrate',
'drug/dea/9737': 'Alfentanil',
'drug/dea/9739': 'Remifentanil',
'drug/dea/9740': 'Sufentanil',
'drug/dea/9743': 'Carfentanil',
'drug/dea/9780': 'Tapentadol',
'drug/dea/9801': 'Fentanyl',
}
# Exceptionally long and confusing cause of death names are manually renamed.
# Keys are ICD-10 range dcids; values are the replacement display names.
MANUAL_CAUSE_OF_DEATH_RENAMINGS = {
    'ICD10/D50-D89': 'DiseasesOfBloodAndBloodFormingOrgansAndImmuneDisorders',
    # NOTE(review): 'AbnormalNotClassfied' looks like a typo for
    # 'AbnormalNotClassified', but changing it would alter generated ids.
    'ICD10/R00-R99': 'AbnormalNotClassfied',
    'ICD10/U00-U99': 'SpecialCases',
    'ICD10/V01-Y89': 'ExternalCauses'
}
# List of properties to perform a numerical quantity remap on.
# Bug fix: 'numberOfRooms' was listed twice; the duplicate entry is removed
# (membership tests are unaffected, redundant work is avoided).
NUMERICAL_QUANTITY_PROPERTIES_TO_REMAP = [
    'income', 'age', 'householderAge', 'homeValue', 'dateBuilt', 'grossRent',
    'numberOfRooms', 'householdSize', 'numberOfVehicles', 'propertyTax'
]
# Regex rules to apply to numerical quantity remap.
REGEX_NUMERICAL_QUANTITY_RENAMINGS = [
# [A-Za-z]+[0-9]+Onwards -> [0-9]+OrMore[A-Za-z]+
(re.compile(r"^([A-Za-z]+)([0-9]+)Onwards$"),
lambda match: match.group(2) + "OrMore" + match.group(1)),
# [A-Za-z]+Upto[0-9]+ -> Upto[0-9]+[A-Za-z]+
(re |
#!/usr/local/bin/python3
"""
Copyright (c) 2015-2019 Ad Schellevis <ad@opnsense.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
package : configd
function: commandline tool to send commands to configd (response to stdout)
"""
import argparse
import socket
import os.path
import traceback
import sys
import syslog
import time
from select import select
from modules import syslog_error, syslog_notice
__author__ = 'Ad Schellevis'

# Path of the unix domain socket configd listens on.
configd_socket_name = '/var/run/configd.socket'
# Maximum number of seconds to wait for configd to create its socket.
configd_socket_wait = 20
def exec_config_cmd(exec_command):
    """ execute command using configd socket
    :param exec_command: command string
    :return: response string with the end-of-stream marker stripped,
             or None when the configd socket cannot be reached
    """
    # Create and open unix domain socket
    try:
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.connect(configd_socket_name)
    except socket.error:
        syslog_error('unable to connect to configd socket (@%s)'%configd_socket_name)
        print('unable to connect to configd socket (@%s)'%configd_socket_name, file=sys.stderr)
        return None
    try:
        # sendall() retries short writes; plain send() may transmit only
        # part of the command.
        sock.sendall(exec_command.encode())
        data = []
        while True:
            line = sock.recv(65536).decode()
            if line:
                data.append(line)
            else:
                break
        # Strip the trailing 3-byte end-of-stream marker from the response.
        return ''.join(data)[:-3]
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed here.
        syslog_error('error in configd communication \n%s'%traceback.format_exc())
        # Bug fix: the original message contained a stray, never-substituted
        # '%s' placeholder that was printed literally.
        print('error in configd communication, see syslog for details', file=sys.stderr)
    finally:
        sock.close()
# Command line interface: -m multi-command mode, -e event-handler mode,
# -d detached execution, -q quiet, -t event coalescing threshold.
parser = argparse.ArgumentParser()
parser.add_argument("-m", help="execute multiple arguments at once", action="store_true")
parser.add_argument("-e", help="use as event handler, execute command on receiving input", action="store_true")
parser.add_argument("-d", help="detach the execution of the command and return immediately", action="store_true")
parser.add_argument("-q", help="run quietly by muting standard output", action="store_true")
parser.add_argument(
    "-t",
    help="threshold between events, wait this interval before executing commands, combine input into single events",
    type=float
)
parser.add_argument("command", help="command(s) to execute", nargs="+")
args = parser.parse_args()

syslog.openlog("configctl")

# set a timeout to the socket
socket.setdefaulttimeout(120)

# check if configd socket exists
# (wait for a maximum of "configd_socket_wait" seconds for configd to start)
i=0
while not os.path.exists(configd_socket_name):
    if i >= configd_socket_wait:
        break
    time.sleep(1)
    i += 1

if not os.path.exists(configd_socket_name):
    print('configd socket missing (@%s)'%configd_socket_name, file=sys.stderr)
    sys.exit(-1)

# command(s) to execute
if args.m:
    # execute multiple commands at once ( -m "action1 param .." "action2 param .." )
    exec_commands=args.command
else:
    # execute single command sequence
    exec_commands=[' '.join(args.command)]

if args.e:
    # use as event handler, execute configd command on every line on stdin
    last_message_stamp = time.time()
    stashed_lines = list()
    while True:
        # Block up to args.t seconds (indefinitely when -t is absent) for stdin.
        rlist, _, _ = select([sys.stdin], [], [], args.t)
        if rlist:
            last_message_stamp = time.time()
            r_line = sys.stdin.readline()
            if len(r_line) == 0:
                #EOFError. pipe broken?
                sys.exit(-1)
            stashed_lines.append(r_line)
        # Fire only once the quiet period (-t) has elapsed, so bursts of
        # input lines collapse into a single command execution.
        if len(stashed_lines) >= 1 and (args.t is None or time.time() - last_message_stamp > args.t):
            # emit event trigger(s) to syslog
            for line in stashed_lines:
                syslog_notice("event @ %.2f msg: %s" % (last_message_stamp, line))
            # execute command(s)
            for exec_command in exec_commands:
                syslog_notice("event @ %.2f exec: %s" % (last_message_stamp, exec_command))
                exec_config_cmd(exec_command=exec_command)
            stashed_lines = list()
else:
    # normal execution mode
    for exec_command in exec_commands:
        if args.d:
            # '&' prefix marks the command for detached execution (per -d help).
            exec_command = '&' + exec_command
        result=exec_config_cmd(exec_command=exec_command)
        if result is None:
            sys.exit(-1)
        if not args.q:
            print('%s' % (result.strip()))
|
from c2cgeoportal_admin.views.layertree import itemtypes_tables

# Register the two Luxembourg-specific layer item types with the admin
# layer-tree view, mapping each type name to its database table.
itemtypes_tables.update({
    'lu_int_wms': 'lux_layer_internal_wms',
    'lu_ext_wms': 'lux_layer_external_wms',
})
|
###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WIT | HOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see scripts folder for possible source):
# - Command: "python starthinker_ui/manage.py example"
#
################################################################### | ########
import argparse
import textwrap
from starthinker.util.configuration import Configuration
from starthinker.task.dcm.run import dcm
def recipe_dcm_run(config, auth_read, account, report_id, report_name):
    """Trigger a CM report run

    Args:
        config (Configuration) - StarThinker configuration with project and credentials.
        auth_read (authentication) - Credentials used for reading data.
        account (integer) - CM network id.
        report_id (integer) - CM report id, empty if using name.
        report_name (string) - CM report name, empty if using id instead.
    """
    # 'report_run_only' presumably limits the dcm task to triggering the run
    # without fetching results -- confirm in starthinker.task.dcm.run.
    dcm(config, {
        'auth':auth_read,
        'report_run_only':True,
        'report':{
            'account':account,
            'report_id':report_id,
            'name':report_name
        }
    })
if __name__ == "__main__":
    # Standard StarThinker credential flags plus this recipe's own parameters.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent("""
      Trigger a CM report run

        1. Specify an account id.
        2. Specify either report name or report id to run.
    """))

    parser.add_argument("-project", help="Cloud ID of Google Cloud Project.", default=None)
    parser.add_argument("-key", help="API Key of Google Cloud Project.", default=None)
    parser.add_argument("-client", help="Path to CLIENT credentials json file.", default=None)
    parser.add_argument("-user", help="Path to USER credentials json file.", default=None)
    parser.add_argument("-service", help="Path to SERVICE credentials json file.", default=None)
    parser.add_argument("-verbose", help="Print all the steps as they happen.", action="store_true")

    parser.add_argument("-auth_read", help="Credentials used for reading data.", default='user')
    parser.add_argument("-account", help="CM network id.", default='')
    parser.add_argument("-report_id", help="CM report id, empty if using name.", default='')
    parser.add_argument("-report_name", help="CM report name, empty if using id instead.", default='')

    args = parser.parse_args()

    # Assemble credentials into a Configuration and invoke the recipe once.
    config = Configuration(
        project=args.project,
        user=args.user,
        service=args.service,
        client=args.client,
        key=args.key,
        verbose=args.verbose
    )

    recipe_dcm_run(config, args.auth_read, args.account, args.report_id, args.report_name)
|
# This file is pa | rt of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any lat | er version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.modules.cephalopod.controllers import RHCephalopod, RHCephalopodSync, RHSystemInfo
from indico.web.flask.wrappers import IndicoBlueprint
# Blueprint exposing the community-hub (cephalopod) admin pages and the
# public system-info endpoint.
cephalopod_blueprint = _bp = IndicoBlueprint('cephalopod', __name__, template_folder='templates',
                                             virtual_template_folder='cephalopod')

_bp.add_url_rule('/admin/community-hub/', 'index', RHCephalopod, methods=('GET', 'POST'))
_bp.add_url_rule('/admin/community-hub/sync', 'sync', RHCephalopodSync, methods=('POST',))
_bp.add_url_rule('/system-info', 'system-info', RHSystemInfo)
|
"""
@summary: Module contain matrix base classes
@author: CJ Grady
@version: 1.0
@status: alpha
@license: gpl2
@copyright: Copyright (C) 2014, University of Kansas Center for Research
Lifemapper Project, lifemapper [at] ku [dot] edu,
Biodiversity Institute,
1345 Jayhawk Boulevard, Lawrence, Kansas, 66045, USA
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have receive | d a copy of the GNU General Public License
along with this program; if not, | write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
"""
class Grid(object):
    """
    @summary: Base class for Lifemapper grids.  This class can be used with
                 uncompressed grids.
    """
    # ...........................
    def __init__(self, griddedData=None):
        """
        @summary: Constructor
        @param griddedData: (optional) Two-dimensional list (rows of columns)
                               of cell values used to initialize the grid
        """
        if griddedData is not None:
            self._initFromGrid(griddedData)
        else:
            self.ySize = None
            self.xSize = None
            self.data = []
            self.classes = set()
    # ...........................
    def _initFromGrid(self, griddedData):
        """
        @summary: Set data, dimensions, and classes from a 2-D list
        """
        self.ySize = len(griddedData)
        self.xSize = len(griddedData[0])
        self.data = griddedData
        self.findClasses()
    # ...........................
    def findClasses(self):
        """
        @summary: Finds all of the unique classes in the data
        """
        self.classes = set()
        for row in self.data:
            for col in row:
                self.classes.add(col)
    # ...........................
    def query(self, x, y):
        """
        @summary: Return the cell value at column x, row y
        """
        return self.data[y][x]
    # ...........................
    def read(self, fn):
        """
        @summary: Read a grid from a space-delimited text file of integers
        @note: Bug fix - xSize, ySize, and classes are now refreshed after a
                  read; previously they were left stale from the old contents
        """
        data = []
        with open(fn) as f:
            for line in f:
                data.append([int(i) for i in line.split(' ')])
        self.data = data
        if data:
            self._initFromGrid(data)
    # ...........................
    def write(self, fn):
        """
        @summary: Write the grid to a space-delimited text file, one row
                     per line
        """
        with open(fn, 'w') as f:
            for row in self.data:
                f.write('%s\n' % ' '.join([str(i) for i in row]))
# .............................................................................
class _CompressedGrid(Grid):
    """
    @summary: Abstract base class for compressed grids; sub classes must
                 implement __init__, query, read, and write
    """
    # ...........................
    def __init__(self):
        # Bug fix: 'raise Exception, "..."' is Python-2-only syntax; the
        # call form below behaves identically and is also valid Python 3.
        raise Exception("init must be implemented in sub class")
    # ...........................
    def query(self, x, y):
        raise Exception("Query must be implemented in sub class")
    # ...........................
    def read(self, fn):
        raise Exception("Read must be implemented in sub class")
    # ...........................
    def write(self, fn):
        raise Exception("Write must be implemented in sub class")
|
from pymuse.pipelinestages.pipeline_stage import PipelineStage
from pymuse.utils.stoppablequeue import StoppableQueue
from pymuse.signal import Signal
from pymuse.constants import PIPELINE_QUEUE_SIZE
class PipelineFork():
    """
    Marks a fork point in a Pipeline. Ex.: PipelineFork([stage1, stage2], [stage3])
    splits the pipeline into two paths with two outputs (stage2 and stage3).
    Instances are consumed while a Pipeline is being constructed.
    """

    def __init__(self, *paths):
        # Keep a fresh, mutable list of the branch definitions.
        self.forked_branches: list = [path for path in paths]
class Pipeline():
    """
    This class create a multithreaded pipeline. It automatically links together every contiguous stages.
    E.g.: Pipeline(Signal(), PipelineStage(), PipelineFork([PipelineStage(), PipelineStage()], [PipelineStage()] ))
    """

    def __init__(self, input_signal: Signal, *stages):
        self._output_queues = []
        self._stages: list = list(stages)
        self._link_stages(self._stages)
        # The first stage consumes directly from the input signal's queue.
        self._stages[0]._queue_in = input_signal.signal_queue

    def get_output_queue(self, queue_index=0) -> StoppableQueue:
        """Return a ref to the queue given by queue_index"""
        return self._output_queues[queue_index]

    def read_output_queue(self, queue_index=0):
        """Wait to read a data in a queue given by queue_index"""
        return self._output_queues[queue_index].get()

    def start(self):
        """Start all pipelines stages."""
        self._start(self._stages)

    def shutdown(self):
        """ shutdowns every child thread (PipelineStage)"""
        self._shutdown(self._stages)

    def join(self):
        """Ensure every thread (PipelineStage) of the pipeline are done"""
        # NOTE(review): unlike _start/_shutdown this does not recurse into
        # PipelineFork branches; a pipeline containing forks may not be
        # fully joined here -- confirm intended behavior.
        for stage in self._stages:
            stage.join()

    def _link_pipeline_fork(self, stages: list, index: int):
        """Connect the stage before the fork to the head of every branch,
        then link each branch internally."""
        for fork in stages[index].forked_branches:
            stages[index - 1].add_queue_out(fork[0].queue_in)
            self._link_stages(fork)

    def _link_stages(self, stages: list):
        """Chain consecutive stages via queues; give the final real stage an
        externally readable output queue."""
        for i in range(1, len(stages)):
            if type(stages[i]) == PipelineFork:
                self._link_pipeline_fork(stages, i)
            else:
                stages[i - 1].add_queue_out(stages[i].queue_in)
        # A trailing PipelineFork gets its output queues from its branches'
        # recursive _link_stages calls instead.
        if issubclass(type(stages[-1]), PipelineStage):
            output_queue = StoppableQueue(PIPELINE_QUEUE_SIZE)
            stages[-1].add_queue_out(output_queue)
            self._output_queues.append(output_queue)

    def _start(self, stages: list):
        """Start every stage, recursing depth-first through forks."""
        for stage in stages:
            if type(stage) == PipelineFork:
                for forked_branch in stage.forked_branches:
                    self._start(forked_branch)
            else:
                stage.start()

    def _shutdown(self, stages: list):
        """Shut down every stage, recursing depth-first through forks."""
        for stage in stages:
            if type(stage) == PipelineFork:
                for forked_branch in stage.forked_branches:
                    self._shutdown(forked_branch)
            else:
                stage.shutdown()
|
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import abc
import os
import re
from trove.common import cfg
from trove.common import pagination
from trove.common import utils
from trove.guestagent.common import operating_sys | tem
# Shared configuration handle (trove.common.cfg) used by the helpers below.
CONF = cfg.CONF
def update_dict(updates, target):
    """Recursively update a target dictionary with given updates.

    Updates are provided as a dictionary of key-value pairs
    where a value can also be a nested dictionary in which case
    its key is treated as a sub-section of the outer key.
    If a list value is encountered the update is applied
    iteratively on all its items.

    :returns: Will always return a dictionary of results (may be empty).
    """
    if target is None:
        target = {}

    # A list target is updated element-wise, in place.
    if isinstance(target, list):
        for position, element in enumerate(target):
            target[position] = update_dict(updates, element)
        return target

    if updates is None:
        return target

    for key, value in updates.items():
        if isinstance(value, abc.Mapping):
            # Nested mapping: merge recursively into the matching sub-dict.
            target[key] = update_dict(value, target.get(key, {}))
        else:
            target[key] = value
    return target
def expand_dict(target, namespace_sep='.'):
    """Expand a flat dict to a nested one.

    This is an inverse of 'flatten_dict'.

    :seealso: flatten_dict
    """
    expanded = {}
    for flat_key, value in target.items():
        # Walk (creating as needed) the chain of parent dicts, then set
        # the leaf entry.
        *parents, leaf = flat_key.split(namespace_sep)
        cursor = expanded
        for parent in parents:
            cursor = cursor.setdefault(parent, {})
        cursor[leaf] = value
    return expanded
def flatten_dict(target, namespace_sep='.'):
    """Flatten a nested dict.

    Return a one-level dict with all sub-level keys joined by a namespace
    separator.

    The following nested dict:
        {'ns1': {'ns2a': {'ns3a': True, 'ns3b': False}, 'ns2b': 10}}

    would be flattened to:
        {'ns1.ns2a.ns3a': True, 'ns1.ns2a.ns3b': False, 'ns1.ns2b': 10}
    """
    def _descend(node, path):
        # Non-mapping values terminate the recursion and yield one entry.
        if not isinstance(node, abc.Mapping):
            return {namespace_sep.join(path): node}
        flattened = {}
        for key, value in node.items():
            flattened.update(_descend(value, path + [key]))
        return flattened

    return _descend(target, [])
def build_file_path(base_dir, base_name, *extensions):
    """Build a path to a file in a given directory.

    The file may have an extension(s).

    :returns: Path such as: 'base_dir/base_name.ext1.ext2.ext3'
    """
    # Join the base name and all extensions with the platform's
    # extension separator.
    file_name = os.extsep.join((base_name,) + extensions)
    return os.path.expanduser(os.path.join(base_dir, file_name))
def to_bytes(value):
    """Convert numbers with a byte suffix to bytes.

    Strings of the form '<digits><K|M|G>' (e.g. '2M') are converted to the
    equivalent number of bytes as an int; any other value (plain numbers,
    non-matching strings) is returned unchanged.
    """
    if isinstance(value, str):
        # Bug fix: the original class '[K,M,G]{1}' also matched a literal
        # comma, so inputs like '5,' crashed with a KeyError on lookup.
        pattern = re.compile(r'^(\d+)([KMG])$')
        match = pattern.match(value)
        if match:
            value = match.group(1)
            suffix = match.group(2)
            factor = {
                'K': 1024,
                'M': 1024 ** 2,
                'G': 1024 ** 3,
            }[suffix]
            return int(round(factor * float(value)))

    return value
def paginate_list(li, limit=None, marker=None, include_marker=False):
    """Paginate a list of objects based on the name attribute.

    :param li: List of objects; each is paginated by its 'name' attribute.
    :param limit: Maximum number of items per page (None for no limit).
    :param marker: Name of the last item of the previous page.
    :param include_marker: Whether the marker item itself is included.
    :returns: Page sublist and a marker (name of the last item).
    """
    return pagination.paginate_object_list(
        li, 'name', limit=limit, marker=marker, include_marker=include_marker)
def serialize_list(li, limit=None, marker=None, include_marker=False):
    """
    Paginate (by name) and serialize a given object list.

    Each item on the page must provide a 'serialize()' method.

    :returns: A serialized and paginated version of a given list.
    """
    page, next_name = paginate_list(li, limit=limit, marker=marker,
                                    include_marker=include_marker)
    return [item.serialize() for item in page], next_name
def get_filesystem_volume_stats(fs_path):
    """Return capacity/usage statistics for the filesystem holding fs_path.

    :param fs_path: Path on the filesystem to inspect.
    :raises RuntimeError: if statvfs fails (e.g. the path does not exist).
    :returns: Dict with block-level counts plus 'total'/'used' (GB) and 'free'.
    """
    try:
        stats = os.statvfs(fs_path)
    except OSError:
        raise RuntimeError("Filesystem not found (%s)" % fs_path)

    total = stats.f_blocks * stats.f_bsize
    free = stats.f_bfree * stats.f_bsize
    # return the size in GB
    used_gb = utils.to_gb(total - free)
    total_gb = utils.to_gb(total)

    # NOTE(review): 'total' and 'used' are converted to GB but 'free' is
    # left in bytes -- looks inconsistent; confirm against consumers
    # before changing the units.
    output = {
        'block_size': stats.f_bsize,
        'total_blocks': stats.f_blocks,
        'free_blocks': stats.f_bfree,
        'total': total_gb,
        'free': free,
        'used': used_gb
    }
    return output
def get_conf_dir():
    """Get the config directory for the database related settings.

    For now, the files inside the config dir are mainly for instance rebuild.

    :returns: Path '<datastore mount_point>/conf.d'; the directory is
        created (as root) if it does not already exist.
    """
    mount_point = CONF.get(CONF.datastore_manager).mount_point
    conf_dir = os.path.join(mount_point, 'conf.d')
    if not operating_system.exists(conf_dir, is_directory=True, as_root=True):
        operating_system.ensure_directory(conf_dir, as_root=True)
    return conf_dir
|
# Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
import pytest
from numpy import isclose
from dolfin import (assemble, dx, FiniteElement, FunctionSpace, inner, MixedElement, split, TestFunction,
TrialFunction, UnitSquareMesh, VectorElement)
from dolfin_utils.test import fixture as module_fixture
from rbnics.backends.dolfin import evaluate as _evaluate, ParametrizedTensorFactory
from rbnics.backends.dolfin.export import tensor_save
from rbnics.backends.dolfin.import_ import tensor_load
from rbnics.eim.utils.decorators import add_to_map_from_parametrized_expression_to_problem
# Meshes
@module_fixture
def mesh():
    """Shared 10x10 unit square mesh (one instance per test module)."""
    return UnitSquareMesh(10, 10)
# Forms: elliptic case
def generate_elliptic_linear_form_space(mesh):
    """Return a 1-tuple with the scalar P2 space for a linear form."""
    return (FunctionSpace(mesh, "Lagrange", 2), )

def generate_elliptic_linear_form(V):
    """Linear form: integral of the test function over the domain."""
    v = TestFunction(V)
    return v * dx

def generate_elliptic_bilinear_form_space(mesh):
    """Return a 2-tuple (trial space, test space) of identical P2 spaces."""
    return generate_elliptic_linear_form_space(mesh) + generate_elliptic_linear_form_space(mesh)

def generate_elliptic_bilinear_form(V1, V2):
    """Bilinear form u*v*dx; the two spaces must share the same element."""
    assert V1.ufl_element() == V2.ufl_element()
    u = TrialFunction(V1)
    v = TestFunction(V2)
    return u * v * dx
# Forms: mixed case
def generate_mixed_linear_form_space(mesh):
    """Return a 1-tuple with a mixed (vector P2, scalar P1) space."""
    element_0 = VectorElement("Lagrange", mesh.ufl_cell(), 2)
    element_1 = FiniteElement("Lagrange", mesh.ufl_cell(), 1)
    element = MixedElement(element_0, element_1)
    return (FunctionSpace(mesh, element), )

def generate_mixed_linear_form(V):
    """Linear form touching every subcomponent of the mixed space."""
    v = TestFunction(V)
    (v_0, v_1) = split(v)
    return v_0[0] * dx + v_0[1] * dx + v_1 * dx

def generate_mixed_bilinear_form_space(mesh):
    """Return a 2-tuple of identical mixed spaces."""
    return generate_mixed_linear_form_space(mesh) + generate_mixed_linear_form_space(mesh)

def generate_mixed_bilinear_form(V1, V2):
    """Bilinear form with both diagonal and cross couplings between subspaces."""
    assert V1.ufl_element() == V2.ufl_element()
    u = TrialFunction(V1)
    v = TestFunction(V2)
    (u_0, u_1) = split(u)
    (v_0, v_1) = split(v)
    return inner(u_0, v_0) * dx + u_1 * v_1 * dx + u_0[0] * v_1 * dx + u_1 * v_0[1] * dx
# Forms: collapsed case
def generate_collapsed_linear_form_space(mesh):
    """Return a 1-tuple with the vector subspace collapsed out of a mixed space."""
    element_0 = VectorElement("Lagrange", mesh.ufl_cell(), 2)
    element_1 = FiniteElement("Lagrange", mesh.ufl_cell(), 1)
    element = MixedElement(element_0, element_1)
    U = FunctionSpace(mesh, element)
    V = U.sub(0).collapse()
    return (V, )

def generate_collapsed_linear_form(V):
    """Linear form on the collapsed vector space, touching both components."""
    v = TestFunction(V)
    return v[0] * dx + v[1] * dx

def generate_collapsed_bilinear_form_space(mesh):
    """Return (collapsed test space, full mixed trial space)."""
    element_0 = VectorElement("Lagrange", mesh.ufl_cell(), 2)
    element_1 = FiniteElement("Lagrange", mesh.ufl_cell(), 1)
    element = MixedElement(element_0, element_1)
    U = FunctionSpace(mesh, element)
    V = U.sub(0).collapse()
    return (V, U)

def generate_collapsed_bilinear_form(V, U):
    """Bilinear form coupling the mixed trial space with the collapsed test space."""
    u = TrialFunction(U)
    (u_0, u_1) = split(u)
    v = TestFunction(V)
    return inner(u_0, v) * dx + u_1 * v[0] * dx
# Forms decorator
# Parametrizes a test over every (space generator, form generator) pair
# defined above, covering elliptic, mixed, and collapsed cases.
generate_form_spaces_and_forms = pytest.mark.parametrize("generate_form_space, generate_form", [
    (generate_elliptic_linear_form_space, generate_elliptic_linear_form),
    (generate_elliptic_bilinear_form_space, generate_elliptic_bilinear_form),
    (generate_mixed_linear_form_space, generate_mixed_linear_form),
    (generate_mixed_bilinear_form_space, generate_mixed_bilinear_form),
    (generate_collapsed_linear_form_space, generate_collapsed_linear_form),
    (generate_collapsed_bilinear_form_space, generate_collapsed_bilinear_form)
])
# Mock problem to avoid triggering an assert
class Problem(object):
    # Placeholder parameter value; no real parametrized problem is needed here.
    mu = None

def evaluate(tensor):
    """Register a mock problem for the tensor, then delegate to the real evaluate."""
    add_to_map_from_parametrized_expression_to_problem(tensor, Problem())
    return _evaluate(tensor)
# Prepare tensor storage for load
class Generator(object):
    def __init__(self, form):
        # The originating form is retained on the tensor for bookkeeping.
        self._form = form

def zero_for_load(form):
    """Assemble the form to get correctly-sized storage, then zero it so
    tensor_load can fill in the values."""
    tensor = assemble(form, keep_diagonal=True)
    tensor.zero()
    tensor.generator = Generator(form)
    return tensor
# Tests
@generate_form_spaces_and_forms
def test_tensor_save(mesh, generate_form_space, generate_form, save_tempdir):
    """Evaluate a parametrized tensor and save it to disk."""
    space = generate_form_space(mesh)
    form = generate_form(*space)
    tensor = ParametrizedTensorFactory(form)
    evaluated_tensor = evaluate(tensor)
    tensor_save(evaluated_tensor, save_tempdir, "evaluated_tensor")

@generate_form_spaces_and_forms
def test_tensor_load(mesh, generate_form_space, generate_form, load_tempdir):
    """Load a previously saved tensor and compare against a fresh evaluation."""
    space = generate_form_space(mesh)
    form = generate_form(*space)
    tensor = ParametrizedTensorFactory(form)
    expected_evaluated_tensor = evaluate(tensor)
    loaded_evaluated_tensor = zero_for_load(form)
    tensor_load(loaded_evaluated_tensor, load_tempdir, "evaluated_tensor")
    assert len(space) in (1, 2)
    if len(space) == 1:
        # One space -> linear form -> vector: compare local entries.
        assert isclose(loaded_evaluated_tensor.get_local(), expected_evaluated_tensor.get_local()).all()
    elif len(space) == 2:
        # Two spaces -> bilinear form -> matrix: compare dense arrays.
        assert isclose(loaded_evaluated_tensor.array(), expected_evaluated_tensor.array()).all()
@generate_form_spaces_and_forms
def test_tensor_io(mesh, generate_form_space, generate_form, tempdir):
    """Round-trip: save then load within the same temporary directory."""
    test_tensor_save(mesh, generate_form_space, generate_form, tempdir)
    test_tensor_load(mesh, generate_form_space, generate_form, tempdir)
|
# Generated by Dja | ngo 2.0.13 on 2019-08-10 20:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Profile.get_messages_by_email, an opt-in email flag (default on)."""
    dependencies = [
        ('profile_manager', '0004_auto_20190729_2101'),
    ]
    operations = [
        migrations.AddField(
            model_name='profile',
            name='get_messages_by_email',
            # NOTE(review): "instance email" looks like a typo for "instant
            # email"; confirm before editing an already-applied migration.
            field=models.BooleanField(default=True, help_text='If your teacher sends you a message, get an instance email.'),
        ),
    ]
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: widen RegisteredIndex.query_hash from BigInteger
    to CharField(max_length=128); backwards() restores the integer column."""
    def forwards(self, orm):
        # Changing field 'RegisteredIndex.query_hash'
        db.alter_column('djangodocument_registeredindex', 'query_hash', self.gf('django.db.models.fields.CharField')(max_length=128))
    def backwards(self, orm):
        # Changing field 'RegisteredIndex.query_hash'
        db.alter_column('djangodocument_registeredindex', 'query_hash', self.gf('django.db.models.fields.BigIntegerField')())
    # Frozen ORM snapshot used by South at migration time; auto-generated,
    # do not edit by hand.
    models = {
        'djangodocument.booleanindex': {
            'Meta': {'object_name': 'BooleanIndex'},
            'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'value': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
        },
        'djangodocument.dateindex': {
            'Meta': {'object_name': 'DateIndex'},
            'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'value': ('django.db.models.fields.DateField', [], {'null': 'True'})
        },
        'djangodocument.datetimeindex': {
            'Meta': {'object_name': 'DateTimeIndex'},
            'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'value': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
        },
        'djangodocument.decimalindex': {
            'Meta': {'object_name': 'DecimalIndex'},
            'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'value': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '10'})
        },
        'djangodocument.documentstore': {
            'Meta': {'object_name': 'DocumentStore'},
            'collection': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'data': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'djangodocument.floatindex': {
            'Meta': {'object_name': 'FloatIndex'},
            'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'value': ('django.db.models.fields.FloatField', [], {'null': 'True'})
        },
        'djangodocument.integerindex': {
            'Meta': {'object_name': 'IntegerIndex'},
            'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
        },
        'djangodocument.longindex': {
            'Meta': {'object_name': 'LongIndex'},
            'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'value': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'})
        },
        'djangodocument.registeredindex': {
            'Meta': {'unique_together': "[('name', 'collection')]", 'object_name': 'RegisteredIndex'},
            'collection': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'query_hash': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'djangodocument.registeredindexdocument': {
            'Meta': {'object_name': 'RegisteredIndexDocument'},
            'data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'doc_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'index': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documents'", 'to': "orm['djangodocument.RegisteredIndex']"}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'djangodocument.stringindex': {
            'Meta': {'object_name': 'StringIndex'},
            'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True'})
        },
        'djangodocument.textindex': {
            'Meta': {'object_name': 'TextIndex'},
            'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'value': ('django.db.models.fields.TextField', [], {'null': 'True'})
        },
        'djangodocument.timeindex': {
            'Meta': {'object_name': 'TimeIndex'},
            'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'value': ('django.db.models.fields.TimeField', [], {'null': 'True'})
        }
    }
    complete_apps = ['djangodocument']
from .local | import *
# Override the database with a throwaway local sqlite3 file.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'temp.db',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}
# Disable Opbeat error reporting for this environment.
# NOTE(review): OPBEAT presumably comes from the wildcard import of .local
# above -- confirm it is defined there.
OPBEAT['APP_ID'] = None
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-05-23 11:29
from __future__ import unicode_literals
from django.db import migrations, models |
class Migration(migrations.Migration):
    """Alter Festivo.anotacion: empty default, max_length 50."""
    dependencies = [
        ('gestioneide', '0019_auto_20160517_2232'),
    ]
    operations = [
        migrations.AlterField(
            model_name='festivo',
            name='anotacion',
            # NOTE(review): b'' (bytes) as a CharField default is a Python 2
            # makemigrations artifact; leave as-is in an applied migration.
            field=models.CharField(default=b'', max_length=50),
        ),
    ]
|
# -*- cod | ing: utf8 -*-
#
# Created by 'myth' on 2/19/16
import matplotlib as mpl
import settings
# Select the Tk backend. mpl.use() only takes effect if it runs before
# matplotlib.pyplot is imported; NOTE(review): if the settings module
# imported above pulls in pyplot, this call comes too late -- confirm.
mpl.use('TkAgg')
|
import json
from flask import g, jsonify, request, current_app, url_for
from ..models import User
from .. import db
from . import main
from .authentication import auth_user
from .errors import bad_request, unauthorized, forbidden, not_found
"""read all"""
@main.route('/<token>/users/', methods=['GET'])
def get_users(token):
    """Return every user as a JSON array; requires a valid auth token."""
    if not auth_user(token):
        return unauthorized("You have to be logged in to perform this action")
    # Serialize each user through its own to_json() representation.
    payload = [json.loads(u.to_json()) for u in User.query.all()]
    return json.dumps(payload)
"""read one"""
@main.route('/<token>/users/<int:id>/', methods=['GET'])
def get_user(token, id):
    """Return a single user by primary key as JSON.

    Responds 401 without a valid token and 404 when the id is unknown.
    """
    if not auth_user(token):
        return unauthorized("You have to be logged in to perform this action")
    # get and return one with id:
    user = User.query.get(id)
    if user is None:
        # Bug fix: the 404 response used to be built but not returned, so
        # execution fell through to user.to_json() and raised AttributeError.
        return not_found("Resource not found")
    return user.to_json()
"""create"""
@main.route('/users/', methods=['POST']) #sign-up
def new_user():
    """Register a new account and return it together with a 24h auth token."""
    username = request.form.get('username')
    password = request.form.get('password')
    email = request.form.get('email')
    if email in ("", None) or password in ("", None):
        return bad_request("Invalid request format!")
    # Persist the new account (password setter hashes it on the model).
    user = User(username=username, email=email)
    user.password = password
    db.session.add(user)
    db.session.commit()
    # Token valid for one day.
    auth_token = user.generate_auth_token(3600*24)
    response = {
        "user": user.to_json(),
        "auth_token": auth_token,
        "status": "success",
    }
    return jsonify(response)
"""update"""
@main.route('/<token>/users/<int:id>/', methods=['PUT'])
def update_user(token, id):
    """Replace a user's username, email and password.

    Responds 401 without a valid token, 404 for an unknown id, and 400 when
    email or password is missing.
    """
    if not auth_user(token):
        return unauthorized("You have to be logged in to perform this action")
    user = User.query.get(id)
    if not user:
        # Bug fix: the 404 was previously built but not returned, so the
        # handler went on to assign attributes on None and crashed.
        return not_found("Resource not found!")
    username = request.form.get('username')
    password = request.form.get('password')
    email = request.form.get('email')
    if email in ("", None) or password in ("", None):
        return bad_request("Invalid request format!")
    # commit the updated fields
    user.username = username
    user.email = email
    user.password = password
    db.session.add(user)
    db.session.commit()
    # create and send response
    response = {}
    response["user"] = user.to_json()
    response["status"] = "success"
    return jsonify(response)
"""delete"""
@main.route('/<token>/users/<int:id>/', methods=["DELETE"])
def delete_user(token, id):
    """Delete a user by id.

    Responds 401 without a valid token and 404 when the id is unknown.
    """
    if not auth_user(token):
        return unauthorized("You have to be logged in to perform this action")
    user = User.query.get(id)
    if not user:
        # Bug fix: the 404 must be returned; previously execution continued
        # and db.session.delete(None) below would raise.
        return not_found("Resource not found!")
    # delete and commit
    db.session.delete(user)
    db.session.commit()
    # TODO: delete associated watchs and checks as well (see original note).
    # create and send response
    response = {}
    response["status"] = "success"
    return json.dumps(response)
"""login"""
@main.route('/users/login/', methods=['POST'])
def login():
    """Authenticate by email/password; return the user and a 24h auth token."""
    # get credentials
    email = request.form.get('email')
    password = request.form.get('password')
    if email in ("", None) or password in ("", None):
        return bad_request("Invalid request format!")
    # check for a user with matching credentials
    user = User.query.filter_by(email=email).first()
    # Idiom fix: identity test for None and boolean negation, instead of
    # "== None" / "== False".
    if user is None or not user.verify_password(password):
        return bad_request("Invalid email or password!")
    # set the global current_user
    g.current_user = user
    # get auth_token for the user
    auth_token = user.generate_auth_token(3600*24) #1day
    # create response
    response = {}
    response["user"] = user.to_json()
    response["auth_token"] = auth_token
    return jsonify(response)
|
import serial
import numpy as np
import json
from datetime import datetime
class ElectronicNose:
    """Serial-port reader for an 8-sensor electronic-nose board.

    Samples are accumulated in self.memory, one row per sniff():
    [timestamp_seconds, 8 sensor readings, temperature, humidity].
    """
    def __init__(self, devAdd='/dev/ttyUSB0', baudrate=115200/3, \
                 tmax = 1000, outputFile = '', numSensors = 8):
        # NOTE(review): under Python 3, 115200/3 is the float 38400.0;
        # use 115200//3 if an integer baud rate is required -- confirm.
        ## Creating the serial object
        self.Sensor = serial.Serial(devAdd, baudrate)
        # Row layout: 1 timestamp + numSensors readings + temperature + humidity.
        self.memory = np.empty((0, numSensors + 2 + 1))
        ## File to store samples
        if outputFile != '':
            self.outfile = open(outputFile, 'a')
        else:
            self.outfile = []
        ## Writing the parameters
        Vparam = '54'
        # Dead code: the parameter write is disabled by "if False".
        if False: self.Sensor.write('P000' + 8*Vparam )
        return
    def save(self, filename):
        """Persist the accumulated samples as a NumPy .npy file."""
        np.save(filename, self.memory)
        return
    def closeConnection(self):
        """Close the underlying serial port."""
        self.Sensor.close()
        return
    def forget(self):
        """Drop all accumulated samples, keeping the row width."""
        self.memory = np.empty( (0, self.memory.shape[1] ) )
        return
    def refresh(self, nmax):
        # NOTE(review): self.t, self.tMax and self.S are never initialized
        # anywhere in this class, so calling refresh() raises AttributeError.
        # This method appears vestigial -- confirm before use.
        self.t[:self.tMax - nmax] = self.t[nmax:]
        self.S[:self.tMax - nmax,:] = self.S[nmax:,:]
        return
    def sniff(self, nsamples=5):
        """Average *nsamples* valid serial frames into one timestamped row."""
        # Flushing to ensure time precision
        self.Sensor.flush()
        # Possibly getting partial line -- this will be discarded
        self.Sensor.readline()
        avg = np.zeros( (1,11) )
        nsamples_ = 0
        for j in range(nsamples):
            r = self.Sensor.readline()
            # Only 44-byte frames are considered complete/valid.
            if len(r) == 44:
                nsamples_ += 1
                # assumes frame format "...\rV<8 chars><31 hex chars>\n" -- TODO confirm
                avg[0,1:] += self.convert( r.split('\rV')[1].split('\n')[0][8:39] )
        if nsamples_ > 0:
            avg = avg/float(nsamples_)
        # Timestamp: seconds since midnight, with microsecond resolution.
        now = datetime.now()
        avg[0,0] = now.hour*3600 + now.minute*60 + now.second + now.microsecond/1.e6
        self.memory = np.concatenate( (self.memory, np.reshape(avg, (1,11)) ), axis=0 )
        return
    def convert(self, string):
        """Decode a 31-char hex payload into 10 values.

        Layout: 8 sensors of 3 hex digits each, then temperature (4 hex
        digits at [24:28]) and humidity (3 hex digits at [28:31]).
        """
        s = np.zeros(10)
        # Converting 8 sensors
        for j in range(8):
            s[j] = int( string[j*3:j*3+3] , 16 )
        # Converting temperature and humidity
        s[8] = int( string[24:28] , 16)
        s[9] = int( string[28:31] , 16)
        return s
if __name__ == "__main__":
    # Instantiating the class (opens the serial port with defaults)
    EN = ElectronicNose()
    # Acquiring some data: averages 1000 frames into a single stored sample
    EN.sniff(1000)
    # Closing connection
    EN.closeConnection()
|
str(snapshot.id)).AndReturn(snapshot)
api.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
snapshot_id=snapshot.id).\
AndReturn(volume)
self.mox.ReplayAll()
# get snapshot from url
url = reverse('horizon:project:volumes:create')
res = self.client.post("?".join([url,
"snapshot_id=" + str(snapshot.id)]),
formData)
redirect_url = reverse('horizon:project:volumes:index')
self.assertRedirectsNoFollow(res, redirect_url)
# get snapshot from dropdown list
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
redirect_url = reverse('horizon:project:volumes:index')
self.assertRedirectsNoFollow(res, redirect_url)
    @test.create_stubs({api: ('tenant_quota_usages',),
                        api.nova: ('volume_snapshot_get',)})
    def test_create_volume_from_snapshot_invalid_size(self):
        """A volume smaller than its source snapshot is rejected by the form."""
        usage = {'gigabytes': {'available': 250}, 'volumes': {'available': 6}}
        snapshot = self.volume_snapshots.first()
        formData = {'name': u'A Volume I Am Making',
                    'description': u'This is a volume I am making for a test.',
                    'method': u'CreateForm',
                    'size': 20, 'snapshot_source': snapshot.id}
        api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
        api.nova.volume_snapshot_get(IsA(http.HttpRequest),
                                     str(snapshot.id)).AndReturn(snapshot)
        api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:create')
        res = self.client.post("?".join([url,
                                         "snapshot_id=" + str(snapshot.id)]),
                               formData, follow=True)
        # No redirect: validation failed and the form is re-rendered.
        self.assertEqual(res.redirect_chain, [])
        self.assertFormError(res, 'form', None,
                             "The volume size cannot be less than the "
                             "snapshot size (40GB)")
    @test.create_stubs({api: ('tenant_quota_usages', 'volume_snapshot_list')})
    def test_create_volume_gb_used_over_alloted_quota(self):
        """Requesting more GB than the remaining quota yields a form error."""
        usage = {'gigabytes': {'available': 100, 'used': 20}}
        formData = {'name': u'This Volume Is Huge!',
                    'description': u'This is a volume that is just too big!',
                    'method': u'CreateForm',
                    'size': 5000}
        api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
        api.volume_snapshot_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_snapshots.list())
        api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:create')
        res = self.client.post(url, formData)
        expected_error = [u'A volume of 5000GB cannot be created as you only'
                          ' have 100GB of your quota available.']
        self.assertEqual(res.context['form'].errors['__all__'], expected_error)
    @test.create_stubs({api: ('tenant_quota_usages', 'volume_snapshot_list')})
    def test_create_volume_number_over_alloted_quota(self):
        """Creating a volume with zero volume-count quota left yields an error."""
        usage = {'gigabytes': {'available': 100, 'used': 20},
                 'volumes': {'available': 0}}
        formData = {'name': u'Too Many...',
                    'description': u'We have no volumes left!',
                    'method': u'CreateForm',
                    'size': 10}
        api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
        api.volume_snapshot_list(IsA(http.HttpRequest)).\
            AndReturn(self.volume_snapshots.list())
        api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:create')
        res = self.client.post(url, formData)
        expected_error = [u'You are already using all of your available'
                          ' volumes.']
        self.assertEqual(res.context['form'].errors['__all__'], expected_error)
    @test.create_stubs({api: ('volume_list',
                              'volume_delete',
                              'server_list')})
    def test_delete_volume(self):
        """Deleting an unattached volume completes without user messages."""
        volume = self.volumes.first()
        formData = {'action':
                    'volumes__delete__%s' % volume.id}
        api.volume_list(IsA(http.HttpRequest), search_opts=None).\
            AndReturn(self.volumes.list())
        api.volume_delete(IsA(http.HttpRequest), volume.id)
        api.server_list(IsA(http.HttpRequest)).AndReturn(self.servers.list())
        api.volume_list(IsA(http.HttpRequest), search_opts=None).\
            AndReturn(self.volumes.list())
        api.server_list(IsA(http.HttpRequest)).AndReturn(self.servers.list())
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:index')
        res = self.client.post(url, formData, follow=True)
        self.assertMessageCount(res, count=0)
    @test.create_stubs({api: ('volume_list',
                              'volume_delete',
                              'server_list')})
    def test_delete_volume_error_existing_snapshot(self):
        """A 400 from volume_delete surfaces as one user-facing error message."""
        volume = self.volumes.first()
        formData = {'action':
                    'volumes__delete__%s' % volume.id}
        exc = self.exceptions.cinder.__class__(400,
                                               "error: dependent snapshots")
        api.volume_list(IsA(http.HttpRequest), search_opts=None).\
            AndReturn(self.volumes.list())
        api.volume_delete(IsA(http.HttpRequest), volume.id). \
            AndRaise(exc)
        api.server_list(IsA(http.HttpRequest)).AndReturn(self.servers.list())
        api.volume_list(IsA(http.HttpRequest), search_opts=None).\
            AndReturn(self.volumes.list())
        api.server_list(IsA(http.HttpRequest)).AndReturn(self.servers.list())
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:index')
        res = self.client.post(url, formData, follow=True)
        self.assertMessageCount(res, error=1)
        self.assertEqual(list(res.context['messages'])[0].message,
                         u'Unable to delete volume "%s". '
                         u'One or more snapshots depend on it.' %
                         volume.display_name)
    @test.create_stubs({api: ('volume_get',), api.nova: ('server_list',)})
    def test_edit_attachments(self):
        """The attach form lists instances and renders 'device' as a TextInput."""
        volume = self.volumes.first()
        servers = self.servers.list()
        api.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
        api.nova.server_list(IsA(http.HttpRequest)).AndReturn(servers)
        self.mox.ReplayAll()
        url = reverse('horizon:project:volumes:attach', args=[volume.id])
        res = self.client.get(url)
        # Asserting length of 2 accounts for the one instance option,
        # and the one 'Choose Instance' option.
        form = res.context['form']
        self.assertEqual(len(form.fields['instance']._choices),
                         2)
        self.assertEqual(res.status_code, 200)
        self.assertTrue(isinstance(form.fields['device'].widget,
                                   widgets.TextInput))
@test.create_stubs({api: ('volume_get',), api.nova: ('server_list',)})
def test_edit_attachments_cannot_set_mount_point(self):
PREV = settings.OPENSTACK_HYPERVISOR_FEATURES['can_set_mount_point']
settings.OPENSTACK_HYPERVISOR_FEATURES['can_set_mount_point'] = False
volume = self.volumes.first()
servers = self.servers.list()
api.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)).AndReturn(servers)
self.mox.R |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-20 01:24
from __future__ import unicode_literals
from django.db import migrations, model | s
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make Topic.first_post nullable and non-editable, with SET_NULL."""
    dependencies = [
        ('forum_conversation', '0009_auto_20160925_2126'),
    ]
    operations = [
        migrations.AlterField(
            model_name='topic',
            name='first_post',
            # SET_NULL keeps the topic row alive when its first post is deleted.
            field=models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='forum_conversation.Post', verbose_name='First post'),
        ),
    ]
|
"""
This GA code creates the gaModel with a circular island model
"""
from operator import attrgetter
# import sys
from deap import base, creator, tools
import numpy
from csep.loglikelihood import calcLogLikelihood as loglikelihood
from models.mathUtil import calcNumberBins
import models.model
import random
import array
import multiprocessing
from mpi4py import MPI
import time
def evaluationFunction(individual, modelOmega, mean):
    """
    This function calculates the loglikelihood of a model (individual) with
    the real data from the prior X years (modelOmega, with length X).
    It selects the smallest loglikelihood value.
    """
    worst = float('Infinity')
    # Both aliases refer to the observed models' class; bins are set as
    # class-level attributes, mirroring the original implementation.
    genomeModel = type(modelOmega[0])
    modelLambda = type(modelOmega[0])
    for observed in modelOmega:
        genomeModel.bins = list(individual)
        modelLambda.bins = calcNumberBins(genomeModel.bins, mean, observed.values4poisson)
        candidate = loglikelihood(modelLambda, observed)
        if candidate < worst:
            worst = candidate
    # DEAP expects fitness values as a tuple.
    return worst,
def gaModel(NGEN,CXPB,MUTPB,modelOmega,year,region, mean, FREQ = 10, n_aval=50000):
    """
    The main function. It evolves models, namely modelLamba or individual.
    This applies the gaModel with a circular island model
    It uses two parallel system: 1, simple, that splits the ga evolution between cores
    and 2, that distributes the islands

    Parameters:
        NGEN, CXPB, MUTPB -- generations, crossover and mutation probabilities.
        modelOmega -- list of observed models used by the fitness function.
        year, region -- kept for interface compatibility (unused here).
        mean -- mean value forwarded to evaluationFunction.
        FREQ -- migration period (in generations) of the island model.
        n_aval -- total fitness-evaluation budget used to size the population.
    """
    start = time.clock()
    # Attribute generator
    toolbox = base.Toolbox()
    creator.create("FitnessFunction", base.Fitness, weights=(1.0,))
    creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessFunction)
    toolbox.register("attr_float", random.random)
    toolbox.register("evaluate", evaluationFunction, modelOmega=modelOmega, mean= mean)
    toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, len(modelOmega[0].bins))
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("mate", tools.cxOnePoint)
    # operator for selecting individuals for breeding the next
    # generation: each individual of the current generation
    # is replaced by the 'fittest' (best) of three individuals
    # drawn randomly from the current generation.
    toolbox.register("select", tools.selTournament, tournsize=3)
    toolbox.register("mutate", tools.mutPolynomialBounded,indpb=0.1, eta = 1, low = 0, up = 1)
    #calculating the number of individuals of the populations based on the number of executions
    y=int(n_aval/NGEN)
    x=n_aval - y*NGEN
    n= x + y
    pop = toolbox.population(n)
    # Evaluate the entire population
    fitnesses = list(map(toolbox.evaluate, pop))#need to pass 2 model.bins. One is the real data, the other de generated model
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
    #1 to NGEN
    #creating comm and island model not fixed
    # (cleanup: removed an unused MPI.Status()/MPI.Info.Create() pair and a
    # duplicate logbook/stats definition that was immediately overwritten)
    target = 0
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    rank = comm.Get_rank()
    origin = (rank - (target+1)) % size
    dest = (rank + ((target+1) + size)) % size
    logbook = tools.Logbook()
    logbook.header = "rank","gen","min","avg","max","std"
    stats = tools.Statistics(key=lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    for g in range(NGEN):
        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
        offspring = list(map(toolbox.clone, offspring))
        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < CXPB:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values
        for mutant in offspring:
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values
        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
        # The population is entirely replaced by the offspring, but the last island[rank] best_pop
        #Elitism
        best_pop = tools.selBest(pop, 1)[0]
        offspring = sorted(offspring, key=attrgetter("fitness"), reverse = True)
        offspring[len(offspring)-1]=best_pop
        random.shuffle(offspring)
        pop[:] = offspring
        #migration
        # NOTE(review): FREQ=1 makes this "g % 0" and raises ZeroDivisionError;
        # the migration period is FREQ-1 generations as written -- confirm.
        if g % (FREQ-1) == 0 and g > 0:
            best_inds = tools.selBest(pop, 1)[0]
            data = comm.sendrecv(sendobj=best_inds,dest=dest,source=origin)
            #rotation
            target+=1
            origin = (rank - (target+1)) % size
            dest = (rank + ((target+1) + size)) % size
            # Bug fix: insert the *received* migrant into the population.
            # Previously this inserted "ind" (a leftover variable from the
            # fitness-evaluation loop), silently discarding the migrant.
            pop[random.randint(0, len(pop)-1)] = data
            del best_pop
            del data
        #logBook
        record = stats.compile(pop)
        logbook.record(gen=g, **record)
    # choose the best value
    if rank == 0:
        # Root gathers every island's champion and keeps the best of all.
        best_pop=tools.selBest(pop, 1)[0]
        best_all_pop = list()
        best_all_pop.append(best_pop)
        for thread in range(size):
            if (thread != 0):
                local_best = comm.recv(source=thread)
                best_all_pop.append(local_best)
        maximum = float('-inf')
        for local_best in best_all_pop:
            local_maximum = evaluationFunction(local_best, modelOmega, mean)
            if maximum < local_maximum[0]:
                maximum = local_maximum[0]
                best_pop = local_best
    else:
        best_pop=tools.selBest(pop, 1)[0]
        comm.send(best_pop, dest=0)
    end = time.clock()
    generatedModel = type(modelOmega[0])
    generatedModel.prob = best_pop
    # NOTE(review): calcNumberBins is called elsewhere with three arguments
    # (bins, mean, values4poisson); confirm this two-argument call is intended.
    generatedModel.bins = calcNumberBins(best_pop, modelOmega[0].bins)
    generatedModel.loglikelihood = best_pop.fitness.values
    generatedModel.definitions = modelOmega[0].definitions
    # Bug fix: elapsed time is end - start (was start - end, always negative).
    generatedModel.time = end - start
    generatedModel.logbook = logbook
    return generatedModel
if __name__ == "__main__":
    # NOTE(review): gaModel() requires several positional arguments
    # (NGEN, CXPB, MUTPB, modelOmega, year, region, mean); calling it bare
    # raises TypeError, so this entry point appears to be a stub.
    gaModel()
|
# -*- coding: utf-8 -*-
#
# Point Tracker documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 25 14:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import os.path
from glob import glob
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath(os.path.join('..', '..', 'src')))
# Candidate locations of a jsMath installation; the first glob match that
# actually contains jsMath.js wins and is bound to jsmath_path.
possible_jsmath_paths = [ os.path.join(os.environ['HOME'],'apps', 'network', 'jsMath*'),
        os.path.join(os.environ['HOME'], 'apps', 'science', 'jsMath*'), '/usr/share/jsmath' ]
for filt in possible_jsmath_paths:
    for pth in glob(filt):
        if os.path.exists(os.path.join(pth, 'jsMath.js')):
            jsmath_path = pth
            break
    else:
        # Inner loop finished without "break": try the next candidate.
        continue
    # Inner loop broke: a usable path was found, stop searching.
    break
else:
    # No candidate matched: abort the Sphinx build (Python 2 print syntax).
    print >> sys.stderr, "Error, couldn't find the path for jsmath, please edit the possible_jsmath_paths variable."
    sys.exit(2)
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.e | xt.*') or your custom ones.
# NOTE(review): pngmath and jsmath were removed from modern Sphinx releases
# (replaced by imgmath/mathjax); this list assumes an old Sphinx version.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.doctest',
              'sphinx.ext.todo',
              'sphinx.ext.coverage',
              'sphinx.ext.pngmath',
              'sphinx.ext.jsmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Point Tracker'
copyright = u'2010, Barbier de Reuille, Pierre'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.7'
# The full version, including alpha/beta/rc tags.
# NOTE: keep version/release in sync with the package metadata on release.
release = '0.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
# (empty: every directory under the source root is searched)
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# 'sphinx' is the standard Sphinx colour scheme.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# Using the builtin default theme, unmodified.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# The _static directory must exist next to this conf.py.
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
# Sphinx convention: project name + "doc".
htmlhelp_basename = 'PointTrackerdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
# A single manual-class PDF is produced from the master document.
latex_documents = [
  ('index', 'PointTracker.tex', u'Point Tracker Documentation',
   u'Barbier de Reuille, Pierre', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
-
def | __init__( | self):
-
|
=app.config['COOLDOWN'], max_retries=app.config['MAX_RETRIES'])
# If the code was not a good code, record the status as a 404 and raise an exception
finally:
db.session.commit()
return str(response.status_code)
def do_capture(status_code, the_record, base_url, model='capture', phantomjs_timeout=app.config['PHANTOMJS_TIMEOUT']):
    """
    Create a screenshot, text scrape, and html capture for a provided record.

    This depends on phantomjs and an associated javascript file to perform the
    captures.  In the event an error occurs, an exception is raised and handled
    by the celery task or the controller that called this method.

    :param status_code: HTTP status of the original request (kept for interface
        compatibility; not used directly in this function).
    :param the_record: capture/static record to update with result URLs.
    :param base_url: base URL used to build links to the generated files.
    :param model: 'capture' for live URLs, 'static' for already-stored HTML.
    :param phantomjs_timeout: seconds to wait before killing PhantomJS.
    :return: defaultdict mapping capture type ('sketch'/'scrape'/'html') to
        the local filename produced.
    """
    # Attach the record to the session so status updates below are persisted
    db.session.add(the_record)
    # If the capture is for static content, use a different PhantomJS config file
    if model == 'static':
        capture_name = the_record.filename
        service_args = [
            app.config['PHANTOMJS'],
            '--ssl-protocol=any',
            '--ignore-ssl-errors=yes',
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/assets/static.js',
            app.config['LOCAL_STORAGE_FOLDER'],
            capture_name]
        content_to_parse = os.path.join(app.config['LOCAL_STORAGE_FOLDER'], capture_name)
    else:
        capture_name = grab_domain(the_record.url) + '_' + str(the_record.id)
        service_args = [
            app.config['PHANTOMJS'],
            '--ssl-protocol=any',
            '--ignore-ssl-errors=yes',
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/assets/capture.js',
            the_record.url,
            os.path.join(app.config['LOCAL_STORAGE_FOLDER'], capture_name)]
        content_to_parse = os.path.join(app.config['LOCAL_STORAGE_FOLDER'], capture_name + '.html')
    # Using subprocess32 backport, call phantom and if process hangs kill it
    pid = subprocess32.Popen(service_args, stdout=PIPE, stderr=PIPE)
    try:
        stdout, stderr = pid.communicate(timeout=phantomjs_timeout)
    except subprocess32.TimeoutExpired:
        pid.kill()
        stdout, stderr = pid.communicate()
        app.logger.error('PhantomJS Capture timeout at {} seconds'.format(phantomjs_timeout))
        raise subprocess32.TimeoutExpired('phantomjs capture', phantomjs_timeout)
    # Any output on stdout or stderr is treated as a capture failure
    if stderr or stdout:
        raise Exception(stderr)
    # Strip tags and parse out all text
    ignore_tags = ('script', 'noscript', 'style')
    with open(content_to_parse, 'r') as content_file:
        content = content_file.read()
    cleaner = clean.Cleaner()
    content = cleaner.clean_html(content)
    doc = LH.fromstring(content)
    output = ""
    for elt in doc.iterdescendants():
        if elt.tag in ignore_tags:
            continue
        text = elt.text or ''
        tail = elt.tail or ''
        wordz = " ".join((text, tail)).strip('\t')
        if wordz and len(wordz) >= 2 and not re.match("^[ \t\n]*$", wordz):
            output += wordz.encode('utf-8')
    # Since the filename format is different for static captures, update the filename
    # This will ensure the URLs are pointing to the correct resources
    if model == 'static':
        capture_name = capture_name.split('.')[0]
    # Write the parsed text into the capture folder.  'with' guarantees the
    # handle is flushed and closed (the original left this file open).
    with open(os.path.join(app.config['LOCAL_STORAGE_FOLDER'], capture_name + '.txt'), 'wb') as parsed_text:
        parsed_text.write(output)
    # Update the sketch record with the local URLs for the sketch, scrape, and html captures
    the_record.sketch_url = base_url + '/files/' + capture_name + '.png'
    the_record.scrape_url = base_url + '/files/' + capture_name + '.txt'
    the_record.html_url = base_url + '/files/' + capture_name + '.html'
    # Create a dict that contains what files may need to be written to S3
    files_to_write = defaultdict(list)
    files_to_write['sketch'] = capture_name + '.png'
    files_to_write['scrape'] = capture_name + '.txt'
    files_to_write['html'] = capture_name + '.html'
    # If we are not writing to S3, update the capture_status that we are completed.
    if not app.config['USE_S3']:
        the_record.job_status = "COMPLETED"
        the_record.capture_status = "LOCAL_CAPTURES_CREATED"
    else:
        the_record.capture_status = "LOCAL_CAPTURES_CREATED"
    db.session.commit()
    return files_to_write
def s3_save(files_to_write, the_record):
    """
    Write a sketch, scrape, and html file to S3 and record their URLs.

    :param files_to_write: dict mapping capture type ('sketch'/'scrape'/'html')
        to the local filename to upload.
    :param the_record: record updated with generated S3 URLs and status fields.
    """
    db.session.add(the_record)
    # These are the content-types for the files S3 will be serving up
    response_types = {'sketch': 'image/png', 'scrape': 'text/plain', 'html': 'text/html'}
    # Iterate through each file we need to write to s3
    for capture_type, file_name in files_to_write.items():
        # Connect to S3, generate Key, set path based on capture_type, write file to S3
        conn = boto.s3.connect_to_region(
            region_name = app.config.get('S3_BUCKET_REGION_NAME'),
            calling_format = boto.s3.connection.OrdinaryCallingFormat()
        )
        key = Key(conn.get_bucket(app.config.get('S3_BUCKET_PREFIX')))
        path = "sketchy/{}/{}".format(capture_type, the_record.id)
        key.key = path
        key.set_contents_from_filename(app.config['LOCAL_STORAGE_FOLDER'] + '/' + file_name)
        # Generate a time-limited URL for downloading the uploaded file
        url = conn.generate_url(
            app.config.get('S3_LINK_EXPIRATION'),
            'GET',
            bucket=app.config.get('S3_BUCKET_PREFIX'),
            key=key.key,
            response_headers={
                'response-content-type': response_types[capture_type],
                'response-content-disposition': 'attachment; filename=' + file_name
            })
        # Store the URL on the record attribute matching the capture_type
        if capture_type == 'sketch':
            the_record.sketch_url = str(url)
        if capture_type == 'scrape':
            the_record.scrape_url = str(url)
        if capture_type == 'html':
            the_record.html_url = str(url)
    # Remove local files if we are saving to S3
    os.remove(os.path.join(app.config['LOCAL_STORAGE_FOLDER'], files_to_write['sketch']))
    os.remove(os.path.join(app.config['LOCAL_STORAGE_FOLDER'], files_to_write['scrape']))
    os.remove(os.path.join(app.config['LOCAL_STORAGE_FOLDER'], files_to_write['html']))
    # Both branches of the original set the same capture_status; the job is
    # only COMPLETED here when no callback still has to run afterwards.
    the_record.capture_status = 'S3_ITEMS_SAVED'
    if not the_record.callback:
        the_record.job_status = 'COMPLETED'
    db.session.commit()
def finisher(the_record):
    """
    POST the finished record (as JSON) to the callback URL it carries.

    ``raise_for_status`` propagates an exception on a 4xx/5xx answer, leaving
    the record's status fields untouched in that case.
    """
    db.session.add(the_record)
    verify_ssl = app.config['SSL_HOST_VALIDATION']
    # Set the correct headers for the postback
    headers = {'Content-type': 'application/json', 'Accept': 'text/plain', 'Connection': 'close'}
    #proxy = {"http": "127.0.0.1:8080"}
    req = post(the_record.callback, verify=verify_ssl, data=json.dumps(the_record.as_dict()), headers=headers)
    # If a 4xx or 5xx status is received, raise an exception
    req.raise_for_status()
    # Update capture_record and save to database.  (The record is already in
    # this session; the original redundantly added it a second time here.)
    the_record.job_status = 'COMPLETED'
    the_record.capture_status = 'CALLBACK_SUCCEEDED'
    db.session.commit()
@celery.task(name='celery_static_capture', ignore_result=True, bind=True)
def celery_static_capture(self, base_url, capture_id=0, retries=0, model="static"):
"""
Celery task used to create a sketch and scrape with a provided static HTML file.
Task also writes files to S3 or posts a callback depending on configuration file.
"""
static_record = Static.query.filter(Static.id == capture_id).first()
# Write the number of retries to the capture record
db.session.add(static_record)
static_record.retry = retries
db.session.commit()
# First perform the captures, then either write to S3, perform a callback, or neither
try:
# call the main capture function to retrieve sketches and scrapes
files_to_wr |
test_cdphotothread_parsetest_cdphotothread_parse(self):
# cd = MediaParser.partial_media_dict_from_url(
# "https://www.chiefdelphi.com/media/photos/41999")
# self.assertEqual(cd['media_type_enum'], MediaType.CD_PHOTO_THREAD)
# self.assertEqual(cd['foreign_key'], "41999")
# self.assertTrue(cd['details_json'])
# details = json.loads(cd['details_json'])
# self.assertEqual(details['image_partial'], "a88/a880fa0d65c6b49ddb93323bc7d2e901_l.jpg")
    def test_imgur_parse(self):
        """Imgur page and direct-image URLs both map to MediaType.IMGUR with the
        image id as foreign key; subreddit and album URLs parse to None."""
        imgur_img = MediaParser.partial_media_dict_from_url("http://imgur.com/zYqWbBh")
        self.assertEqual(imgur_img['media_type_enum'], MediaType.IMGUR)
        self.assertEqual(imgur_img['foreign_key'], "zYqWbBh")
        imgur_img = MediaParser.partial_media_dict_from_url("http://i.imgur.com/zYqWbBh.png")
        self.assertEqual(imgur_img['media_type_enum'], MediaType.IMGUR)
        self.assertEqual(imgur_img['foreign_key'], "zYqWbBh")
        self.assertEqual(MediaParser.partial_media_dict_from_url("http://imgur.com/r/aww"), None)
        self.assertEqual(MediaParser.partial_media_dict_from_url("http://imgur.com/a/album"), None)
    def test_fb_profile_parse(self):
        """A Facebook profile URL parses into a social FACEBOOK_PROFILE dict
        with the page name as foreign key and a canonical https profile URL."""
        result = MediaParser.partial_media_dict_from_url("http://facebook.com/theuberbots")
        self.assertEqual(result['media_type_enum'], MediaType.FACEBOOK_PROFILE)
        self.assertEqual(result['is_social'], True)
        self.assertEqual(result['foreign_key'], 'theuberbots')
        self.assertEqual(result['site_name'], MediaType.type_names[MediaType.FACEBOOK_PROFILE])
        self.assertEqual(result['profile_url'], 'https://www.facebook.com/theuberbots')
    def test_twitter_profile_parse(self):
        """A Twitter profile URL parses into a social TWITTER_PROFILE dict
        with the handle as foreign key."""
        result = MediaParser.partial_media_dict_from_url("https://twitter.com/team1124")
        self.assertEqual(result['media_type_enum'], MediaType.TWITTER_PROFILE)
        self.assertEqual(result['is_social'], True)
        self.assertEqual(result['foreign_key'], 'team1124')
        self.assertEqual(result['site_name'], MediaType.type_names[MediaType.TWITTER_PROFILE])
        self.assertEqual(result['profile_url'], 'https://twitter.com/team1124')
    def test_youtube_profile_parse(self):
        """YouTube channel URLs (plain and /c/ vanity form) parse to
        YOUTUBE_CHANNEL with the channel name lower-cased in both the foreign
        key and the canonical profile URL."""
        result = MediaParser.partial_media_dict_from_url("https://www.youtube.com/Uberbots1124")
        self.assertEqual(result['media_type_enum'], MediaType.YOUTUBE_CHANNEL)
        self.assertEqual(result['is_social'], True)
        self.assertEqual(result['foreign_key'], 'uberbots1124')
        self.assertEqual(result['site_name'], MediaType.type_names[MediaType.YOUTUBE_CHANNEL])
        self.assertEqual(result['profile_url'], 'https://www.youtube.com/uberbots1124')
        # NOTE(review): 'short_result' uses the exact same URL as 'result';
        # it was presumably meant to exercise a short-form URL -- confirm.
        short_result = MediaParser.partial_media_dict_from_url("https://www.youtube.com/Uberbots1124")
        self.assertEqual(short_result['media_type_enum'], MediaType.YOUTUBE_CHANNEL)
        self.assertEqual(short_result['is_social'], True)
        self.assertEqual(short_result['foreign_key'], 'uberbots1124')
        self.assertEqual(short_result['site_name'], MediaType.type_names[MediaType.YOUTUBE_CHANNEL])
        self.assertEqual(short_result['profile_url'], 'https://www.youtube.com/uberbots1124')
        gapps_result = MediaParser.partial_media_dict_from_url("https://www.youtube.com/c/tnt3102org")
        self.assertEqual(gapps_result['media_type_enum'], MediaType.YOUTUBE_CHANNEL)
        self.assertEqual(gapps_result['is_social'], True)
        self.assertEqual(gapps_result['foreign_key'], 'tnt3102org')
        self.assertEqual(gapps_result['site_name'], MediaType.type_names[MediaType.YOUTUBE_CHANNEL])
        self.assertEqual(gapps_result['profile_url'], 'https://www.youtube.com/tnt3102org')
    def test_github_profile_parse(self):
        """A GitHub profile URL parses into a social GITHUB_PROFILE dict."""
        result = MediaParser.partial_media_dict_from_url("https://github.com/frc1124")
        self.assertEqual(result['media_type_enum'], MediaType.GITHUB_PROFILE)
        self.assertEqual(result['is_social'], True)
        self.assertEqual(result['foreign_key'], 'frc1124')
        self.assertEqual(result['site_name'], MediaType.type_names[MediaType.GITHUB_PROFILE])
        self.assertEqual(result['profile_url'], 'https://github.com/frc1124')
    def test_instagram_profile_parse(self):
        """An Instagram profile URL parses into a social INSTAGRAM_PROFILE dict."""
        result = MediaParser.partial_media_dict_from_url("https://www.instagram.com/4hteamneutrino")
        self.assertEqual(result['media_type_enum'], MediaType.INSTAGRAM_PROFILE)
        self.assertEqual(result['is_social'], True)
        self.assertEqual(result['foreign_key'], '4hteamneutrino')
        self.assertEqual(result['site_name'], MediaType.type_names[MediaType.INSTAGRAM_PROFILE])
        self.assertEqual(result['profile_url'], 'https://www.instagram.com/4hteamneutrino')
    def test_periscope_profile_parse(self):
        """A Periscope profile URL parses into a social PERISCOPE_PROFILE dict."""
        result = MediaParser.partial_media_dict_from_url("https://www.periscope.tv/evolution2626")
        self.assertEqual(result['media_type_enum'], MediaType.PERISCOPE_PROFILE)
        self.assertEqual(result['is_social'], True)
        self.assertEqual(result['foreign_key'], 'evolution2626')
        self.assertEqual(result['site_name'], MediaType.type_names[MediaType.PERISCOPE_PROFILE])
        self.assertEqual(result['profile_url'], 'https://www.periscope.tv/evolution2626')
    def test_grabcad_link(self):
        """A GrabCAD library link parses into a non-social GRABCAD dict whose
        details_json carries model name/description/image/created fields."""
        result = MediaParser.partial_media_dict_from_url("https://grabcad.com/library/2016-148-robowranglers-1")
        self.assertEqual(result['media_type_enum'], MediaType.GRABCAD)
        self.assertEqual(result['is_social'], False)
        self.assertEqual(result['foreign_key'], '2016-148-robowranglers-1')
        details = json.loads(result['details_json'])
        self.assertEqual(details['model_name'], '2016 | 148 - Robowranglers')
        self.assertEqual(details['model_description'], 'Renegade')
        self.assertEqual(details['model_image'], 'https://d2t1xqejof9utc.cloudfront.net/screenshots/pics/96268d5c5e6c1b7fe8892f713813bb40/card.jpg')
        self.assertEqual(details['model_created'], '2016-09-19T11:52:23Z')
    def test_instagram_image(self):
        """An Instagram photo URL parses to INSTAGRAM_IMAGE with oEmbed-style
        details (title, author, thumbnail)."""
        result = MediaParser.partial_media_dict_from_url("https://www.instagram.com/p/BUnZiriBYre/")
        self.assertEqual(result['media_type_enum'], MediaType.INSTAGRAM_IMAGE)
        self.assertEqual(result['foreign_key'], "BUnZiriBYre")
        details = json.loads(result['details_json'])
        self.assertEqual(details['title'], "FRC 195 @ 2017 Battlecry @ WPI")
        self.assertEqual(details['author_name'], '1stroboticsrocks')
        self.assertIsNotNone(details.get('thumbnail_url', None))
    def test_unsupported_url_parse(self):
        """URLs from unrecognized sites parse to None."""
        self.assertEqual(MediaParser.partial_media_dict_from_url("http://foo.bar"), None)
class TestWebcastUrlParser(unittest2.TestCase):
    @classmethod
    def setUpClass(cls):
        """Activate a GAE testbed with a urlfetch stub shared by all tests."""
        cls.testbed = testbed.Testbed()
        cls.testbed.activate()
        cls.testbed.init_urlfetch_stub()
    @classmethod
    def tearDownClass(cls):
        """Deactivate the shared GAE testbed."""
        cls.testbed.deactivate()
    def testTwitchUrl(self):
        """A twitch.tv channel URL yields type 'twitch' with the channel name;
        a bare twitch.tv URL yields None."""
        res = WebcastParser.webcast_dict_from_url("http://twitch.tv/frcgamesense")
        self.assertIsNotNone(res)
        self.assertEqual(res['type'], 'twitch')
        self.assertEqual(res['channel'], 'frcgamesense')
        unknown = WebcastParser.webcast_dict_from_url("http://twitch.tv/")
        self.assertIsNone(unknown)
    def testYouTubeUrl(self):
        """Both long (watch?v=) and short (youtu.be) YouTube URLs yield type
        'youtube' with the video id as channel; URLs without an id yield None."""
        yt_long = WebcastParser.webcast_dict_from_url("http://www.youtube.com/watch?v=I-IrVbsl_K8")
        self.assertIsNotNone(yt_long)
        self.assertEqual(yt_long['type'], 'youtube')
        self.assertEqual(yt_long['channel'], 'I-IrVbsl_K8')
        yt_short = WebcastParser.webcast_dict_from_url("http://youtu.be/I-IrVbsl_K8")
        self.assertIsNotNone(yt_short)
        self.assertEqual(yt_short['type'], 'youtube')
        self.assertEqual(yt_short['channel'], 'I-IrVbsl_K8')
        bad_long = WebcastParser.webcast_dict_from_url('"http://www.youtube.com/')
        self.assertIsNone(bad_long)
        bad_short = WebcastParser.webcast_dict_from_url("http://youtu.be/")
        self.assertIsNone(bad_short)
def testUstream(self):
res = Web |
import numpy as np
from scipy.special import iv
def tapering_window(time,D,mywindow):
    """ tapering_window returns the window for tapering a WOSA segment.
    Inputs:
    - time [1-dim numpy array of floats]: times along the WOSA segment.
    - D [float]: Temporal length of the WOSA segment.
    - mywindow [int]: Choice of tapering window:
        -> 1: Square window
        -> 2: Triangular window
        -> 3: sin window
        -> 4: sin**2 (Hanning) window
        -> 5: sin**3 window
        -> 6: sin**4 window
        -> 7: Hamming window, defined as 0.54-0.46*np.cos(2.0*np.pi*time/D)
        -> 8: 4-term Blackman-Harris window, with a0=0.35875 and a1=0.48829 and a2=0.14128 and a3=0.01168
        -> 9: Kaiser-Bessel window, with parameter alpha=2.5
        -> 10: Gaussian window, with standard dev. sigma=D/6.0
    The terminology and formulas come from:
    F. Harris. On the use of windows for harmonic analysis with the discrete fourier transform. Proceedings of the IEEE, 66(1):51-83, January 1978.
    WARNING: Provide the vector 'time' such that for all k=0,...,time.size-1, we have time[k]>=0 and time[k]<=D
    Outputs:
    - tapering_window [1-dim numpy array of floats - size=time.size]: the tapering window,
      or None (with an error message printed) for an invalid 'mywindow'.
    -----------------------------
    This is part of WAVEPAL
    (C) 2016 G. Lenoir"""
    T=time.size
    # 'win' holds the result; the original bound it to the function's own
    # name, shadowing tapering_window inside its body.
    if mywindow==1:
        win=np.ones(T)
    elif mywindow==2:
        win=1.0-np.absolute(time-D/2.0)/(D/2.0)
    elif mywindow==3:
        win=np.sin(np.pi*time/D)
    elif mywindow==4:
        win=(np.sin(np.pi*time/D))**2
    elif mywindow==5:
        win=(np.sin(np.pi*time/D))**3
    elif mywindow==6:
        win=(np.sin(np.pi*time/D))**4
    elif mywindow==7:
        win=0.54-0.46*np.cos(2.0*np.pi*time/D)
    elif mywindow==8:
        a0=0.35875
        a1=0.48829
        a2=0.14128
        a3=0.01168
        win=a0-a1*np.cos(2.0*np.pi*time/D)+a2*np.cos(4.0*np.pi*time/D)-a3*np.cos(6.0*np.pi*time/D)
    elif mywindow==9:
        alpha=2.5
        win=iv(0,np.pi*alpha*np.sqrt(1.0-((time-D/2.0)/(D/2.0))**2))
    elif mywindow==10:
        sig=D/6.0
        win=np.exp(-(time-D/2.0)**2/2.0/sig**2)
    else:
        # print() call instead of the original Python-2-only print statement,
        # so the module also imports under Python 3 (same output on Python 2).
        print("Error: The window number you entered is not valid. Check input variable 'mywindow'.")
        return
    return win
fro | m ga_starters imp | ort * |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
progress test (count to 1000)
"""
from __future__ import absolute_import
from __future__ import print_function
from __ | future__ import unicode_literals
from ...utils.timing import TimedTestCase
from ..progress impor | t together
class test_progress(TimedTestCase):
    """Timed smoke test for the progress utility."""
    def test_prog(self):
        """Count to 1000 via together(); must stay within the threshold."""
        # NOTE(review): threshold is presumably a per-test time limit (in
        # seconds) enforced by TimedTestCase -- confirm against its definition.
        self.threshold = 0.1
        together(1000)
uest
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def recover_partition(
self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Indicates to the Service Fabric cluster that it should attempt to
recover a specific partition which is currently stuck in quorum loss.
Indicates to the Service Fabric cluster that it should attempt to
recover a specific partition which is currently stuck in quorum loss.
This operation should only be performed if it is known that the
replicas that are down cannot be recovered. Incorrect use of this API
can cause potential data loss.
:param partition_id: The identity of the partition.
:type partition_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/Partitions/{partitionId}/$/Recover'
path_format_arguments = {
'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def recover_service_partitions(
self, service_id, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Indicates to the Service Fabric cluster that it should attempt to
recover the specified service which is currently stuck in quorum loss.
Indicates to the Service Fabric cluster that it should attempt to
recover the specified service which is currently stuck in quorum loss.
This operation should only be performed if it is known that the
replicas that are down cannot be recovered. Incorrect use of this API
can cause potential data loss.
:param service_id: The identity of the service. This is typically the
full name of the service without the 'fabric:' URI scheme.
Starting from version 6.0, hierarchical names are delimited with the
"~" character.
For example, if the service name is "fabric:/myapp/app1/svc1", the
service identity would be "myapp~app1~svc1" in 6.0+ and
"myapp/app1/svc1" in previous versions.
:type service_id: str
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation con | figuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
| # Construct URL
url = '/Services/$/{serviceId}/$/GetPartitions/$/Recover'
path_format_arguments = {
'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def recover_system_partitions(
self, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Indicates to the Service Fabric cluster that it should attempt to
recover the system services which are currently stuck in quorum loss.
Indicates to the Service Fabric cluster that it should attempt to
recover the system services which are currently stuck in quorum loss.
This operation should only be performed if it is known that the
replicas that are down cannot be recovered. Incorrect use of this API
can cause potential data loss.
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/$/RecoverSystemPartitions'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not |
#!/usr/bin/env python
class PLUGIN_test_test2:
    """Example plugin demonstrating the host's plugin interface: construction,
    per-tag rendering (core), per-frame pump, input events, and pause/resume
    notifications.  Draws a blue rectangle for <test2> tags."""
    def __init__(self, screensurf, keylist, vartree):
        # screensurf: surface drawn on in core(); keylist/vartree are stored
        # by convention (vartree is not retained by this plugin).
        self.screensurf=screensurf
        self.keylist=keylist
        #best practice to init keyid variables during init, and default them to "0" (the null keyid)
        self.keyid="0"
    def fork(self, tagobj):
        """Hook for forked tags; no-op in this plugin."""
        return
    #core object. should either return None, or pygame Rect.
    #if Rect is returned, the system will attempt to parse the standard
    #"act" component, and associated related attributes...
    #you may also want to use the provided click events in place of the standard act component.
    #if you want hoverkey to be active, you MUST return a Rect!
    #onkey/offkey masking is honored by the system regardless.
    def core(self, tagobj):
        """Render a 60x20 blue rectangle at the tag's (x, y) for <test2> tags
        and return its Rect; implicitly returns None for other tags."""
        if tagobj.tag=="test2":
            self.xpos=int(tagobj.attrib.get("x"))
            self.ypos=int(tagobj.attrib.get("y"))
            #note: these core object tests are in blue
            self.testrect=pygame.Rect(self.xpos, self.ypos, 60, 20)
            pygame.draw.rect(self.screensurf, (0, 127, 255), self.testrect)
            return self.testrect
    #called every loop.
    def pump(self):
        return
    #called on pygame mousebuttondown events
    def click(self, event):
        return
    #called on pygame mousebuttonup events
    def clickup(self, event):
        return
    #called upon page load
    def pageclear(self):
        return
    #pause & resume can be useful for various things. such as properly extending timers. for that, its recommended using the calculated seconds.
    def pause(self, time):
        print("plugin test2.dzup.py receved pause call.")
        print(time)
    #seconds referrs to a calculated seconds paused as a float.
    def resume(self, seconds):
        print("plugin test2.dzup.py receved resume call.")
        print(seconds)
    def keyup(self, event):
        print("plugin test2.dzup.py receved KEYUP")
    def keydown(self, event):
        print("plugin test2.dzup.py receved KEYDOWN")
# Registration metadata, presumably read by the host's plugin loader -- confirm.
plugname="test plugin2"
plugclass=PLUGIN_test_test2
plugpath=None
#!/usr/ | bin/env python
# -*- coding: UTF-8 -*- |
from app import create_app, celery
# Module-level application instance built by the factory; presumably imported
# by the worker / server entry points -- confirm usage.
app = create_app()
|
import collections

# Count duplicate lines in depth_28.txt and write the distinct lines to
# depth_29.txt, most frequent first.  Both files are managed with 'with' so
# the output is always flushed and closed (the original never closed it).
with open('depth_28.txt') as infile:
    counts = collections.Counter(l.strip() for l in infile)
with open('depth_29.txt', 'w') as g:
    for line, count in counts.most_common():
        g.write(str(line))
        #g.write(str(count))
        g.write("\n")
from distutils.core import setup
# Packaging metadata for the dkcoverage distribution.
# NOTE(review): distutils is deprecated (removed in Python 3.12); consider
# migrating to setuptools when the supported Python range allows.
setup(
    name='dkcoverage',
    version='0.0.0',
    packages=[''],
    url='https://github.com/thebjorn/dkcoverage',
    license='GPL v2',
    author='bjorn',
    author_email='bp@datakortet.no',
    description='Run tests and compute coverage.'
)
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Factory for the shear mite broodling creature template.

	:param kernel: engine kernel passed by the autogenerated loader
		(not used directly here).
	:return: configured Creature instance.
	"""
	result = Creature()
	result.template = "object/mobile/shared_shear_mite_broodling.iff"
	result.attribute_template_id = 9
	result.stfName("monster_name","shear_mite")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result
update_devs_dict(devs, dev, entry)
if path.startswith(SYSFS_PATH2):
os.path.join(path,"device","block:*")
dev = _extract_dev_name(os.path.join(path, 'device'))
if devs.has_key(dev):
continue
hbtl = os.path.basename(path)
(h,b,t,l) = hbtl.split(':')
entry = {'procname':proc, 'host':id, 'target':l}
update_devs_dict(devs, dev, entry)
dict['devs'] = devs
dict['adt'] = adt
return dict
def _get_driver_name(scsihost):
    """Best-effort lookup of the driver name for a SCSI host sysfs entry.

    Checks driver-specific sysfs attributes first (fnic/lpfc/qla2xxx), then
    the driver_name/proc_name files, and finally the uevent PHYSDEVDRIVER
    line.  Returns 'Unknown' when nothing matches.
    """
    driver_name = 'Unknown'
    if os.path.exists(os.path.join(SYSFS_PATH1, scsihost, 'fnic_state')):
        driver_name = 'fnic'
    if os.path.exists(os.path.join(SYSFS_PATH1, scsihost, 'lpfc_fcp_class')):
        driver_name = 'lpfc'
    if os.path.exists(os.path.join(SYSFS_PATH1, scsihost, '84xx_fw_version')):
        driver_name = 'qla2xxx'
    if 'Unknown' == driver_name:
        namepath = os.path.join(SYSFS_PATH1, scsihost, 'driver_name')
        if not os.path.exists(namepath):
            namepath = os.path.join(SYSFS_PATH1, scsihost, 'proc_name')
        if os.path.exists(namepath):
            try:
                # 'with' guarantees the handle is closed even when the read
                # raises (the original leaked the descriptor in that case).
                with open(namepath, 'r') as f:
                    line = f.readline()[:-1]
                if not line in ['<NULL>', '(NULL)', '']:
                    driver_name = line
            except IOError:
                pass
    if 'Unknown' == driver_name:
        ueventpath = os.path.join(SYSFS_PATH1, scsihost, 'uevent')
        if os.path.exists(ueventpath):
            try:
                with open(ueventpath, 'r') as f:
                    for line in f:
                        if line.startswith('PHYSDEVDRIVER='):
                            driver_name = line.replace('PHYSDEVDRIVER=','').strip()
            except IOError:
                pass
    return driver_name
def _parseHostId(str):
    """Build an 'a:b:c' id from whitespace fields 1, 3 and 5 of ``str``,
    with any commas removed (e.g. mppUtil 'hostId 0, ...' lines)."""
    fields = str.split()
    joined = "%s:%s:%s" % (fields[1], fields[3], fields[5])
    return joined.replace(',', '')
def _genMPPHBA(id):
    """For each cached SCSI device on adapter ``id``, query mppUtil for the
    array the device belongs to and collect its reported hostIds.

    Returns a dict mapping short device name -> list of 'h:b:t' host ids.
    """
    devs = scsiutil.cacheSCSIidentifiers()
    mppdict = {}
    for dev in devs:
        item = devs[dev]
        if item[1] == id:
            arr = scsiutil._genArrayIdentifier(dev)
            if not len(arr):
                continue
            try:
                # Locate the mpp record for this array, then pull hostId
                # lines from its detailed listing.
                cmd = ['/usr/sbin/mppUtil', '-a']
                for line in util.doexec(cmd)[1].split('\n'):
                    if line.find(arr) != -1:
                        rec = line.split()[0]
                        cmd2 = ['/usr/sbin/mppUtil', '-g',rec]
                        li = []
                        for newline in util.doexec(cmd2)[1].split('\n'):
                            if newline.find('hostId') != -1:
                                li.append(_parseHostId(newline))
                        mppdict[dev.split('/')[-1]] = li
            except:
                # NOTE(review): bare except deliberately skips devices whose
                # mppUtil invocation fails -- best-effort enumeration.
                continue
    return mppdict
def match_hbadevs(s, filterstr):
    """Return the driver name for sysfs entry ``s`` when it is a non-blacklisted
    host matching ``filterstr`` ('any' matches everything); else ''."""
    driver_name = _get_driver_name(s)
    if not match_host(s):
        return ""
    if match_blacklist(driver_name):
        return ""
    if filterstr == "any" or match_filterstr(filterstr, driver_name):
        return driver_name
    return ""
def match_blacklist(driver_name):
    """Search ``driver_name`` against the blacklist of driver patterns."""
    pattern = "|".join("(%s)" % entry for entry in DRIVER_BLACKLIST)
    return re.search(pattern, driver_name)
def match_filterstr(filterstr, driver_name):
    """Match when ``driver_name`` begins with ``filterstr`` (regex-anchored)."""
    anchored = "^%s" % filterstr
    return re.search(anchored, driver_name)
def match_host(s):
    """Match sysfs entries named like 'host<digit>...'."""
    regex = re.compile("^host[0-9]")
    return regex.search(s, 0)
def match_rport(s):
    """Match remote-port sysfs entries ('rport-...')."""
    return re.search("^rport-*", s)
def match_targets(s):
    """Match target sysfs entries ('target<digit>...')."""
    return re.search("^target[0-9]", s)
def match_phy(s):
    """Match phy sysfs entries ('phy-...')."""
    return re.search("^phy-*", s)
def match_LUNs(s, prefix):
    """Match when ``s`` begins with ``prefix`` (regex-anchored)."""
    return re.search("^%s" % prefix, s)
def match_dev(s):
    """Match 2.6-kernel sysfs block entries ('block:<dev>')."""
    return re.search("^block:", s)
def _extract_dev_name(device_dir):
    """Returns the name of the block device from sysfs e.g. 'sda'.

    :param device_dir: sysfs device directory to inspect.
    :raises xs_errors.XenError: for kernels that are neither 2.6 nor 3.x.
    """
    kernel_version = os.uname()[2]
    if kernel_version.startswith('2.6'):
        # sub-directory of form block:sdx/ (list comprehension instead of
        # filter(...)[0], which is not indexable on Python 3)
        dev = [entry for entry in os.listdir(device_dir) if match_dev(entry)][0]
        # Drop the literal 'block:' prefix.  The original used
        # lstrip('block:'), which strips a *character set* and would also eat
        # leading letters of device names (e.g. 'block:loop0' -> 'p0').
        return dev[len('block:'):]
    elif kernel_version.startswith('3.'):
        # directory for device name lives inside block directory e.g. block/sdx
        return _get_block_device_name_with_kernel_3x(device_dir)
    else:
        msg = 'Kernel version detected: %s' % kernel_version
        raise xs_errors.XenError('UnsupportedKernel', msg)
def _get_block_device_name_with_kernel_3x(device_dir):
    """Return the device name found under <device_dir>/block/, or
    INVALID_DEVICE_NAME when nothing is there."""
    candidates = glob.glob(os.path.join(device_dir, 'block/*'))
    if not candidates:
        return INVALID_DEVICE_NAME
    # prune path to extract the device name
    return os.path.basename(candidates[0])
def _extract_dev(device_dir, procname, host, target):
    """Returns device name and creates dictionary entry for it"""
    entry = {'procname': procname, 'host': host, 'target': target}
    return (_extract_dev_name(device_dir), entry)
def _add_host_parameters_to_adapter(dom, adapter, host_class, host_id,
                                    parameters):
    """Adds additional information about the adapter to the adapter node.

    Looks up /sys/class/<host_class>/host<host_id>; when present, appends a
    <host_class> child to ``adapter`` containing one element per readable,
    non-empty parameter file.  Unreadable files are silently skipped.
    """
    host_path = os.path.join('/sys/class/', host_class, 'host%s' % (host_id))
    if os.path.exists(host_path):
        host_entry = dom.createElement(host_class)
        adapter.appendChild(host_entry)
        for parameter in parameters:
            try:
                # 'with' closes the handle even if read() raises; the
                # original leaked the descriptor in that case.
                with open(os.path.join(host_path, parameter)) as filehandle:
                    parameter_value = filehandle.read(512).strip()
                if parameter_value:
                    entry = dom.createElement(parameter)
                    host_entry.appendChild(entry)
                    text_node = dom.createTextNode(parameter_value)
                    entry.appendChild(text_node)
            except IOError:
                pass
def scan(srobj):
systemrootID = util.getrootdevID()
hbadict = srobj.hbadict
hbas = srobj.hbas
dom = xml.dom.minidom.Document()
e = dom.createElement("Devlist")
dom.appendChild(e)
if not os.path.exists(DEVPATH):
return dom.toprettyxml()
devs = srobj.devs
vdis = {}
for key in hbadict:
hba = hbadict[key]
path = os.path.join("/dev",key)
realpath = path
obj = srobj.vdi("")
try:
obj._query(realpath, devs[realpath][4])
except:
continue
# Test for root dev or existing PBD
if len(obj.SCSIid) and len(systemrootID) and util.match_scsiID(obj.SCSIid, systemrootID):
util.SMlog("Ignoring root device %s" % realpath)
continue
elif util.test_SCSIid(srobj.session, None, obj.SCSIid):
util.SMlog("SCSIid in use, ignoring (%s)" % obj.SCSIid)
continue
elif not devs.has_key(realpath):
continue
ids = devs[realpath]
obj.adapter = ids[1]
obj.channel = ids[2]
obj.id = ids[3]
obj.lun = ids[4]
obj.hba = hba['procname']
obj.numpaths = 1
if vdis.has_key(obj.SCSIid):
vdis[obj.SCSIid].numpaths += 1
vdis[obj.SCSIid].path += " [%s]" % key
elif obj.hba == 'mpp':
mppdict = _genMPPHBA(obj.adapter)
if mppdict.has_key(key):
item = mppdict[key]
adapters = ''
for i in item:
if len(adapters):
adapters += ', '
obj.numpaths += 1
adapters += i
if len(adapters):
obj.mpp = adapters
vdis[obj.SCSIid] = obj
else:
vdis[obj.SCSIid] = obj
for key in vdis:
obj = vdis[key]
d = dom.createElement("BlockDevice")
e.appendChild(d)
for attr in ['path','numpaths','SCSIid','vendor','seri |
# test rasl inner loop on simulated data
#
# pylint:disable=import-error
from __future__ import division, print_function
import numpy as np
from rasl.inner import inner_ialm
from rasl import (warp_image_gradient, EuclideanTransform,
SimilarityTransform, AffineTransform, ProjectiveTransform)
def setup_function(_):
    """Reset the RNG and switch to full-precision array printing before each test."""
    np.random.seed(0)
    float_fmt = {'float_kind': lambda v: "%.3f" % v}
    np.set_printoptions(threshold=np.inf, formatter=float_fmt)
def gauss_image(h=60, v=60):
    """Return an h-by-v image of iid standard-normal pixels (as in the
    RASL and RPCA papers)."""
    shape = (h, v)
    return np.random.normal(0, 1.0, shape)
def image_noise(likeimg, p=0.1):
    """Sparse +/-1 noise with density *p*, same shape as *likeimg* (as in
    the RASL and RPCA papers)."""
    shape = likeimg.shape
    # draw signs first, then the sparsity mask (order matters for the RNG stream)
    signs = np.random.choice((-1.0, 1.0), size=shape)
    mask = np.random.binomial(1, p, size=shape)
    return signs * mask
def inner_aligned(Ttype, inset=10):
    """don't mess (much) with a stack of aligned images

    Builds N copies of one gaussian image, warps each with the same inset
    transform of type *Ttype*, runs one inner_ialm pass, and asserts the
    per-image parameter steps are all (nearly) identical.
    """
    N = 40
    image0 = gauss_image()
    insetT = Ttype().inset(image0.shape, inset)
    Image = [image0 for _ in range(N)]
    TI, J = zip(*[warp_image_gradient(insetT, image, normalize=True)
                  for image in Image])
    _, _, dParamv = inner_ialm(TI, J, tol=1e-4)
    # for this test, verify that all images have same dParamv
    # (inner insists on stepping dParamv a small amount when all images
    # are aligned, so image comparisons are no good)
    assert np.allclose(dParamv, dParamv[0], atol=1e-3)
# Run the aligned-stack check once per supported transform family.
def test_inner_aligned_similarity():
    inner_aligned(SimilarityTransform)
def test_inner_aligned_euclidean():
    inner_aligned(EuclideanTransform)
def test_inner_aligned_affine():
    inner_aligned(AffineTransform)
def test_inner_aligned_projective():
    inner_aligned(ProjectiveTransform)
def inner_jittered(T, inset=10, rtol=1e-3, atol=0):
    """move a stack of jittered noisy images in the direction of aligned

    *T* is a list of transforms, one per image. After one inner_ialm pass,
    either the per-parameter standard deviation across the stack shrinks,
    or the parameters stay essentially unchanged (within rtol/atol).
    """
    image0 = gauss_image()
    Image = [image0 + image_noise(image0, p=.05) for _ in T]
    T = [tform.inset(image0.shape, inset) for tform in T]
    TImage, J = zip(*[warp_image_gradient(tform, image, normalize=True)
                      for tform, image in zip(T, Image)])
    _, _, dParamv = inner_ialm(TImage, J, tol=1e-4)
    # does dParamv move towards alignment? check if stdev of
    # parameters decreased.
    before = np.array([t.paramv for t in T])
    beforeStd = np.std(before, 0)
    after = np.array([t.paramv + dparamv
                      for t, dparamv in zip(T, dParamv)])
    afterStd = np.std(after, 0)
    assert np.all(np.logical_or(afterStd < beforeStd,
                                np.isclose(after, before, rtol=rtol, atol=atol)))
# Jittered-stack convergence checks, one per transform family. Each draws
# small uniform random perturbations per parameter; multiplicative (scale)
# parameters are centered on 1, additive ones on 0.
def test_inner_jittered_euclidean():
    N = 40
    dtheta, dx, dy= .05, 1, 1
    Jitters = [[(np.random.random() * 2 - 1) * dtheta,
                (np.random.random() * 2 - 1) * dx,
                (np.random.random() * 2 - 1) * dy]
               for _ in range(N)]
    inner_jittered([EuclideanTransform(paramv=jitter) for jitter in Jitters])
def test_inner_jittered_similarity():
    N = 40
    ds, dtheta, dx, dy= .05, .05, 1, 1
    Jitters = [[(np.random.random() * 2 - 1) * ds + 1,
                (np.random.random() * 2 - 1) * dtheta,
                (np.random.random() * 2 - 1) * dx,
                (np.random.random() * 2 - 1) * dy]
               for _ in range(N)]
    inner_jittered([SimilarityTransform(paramv=jitter) for jitter in Jitters])
def test_inner_jittered_affine():
    N = 40
    ds, dtheta, dx = .05, .05, 1
    Jitters = [[(np.random.random() * 2 - 1) * ds + 1.0,
                (np.random.random() * 2 - 1) * dtheta,
                (np.random.random() * 2 - 1) * dx,
                (np.random.random() * 2 - 1) * dtheta,
                (np.random.random() * 2 - 1) * ds + 1.0,
                (np.random.random() * 2 - 1) * dx]
               for _ in range(N)]
    inner_jittered([AffineTransform(paramv=jitter) for jitter in Jitters])
def test_inner_jittered_projective():
    # projective is a pain to test this way. the two projective
    # parameters are badly conditioned and change too much in a single
    # step. for now, set tolerance to disregard a wobbly step in the
    # final two parameters, while assuring we converge the others.
    N = 40
    ds, dtheta, dx, dh = .05, .05, 1, 0.0005
    Jitters = [[(np.random.random() * 2 - 1) * ds + 1,
                (np.random.random() * 2 - 1) * dtheta,
                (np.random.random() * 2 - 1) * dx,
                (np.random.random() * 2 - 1) * dtheta,
                (np.random.random() * 2 - 1) * ds + 1,
                (np.random.random() * 2 - 1) * dx,
                (np.random.random() * 2 - 1) * dh,
                (np.random.random() * 2 - 1) * dh]
               for _ in range(N)]
    inner_jittered([ProjectiveTransform(paramv=jitter) for jitter in Jitters],
                   atol=.001)
|
# -*- coding: utf-8 -*-
"""
Package of failing integer functions.
"""
from metaopt.objective.integer.failing.f import f as f
from metaopt.objective.integer.failing.g import f as g
# Registry of the failing integer objectives exported by this package.
# (g is the `f` defined in module failing.g, re-exported under a new name.)
FUNCTIONS_FAILING = [f, g]
|
# Copyright 2015 Hewlett-Packard Development Company, L.P
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages
class TestFloatingip(helpers.TestCase):
    """Checks that the user is able to allocate/release floatingip."""
    def test_floatingip(self):
        floatingip_page = \
            self.home_pg.go_to_compute_accessandsecurity_floatingipspage()
        # Allocate: expect a success toast, no error toast, and the new
        # address present in the table.
        floating_ip = floatingip_page.allocate_floatingip()
        self.assertTrue(
            floatingip_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(
            floatingip_page.find_message_and_dismiss(messages.ERROR))
        self.assertTrue(floatingip_page.is_floatingip_present(floating_ip))
        # Release: success again, and the address gone from the table.
        floatingip_page.release_floatingip(floating_ip)
        self.assertTrue(
            floatingip_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(
            floatingip_page.find_message_and_dismiss(messages.ERROR))
        self.assertFalse(floatingip_page.is_floatingip_present(floating_ip))
class TestFloatingipAssociateDisassociate(helpers.TestCase):
    """Checks that the user is able to Associate/Disassociate floatingip."""
    def test_floatingip_associate_disassociate(self):
        # Boot an instance to attach the floating IP to.
        instance_name = helpers.gen_random_resource_name('instance',
                                                         timestamp=False)
        instances_page = self.home_pg.go_to_compute_instancespage()
        instances_page.create_instance(instance_name)
        self.assertTrue(
            instances_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(
            instances_page.find_message_and_dismiss(messages.ERROR))
        self.assertTrue(instances_page.is_instance_active(instance_name))
        instance_ipv4 = instances_page.get_fixed_ipv4(instance_name)
        instance_info = "{} {}".format(instance_name, instance_ipv4)
        # Allocate a floating IP; it should start unassociated ('-').
        floatingip_page = \
            self.home_pg.go_to_compute_accessandsecurity_floatingipspage()
        floating_ip = floatingip_page.allocate_floatingip()
        self.assertTrue(
            floatingip_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(
            floatingip_page.find_message_and_dismiss(messages.ERROR))
        self.assertTrue(floatingip_page.is_floatingip_present(floating_ip))
        self.assertEqual('-', floatingip_page.get_fixed_ip(floating_ip))
        # Associate: the fixed-IP column should show the instance info.
        floatingip_page.associate_floatingip(floating_ip, instance_name,
                                             instance_ipv4)
        self.assertTrue(
            floatingip_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(
            floatingip_page.find_message_and_dismiss(messages.ERROR))
        self.assertEqual(instance_info,
                         floatingip_page.get_fixed_ip(floating_ip))
        # Disassociate: the column reverts to '-'.
        floatingip_page.disassociate_floatingip(floating_ip)
        self.assertTrue(
            floatingip_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(
            floatingip_page.find_message_and_dismiss(messages.ERROR))
        self.assertEqual('-', floatingip_page.get_fixed_ip(floating_ip))
        # Clean up: release the IP and delete the instance.
        floatingip_page.release_floatingip(floating_ip)
        self.assertTrue(
            floatingip_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(
            floatingip_page.find_message_and_dismiss(messages.ERROR))
        self.assertFalse(floatingip_page.is_floatingip_present(floating_ip))
        instances_page = self.home_pg.go_to_compute_instancespage()
        instances_page.delete_instance(instance_name)
        self.assertTrue(
            instances_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(
            instances_page.find_message_and_dismiss(messages.ERROR))
        self.assertTrue(instances_page.is_instance_deleted(instance_name))
|
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import logging
import traceback
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render
from django.contrib.auth import logout as django_logout
from restclients_core.exceptions import DataFailureException
from myuw.dao import is_action_disabled
from myuw.dao.affiliation import get_all_affiliations
from myuw.dao.emaillink import get_service_url_for_address
from myuw.dao.exceptions import (
EmailServiceUrlException, BlockedNetidErr)
from myuw.dao.gws import in_myuw_test_access_group
from myuw.dao.quicklinks import get_quicklink_data
from myuw.dao.card_display_dates import get_card_visibilty_date_values
from myuw.dao.messages import get_current_messages
from myuw.dao.term import add_term_data_to_context
from myuw.dao.user import get_updated_user, not_existing_user
from myuw.dao.user_pref import get_migration_preference
from myuw.dao.uwnetid import get_email_forwarding_for_current_user
from myuw.logger.timer import Timer
from myuw.logger.logresp import (
log_invalid_netid_response, log_page_view, log_exception)
from myuw.logger.session_log import (
log_session, is_native, log_session_end)
from myuw.util.settings import (
get_google_search_key, get_google_analytics_key, get_django_debug,
get_logout_url, no_access_check)
from myuw.views import prefetch_resources, get_enabled_features
from myuw.views.error import (
unknown_uwnetid, no_access, blocked_uwnetid, pws_error_404)
from django.contrib.auth.decorators import login_required
logger = logging.getLogger(__name__)
def page(request,
         template,
         context=None,
         prefetch=True,
         add_quicklink_context=False):
    """Render *template* with the standard MyUW page context.

    Resolves the current user, enforces the access-group check, optionally
    prefetches backend resources, then fills the context with affiliation,
    banner, preference, and analytics data. Returns an error/500 response
    when a required backend call fails.
    """
    if context is None:
        context = {}
    timer = Timer()
    try:
        user = get_updated_user(request)
    except DataFailureException as ex:
        log_exception(logger, "PWS error", traceback)
        # 404 from PWS: distinguish an unknown netid from a PWS lookup miss
        if ex.status == 404:
            if not_existing_user(request):
                return unknown_uwnetid()
            return pws_error_404()
        return render(request, '500.html', status=500)
    try:
        if not can_access_myuw(request):
            return no_access()
    except DataFailureException:
        log_exception(logger, "GWS error", traceback)
        return render(request, '500.html', status=500)
    netid = user.uwnetid
    context["user"] = {
        "netid": netid,
        "isHybrid": is_native(request),
    }
    if prefetch:
        # Some pages need to prefetch before this point
        failure = try_prefetch(request, template, context)
        if failure:
            return failure
    try:
        affiliations = get_all_affiliations(request)
    except BlockedNetidErr:
        # blocked netids are logged out and shown a dedicated error page
        django_logout(request)
        return blocked_uwnetid()
    except DataFailureException as err:
        log_exception(logger, err, traceback)
        return render(request, '500.html', status=500)
    user_pref = get_migration_preference(request)
    log_session(request)
    context["user"]["session_key"] = request.session.session_key
    context["home_url"] = "/"
    context["err"] = None
    context["user"]["affiliations"] = affiliations
    banner_messages = []
    for message in get_current_messages(request):
        banner_messages.append(message.message_body)
    context["banner_messages"] = banner_messages
    context["display_onboard_message"] = user_pref.display_onboard_message
    context["display_pop_up"] = user_pref.display_pop_up
    context["disable_actions"] = is_action_disabled()
    _add_email_forwarding(request, context)
    try:
        context["card_display_dates"] = get_card_visibilty_date_values(request)
        add_term_data_to_context(request, context)
    except DataFailureException:
        # term data is non-fatal; the page renders without it
        log_exception(logger, "SWS term data error", traceback)
    context['enabled_features'] = get_enabled_features()
    context['google_search_key'] = get_google_search_key()
    context['google_analytics_key'] = get_google_analytics_key()
    context['google_tracking_enabled'] = not get_django_debug()
    if add_quicklink_context:
        _add_quicklink_context(request, context)
    log_page_view(timer, request, template)
    return render(request, template, context)
def try_prefetch(request, template, context):
    """Prefetch backend resources for the page.

    Returns an HttpResponse (the outage page) when a web-service failure
    occurred, otherwise None so the caller proceeds with normal rendering.
    """
    try:
        prefetch_resources(request,
                           prefetch_migration_preference=True,
                           prefetch_enrollment=True,
                           prefetch_group=True,
                           prefetch_instructor=True,
                           prefetch_sws_person=True)
    except DataFailureException:
        log_exception(logger, "prefetch error", traceback)
        context["webservice_outage"] = True
        return render(request, template, context)
    return
@login_required
def logout(request):
    """End the MyUW session; redirect browsers to the authN logout page."""
    log_session_end(request)
    django_logout(request)  # clear the session data
    if is_native(request):
        # the hybrid/native app handles its own navigation after logout
        return HttpResponse()
    # Redirects to authN service logout page
    return HttpResponseRedirect(get_logout_url())
def _add_quicklink_context(request, context):
    """Merge the user's quicklink data into the template context."""
    # dict.update replaces the manual key-by-key copy loop.
    context.update(get_quicklink_data(request))
def can_access_myuw(request):
    """Whether MyUW access is allowed: checks are disabled, or the user
    belongs to the MyUW test-access group."""
    allowed = no_access_check() or in_myuw_test_access_group(request)
    return allowed
|
def _add_email_forwarding(request, context):
    """Set email_forward_url on context["user"]; on failure set it to None
    and flag email_error."""
    my_uwemail_forwarding = get_email_forwarding_for_current_user(request)
    c_user = context["user"]
    if my_uwemail_forwarding and my_uwemail_forwarding.is_active():
        try:
            c_user['email_forward_url'] = get_service_url_for_address(
                my_uwemail_forwarding.fwd)
            return
        except EmailServiceUrlException:
            # unknown provider: log it but do not flag a hard error
            logger.error('No email url for {}'.format(
                my_uwemail_forwarding.fwd))
            return  # MUWM-4700
    # no active forwarding record: mark the error state for the template
    c_user['email_forward_url'] = None
    c_user['email_error'] = True
|
import logging
from stubo.ext.xmlutils import XPathValue
from stubo.ext.xmlexit import XMLManglerExit
log = logging.getLogger(__name | __)
# XPath locations of the dateTime child elements under dispatchTime whose
# (volatile) values should be ignored when matching stubbed requests.
elements = dict(year=XPathValue('//dispatchTime/dateTime/year'),
                month=XPathValue('//dispatchTime/dateTime/month'),
                day=XPathValue('//dispatchTime/dateTime/day'),
                hour=XPathValue('//dispatchTime/dateTime/hour'),
                minutes=XPathValue('//dispatchTime/dateTime/minutes'),
                seconds=XPathValue('//dispatchTime/dateTime/seconds'))
# XPath locations of the date attributes, masked likewise.
attrs = dict(y=XPathValue('//dispatchTime/date/@year'),
             m=XPathValue('//dispatchTime/date/@month'),
             d=XPathValue('//dispatchTime/date/@day'))
# Pre-built mangler exit that strips the fields above before matching.
ignore = XMLManglerExit(elements=elements, attrs=attrs)
def exits(request, context):
    """Stubo exit hook: delegate to the pre-built mangler exit."""
    return ignore.get_exit(request, context)
|
authent.models.default_structure_pk, verbose_name='Related structure', to='authent.Structure')),
],
options={
'ordering': ['network'],
'db_table': 'l_b_reseau',
'verbose_name': 'Network',
'verbose_name_plural': 'Networks',
},
),
migrations.CreateModel(
name='Path',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date_insert', models.DateTimeField(auto_now_add=True, verbose_name='Insertion date', db_column='date_insert')),
('date_update', models.DateTimeField(auto_now=True, verbose_name='Update date', db_column='date_update')),
('geom_3d', django.contrib.gis.db.models.fields.GeometryField(dim=3, default=None, editable=False, srid=settings.SRID, null=True, spatial_index=False)),
('length', models.FloatField(db_column='longueur', default=0.0, editable=False, blank=True, null=True, verbose_name='3D Length')),
('ascent', models.IntegerField(db_column='denivelee_positive', default=0, editable=False, blank=True, null=True, verbose_name='Ascent')),
('descent', models.IntegerField(db_column='denivelee_negative', default=0, editable=False, blank=True, null=True, verbose_name='Descent')),
('min_elevation', models.IntegerField(db_column='altitude_minimum', default=0, editable=False, blank=True, null=True, verbose_name='Minimum elevation')),
('max_elevation', models.IntegerField(db_column='altitude_maximum', default=0, editable=False, blank=True, null=True, verbose_name='Maximum elevation')),
('slope', models.FloatField(db_column='pente', default=0.0, editable=False, blank=True, null=True, verbose_name='Slope')),
('geom', django.contrib.gis.db.models.fields.LineStringField(srid=settings.SRID, spatial_index=False)),
('geom_cadastre', django.contrib.gis.db.models.fields.LineStringField(srid=settings.SRID, spatial_index=False, null=True, editable=False)),
('valid', models.BooleanField(default=True, help_text='Approved by manager', verbose_name='Validity', db_column='valide')),
('visible', models.BooleanField(default=True, help_text='Shown in lists and maps', verbose_name='Visible', db_column='visible')),
('name', models.CharField(db_column='nom', max_length=20, blank=True, help_text='Official name', null=True, verbose_name='Name')),
('comments', models.TextField(help_text='Remarks', null=True, verbose_name='Comments', db_column='remarques', blank=True)),
('departure', models.CharField(db_column='depart', default='', max_length=250, blank=True, help_text='Departure place', null=True, verbose_name='Departure')),
('arrival', models.CharField(db_column='arrivee', default='', max_length=250, blank=True, help_text='Arrival place', null=True, verbose_name='Arrival')),
('eid', models.CharField(max_length=128, null=True, verbose_name='External id', db_column='id_externe', blank=True)),
('comfort', models.ForeignKey(related_name='paths', on_delete=django.db.models.deletion.CASCADE, db_column='confort', blank=True, to='core.Comfort', null=True, verbose_name='Comfort')),
('networks', models.ManyToManyField(related_name='paths', db_table='l_r_troncon_reseau', verbose_name='Networks', to='core.Network', blank=True)),
],
options={
'db_table': 'l_t_troncon',
'verbose_name': 'Path',
'verbose_name_plural': 'Paths',
},
bases=(geotrek.common.mixins.AddPropertyMixin, mapentity.models.MapEntityMixin, models.Model),
),
migrations.CreateModel(
name='PathAggregation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('start_position', models.FloatField(verbose_name='Start position', db_column='pk_debut', db_index=True)),
('end_position', models.FloatField(verbose_name='End position', db_column='pk_fin', db_index=True)),
('order', models.IntegerField(default=0, null=True, verbose_name='Order', db_column='ordre', blank=True)),
('path', models.ForeignKey(related_name='aggregations', on_delete=django.db.models.deletion.DO_NOTHING, db_column='troncon', verbose_name='Path', to='core.Path')),
],
options={
'ordering': ['order'],
'db_table': 'e_r_evenement_troncon',
'verbose_name': 'Path aggregation',
'verbose_name_plural': 'Path aggregations',
},
),
migrations.CreateModel(
| name='PathSource',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('source', models.CharField(max_length=50, verbose_name='Source')),
('structure', models.ForeignKey(db_column='structure', on_delete=django.db.models.deletion.CASCADE, default=geotrek.authent.models.default_str | ucture_pk, verbose_name='Related structure', to='authent.Structure')),
],
options={
'ordering': ['source'],
'db_table': 'l_b_source_troncon',
'verbose_name': 'Path source',
'verbose_name_plural': 'Path sources',
},
),
migrations.CreateModel(
name='Stake',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('stake', models.CharField(max_length=50, verbose_name='Stake', db_column='enjeu')),
('structure', models.ForeignKey(db_column='structure', on_delete=django.db.models.deletion.CASCADE, default=geotrek.authent.models.default_structure_pk, verbose_name='Related structure', to='authent.Structure')),
],
options={
'ordering': ['id'],
'db_table': 'l_b_enjeu',
'verbose_name': 'Maintenance stake',
'verbose_name_plural': 'Maintenance stakes',
},
),
migrations.CreateModel(
name='Topology',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date_insert', models.DateTimeField(auto_now_add=True, verbose_name='Insertion date', db_column='date_insert')),
('date_update', models.DateTimeField(auto_now=True, verbose_name='Update date', db_column='date_update')),
('deleted', models.BooleanField(default=False, verbose_name='Deleted', editable=False, db_column='supprime')),
('geom_3d', django.contrib.gis.db.models.fields.GeometryField(dim=3, default=None, editable=False, srid=settings.SRID, null=True, spatial_index=False)),
('length', models.FloatField(db_column='longueur', default=0.0, editable=False, blank=True, null=True, verbose_name='3D Length')),
('ascent', models.IntegerField(db_column='denivelee_positive', default=0, editable=False, blank=True, null=True, verbose_name='Ascent')),
('descent', models.IntegerField(db_column='denivelee_negative', default=0, editable=False, blank=True, null=True, verbose_name='Descent')),
('min_elevation', models.IntegerField(db_column='altitude_minimum', default=0, editable=False, blank=True, null=True, verbose_name='Minimum elevation')),
('max_elevation', models.IntegerField(db_column='altitude_maximum', default=0, editable=False, blank=True, null=True, verbose_name='Maximum elevation')),
('slope', models.FloatField(db_column='pente', default=0.0, editable=False, blank=True, null=True, verbose_name='Slope')),
('offset', models.FloatField(default=0.0, |
# Print each letter of `a` prefixed to `b` (e.g. 'salam', 'dalam', ...).
a = 'sdlbapm'
b = 'alam'
for d in a:
    # print(...) works identically under Python 2 and 3 for a single
    # expression; the original `print d + b` is Python-2-only syntax.
    print(d + b)
|
import unittest
from datetime import datetime
import tempfile
import os
from due.agent import Agent
from due.episode import Episode
from due.event import Event
from due.persistence import serialize, deserializ | e
from due.models.tfidf import TfIdfAgent
from due.models.dummy import DummyAgent
class TestTfIdfAgent(unittest.TestCase):
    """Tests for the TF-IDF retrieval agent: persistence and answering."""
    def test_save_load(self):
        """A saved agent, once reloaded, must be equivalent to the original."""
        agent = TfIdfAgent()
        agent.learn_episodes(_get_train_episodes())
        saved_agent = agent.save()
        with tempfile.TemporaryDirectory() as temp_dir:
            path = os.path.join(temp_dir, 'serialized_tfidf_agent.due')
            serialize(saved_agent, path)
            loaded_agent = Agent.load(deserialize(path))
            assert agent.parameters == loaded_agent.parameters
            assert agent._normalized_past_utterances == loaded_agent._normalized_past_utterances
            assert [e.save() for e in loaded_agent._past_episodes] == [e.save() for e in agent._past_episodes]
            expected_utterance = agent._process_utterance('aaa bbb ccc mario')
            loaded_utterance = loaded_agent._process_utterance('aaa bbb ccc mario')
            # sparse matrices are equal iff the != comparison has no nonzeros
            assert (agent._vectorizer.transform([expected_utterance]) != loaded_agent._vectorizer.transform([loaded_utterance])).nnz == 0
            assert (agent._vectorized_past_utterances != loaded_agent._vectorized_past_utterances).nnz == 0
            # BUG FIX: the original read `assert x, y`, which treats y as the
            # assertion *message* and never compared the two payloads.
            assert agent.utterance_callback(_get_test_episode())[0].payload == \
                loaded_agent.utterance_callback(_get_test_episode())[0].payload
    def test_utterance_callback(self):
        """'aaa' is answered by 'bbb', its follow-up in the training data."""
        agent = TfIdfAgent()
        agent.learn_episodes(_get_train_episodes())
        result = agent.utterance_callback(_get_test_episode())
        self.assertEqual(result[0].payload, 'bbb')
    def test_tfidf_agent(self):
        cb = TfIdfAgent()
        # Learn sample episode
        sample_episode, alice, bob = _sample_episode()
        cb.learn_episodes([sample_episode])
        # Predict answer
        e2 = alice.start_episode(bob)
        alice.say("Hi!", e2)
        answer_events = cb.utterance_callback(e2)
        self.assertEqual(len(answer_events), 1)
        self.assertEqual(answer_events[0].payload, 'Hello')
    def test_agent_load(self):
        """Round-trip through serialize/deserialize preserves behavior."""
        sample_episode, alice, bob = _sample_episode()
        cb = TfIdfAgent()
        cb.learn_episodes([sample_episode])
        test_dir = tempfile.mkdtemp()
        test_path = os.path.join(test_dir, 'test_agent_load.pkl')
        serialize(cb.save(), test_path)
        loaded_cb = Agent.load(deserialize(test_path))
        self.assertIsInstance(loaded_cb, TfIdfAgent)
        e2 = alice.start_episode(bob)
        alice.say("Hi!", e2)
        answer_events = loaded_cb.utterance_callback(e2)
        self.assertEqual(len(answer_events), 1)
        self.assertEqual(answer_events[0].payload, 'Hello')
def _get_train_episodes():
    """Two tiny scripted episodes ('a'/'b' and '1'/'2') used as training data."""
    episode_ab = Episode('a', 'b')
    episode_ab.events = [
        Event(Event.Type.Utterance, datetime.now(), speaker, text)
        for speaker, text in (('a', 'aaa'), ('b', 'bbb'),
                              ('a', 'ccc'), ('b', 'ddd'))
    ]
    episode_12 = Episode('1', '2')
    episode_12.events = [
        Event(Event.Type.Utterance, datetime.now(), speaker, text)
        for speaker, text in (('1', '111'), ('2', '222'),
                              ('1', '333'), ('2', '444'))
    ]
    return [episode_ab, episode_12]
def _get_test_episode():
    """A one-utterance episode ('aaa' from speaker 'a') used as the query."""
    episode = Episode('a', 'b')
    only_event = Event(Event.Type.Utterance, datetime.now(), 'a', 'aaa')
    episode.events = [only_event]
    return episode
def _sample_episode():
    """A scripted greeting conversation between two dummy agents."""
    alice = DummyAgent('alice')
    bob = DummyAgent('bob')
    episode = alice.start_episode(bob)
    script = ((alice, "Hi!"),
              (bob, "Hello"),
              (alice, "How are you?"),
              (bob, "Good thanks, and you?"),
              (alice, "All good"))
    for speaker, line in script:
        speaker.say(line, episode)
    return episode, alice, bob
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unicodedata
from urlparse import urlparse
from threading import Thread
import httplib, sys
from Queue import Q | ueue
import itertools
import codecs
import csv
import sys
import ssl
import re
# Require both the input database and output CSV paths on the command line.
if len(sys.argv) < 3:
    print "Usage: %s <csv database> <out csv>" % (sys.argv[0])
    exit()
# Unicode CSV reader
# http://stackoverflow.com/a/6187936
class UnicodeCsvReader(object):
    """Python 2 csv.reader wrapper that decodes every cell to unicode."""
    def __init__(self, f, encoding="utf-8", **kwargs):
        self.csv_reader = csv.reader(f, **kwargs)
        self.encoding = encoding
    def __iter__(self):
        return self
    def next(self):
        # read and split the csv row into fields
        row = self.csv_reader.next()
        # now decode
        return [unicode(cell, self.encoding) for cell in row]
    @property
    def line_num(self):
        # current physical line number, mirroring csv.reader's attribute
        return self.csv_reader.line_num
class UnicodeDictReader(csv.DictReader):
    """csv.DictReader variant backed by the unicode-aware row reader."""
    def __init__(self, f, encoding="utf-8", fieldnames=None, **kwds):
        csv.DictReader.__init__(self, f, fieldnames=fieldnames, **kwds)
        # swap in the decoding reader for the underlying row source
        self.reader = UnicodeCsvReader(f, encoding=encoding, **kwds)
# Remove particles and parenthesis in names
def cleanNames(names):
    """Keep only the words whose first character is uppercase (drops
    lowercase particles such as 'de'/'la' and punctuation-led tokens)."""
    return [word for word in names
            if len(word) and word[0].lower() != word[0]]
# Strips accents from a unicode string
def stripAccents(s):
    """Decompose to NFD and drop the combining-mark codepoints."""
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed
                   if unicodedata.category(ch) != 'Mn')
# Generates all 2+ permutations of the given array
def allCombinations(tab):
    """Space-joined combinations of *tab* for every length from 2 up to len(tab)."""
    out = []
    for size in range(2, len(tab) + 1):
        out.extend(" ".join(comb)
                   for comb in itertools.combinations(tab, size))
    return out
# Cycles through available urls and returns the next one in the list
def getNextBaseURL():
    # Round-robin over the configured search-instance URLs; state lives in
    # the function attributes initialised below.
    out = getNextBaseURL.urllist[getNextBaseURL.counter % len(getNextBaseURL.urllist)]
    getNextBaseURL.counter += 1
    return out
getNextBaseURL.counter = 0
# NOTE: reads urls.txt at import time; a missing file aborts the script here.
getNextBaseURL.urllist = [l.strip() for l in open("urls.txt", "r")]
def fetchHandles(ourl, handles):
    """Fetch *ourl* (a CSV-format search result) and append any new twitter
    handles found in the second CSV column to *handles* (deduplicated)."""
    try:
        url = urlparse(ourl)
        # certificate verification is deliberately disabled for these hosts
        conn = httplib.HTTPSConnection(url.netloc, context=ssl._create_unverified_context())
        conn.request("GET", ourl)
        res = conn.getresponse()
        if res.status != 200:
            print res.reason, ourl
            return
        # the first line is the CSV header; parse the remainder
        for line in csv.reader((l for l in res.read().split("\n")[1:])):
            if len(line) < 2:
                continue
            match = re.match('https?://twitter.com/(\w+)[^/]*$', line[1])
            if match:
                handle = match.group(1)
                if handle not in handles:
                    handles.append(handle)
    except Exception, e:
        # NOTE(review): exit() here terminates the whole program from a
        # worker thread on any error -- presumably deliberate fail-fast;
        # confirm this is intended.
        print "Error(%s): %s" % (ourl, e)
        exit()
    return
def doQueries():
    """Worker loop: take (names, region, party) off the queue, search every
    2+ word combination of the cleaned name for twitter handles, and append
    one result line per person to the output CSV."""
    # NOTE(review): the base URL is chosen once per worker thread, not per
    # query -- rotation only happens across threads; confirm intended.
    base = getNextBaseURL()
    while True:
        names, region, party = q.get()
        clean_names = cleanNames(stripAccents(names).split(" "))
        handles = []
        for comb in allCombinations(clean_names):
            query = comb.replace(" ", "+") + "+" + region + "+" + party + "+site:twitter.com"
            url = base + "/?format=csv&q=" + query
            fetchHandles(url, handles)
        # append-mode write; concurrent writes from workers may interleave
        with codecs.open(sys.argv[2], "a", "utf-8") as out:
            out.write("%s, %s\n" % (names, handles))
        print "%s, %s" % (names, handles)
        q.task_done()
# Start the worker pool; each daemon thread consumes (names, region, party)
# tuples from the bounded queue.
concurrent = 50
q = Queue(concurrent * 2)
for i in range(concurrent):
    t = Thread(target=doQueries)
    t.daemon = True
    t.start()
try:
    with open(sys.argv[1], 'rb') as csvfile:
        first = True
        for line in UnicodeCsvReader(csvfile):
            # skip the CSV header row
            if first:
                first = False
                continue
            names = line[0]
            region = stripAccents(line[3]).replace(" ", "+")
            party = stripAccents(line[5]).replace(" ", "+")
            # normalise the alternate spellings of the Ciudadanos party
            if party == "C's" or party == u"C´s":
                party = "Ciudadanos"
            q.put((names, region, party))
    q.join()
except KeyboardInterrupt:
    sys.exit(1)
|
#!/usr/local/sci/bi | n/python
# PYTHON2.7
# import TestLeap
# TestVal = TestLeap.TestLeap(year)
import numpy as np
def TestLeap(year):
    """Return 0.0 when *year* is a leap year, a non-zero float otherwise.

    Scalar years only. Works on the fractional part of year/4; century
    years additionally fold in the fractional part of year/400.
    """
    quarter = year / 4.
    leapoo = quarter - np.round(quarter)
    # century years must also be divisible by 400 to be leap years
    century = year / 100.
    if (century - np.round(century)) == 0.:
        leapoo = leapoo + ((year / 400.) - np.round(year / 400.))
    return leapoo
|
# coding=utf-8
import time
import json
import boto3
from botocore.errorfactory import ClientError
def lambda_handler(event, context):
    """AWS Lambda entry point: bake an automation AMI from the instance in
    us-east-2 and copy it to us-east-1 and us-west-2; returns a JSON map
    of region -> image id."""
    instance_id = event.get('instance_id')
    # NOTE(review): region_id is read but never used below -- confirm.
    region_id = event.get('region_id', 'us-east-2')
    image_name = 'beam-automation-'+time.strftime("%Y-%m-%d-%H%M%S", time.gmtime())
    image_ids = {}
    image_ids['us-east-2'] = create_ami(image_name, instance_id)
    image_ids['us-east-1'] = copy_ami(image_name, image_ids['us-east-2'], 'us-east-1')
    image_ids['us-west-2'] = copy_ami(image_name, image_ids['us-east-2'], 'us-west-2')
    update_lambda(image_ids)
    return json.dumps(image_ids)
def create_ami(image_name, instance_id):
    """Create an AMI from *instance_id* in us-east-2, wait for it to become
    available, then terminate the source instance; return the image id."""
    ec2 = boto3.client('ec2',region_name='us-east-2')
    res = ec2.create_image(InstanceId=instance_id,
                           Name=image_name)
    wait4image(ec2, res['ImageId'])
    ec2.terminate_instances(InstanceIds=[instance_id])
    return res['ImageId']
def copy_ami(image_name, image_id, region):
    """Copy AMI *image_id* from us-east-2 into *region*; returns the new
    (possibly still-pending) image id without waiting for availability."""
    ec2 = boto3.client('ec2',region_name=region)
    res = ec2.copy_image(Name=image_name,
                         SourceImageId=image_id,
                         SourceRegion='us-east-2')
    # wait4image(ec2, res['ImageId'])
    return res['ImageId']
def wait4image(ec2, image_id):
    """Block until the AMI *image_id* reaches the 'available' state."""
    waiter = ec2.get_waiter('image_available')
    waiter.wait(Filters=[{'Name': 'state', 'Values': ['available']}],
                ImageIds=[image_id])
def upda | te_lambda(image_ids):
lm = boto3.client('lambda')
en_var = lm.get_function_configuration(FunctionName='simulateBeam')['Environment']['Variables']
en_var.update({
'us_east_2_IMAGE_ID': image_ids['us-east-2'],
'us_east_1_IMAGE_ID': image_ids['us-east-1'],
'us_west_2_IMAGE_ID': image_ids['us-west-2'],
})
lm.update_function_configuration(
| FunctionName='simulateBeam',
Environment={
'Variables': en_var
}
)
# NOTE(review): the three helpers below reference a module-global `ec2`
# client that is never defined in this module -- calling them would raise
# NameError; confirm whether they are dead code or rely on a client
# injected elsewhere.
def check_instance_id(instance_ids):
    """Remove from *instance_ids* the ids of instances that still exist;
    returns the (mutated) list of unknown ids."""
    for reservation in ec2.describe_instances()['Reservations']:
        for instance in reservation['Instances']:
            if instance['InstanceId'] in instance_ids:
                instance_ids.remove(instance['InstanceId'])
    return instance_ids
def stop_instance(instance_ids):
    """Stop the given EC2 instances; returns the API response."""
    return ec2.stop_instances(InstanceIds=instance_ids)
def terminate_instance(instance_ids):
    """Terminate the given EC2 instances; returns the API response."""
    return ec2.terminate_instances(InstanceIds=instance_ids)
|
not self.tar:
self.tar = tar
# return
else:
for i in tar:
self.tar.addfile(i)
def is_cached(self, filename):
if not self.tar:
return False
return filename in self.tar.getnames()
def _get_file(self, filename):
""" Download filename and return the TarInfo object """
if filename not in self.tar.getnames():
self.add_to_tar(filename)
return self.tar.getmember(filename)
    def get_cfg_files(self):
        """ Return all nagios cfg files referenced by the main config.

        Walks self.maincfg_values for cfg_file/cfg_dir directives; cfg_dir
        directories are expanded via a remote ``find`` over ssh. Each
        referenced path is also pulled into the local tar cache.
        """
        cfg_files = []
        for config_object, config_value in self.maincfg_values:
            # Add cfg_file objects to cfg file list
            if config_object == "cfg_file":
                config_value = self.abspath(config_value)
                if self.isfile(config_value):
                    cfg_files.append(config_value)
            elif config_object == "cfg_dir":
                absolut_path = self.abspath(config_value)
                # List every *cfg file under the directory on the remote host.
                command = "find '%s' -type f -iname \*cfg" % (absolut_path)
                stdin, stdout, stderr = self.ssh.exec_command(command)
                raw_filelist = stdout.read().splitlines()
                cfg_files += raw_filelist
            else:
                continue
            # NOTE(review): for the cfg_dir branch this caches the directory
            # path itself, not the files found inside it -- confirm intended.
            if not self.is_cached(config_value):
                self.add_to_tar(config_value)
        return cfg_files
def isfile(self, path):
""" Behaves like os.path.isfile only, via ssh connection """
try:
copy = self._get_file(path)
return copy.isfile()
except IOError:
return False
def isdir(self, path):
""" Behaves like os.path.isdir only, via ssh connection """
try:
file_stat = self.stat(path)
return stat.S_ISDIR(file_stat.st_mode)
except IOError:
return False
def islink(self, path):
""" Behaves like os.path.islink only, via ssh connection """
try:
file_stat = self.stat(path)
return stat.S_ISLNK(file_stat.st_mode)
except IOError:
return False
def readlink(self, path):
""" Behaves like os.readlink only, via ssh connection """
return self.ftp.readlink(path)
def stat(self, *args, **kwargs):
""" Wrapper around os.stat only, via ssh connection """
path = args[0]
if not self.is_cached(path):
self.add_to_tar(path)
if path not in self.tar.getnames():
raise IOError("No such file or directory %s" % path)
member = self.tar.getmember(path)
member.st_mode = member.mode
member.st_mtime = member.mtime
return member
    def access(self, *args, **kwargs):
        """ Wrapper around os.access only, via ssh connection """
        # NOTE(review): this delegates to the *local* os.access, not the
        # remote host -- unlike the other wrappers in this class. Confirm
        # whether that is intended.
        return os.access(*args, **kwargs)
def exists(self, path):
""" Wrapper around os.path.exists only, via ssh connection """
try:
self.ftp.stat(path)
return True
except IOError:
return False
def listdir(self, *args, **kwargs):
""" Wrapper around os.listdir but via ssh connection """
stats = self.ftp.listdir_attr(*args, **kwargs)
for i in stats:
self.cached_stats[args[0] + "/" + i.filename] = i
files = map(lambda x: x.filename, stats)
return files
class MultiSite(Livestatus):
""" Wrapps around multiple Livesatus instances and aggregates the results
of queries.
Example:
>>> m = MultiSite()
>>> m.add_backend(path='/var/spool/nagios/livestatus.socket', name='local')
>>> m.add_backend(path='127.0.0.1:5992', name='remote')
"""
    def __init__(self, *args, **kwargs):
        """ Initialize like a plain Livestatus instance, plus an empty
        registry of named backends (see add_backend()). """
        super(MultiSite, self).__init__(*args, **kwargs)
        # Maps friendly backend name -> Livestatus instance.
        self.backends = {}
def add_backend(self, path, name):
""" Add a new livestatus backend to this instance.
Arguments:
path (str): Path to file socket or remote address
name (str): Friendly shortname for this backend
"""
backend = Livestatus(
livestatus_socket_path=path,
nagios_cfg_file=self.nagios_cfg_file,
authuser=self.authuser
)
self.backends[name] = backend
    def get_backends(self):
        """ Returns the registered mk_livestatus backends.
        Returns:
            dict. Mapping of backend name -> mk_livestatus instance, as
            registered via add_backend().
        """
        return self.backends
def get_backend(self, backend_name):
""" Return one specific backend that has previously been added
"""
if not backend_name:
return self.backends.values()[0]
try:
return self.backends[backend_name]
except KeyError:
raise ParserError("No backend found with name='%s'" % backend_name)
def query(self, query, *args, **kwargs):
""" Behaves like mk_livestatus.query() except results are aggregated from multiple backends
Arguments:
backend (str): If specified, fetch only data from this backend (see add_backend())
*args: Passed directly to mk_livestatus.query()
**kwargs: Passed directly to mk_livestatus.query()
"""
result = []
backend = kwargs.pop('backend', None)
# Special hack, if 'Stats' argument was provided to livestatus
# We have to maintain compatibility with old versions of livestatus
# and return single list with all results instead of a list of dicts
doing_stats = any(map(lambda x: x.startswith('Stats:'), args + (query,)))
# Iterate though all backends and run the query
# TODO: Make this multithreaded
for name, backend_instance in self.backends.items():
# Skip if a specific backend was requested and this is not it
if backend and backend != name:
continue
query_result = backend_instance.query(query, *args, **kwargs)
if doing_stats:
result = self._merge_statistics(result, query_result)
else:
for row in query_result:
row['backend'] = name
result.append(row)
return result
def _merge_statistics(self, list1, list2):
""" Merges multiple livestatus results into one result
Arguments:
list1 (list): List of integers
list2 (list): List of integers
Returns:
list. Aggregated results of list1 + list2
Example:
>>> result1 = [1,1,1,1]
>>> result2 = [2,2,2,2]
>>> Mul | tiSite()._merge_statistics(result1, result2)
[3, 3, 3, 3]
"""
if not list1:
return list2
if not list2:
return list1
number_of_columns = len(list1)
result = [0] * number_of_columns
for row in (list1, list2):
for i, colu | mn in enumerate(row):
result[i] += column
return result
def get_host(self, host_name, backend=None):
""" Same as Livestatus.get_host() """
backend = self.get_backend(backend)
return backend.get_host(host_name)
def get_service(self, host_name, service_description, backend=None):
""" Same as Livestatus.get_service() """
backend = self.get_backend(backend)
return backend.get_service(host_name, service_description)
def get_contact(self, contact_name, backend=None):
""" Same as Livestatus.get_contact() """
backend = self.get_backend(backend)
return backend.get_contact(contact_name)
def get_contactgroup(self, contactgroup_name, backend=None):
""" Same as Livestatus.get_contact() """
backend = self.get_backend(backend)
return backend.get_contactgroup(contactgroup_name)
def get_servicegroup(self, servicegroup_name, backend=None):
""" Same as Livestatus.get_servicegroup() """
backend = self.get_backend(backend)
return backend.get_servicegroup(servicegroup_name)
def get_hostgroup(self, hostgroup_name, b |
from datetime import datetime, timedelta, timezone
from django.shortcuts import render
from django.core.management import call_command
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext_lazy as _
from fly_project import settings, constants
from api.models import SavingsGoal, CreditGoal, FinalGoal
def count_days_between(dt1, dt2):
    """Return the number of whole calendar days from *dt1* to *dt2*.

    Both datetimes are truncated to midnight first so only the date part
    matters; the result is negative when dt2 precedes dt1.
    """
    start = dt1.replace(hour=0, minute=0, second=0, microsecond=0)
    end = dt2.replace(hour=0, minute=0, second=0, microsecond=0)
    return (end - start).days
def count_days_between_today_and(dt2):
    """Return how many whole days remain from today (UTC) until *dt2*."""
    # Standardize "today" to UTC so the comparison is timezone-stable,
    # then compare dates only (both sides truncated to midnight).
    today = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
    target = dt2.replace(hour=0, minute=0, second=0, microsecond=0)
    return (target - today).days
@login_required(login_url='/authentication')
def mygoals_page(request):
    """Render the goal-type selection page."""
    context = {
        'settings': settings,
    }
    return render(request, 'mygoals/type/view.html', context)
@login_required(login_url='/authentication')
def savings_goals_page(request):
    """Render the savings-goal page for the logged-in user.

    Guarantees the user has an open SavingsGoal (creating one when none
    exists or the latest is closed), then shows either the goal editor or
    the completion page depending on whether the unlock date has passed.
    """
    # Fetch the latest SavingsGoal; create one for first-time users.
    savings_goal = SavingsGoal.objects.get_latest(request.user.id)
    if not savings_goal:
        savings_goal = SavingsGoal.objects.create(user_id=request.user.id,)
    # A closed goal cannot be reopened, so start a fresh one in its place.
    if savings_goal.is_closed:
        savings_goal = SavingsGoal.objects.create(user_id=request.user.id,)
    # Days left until the unlock date; the large sentinel means "no unlock
    # date set yet" so the editor UI is shown.
    days_remaining = 99999
    if savings_goal.unlocks:
        days_remaining = count_days_between_today_and(savings_goal.unlocks)
    # Unlock date still in the future -> goal editor; otherwise ask the
    # user whether the goal was met.
    if days_remaining > 0:
        url = 'mygoals/savings/view.html'
    else:
        url = 'mygoals/savings/complete.html'
    return render(request, url, {
        'settings': settings,
        'constants': constants,
        'savings_goal': savings_goal,
        'days_remaining': days_remaining,
    })
@login_required(login_url='/authentication')
def credit_goals_page(request):
    """Render the credit-goal page for the logged-in user.

    Guarantees the user has an open CreditGoal (creating one when none
    exists or the latest is closed), then shows either the goal editor or
    the completion page depending on whether the unlock date has passed.
    """
    # Fetch the latest CreditGoal; create one for first-time users.
    credit_goal = CreditGoal.objects.get_latest(request.user.id)
    if not credit_goal:
        credit_goal = CreditGoal.objects.create(user_id=request.user.id,)
    # A closed goal cannot be reopened, so start a fresh one in its place.
    if credit_goal.is_closed:
        credit_goal = CreditGoal.objects.create(user_id=request.user.id,)
    # Days left until the unlock date; the large sentinel means "no unlock
    # date set yet" so the editor UI is shown.
    days_remaining = 99999
    if credit_goal.unlocks:
        days_remaining = count_days_between_today_and(credit_goal.unlocks)
    # Unlock date still in the future -> goal editor; otherwise ask the
    # user whether the goal was met.
    if days_remaining > 0:
        url = 'mygoals/credit/view.html'
    else:
        url = 'mygoals/credit/complete.html'
    return render(request, url, {
        'settings': settings,
        'constants': constants,
        'credit_goal': credit_goal,
        'days_remaining': days_remaining,
    })
@login_required(login_url='/authentication')
def final_goal_page(request):
    """Render the final-goal page for the logged-in user.

    Guarantees the user has an open FinalGoal (creating one when none
    exists or the latest is closed), then shows either the goal editor or
    the completion page depending on whether the unlock date has passed.
    """
    # Fetch the latest FinalGoal; create one for first-time users.
    final_goal = FinalGoal.objects.get_latest(request.user.id)
    if not final_goal:
        final_goal = FinalGoal.objects.create(user_id=request.user.id,)
    # A closed goal cannot be reopened, so start a fresh one in its place.
    if final_goal.is_closed:
        final_goal = FinalGoal.objects.create(user_id=request.user.id,)
    # Days left until the unlock date; the large sentinel means "no unlock
    # date set yet" so the editor UI is shown.
    days_remaining = 99999
    if final_goal.unlocks:
        days_remaining = count_days_between_today_and(final_goal.unlocks)
    # Unlock date still in the future -> goal editor; otherwise ask the
    # user whether the goal was met.
    if days_remaining > 0:
        url = 'mygoals/final/view.html'
    else:
        url = 'mygoals/final/complete.html'
    return render(request, url, {
        'settings': settings,
        'constants': constants,
        'final_goal': final_goal,
        'days_remaining': days_remaining,
    })
@login_required(login_url='/authentication')
def goal_complete_page(request, goal_type, goal_id):
    """Render the goal-completion page for the given goal.

    The goal lookup is best-effort: if the id/type do not resolve, the
    page still renders with goal=None.
    """
    goal = None
    try:
        # NOTE(review): goal_type arrives from the URL and may be a string
        # while the constants could be ints (int(goal_type) is used below)
        # -- confirm these comparisons actually match.
        if goal_type == constants.SAVINGS_MYGOAL_TYPE:
            goal = SavingsGoal.objects.get(id=goal_id)
        elif goal_type == constants.CREDIT_MYGOAL_TYPE:
            goal = CreditGoal.objects.get(id=goal_id)
        elif goal_type == constants.GOAL_MYGOAL_TYPE:
            goal = FinalGoal.objects.get(id=goal_id)
    except Exception:
        # Deliberate best-effort lookup: render the page even when the
        # goal cannot be found.
        pass
    return render(request, 'mygoals/complete/view.html', {
        'settings': settings,
        'constants': constants,
        'goal_id': int(goal_id),
        'goal_type': int(goal_type),
        'goal': goal,
    })
@login_required(login_url='/authentication')
def goal_failed_page(request, goal_type, goal_id):
    """Render the goal-failed page for the given goal.

    The goal lookup is best-effort (goal may render as None); the user's
    profile is then re-evaluated via the 'evaluate_me' command.
    """
    goal = None
    try:
        # NOTE(review): goal_type arrives from the URL and may be a string
        # while the constants could be ints (int(goal_type) is used below)
        # -- confirm these comparisons actually match.
        if goal_type == constants.SAVINGS_MYGOAL_TYPE:
            goal = SavingsGoal.objects.get(id=goal_id)
        elif goal_type == constants.CREDIT_MYGOAL_TYPE:
            goal = CreditGoal.objects.get(id=goal_id)
        elif goal_type == constants.GOAL_MYGOAL_TYPE:
            goal = FinalGoal.objects.get(id=goal_id)
    except Exception:
        # Deliberate best-effort lookup: render the page even when the
        # goal cannot be found.
        pass
    # Evaulate the User's profile
    call_command('evaluate_me', str(request.me.id))
    return render(request, 'mygoals/failed/view.html', {
        'settings': settings,
        'constants': constants,
        'goal_id': int(goal_id),
        'goal_type': int(goal_type),
        'goal': goal,
    })
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language govern | ing permissions and
# limitations under the License.
import math
from typing import Callable, Tuple, TypeVar
T = TypeVar('T')
def _ | accept(random_sample: float, cost_diff: float, temp: float) -> Tuple[bool, float]:
"""Calculates probability and draws if solution should be accepted.
Based on exp(-Delta*E/T) formula.
Args:
random_sample: Uniformly distributed random number in the range [0, 1).
cost_diff: Cost difference between new and previous solutions.
temp: Current temperature.
Returns:
Tuple of boolean and float, with boolean equal to True if solution is
accepted, and False otherwise. The float value is acceptance
probability.
"""
exponent = -cost_diff / temp
if exponent >= 0.0:
return True, 1.0
probability = math.exp(exponent)
return probability > random_sample, probability
def anneal_minimize(
    initial: T,
    cost_func: Callable[[T], float],
    move_func: Callable[[T], T],
    random_sample: Callable[[], float],
    temp_initial: float = 1.0e-2,
    temp_final: float = 1e-6,
    cooling_factor: float = 0.99,
    repeat: int = 100,
    trace_func: Callable[[T, float, float, float, bool], None] = None,
) -> T:
    """Minimize a solution with the Simulated Annealing meta-heuristic.

    Args:
        initial: Initial solution of type T.
        cost_func: Evaluates a solution; lower is better, negatives allowed.
        move_func: Produces a random new candidate from the current solution
            without mutating its input.
        random_sample: Returns a uniform sample from [0, 1) on each call.
        temp_initial: Starting temperature (scale is cost_func-dependent).
        temp_final: Temperature at which the search stops.
        cooling_factor: Multiplier applied to the temperature after each
            batch of moves; must lie strictly between 0 and 1.
        repeat: Number of candidate moves tried at each temperature.
        trace_func: Optional progress callback invoked per step with
            (candidate, temperature, candidate cost, acceptance probability,
            acceptance decision).

    Returns:
        The best solution encountered during the search.

    Raises:
        ValueError: When supplied arguments are invalid.
    """
    if not 0.0 < cooling_factor < 1.0:
        raise ValueError("Cooling factor must be within (0, 1) range")
    current = initial
    current_cost = cost_func(initial)
    # The best solution is tracked separately from the accepted one.
    best, best_cost = current, current_cost
    temp = temp_initial
    if trace_func:
        trace_func(current, temp, current_cost, 1.0, True)
    while temp > temp_final:
        for _ in range(0, repeat):
            # Draw a candidate and evaluate it.
            candidate = move_func(current)
            candidate_cost = cost_func(candidate)
            # Remember the best solution seen, accepted or not.
            if candidate_cost < best_cost:
                best, best_cost = candidate, candidate_cost
            accepted, probability = _accept(random_sample(), candidate_cost - current_cost, temp)
            if accepted:
                current, current_cost = candidate, candidate_cost
            if trace_func:
                trace_func(candidate, temp, candidate_cost, probability, accepted)
        temp *= cooling_factor
    return best
|
# xVector Engine Client
# Copyright (c) 2011 James Buchwald
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warran | ty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Contains code for nicely reporting errors to the user.
"""
import logging
impor | t traceback
from PyQt4 import QtGui
from xVClient import ClientGlobals
mainlog = logging.getLogger("")
# Severity constants
FatalError = 1
"""Fatal error, forces termination of application."""
NormalError = 2
"""Normal error, this has impact but does not crash the program."""
WarningError = 3
"""Warning, this does not affect function but should cause concern."""
NoticeError = 4
"""General information."""
def ShowError(message, severity=NormalError, parent=None):
    """
    Displays an error message to the user and waits for a response.
    """
    # Map each severity level to the icon and window title it should use.
    presentation = {
        FatalError: (QtGui.QMessageBox.Critical, "Fatal Error"),
        NormalError: (QtGui.QMessageBox.Critical, "Error"),
        WarningError: (QtGui.QMessageBox.Warning, "Warning"),
        NoticeError: (QtGui.QMessageBox.Information, "Notice"),
    }
    icon, title = presentation.get(severity, (QtGui.QMessageBox.NoIcon, "Message"))
    dlg = QtGui.QMessageBox(parent)
    dlg.setText(message)
    dlg.setIcon(icon)
    dlg.setWindowTitle(title)
    # Modal: blocks until the user dismisses the dialog.
    dlg.exec_()
def ShowException(severity=NormalError, start_msg='An error has occurred!', parent=None):
    '''
    Displays the currently-handled exception in an error box.
    '''
    details = traceback.format_exc()
    ShowError(start_msg + "\n\n" + details, severity, parent)
class ErrorMessageHandler(logging.Handler):
    '''
    Logging handler that displays messages in Qt message boxes.
    '''
    def __init__(self, parent=None):
        '''
        Creates a new handler.
        @type parent: QtGui.QWidget
        @param parent: Parent widget for errors to be displayed under.
        '''
        super(ErrorMessageHandler,self).__init__()
        # Parent widget for errors to be displayed under.
        self.Parent = parent
    def _ShowError(self, message):
        '''
        Shows an error message and returns immediately.
        @type message: string
        @param message: Message to display.
        '''
        # (Removed an unused local that fetched ClientGlobals.Application.)
        wnd = QtGui.QMessageBox(parent=self.Parent)
        wnd.setIcon(QtGui.QMessageBox.Critical)
        wnd.setWindowTitle("Error")
        wnd.setStandardButtons(QtGui.QMessageBox.Ok)
        wnd.setText(message)
        wnd.exec_()
    def emit(self, record):
        '''
        Displays the formatted log record in a modal error box.
        '''
        self._ShowError(record.getMessage())
def ConfigureLogging(parent=None):
    '''
    Configures the logging mechanism to report errors as dialog boxes.
    @type parent: QtGui.QWidget
    @param parent: Parent widget for errors to be displayed under.
    '''
    formatter = logging.Formatter("%(message)s")
    # Errors and above pop up a message box.
    box_handler = ErrorMessageHandler(parent)
    box_handler.setFormatter(formatter)
    box_handler.setLevel(logging.ERROR)
    mainlog.addHandler(box_handler)
    # Lower-level messages still go to stderr.
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    stream_handler.setLevel(logging.DEBUG)
    mainlog.addHandler(stream_handler)
    # Let the logger itself pass every level through to the handlers.
    mainlog.setLevel(logging.DEBUG)
|
from | bs4 import BeautifulSoup
import xlsxwriter
workbook= xlsxwriter.Workbook("data.xlsx")
worksheet = workbook.add_worksheet()
f = open('rough.html',"r")
data=f.read()
soup=BeautifulSoup(data)
div = soup.find('div', {"class":'dataTables_scroll'})
table=div.find('table')
tbody=div.find('tbody')
rows=tbody.find_all('tr')
rowno = 0
for row in rows:
a=row.find_all('a')
td=row.find_all('td')
worksheet.write(rowno, 1, a[2].text)
worksheet.write(rowno, 2, td[3].text[td[3].text.fi | nd('P:'):])
worksheet.write(rowno, 3, a[3].text)
worksheet.write(rowno, 4, a[4].text)
worksheet.write(rowno, 5, a[3].text)
worksheet.write(rowno, 6, td[6].text)
rowno=rowno+1
workbook.close()
print "Done"
|
import os
import numpy
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the 'datasets' package.

    Registers the bundled data directories and the C extension used to
    read svmlight-format files.
    """
    from numpy.distutils.misc_util import Configuration
    config = Configuration('datasets', parent_package, top_path)
    # Ship the sample datasets, their descriptions and images.
    config.add_data_dir('data')
    config.add_data_dir('descr')
    config.add_data_dir('images')
    config.add_data_dir(os.path.join('tests', 'data'))
    # C extension for fast svmlight/libsvm file parsing.
    config.add_extension('_svmlight_format',
                         sources=['_svmlight_format.c'],
                         include_dirs=[numpy.get_include()])
    return config
if __name__ == '__main__':
    # Allow building this subpackage standalone: python setup.py build
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
|
bar",\n'
' "version": "1.2.3"\n'
" },\n"
" {\n"
' "name": "foobar",\n'
' "version": "1.2.2"\n'
" },\n"
" {\n"
' "name": "foobar",\n'
' "version": "1.2.1"\n'
" },\n"
" {\n"
' "name": "foobar",\n'
' "version": "1.2.0"\n'
" },\n"
" {\n"
' "name": "quux",\n'
' "version": "0.1.0"\n'
" },\n"
" {\n"
' "name": "gnusto",\n'
' "version": "0.0.0"\n'
" }\n"
"]\n"
)
spclass.assert_called_once_with("https://pypi.org/pypi")
assert spinstance.method_calls == [
mocker.call.browse(("Typing :: Typed", "Topic :: Utilities"))
]
def test_browse_packages(mocker):
    """--packages collapses multiple releases of a project to one entry."""
    # Fake XML-RPC server: browse() returns several releases of 'foobar'.
    spinstance = mocker.Mock(
        **{
            "browse.return_value": [
                ["foobar", "1.2.3"],
                ["foobar", "1.2.2"],
                ["foobar", "1.2.1"],
                ["foobar", "1.2.0"],
                ["quux", "0.1.0"],
                ["gnusto", "0.0.0"],
            ],
        }
    )
    spclass = mocker.patch("qypi.api.ServerProxy", return_value=spinstance)
    r = CliRunner().invoke(
        qypi,
        ["browse", "--packages", "Typing :: Typed", "Topic :: Utilities"],
    )
    assert r.exit_code == 0, show_result(r)
    # Only the first (latest-listed) version of each package survives.
    assert r.output == (
        "[\n"
        "    {\n"
        '        "name": "foobar",\n'
        '        "version": "1.2.3"\n'
        "    },\n"
        "    {\n"
        '        "name": "quux",\n'
        '        "version": "0.1.0"\n'
        "    },\n"
        "    {\n"
        '        "name": "gnusto",\n'
        '        "version": "0.0.0"\n'
        "    }\n"
        "]\n"
    )
    spclass.assert_called_once_with("https://pypi.org/pypi")
    assert spinstance.method_calls == [
        mocker.call.browse(("Typing :: Typed", "Topic :: Utilities"))
    ]
@pytest.mark.usefixtures("mock_pypi_json")
def test_info():
    """'info PKG' without a version shows the latest release's metadata."""
    r = CliRunner().invoke(qypi, ["info", "foobar"])
    assert r.exit_code == 0, show_result(r)
    assert r.output == (
        "[\n"
        "    {\n"
        '        "classifiers": [\n'
        '            "Topic :: Software Development :: Testing",\n'
        '            "UNKNOWN"\n'
        "        ],\n"
        '        "name": "foobar",\n'
        '        "people": [\n'
        "            {\n"
        '                "email": "megan30@daniels.info",\n'
        '                "name": "Brandon Perkins",\n'
        '                "role": "author"\n'
        "            },\n"
        "            {\n"
        '                "email": "cspencer@paul-fisher.com",\n'
        '                "name": "Denise Adkins",\n'
        '                "role": "maintainer"\n'
        "            }\n"
        "        ],\n"
        '        "platform": "Amiga",\n'
        '        "project_url": "https://dummy.nil/pypi/foobar",\n'
        '        "release_date": "2019-02-01T09:17:59.172284Z",\n'
        '        "release_url": "https://dummy.nil/pypi/foobar/1.0.0",\n'
        '        "summary": "Including drive environment my it.",\n'
        '        "unknown_field": "passed through",\n'
        '        "url": "https://www.johnson.com/homepage.php",\n'
        '        "version": "1.0.0"\n'
        "    }\n"
        "]\n"
    )
@pytest.mark.usefixtures("mock_pypi_json")
def test_info_explicit_latest_version():
    """'info PKG==latest' matches the output of a plain 'info PKG'."""
    r = CliRunner().invoke(qypi, ["info", "foobar==1.0.0"])
    assert r.exit_code == 0, show_result(r)
    assert r.output == (
        "[\n"
        "    {\n"
        '        "classifiers": [\n'
        '            "Topic :: Software Development :: Testing",\n'
        '            "UNKNOWN"\n'
        "        ],\n"
        '        "name": "foobar",\n'
        '        "people": [\n'
        "            {\n"
        '                "email": "megan30@daniels.info",\n'
        '                "name": "Brandon Perkins",\n'
        '                "role": "author"\n'
        "            },\n"
        "            {\n"
        '                "email": "cspencer@paul-fisher.com",\n'
        '                "name": "Denise Adkins",\n'
        '                "role": "maintainer"\n'
        "            }\n"
        "        ],\n"
        '        "platform": "Amiga",\n'
        '        "project_url": "https://dummy.nil/pypi/foobar",\n'
        '        "release_date": "2019-02-01T09:17:59.172284Z",\n'
        '        "release_url": "https://dummy.nil/pypi/foobar/1.0.0",\n'
        '        "summary": "Including drive environment my it.",\n'
        '        "unknown_field": "passed through",\n'
        '        "url": "https://www.johnson.com/homepage.php",\n'
        '        "version": "1.0.0"\n'
        "    }\n"
        "]\n"
    )
@pytest.mark.usefixtures("mock_pypi_json")
def test_info_explicit_version():
    """'info PKG==older' shows that specific release's metadata."""
    r = CliRunner().invoke(qypi, ["info", "foobar==0.2.0"])
    assert r.exit_code == 0, show_result(r)
    assert r.output == (
        "[\n"
        "    {\n"
        '        "classifiers": [\n'
        '            "Topic :: Software Development :: Testing",\n'
        '            "UNKNOWN"\n'
        "        ],\n"
        '        "name": "foobar",\n'
        '        "people": [\n'
        "            {\n"
        '                "email": "danielstewart@frye.com",\n'
        '                "name": "Sonya Johnson",\n'
        '                "role": "author"\n'
        "            },\n"
        "            {\n"
        '                "email": "maynardtim@hotmail.com",\n'
        '                "name": "Stephen Romero",\n'
        '                "role": "maintainer"\n'
        "            }\n"
        "        ],\n"
        '        "platform": "Wood",\n'
        '        "project_url": "https://dummy.nil/pypi/foobar",\n'
        '        "release_date": "2017-02-04T12:34:05.766270Z",\n'
        '        "release_url": "https://dummy.nil/pypi/foobar/0.2.0",\n'
        '        "summary": "Water audience cut call.",\n'
        '        "unknown_field": "passed through",\n'
        '        "url": "http://www.sanchez.net/index.htm",\n'
        '        "version": "0.2.0"\n'
        "    }\n"
        "]\n"
    )
@pytest.mark.usefixtures("mock_pypi_json")
def test_info_description():
r = CliRunner().invoke(qypi, ["info", "--description", "foobar"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"[\n"
" {\n"
' "classifiers": [\n'
' "Topic :: Software Development :: Testing",\n'
' "UNKNOWN"\n'
" ],\n"
' "description": "foobar v1.0.0\\n\\nDream political close attorney sit cost inside. Seek hard can bad investment authority walk we. Sing range late use speech citizen.\\n\\nCan money issue claim onto really case. Fact garden along all book sister trip step.\\n\\nView table woman her production result. Fine allow prepare should traditional. Send cultural two care eye.\\n\\nGenerated with Faker",\n'
' "name": "foobar",\n'
' "people": [\n'
" {\n"
' "email": "megan30@daniels.info",\n'
' "name": "Brandon Perkins",\n'
' "role": "author"\n'
" },\n"
" {\n"
' "email": "cspencer@paul-fisher.com",\n'
' "name": "Denise Adkins",\n'
' "role": "maintainer"\n'
" }\n"
" ],\n"
' "platform": "Amiga",\n'
' "project_url": "https://dummy.nil/pypi/foobar",\n'
' "release_date": "2019-02-01T09:17:59.172284Z",\n'
' "release_url": "https://dummy.nil/pypi/foobar/1.0.0",\n'
' "summary": "Including drive environment my it.",\n'
' "unknown_field": "passed through",\n'
' "url": "https://www.johnson.com/homepage.php",\n'
' "version": "1.0.0"\n'
" }\n"
"]\n |
# -*- coding: utf-8 -*-
"""Doctest for method/function calls.
We're going the use these types for extra testing
>>> from UserList import UserList
>>> from UserDict import UserDict
We're defining four helper functions
>>> def e(a,b):
... print a, b
>>> def f(*a, **k):
... print a, test_support.sortdict(k)
>>> def g(x, *y, **z):
... print x, y, test_support.sortdict(z)
>>> def h(j=1, a=2, h=3):
... print j, a, h
Argument list examples
>>> f()
() {}
>>> f(1)
(1,) {}
>>> f(1, 2)
(1, 2) {}
>>> f(1, 2, 3)
(1, 2, 3) {}
>>> f(1, 2, 3, *(4, 5) | )
(1, 2, 3, 4, 5) {}
>>> f(1, 2, 3, *[4, 5])
(1, 2, 3, 4, 5) {}
>>> f(1, 2, 3, *UserList([4, 5]))
(1, 2, 3, 4, 5) {}
Here we add keyword arguments
>>> f(1, 2, 3, **{'a':4, 'b':5})
(1, 2, 3) {'a': 4, 'b': 5}
>>> f(1, 2, 3, *[4, 5], **{'a':6, 'b':7})
(1, 2, 3, 4, 5) {'a | ': 6, 'b': 7}
>>> f(1, 2, 3, x=4, y=5, *(6, 7), **{'a':8, 'b': 9})
(1, 2, 3, 6, 7) {'a': 8, 'b': 9, 'x': 4, 'y': 5}
>>> f(1, 2, 3, **UserDict(a=4, b=5))
(1, 2, 3) {'a': 4, 'b': 5}
>>> f(1, 2, 3, *(4, 5), **UserDict(a=6, b=7))
(1, 2, 3, 4, 5) {'a': 6, 'b': 7}
>>> f(1, 2, 3, x=4, y=5, *(6, 7), **UserDict(a=8, b=9))
(1, 2, 3, 6, 7) {'a': 8, 'b': 9, 'x': 4, 'y': 5}
Examples with invalid arguments (TypeErrors). We're also testing the function
names in the exception messages.
Verify clearing of SF bug #733667
>>> e(c=4)
Traceback (most recent call last):
...
TypeError: e() got an unexpected keyword argument 'c'
>>> g()
Traceback (most recent call last):
...
TypeError: g() takes at least 1 argument (0 given)
>>> g(*())
Traceback (most recent call last):
...
TypeError: g() takes at least 1 argument (0 given)
>>> g(*(), **{})
Traceback (most recent call last):
...
TypeError: g() takes at least 1 argument (0 given)
>>> g(1)
1 () {}
>>> g(1, 2)
1 (2,) {}
>>> g(1, 2, 3)
1 (2, 3) {}
>>> g(1, 2, 3, *(4, 5))
1 (2, 3, 4, 5) {}
>>> class Nothing: pass
...
>>> g(*Nothing())
Traceback (most recent call last):
...
TypeError: g() argument after * must be an iterable, not instance
>>> class Nothing:
... def __len__(self): return 5
...
>>> g(*Nothing())
Traceback (most recent call last):
...
TypeError: g() argument after * must be an iterable, not instance
>>> class Nothing():
... def __len__(self): return 5
... def __getitem__(self, i):
... if i<3: return i
... else: raise IndexError(i)
...
>>> g(*Nothing())
0 (1, 2) {}
>>> class Nothing:
... def __init__(self): self.c = 0
... def __iter__(self): return self
... def next(self):
... if self.c == 4:
... raise StopIteration
... c = self.c
... self.c += 1
... return c
...
>>> g(*Nothing())
0 (1, 2, 3) {}
Check for issue #4806: Does a TypeError in a generator get propagated with the
right error message?
>>> def broken(): raise TypeError("myerror")
...
>>> g(*(broken() for i in range(1)))
Traceback (most recent call last):
...
TypeError: myerror
Make sure that the function doesn't stomp the dictionary
>>> d = {'a': 1, 'b': 2, 'c': 3}
>>> d2 = d.copy()
>>> g(1, d=4, **d)
1 () {'a': 1, 'b': 2, 'c': 3, 'd': 4}
>>> d == d2
True
What about willful misconduct?
>>> def saboteur(**kw):
... kw['x'] = 'm'
... return kw
>>> d = {}
>>> kw = saboteur(a=1, **d)
>>> d
{}
>>> g(1, 2, 3, **{'x': 4, 'y': 5})
Traceback (most recent call last):
...
TypeError: g() got multiple values for keyword argument 'x'
>>> f(**{1:2})
Traceback (most recent call last):
...
TypeError: f() keywords must be strings
>>> h(**{'e': 2})
Traceback (most recent call last):
...
TypeError: h() got an unexpected keyword argument 'e'
>>> h(*h)
Traceback (most recent call last):
...
TypeError: h() argument after * must be an iterable, not function
>>> dir(*h)
Traceback (most recent call last):
...
TypeError: dir() argument after * must be an iterable, not function
>>> None(*h)
Traceback (most recent call last):
...
TypeError: NoneType object argument after * must be an iterable, \
not function
>>> h(**h)
Traceback (most recent call last):
...
TypeError: h() argument after ** must be a mapping, not function
>>> dir(**h)
Traceback (most recent call last):
...
TypeError: dir() argument after ** must be a mapping, not function
>>> None(**h)
Traceback (most recent call last):
...
TypeError: NoneType object argument after ** must be a mapping, \
not function
>>> dir(b=1, **{'b': 1})
Traceback (most recent call last):
...
TypeError: dir() got multiple values for keyword argument 'b'
Another helper function
>>> def f2(*a, **b):
... return a, b
>>> d = {}
>>> for i in xrange(512):
... key = 'k%d' % i
... d[key] = i
>>> a, b = f2(1, *(2,3), **d)
>>> len(a), len(b), b == d
(3, 512, True)
>>> class Foo:
... def method(self, arg1, arg2):
... return arg1+arg2
>>> x = Foo()
>>> Foo.method(*(x, 1, 2))
3
>>> Foo.method(x, *(1, 2))
3
>>> Foo.method(*(1, 2, 3))
Traceback (most recent call last):
...
TypeError: unbound method method() must be called with Foo instance as \
first argument (got int instance instead)
>>> Foo.method(1, *[2, 3])
Traceback (most recent call last):
...
TypeError: unbound method method() must be called with Foo instance as \
first argument (got int instance instead)
A PyCFunction that takes only positional parameters should allow an
empty keyword dictionary to pass without a complaint, but raise a
TypeError if te dictionary is not empty
>>> try:
... silence = id(1, *{})
... True
... except:
... False
True
>>> id(1, **{'foo': 1})
Traceback (most recent call last):
...
TypeError: id() takes no keyword arguments
A corner case of keyword dictionary items being deleted during
the function call setup. See <http://bugs.python.org/issue2016>.
>>> class Name(str):
... def __eq__(self, other):
... try:
... del x[self]
... except KeyError:
... pass
... return str.__eq__(self, other)
... def __hash__(self):
... return str.__hash__(self)
>>> x = {Name("a"):1, Name("b"):2}
>>> def f(a, b):
... print a,b
>>> f(**x)
1 2
An obscure message:
>>> def f(a, b):
... pass
>>> f(b=1)
Traceback (most recent call last):
...
TypeError: f() takes exactly 2 arguments (1 given)
The number of arguments passed in includes keywords:
>>> def f(a):
... pass
>>> f(6, a=4, *(1, 2, 3))
Traceback (most recent call last):
...
TypeError: f() takes exactly 1 argument (5 given)
"""
import unittest
import sys
from test import test_support
class ExtCallTest(unittest.TestCase):
    """Unit tests for extended call syntax with unicode keyword names."""

    def test_unicode_keywords(self):
        # Single-parameter probe target for the keyword experiments below.
        def probe(a):
            return a

        # A unicode keyword matching the parameter name is accepted.
        self.assertEqual(probe(**{u'a': 4}), 4)
        # Unicode keywords that match no parameter raise TypeError.
        self.assertRaises(TypeError, probe, **{u'stören': 4})
        self.assertRaises(TypeError, probe, **{u'someLongString': 2})
        # Supplying 'a' both explicitly and via ** must also fail.
        try:
            probe(a=4, **{u'a': 4})
        except TypeError:
            pass
        else:
            self.fail("duplicate arguments didn't raise")
def test_main():
    # Run the module's doctests (verbose=True), then the unittest cases.
    # test_support is the CPython 2 test-suite helper package.
    test_support.run_doctest(sys.modules[__name__], True)
    test_support.run_unittest(ExtCallTest)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    test_main()
|
import os

# Django settings for mysite project.

# Development mode: full tracebacks in the browser. Never True in production.
DEBUG = True

# Absolute path of the directory containing this settings module.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)

MANAGERS = ADMINS

# Project root: one directory above this settings file. Anchors the SQLite
# database path below (and the template directory later in this file).
SITE_ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',  # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': os.path.join(SITE_ROOT, 'db.sqlite3'),  # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': '',
        'PASSWORD': '',
        'HOST': '',  # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '',  # Set to empty string for default.
    }
}
DATE_INPUT_FORMATS = ('%d/%m/%Y')
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
# Empty is acceptable while DEBUG=True; must be populated before deploying.
ALLOWED_HOSTS = []

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Zurich'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
# NOTE(review): a secret key committed to source control should be rotated
# and loaded from the environment before any production deployment.
SECRET_KEY = '05=^qgbhg3!6-dzb6#&2j^jmh-2fgc%22!z_!w*&8iy_m$2*$*'

# Request/response middleware, applied in order (pre-Django-1.10 style name).
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

# Root URL dispatcher module for the project.
ROOT_URLCONF = 'mysite.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'mysite.wsgi.application'
# Template engine configuration: Django templates loaded from the project's
# templates/ directory (via SITE_ROOT) and from each app's templates/ dir.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(SITE_ROOT, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
                # list if you haven't customized them:
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages'
            ],
            # Mirror the global DEBUG flag for template error display.
            'debug': DEBUG,
        },
    },
]
# Applications enabled for this project: Django contrib apps plus the
# local 'polls' app.
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'polls'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        # Only lets records through when DEBUG is False (i.e. production).
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        # Unhandled request errors (HTTP 500) are mailed to ADMINS.
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
|
request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content,
'{"items": [{"id": "one"}, {"id": "two"}]}')
nc.keypair_list.assert_called_once_with(request)
    @mock.patch.object(nova.api, 'nova')
    def test_keypair_create(self, nc):
        """POST keypair: creates via the nova API, returns 201 with body
        and a URL-quoted location header ('!' -> %21)."""
        request = self.mock_rest_request(body='''{"name": "Ni!"}''')
        new = nc.keypair_create.return_value
        new.to_dict.return_value = {'name': 'Ni!', 'public_key': 'sekrit'}
        new.name = 'Ni!'
        # DEBUG patched True — presumably so the view includes the secret
        # key material in the response; confirm against the view code.
        with mock.patch.object(settings, 'DEBUG', True):
            response = nova.Keypairs().post(request)
        self.assertStatusCode(response, 201)
        self.assertEqual(response.content,
                         '{"name": "Ni!", "public_key": "sekrit"}')
        self.assertEqual(response['location'], '/api/nova/keypairs/Ni%21')
        nc.keypair_create.assert_called_once_with(request, 'Ni!')
    @mock.patch.object(nova.api, 'nova')
    def test_keypair_import(self, nc):
        """POST keypair with a public_key: imports instead of creating,
        and echoes the imported key back."""
        request = self.mock_rest_request(body='''
            {"name": "Ni!", "public_key": "hi"}
        ''')
        new = nc.keypair_import.return_value
        new.to_dict.return_value = {'name': 'Ni!', 'public_key': 'hi'}
        new.name = 'Ni!'
        with mock.patch.object(settings, 'DEBUG', True):
            response = nova.Keypairs().post(request)
        self.assertStatusCode(response, 201)
        self.assertEqual(response.content,
                         '{"name": "Ni!", "public_key": "hi"}')
        # Location header is URL-quoted ('!' -> %21).
        self.assertEqual(response['location'], '/api/nova/keypairs/Ni%21')
        nc.keypair_import.assert_called_once_with(request, 'Ni!', 'hi')
#
# Availability Zones
#
    # Public wrappers: the underscore-prefixed driver below is not collected
    # by the test runner, so each variant gets its own test_ entry point.
    def test_availzone_get_brief(self):
        self._test_availzone_get(False)

    def test_availzone_get_detailed(self):
        self._test_availzone_get(True)
    @mock.patch.object(nova.api, 'nova')
    def _test_availzone_get(self, detail, nc):
        """Shared driver: GET availability zones, brief or detailed.

        The patch decorator appends its mock after the explicit arguments,
        so `nc` is the last parameter.
        """
        if detail:
            request = self.mock_rest_request(GET={'detailed': 'true'})
        else:
            request = self.mock_rest_request(GET={})
        nc.availability_zone_list.return_value = [
            mock.Mock(**{'to_dict.return_value': {'id': 'one'}}),
            mock.Mock(**{'to_dict.return_value': {'id': 'two'}}),
        ]
        response = nova.AvailabilityZones().get(request)
        self.assertStatusCode(response, 200)
        self.assertEqual(response.content,
                         '{"items": [{"id": "one"}, {"id": "two"}]}')
        nc.availability_zone_list.assert_called_once_with(request, detail)
#
# Limits
#
    # Public wrappers for the reserved/not-reserved limits driver below.
    def test_limits_get_not_reserved(self):
        self._test_limits_get(False)

    def test_limits_get_reserved(self):
        self._test_limits_get(True)
    @mock.patch.object(nova.api, 'nova')
    def _test_limits_get(self, reserved, nc):
        """Shared driver: GET tenant absolute limits, with or without the
        'reserved' query parameter (mock injected as last arg)."""
        if reserved:
            request = self.mock_rest_request(GET={'reserved': 'true'})
        else:
            request = self.mock_rest_request(GET={})
        nc.tenant_absolute_limits.return_value = {'id': 'one'}
        response = nova.Limits().get(request)
        self.assertStatusCode(response, 200)
        nc.tenant_absolute_limits.assert_called_once_with(request, reserved)
        self.assertEqual(response.content, '{"id": "one"}')
#
# Servers
#
    @mock.patch.object(nova.api, 'nova')
    def test_server_create_missing(self, nc):
        """POST server without the required source_id: 400, and the nova
        API must not be called at all."""
        request = self.mock_rest_request(body='''{"name": "hi"}''')
        response = nova.Servers().post(request)
        self.assertStatusCode(response, 400)
        self.assertEqual(response.content,
                         '"missing required parameter \'source_id\'"')
        nc.server_create.assert_not_called()
    @mock.patch.object(nova.api, 'nova')
    def test_server_create_basic(self, nc):
        """POST server with all basic fields: 201, body echoes the new id,
        and the nova call receives the fields positionally in API order."""
        request = self.mock_rest_request(body='''{"name": "Ni!",
            "source_id": "image123", "flavor_id": "flavor123",
            "key_name": "sekrit", "user_data": "base64 yes",
            "security_groups": [{"name": "root"}]}
        ''')
        new = nc.server_create.return_value
        new.to_dict.return_value = {'id': 'server123'}
        new.id = 'server123'
        response = nova.Servers().post(request)
        self.assertStatusCode(response, 201)
        self.assertEqual(response.content, '{"id": "server123"}')
        self.assertEqual(response['location'], '/api/nova/servers/server123')
        nc.server_create.assert_called_once_with(
            request, 'Ni!', 'image123', 'flavor123', 'sekrit', 'base64 yes',
            [{'name': 'root'}]
        )
    @mock.patch.object(nova.api, 'nova')
    def test_server_get_single(self, nc):
        """GET a single server by id: 200 and a passthrough to server_get."""
        request = self.mock_rest_request()
        nc.server_get.return_value.to_dict.return_value = {'name': '1'}
        response = nova.Server().get(request, "1")
        self.assertStatusCode(response, 200)
        nc.server_get.assert_called_once_with(request, "1")
#
# Extensions
#
    @mock.patch.object(nova.api, 'nova')
    @mock.patch.object(settings,
                       'OPENSTACK_NOVA_EXTENSIONS_BLACKLIST', ['baz'])
    def _test_extension_list(self, nc):
        """Driver: GET extensions; blacklisted names ('baz') are filtered
        out of the response.

        NOTE(review): the leading underscore means the runner never collects
        this method directly — presumably a public test_ wrapper exists
        further down (as with the other _test_* drivers); confirm.
        """
        request = self.mock_rest_request()
        nc.list_extensions.return_value = [
            mock.Mock(**{'to_dict.return_value': {'name': 'foo'}}),
            mock.Mock(**{'to_dict.return_value': {'name': 'bar'}}),
            mock.Mock(**{'to_dict.return_value': {'name': 'baz'}}),
        ]
        response = nova.Extensions().get(request)
        self.assertStatusCode(response, 200)
        self.assertEqual(response.content,
                         '{"items": [{"name": "foo"}, {"name": "bar"}]}')
        nc.list_extensions.assert_called_once_with(request)
#
# Flavors
#
    # Public wrappers covering the three get_extras variants of the driver
    # below: explicit False, explicit True, and absent (defaults to False).
    def test_get_extras_no(self):
        self._test_flavor_get_single(get_extras=False)

    def test_get_extras_yes(self):
        self._test_flavor_get_single(get_extras=True)

    def test_get_extras_default(self):
        self._test_flavor_get_single(get_extras=None)
    @mock.patch.object(nova.api, 'nova')
    def _test_flavor_get_single(self, nc, get_extras):
        """Driver: GET one flavor with/without extra specs.

        Callers pass get_extras by keyword, so the patched mock binds to
        `nc`. get_extras=None exercises the no-query-parameter default,
        which the API treats as False. The query value casing ('tRuE',
        'fAlsE') checks case-insensitive boolean parsing.
        """
        if get_extras:
            request = self.mock_rest_request(GET={'get_extras': 'tRuE'})
        elif get_extras is None:
            request = self.mock_rest_request()
            get_extras = False
        else:
            request = self.mock_rest_request(GET={'get_extras': 'fAlsE'})
        nc.flavor_get.return_value.to_dict.return_value = {'name': '1'}
        response = nova.Flavor().get(request, "1")
        self.assertStatusCode(response, 200)
        if get_extras:
            self.assertEqual(response.content, '{"extras": {}, "name": "1"}')
        else:
            self.assertEqual(response.content, '{"name": "1"}')
        nc.flavor_get.assert_called_once_with(request, "1",
                                              get_extras=get_extras)
    @mock.patch.object(nova.api, 'nova')
    def _test_flavor_list_public(self, nc, is_public=None):
        """Driver: GET the flavor list filtered by is_public.

        is_public=None means no query parameter at all; mixed-case query
        values check case-insensitive boolean parsing.
        """
        if is_public:
            request = self.mock_rest_request(GET={'is_public': 'tRuE'})
        elif is_public is None:
            request = self.mock_rest_request(GET={})
        else:
            request = self.mock_rest_request(GET={'is_public': 'fAlsE'})
        nc.flavor_list.return_value = [
            mock.Mock(**{'to_dict.return_value': {'id': '1'}}),
            mock.Mock(**{'to_dict.return_value': {'id': '2'}}),
        ]
        response = nova.Flavors().get(request)
        self.assertStatusCode(response, 200)
        self.assertEqual(response.content,
                         '{"items": [{"id": "1"}, {"id": "2"}]}')
        nc.flavor_list.assert_called_once_with(request, is_public=is_public,
                                               get_extras=False)
    # Public wrappers for the three is_public variants of the driver above.
    def test_flavor_list_private(self):
        self._test_flavor_list_public(is_public=False)

    def test_flavor_list_public(self):
        self._test_flavor_list_public(is_public=True)

    def test_flavor_list_public_none(self):
        self._test_flavor_list_public(is_public=None)
@mock.patch.object(nova.api, 'nova')
def _test_flavor_ |
#!/usr/bin/python3
def sanitize(time_string):
    """Normalise a race time so minutes and seconds are dot-separated.

    '2-34' and '2:34' both become '2.34' (surrounding whitespace is
    stripped); strings containing neither separator are returned as-is.
    """
    for separator in ('-', ':'):
        if separator in time_string:
            mins, secs = time_string.strip().split(separator)
            return mins + '.' + secs
    return time_string
d | ef get_coach_data(filename):
try:
with open(filename) as fn:
data = fn.readline()
| return(data.strip().split(','))
except IOError as ioerr:
print('File Error:' + str(ioerr))
return(None)
# Script entry: report Sarah's three fastest distinct times.
sarah = get_coach_data('sarah2.txt')
# Fix: get_coach_data returns None when the file is missing; the original
# called sarah.pop(0) unconditionally and crashed with AttributeError.
if sarah is not None:
    # First two fields are the athlete's name and date of birth.
    sarah_name, sarah_dob = sarah.pop(0), sarah.pop(0)
    print(sarah_name + "'s fastest time are:" +
          str(sorted(set([sanitize(t) for t in sarah]))[0:3]))
|
import os
def create_peanut(peanut_name):
    """Scaffold a new peanut plugin under ./peanuts/<peanut_name>.

    Creates the plugin directory with a templates/ subdirectory, an empty
    __init__.py and a main.py containing the plugin __META__ block and a
    stub load() hook. Prints a notice and does nothing when the peanut
    already exists.
    """
    peanut_dir = './peanuts/%s' % peanut_name
    if os.path.exists(peanut_dir):
        print('Peanut already exists')
        return
    # Fix: os.makedirs also creates ./peanuts on first use; the original
    # os.mkdir failed with FileNotFoundError when the parent was missing.
    os.makedirs(peanut_dir)
    os.mkdir(peanut_dir + '/templates')
    # with-blocks guarantee the files are flushed and closed on all paths
    # (the original relied on explicit flush()/close() calls).
    with open(peanut_dir + '/__init__.py', 'w') as f:
        f.write('')
    with open(peanut_dir + '/main.py', 'w') as f:
        f.write('\n__META__ = {\n')
        f.write(" 'displayName': '%s',\n" % peanut_name)
        f.write(" 'description': 'Peanut description',\n")
        f.write(" 'version': '0.1',\n")
        f.write(" 'enabled': True,\n")
        f.write("}\n\n")
        f.write('def load(peanut):\n')
        f.write(" print('Loading peanut %s')\n" % peanut_name)
def clean():
    """Delete the temporary working directory if it is present."""
    tmp_path = './.__tmp__'
    # isdir() is False for non-existent paths, so no separate exists() check
    # is needed; the removal only happens for an actual directory.
    if os.path.isdir(tmp_path):
        import shutil
        shutil.rmtree(tmp_path)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-03 14:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Second 'presence' migration: adds the Session model.

    A Session records a time span (start/end) attached to an Entity,
    with an automatic creation timestamp.
    """

    dependencies = [
        ('presence', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Session',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start', models.DateTimeField()),
                ('end', models.DateTimeField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                # Deleting an Entity cascades to its sessions.
                ('entity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='presence.Entity')),
            ],
        ),
    ]
|
icients, the reflectance and the transmittance
of the multilayer system, in the down-up direction of
propagation.
Returns
-------
out : dictionary
A dictionary with the reflection and transmission
coefficients, the reflectance and the transmittance of the
multilayer system. The keys are {'r', 't', 'R', 'T'}.
"""
return self.__coefficientsDownUp
def calculateFx(self, z, wlength, angle, index=0):
"""
Calculates Fx(z; lambda, theta) of the multilayer.
The direction x is parallel to both the layer interfaces and the
plane of incidence of the light (or the direction of the
intersection between plane of incidence and interfaces). The
plane of incidence is always perpendicular to the interfaces.
The direction z is perpendicular to the interfaces.
The state of the multilayer will be changed according to the
parameters passed to the method and then F(z) will be
calculated.
Fx is defined only for TM waves. If the multilayer is currently
in TE, it will be changed to TM to perform the calculations.
Parameters
----------
z : float
The z coordinate of the emitting dipole.
wlength : float
The wavelength of the light across the multilayer In the
same units as in the file from which the refractive
indices were loaded.
angle : float
The propagation angle in radians
index : int
The index of the layer where we are fixing the propagation
angle.
Returns
-------
out : complex128
The value of Fx(z, lambda, angle)
"""
# Determine what has to be changed and wether or not to update
# the matrices
if self.getPolarization() != 'TM':
self.setPolarization('TM')
if wlength != self.getWlength():
self.setWlength(wlength)
self.setPropAngle(angle, index)
if self.getPropAngle(index) != angle:
self.setPropAngle(angle, index)
if self.getCharMatrixUpDown() == None:
self.calcMatrices()
self.updateCharMatrix()
# Calculate Fx(z)
# Find out in which layer the dipole is located
dipole_layer_index = self.getIndexAtPos(z)
# Calculate Fx according to the position of the dipole
if dipole_layer_index == 0:
# Fx(z) in case the dipole is in the top medium
wavelength = self.getWlength()
theta0 = self.getPropAngle(0)
n0 = self.getRefrIndex(0)
# Calculate parameters
z0 = self.getPosition(0)
eta0 = 2 * np.pi * np.sqrt(n0 ** 2 - (n0 * np.sin(theta0)) ** 2) \
/ wavelength
# Retreive coefficients
r01 = self.getCoefficientsUpDown()['r']
# Calculate function
fx = 1 - r01 * np.exp(2 * eta0 * (z - z0) * 1j)
elif dipole_layer_index == self.numLayers() - 1:
# Fx(z) in case the dipole is in the bottom medium
wavelength = self.getWlength()
theta0 = self.getPropAngle(0)
thetaN = self.getPropAngle(dipole_layer_index)
n0 = self.getRefrIndex(0)
nN = self.getRefrIndex(dipole_layer_index)
# Calculate parameters
z0 = self.getPosition(0)
eta0 = 2 * np.pi * np.sqrt(n0 ** 2 - (n0 * np.sin(theta0)) ** 2) \
/ wavelength
etaN = 2 * np.pi * np.sqrt(nN ** 2 - (n0 * np.sin(theta0)) ** 2) \
/ wavelength
# Retreive coefficients
t1N = self.getCoefficientsUpDown()['t']
# Calculate function. We handle separately the case
# where theta0 is 0 to avoid a NaN result. Bear in
# mind that if we have a dipole oscilating along x
# there is no light propagating along x.
if theta0 == np.pi / 2:
fx = 1 + 0j
else:
fx = t1N * np.exp(eta0 * (z - z0) * 1j - etaN * z * 1j) * \
np.cos(thetaN) / np.cos(theta0)
else:
# Fx(z) in case the dipole is within any of the layers
wavelength = self.getWlength()
theta0 = self.getPropAngle(0)
thetaj = self.getPropAngle(dipole_layer_index)
n0 = self.getRefrIndex(0)
nj = self.getRefrIndex(dipole_layer_index)
# Calculate parameters
z0 = self.getPosition(0)
zj = self.getPosition(dipole_layer_index)
zj1 = self.getPosition(dipole_layer_index - 1)
dj = self.getThickness(dipole_layer_index)
eta0 = 2 * np.pi * np.sqrt(n0 ** 2 - (n0 * np.sin(theta0)) ** 2) \
/ wavelength
etaj = 2 * np.pi * np.sqrt(nj ** 2 - (n0 * np.sin(theta0)) ** 2) \
/ wavelength
# | Retreive coefficients. We have to build some
# submultilayers first.
| # Submultilayer from the top medium to dipole_layer_index.
rilist = [self.getRefrIndex(0)]
alist = [self.getPropAngle(0)]
layers = [self.__stack[0]['medium']]
for index in range(1, dipole_layer_index):
layers.append([self.__stack[index]['medium'],
self.getThickness(index)])
rilist.append(self.getRefrIndex(index))
alist.append(self.getPropAngle(index))
layers.append(self.__stack[dipole_layer_index]['medium'])
rilist.append(self.getRefrIndex(dipole_layer_index))
alist.append(self.getPropAngle(dipole_layer_index))
sub_above = Multilayer(layers)
sub_above.setWlength(wavelength, rilist)
sub_above.setPropAngle(alist)
sub_above.setPolarization('TM')
sub_above.calcMatrices()
sub_above.updateCharMatrix()
# Submultilayer from dipole_layer_index to the bottom
# medium.
rilist = [self.getRefrIndex(dipole_layer_index)]
alist = [self.getPropAngle(dipole_layer_index)]
layers = [self.__stack[dipole_layer_index]['medium']]
for index in range(dipole_layer_index + 1,
self.numLayers() - 1):
layers.append([self.__stack[index]['medium'],
self.getThickness(index)])
rilist.append(self.getRefrIndex(index))
alist.append(self.getPropAngle(index))
layers.append(self.__stack[self.numLayers() - 1]['medium'])
rilist.append(self.getRefrIndex(self.numLayers() - 1))
alist.append(self.getPropAngle(self.numLayers() - 1))
sub_below = Multilayer(layers)
sub_below.setWlength(wavelength, rilist)
sub_below.setPropAngle(alist)
sub_below.setPolarization(self.getPolarization())
sub_below.calcMatrices()
sub_below.updateCharMatrix()
# Now we can retreive the relevant coefficients
t1j = sub_above.getCoefficientsUpDown()['t']
rjjp1 = sub_below.getCoefficientsUpDown()['r']
rjjm1 = sub_above.getCoefficientsDownUp()['r']
# Calculate function. We handle separately the case
# where theta0 is 0 to avoid a NaN result. Bear in
# mind that if we have a dipole oscilating along x
# there is no light propagating along x.
if theta0 == np.pi / 2:
fx = 1 + 0j
else:
numerator = t1j * \
(1 - rjjp1 * np.exp(2 * etaj * (z - zj) * 1j))
denominator = \
1 - rjjp1 * rjjm1 * np.exp(2 * etaj * dj * 1j)
factor = np.exp(eta0 * (z - z0) * 1j - etaj * \
(z - zj1) * 1j) * np.cos(thetaj) / np.cos(theta0)
fx = numerator * factor / denominator
return np.complex |
import sys
import operator
import collections
import random
import string
import heapq
# @include
def find_student_with_highest_best_of_three_scores(name_score_data):
    """Return the name of the student with the highest top-three score sum.

    name_score_data yields lines of the form "<name> <integer-score>".
    Students with fewer than three scores are ignored. Returns the string
    'no such student' when no student has at least three scores.
    """
    student_scores = collections.defaultdict(list)
    for line in name_score_data:
        name, score = line.split()
        # Keep only each student's three highest scores via a size-3 min-heap.
        if len(student_scores[name]) < 3:
            heapq.heappush(student_scores[name], int(score))
        else:
            heapq.heappushpop(student_scores[name], int(score))
    candidates = [(sum(scores), name)
                  for name, scores in student_scores.items()
                  if len(scores) == 3]
    # Bug fix: the original applied [1] to the default as well —
    # max(..., default='no such student')[1] returned 'o' when no student
    # had three scores. Handle the empty case before indexing.
    if not candidates:
        return 'no such student'
    return max(candidates, key=operator.itemgetter(0))[1]
# @exclude
def rand_string(length):
    """Return a random string of `length` lowercase ASCII letters."""
    chars = [random.choice(string.ascii_lowercase) for _ in range(length)]
    return ''.join(chars)
def simple_test():
    # Deterministic fixture: adnan's best three (100+98+95=293) beat
    # thl's (90+99+95=284); amit and dd have only two scores each and
    # are therefore excluded.
    with open('scores.txt', 'w') as ofs:
        ofs.write('''adnan 100
amit 99
adnan 98
thl 90
adnan 10
amit 100
thl 99
thl 95
dd 100
dd 100
adnan 95''')
    with open('scores.txt') as name_score_data:
        result = find_student_with_highest_best_of_three_scores(name_score_data)
    print('result =', result)
    assert result == 'adnan'
| assert result == 'adnan'
def main():
    # Run the deterministic fixture first.
    simple_test()
    # Then a randomized smoke run: n students (n from argv[1] or random),
    # each with 0-20 random scores in [0, 100].
    n = int(sys.argv[1]) if len(sys.argv) == 2 else random.randint(1, 10000)
    with open('scores.txt', 'w') as ofs:
        for i in range(n):
            test_num = random.randint(0, 20)
            name = rand_string(random.randint(5, 10))
            for _ in range(test_num):
                print(name, random.randint(0, 100), file=ofs)
    with open('scores.txt') as name_score_data:
        name = find_student_with_highest_best_of_three_scores(name_score_data)
        name_score_data.seek(0)
    print('top student is', name)
if __name__ == '__main__':
    # Script entry point.
    main()
|
from pathlib import Path
import os
import structlog
log = structlog.get_logger()
_config = None
def get():
    """Return the process-wide configuration, building it lazily on first use."""
    global _config
    if isinstance(_config, _build_config):
        return _config
    _config = _build_config()
    return _config
class _build_config:
def __init_ | _(self):
self._config = {}
self.dos_install_dir = os.environ["DOS_BIN"]
self.dos_log_dir = os.environ["DOS_LOG"]
self.env_var_contexts = ["dos"]
# load from toml file
self._load_toml_config()
# load from env variables
self._load_env_vars()
def get(self, key, default=None):
return self._config.get(key, None)
| def put(self, key, value, context="default"):
self.add_config_value(key, value, context=context)
def check(self, key):
return key in self._config
def add_config_value(self, key, value, context="default"):
ctx_key = f"{context}_{key}"
self._config[ctx_key] = value
log.debug("set config", context=context, key=key, ctx_key=ctx_key)
def add_path_value(self, key, value, context):
self.add_config_value(key, Path(value), context=context)
def _load_toml_config(self):
# potentially add env var contexts
log.debug("loading toml config", file_name="TODO <> TODO")
def _load_env_vars(self):
log.debug("loading environ config")
for key in os.environ:
parts = key.lower().split("_")
ctx = parts[0]
if ctx not in self.env_var_contexts:
continue
log.info(f"discovered environ config", key=key)
if len(parts) == 2:
self.add_config_value(
parts[1], # key
os.environ[key], # value from env
context=ctx, # give context
)
elif len(parts) == 3:
k = parts[2]
t = parts[1]
if t == "path":
self.add_path_value(k, os.environ[key], context=ctx)
else:
raise ValueError(f'unrecognized key type "{t}" for "{key}"')
else:
ValueError(
f"incorrect number of parts for env var: {key}, expected 2 or 3"
)
def dos_bin(self):
log.info(f"dev ops shell bin: {self.dos_install_dir}")
dos_bin = Path(self.dos_install_dir)
dos_bin.mkdir(parents=True, exist_ok=True)
return dos_bin
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.