| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
cgstudiomap/cgstudiomap | main/eggs/Django-1.9-py2.7.egg/django/test/client.py | 132 | 26745 | from __future__ import unicode_literals
import json
import mimetypes
import os
import re
import sys
from copy import copy
from importlib import import_module
from io import BytesIO
from django.apps import apps
from django.conf import settings
from django.core import urlresolvers
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import ISO_8859_1, UTF_8, WSGIRequest
from django.core.signals import (
got_request_exception, request_finished, request_started,
)
from django.db import close_old_connections
from django.http import HttpRequest, QueryDict, SimpleCookie
from django.template import TemplateDoesNotExist
from django.test import signals
from django.test.utils import ContextList
from django.utils import six
from django.utils.encoding import force_bytes, force_str, uri_to_iri
from django.utils.functional import SimpleLazyObject, curry
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
from django.utils.six.moves.urllib.parse import urlparse, urlsplit
__all__ = ('Client', 'RedirectCycleError', 'RequestFactory', 'encode_file', 'encode_multipart')
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile(r'.*; charset=([\w\d-]+);?')
class RedirectCycleError(Exception):
"""
The test client has been asked to follow a redirect loop.
"""
def __init__(self, message, last_response):
super(RedirectCycleError, self).__init__(message)
self.last_response = last_response
self.redirect_chain = last_response.redirect_chain
class FakePayload(object):
"""
A wrapper around BytesIO that restricts what can be read, since data from
the network can't be seeked and can't be read beyond its content length.
This makes sure that views can't do anything under the test client that
wouldn't work in real life.
"""
def __init__(self, content=None):
self.__content = BytesIO()
self.__len = 0
self.read_started = False
if content is not None:
self.write(content)
def __len__(self):
return self.__len
def read(self, num_bytes=None):
if not self.read_started:
self.__content.seek(0)
self.read_started = True
if num_bytes is None:
num_bytes = self.__len or 0
assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
def write(self, content):
if self.read_started:
raise ValueError("Unable to write a payload after he's been read")
content = force_bytes(content)
self.__content.write(content)
self.__len += len(content)
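# Illustrative usage sketch (not part of the original module):
#     payload = FakePayload(b'key=value')
#     assert len(payload) == 9
#     payload.read(4)      # -> b'key='
#     payload.write(b'x')  # raises ValueError: payload already being read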
def closing_iterator_wrapper(iterable, close):
try:
for item in iterable:
yield item
finally:
request_finished.disconnect(close_old_connections)
close() # will fire request_finished
request_finished.connect(close_old_connections)
class ClientHandler(BaseHandler):
"""
An HTTP handler that can be used for testing purposes. Uses the WSGI
interface to compose requests, but returns the raw HttpResponse object with
the originating WSGIRequest attached to its ``wsgi_request`` attribute.
"""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super(ClientHandler, self).__init__(*args, **kwargs)
def __call__(self, environ):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
self.load_middleware()
request_started.disconnect(close_old_connections)
request_started.send(sender=self.__class__, environ=environ)
request_started.connect(close_old_connections)
request = WSGIRequest(environ)
# sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably
# required for backwards compatibility with external tests against
# admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
# Request goes through middleware.
response = self.get_response(request)
# Attach the originating request to the response so that it can be
# retrieved later.
response.wsgi_request = request
# We're emulating a WSGI server; we must call the close method
# on completion.
if response.streaming:
response.streaming_content = closing_iterator_wrapper(
response.streaming_content, response.close)
else:
request_finished.disconnect(close_old_connections)
response.close() # will fire request_finished
request_finished.connect(close_old_connections)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Stores templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault('templates', []).append(template)
store.setdefault('context', ContextList()).append(copy(context))
def encode_multipart(boundary, data):
"""
Encodes multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
is_file = lambda thing: hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, six.string_types) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
item
])
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
value
])
lines.extend([
to_bytes('--%s--' % boundary),
b'',
])
return b'\r\n'.join(lines)
def encode_file(boundary, key, file):
to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)
filename = os.path.basename(file.name) if hasattr(file, 'name') else ''
if hasattr(file, 'content_type'):
content_type = file.content_type
elif filename:
content_type = mimetypes.guess_type(filename)[0]
else:
content_type = None
if content_type is None:
content_type = 'application/octet-stream'
if not filename:
filename = key
return [
to_bytes('--%s' % boundary),
to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"'
% (key, filename)),
to_bytes('Content-Type: %s' % content_type),
b'',
to_bytes(file.read())
]
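# Illustrative sketch of the wire format produced above (form values are
# hypothetical):
#     encode_multipart(BOUNDARY, {'title': 'hello'}) ==
#         b'--BoUnDaRyStRiNg\r\n'
#         b'Content-Disposition: form-data; name="title"\r\n'
#         b'\r\n'
#         b'hello\r\n'
#         b'--BoUnDaRyStRiNg--\r\n'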
class RequestFactory(object):
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
def __init__(self, **defaults):
self.defaults = defaults
self.cookies = SimpleCookie()
self.errors = BytesIO()
def _base_environ(self, **request):
"""
The base environment for a request.
"""
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See http://www.python.org/dev/peps/pep-3333/#environ-variables
environ = {
'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
'PATH_INFO': str('/'),
'REMOTE_ADDR': str('127.0.0.1'),
'REQUEST_METHOD': str('GET'),
'SCRIPT_NAME': str(''),
'SERVER_NAME': str('testserver'),
'SERVER_PORT': str('80'),
'SERVER_PROTOCOL': str('HTTP/1.1'),
'wsgi.version': (1, 0),
'wsgi.url_scheme': str('http'),
'wsgi.input': FakePayload(b''),
'wsgi.errors': self.errors,
'wsgi.multiprocess': True,
'wsgi.multithread': False,
'wsgi.run_once': False,
}
environ.update(self.defaults)
environ.update(request)
return environ
def request(self, **request):
"Construct a generic request object."
return WSGIRequest(self._base_environ(**request))
def _encode_data(self, data, content_type):
if content_type is MULTIPART_CONTENT:
return encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
return force_bytes(data, encoding=charset)
def _get_path(self, parsed):
path = force_str(parsed[2])
# If there are parameters, add them
if parsed[3]:
path += str(";") + force_str(parsed[3])
path = uri_to_iri(path).encode(UTF_8)
# Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
# decoded with ISO-8859-1. We replicate this behavior here.
# Refs comment in `get_bytes_from_wsgi()`.
return path.decode(ISO_8859_1) if six.PY3 else path
def get(self, path, data=None, secure=False, **extra):
"Construct a GET request."
data = {} if data is None else data
r = {
'QUERY_STRING': urlencode(data, doseq=True),
}
r.update(extra)
return self.generic('GET', path, secure=secure, **r)
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
secure=False, **extra):
"Construct a POST request."
data = {} if data is None else data
post_data = self._encode_data(data, content_type)
return self.generic('POST', path, post_data, content_type,
secure=secure, **extra)
def head(self, path, data=None, secure=False, **extra):
"Construct a HEAD request."
data = {} if data is None else data
r = {
'QUERY_STRING': urlencode(data, doseq=True),
}
r.update(extra)
return self.generic('HEAD', path, secure=secure, **r)
def trace(self, path, secure=False, **extra):
"Construct a TRACE request."
return self.generic('TRACE', path, secure=secure, **extra)
def options(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct an OPTIONS request."
return self.generic('OPTIONS', path, data, content_type,
secure=secure, **extra)
def put(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct a PUT request."
return self.generic('PUT', path, data, content_type,
secure=secure, **extra)
def patch(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct a PATCH request."
return self.generic('PATCH', path, data, content_type,
secure=secure, **extra)
def delete(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct a DELETE request."
return self.generic('DELETE', path, data, content_type,
secure=secure, **extra)
def generic(self, method, path, data='',
content_type='application/octet-stream', secure=False,
**extra):
"""Constructs an arbitrary HTTP request."""
parsed = urlparse(force_str(path))
data = force_bytes(data, settings.DEFAULT_CHARSET)
r = {
'PATH_INFO': self._get_path(parsed),
'REQUEST_METHOD': str(method),
'SERVER_PORT': str('443') if secure else str('80'),
'wsgi.url_scheme': str('https') if secure else str('http'),
}
if data:
r.update({
'CONTENT_LENGTH': len(data),
'CONTENT_TYPE': str(content_type),
'wsgi.input': FakePayload(data),
})
r.update(extra)
# If QUERY_STRING is absent or empty, we want to extract it from the URL.
if not r.get('QUERY_STRING'):
query_string = force_bytes(parsed[4])
# WSGI requires latin-1 encoded strings. See get_path_info().
if six.PY3:
query_string = query_string.decode('iso-8859-1')
r['QUERY_STRING'] = query_string
return self.request(**r)
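# Illustrative sketch: ``generic`` also covers non-standard HTTP methods
# (the view below is hypothetical):
#     rf = RequestFactory()
#     request = rf.generic('PURGE', '/cache/', secure=True)
#     response = my_cache_view(request)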
class Client(RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, enforce_csrf_checks=False, **defaults):
super(Client, self).__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.exc_info = None
def store_exc_info(self, **kwargs):
"""
Stores exceptions when they are generated by a view.
"""
self.exc_info = sys.exc_info()
def _session(self):
"""
Obtains the current session variables.
"""
if apps.is_installed('django.contrib.sessions'):
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
if cookie:
return engine.SessionStore(cookie.value)
else:
s = engine.SessionStore()
s.save()
self.cookies[settings.SESSION_COOKIE_NAME] = s.session_key
return s
return {}
session = property(_session)
def request(self, **request):
"""
The master request method. Composes the environment dictionary
and passes to the handler, returning the result of the handler.
Assumes defaults for the query environment, which can be overridden
using the arguments to the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = curry(store_rendered_templates, data)
signal_uid = "template-render-%s" % id(request)
signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
# Capture exceptions created by the handler.
exception_uid = "request-exception-%s" % id(request)
got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid)
try:
try:
response = self.handler(environ)
except TemplateDoesNotExist as e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
exc_info = self.exc_info
self.exc_info = None
six.reraise(*exc_info)
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
response.json = curry(self._parse_json, response)
# Attach the ResolverMatch instance to the response
response.resolver_match = SimpleLazyObject(
lambda: urlresolvers.resolve(request['PATH_INFO']))
# Flatten a single context. Not really necessary anymore thanks to
# the __getattr__ flattening in ContextList, but has some edge-case
# backwards-compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
finally:
signals.template_rendered.disconnect(dispatch_uid=signal_uid)
got_request_exception.disconnect(dispatch_uid=exception_uid)
def get(self, path, data=None, follow=False, secure=False, **extra):
"""
Requests a response from the server using GET.
"""
response = super(Client, self).get(path, data=data, secure=secure,
**extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
follow=False, secure=False, **extra):
"""
Requests a response from the server using POST.
"""
response = super(Client, self).post(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def head(self, path, data=None, follow=False, secure=False, **extra):
"""
Request a response from the server using HEAD.
"""
response = super(Client, self).head(path, data=data, secure=secure,
**extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def options(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Request a response from the server using OPTIONS.
"""
response = super(Client, self).options(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def put(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Send a resource to the server using PUT.
"""
response = super(Client, self).put(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def patch(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Send a resource to the server using PATCH.
"""
response = super(Client, self).patch(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def delete(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Send a DELETE request to the server.
"""
response = super(Client, self).delete(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def trace(self, path, data='', follow=False, secure=False, **extra):
"""
Send a TRACE request to the server.
"""
response = super(Client, self).trace(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def login(self, **credentials):
"""
Sets the Client to appear as if it has successfully logged into a site.
Returns True if login is possible; False if the provided credentials
are incorrect, or the user is inactive, or if the sessions framework is
not available.
"""
from django.contrib.auth import authenticate
user = authenticate(**credentials)
if (user and user.is_active and
apps.is_installed('django.contrib.sessions')):
self._login(user)
return True
else:
return False
def force_login(self, user, backend=None):
if backend is None:
backend = settings.AUTHENTICATION_BACKENDS[0]
user.backend = backend
self._login(user)
def _login(self, user):
from django.contrib.auth import login
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
def logout(self):
"""
Removes the authenticated user's cookies and session object.
Causes the authenticated user to be logged out.
"""
from django.contrib.auth import get_user, logout
request = HttpRequest()
engine = import_module(settings.SESSION_ENGINE)
if self.session:
request.session = self.session
request.user = get_user(request)
else:
request.session = engine.SessionStore()
logout(request)
self.cookies = SimpleCookie()
def _parse_json(self, response, **extra):
if 'application/json' not in response.get('Content-Type', ''):
raise ValueError(
'Content-Type header is "{0}", not "application/json"'
.format(response.get('Content-Type'))
)
return json.loads(response.content.decode(), **extra)
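# Illustrative usage of the ``response.json`` attribute wired up in
# Client.request() (the URL below is hypothetical):
#     response = client.get('/api/thing/')
#     data = response.json()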
def _handle_redirects(self, response, **extra):
"Follows any redirects by requesting responses from the server using GET."
response.redirect_chain = []
while response.status_code in (301, 302, 303, 307):
response_url = response.url
redirect_chain = response.redirect_chain
redirect_chain.append((response_url, response.status_code))
url = urlsplit(response_url)
if url.scheme:
extra['wsgi.url_scheme'] = url.scheme
if url.hostname:
extra['SERVER_NAME'] = url.hostname
if url.port:
extra['SERVER_PORT'] = str(url.port)
response = self.get(url.path, QueryDict(url.query), follow=False, **extra)
response.redirect_chain = redirect_chain
if redirect_chain[-1] in redirect_chain[:-1]:
# Check that we're not redirecting to somewhere we've already
# been to, to prevent loops.
raise RedirectCycleError("Redirect loop detected.", last_response=response)
if len(redirect_chain) > 20:
# Such a lengthy chain likely also means a loop, but one with
# a growing path, changing view, or changing query argument;
# 20 is the value of "network.http.redirection-limit" from Firefox.
raise RedirectCycleError("Too many redirects.", last_response=response)
return response
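# Illustrative usage sketch of the redirect following above (URLs and
# status codes are hypothetical):
#     client = Client()
#     response = client.get('/old-url/', follow=True)
#     # response.redirect_chain might look like [('/new-url/', 302)]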
| agpl-3.0 |
apporc/cinder | cinder/api/__init__.py | 26 | 1394 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import paste.urlmap
from cinder.i18n import _LW
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def root_app_factory(loader, global_conf, **local_conf):
if CONF.enable_v1_api:
LOG.warning(_LW('The v1 api is deprecated and will be removed in the '
'Liberty release. You should set enable_v1_api=false '
'and enable_v2_api=true in your cinder.conf file.'))
else:
del local_conf['/v1']
if not CONF.enable_v2_api:
del local_conf['/v2']
return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf)
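# Illustrative paste deploy snippet that would invoke this factory
# (section and app names are hypothetical):
#     [composite:osapi_volume]
#     use = call:cinder.api:root_app_factory
#     /v1: openstack_volume_api_v1
#     /v2: openstack_volume_api_v2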
| apache-2.0 |
BlueMeanie/PeerShares | share/qt/extract_strings_qt.py | 1294 | 1784 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
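# Illustrative round trip through parse_po() (input is a minimal sketch of
# xgettext output):
#     parse_po('msgid "hi"\nmsgstr ""')
#     # -> [(['"hi"'], ['""'])]
# Continuation lines starting with '"' are appended to the current
# msgid/msgstr.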
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {')
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| mit |
pdellaert/ansible | lib/ansible/modules/network/netscaler/netscaler_nitro_request.py | 21 | 28641 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_nitro_request
short_description: Issue Nitro API requests to a Netscaler instance.
description:
- Issue Nitro API requests to a Netscaler instance.
- This is intended to be shorthand for using the uri Ansible module to issue the raw HTTP requests directly.
- It provides consistent return values and has no other dependencies apart from the base Ansible runtime environment.
- This module is intended to run either on the Ansible control node or a bastion (jumpserver) with access to the actual Netscaler instance.
version_added: "2.5.0"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
nsip:
description:
- The IP address of the Netscaler or MAS instance where the Nitro API calls will be made.
- "The port can be specified with the colon C(:). E.g. C(192.168.1.1:555)."
nitro_user:
description:
- The username with which to authenticate to the Netscaler node.
required: true
nitro_pass:
description:
- The password with which to authenticate to the Netscaler node.
required: true
nitro_protocol:
choices: [ 'http', 'https' ]
default: http
description:
- Which protocol to use when accessing the Nitro API objects.
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
default: 'yes'
type: bool
nitro_auth_token:
description:
- The authentication token provided by the C(mas_login) operation. It is required when issuing Nitro API calls through a MAS proxy.
resource:
description:
- The type of resource we are operating on.
- It is required for all I(operation) values except C(mas_login) and C(save_config).
name:
description:
- The name of the resource we are operating on.
- "It is required for the following I(operation) values: C(update), C(get), C(delete)."
attributes:
description:
- The attributes of the Nitro object we are operating on.
- "It is required for the following I(operation) values: C(add), C(update), C(action)."
args:
description:
- A dictionary which defines the key arguments by which we will select the Nitro object to operate on.
- "It is required for the following I(operation) values: C(get_by_args), C('delete_by_args')."
filter:
description:
- A dictionary which defines the filter with which to refine the Nitro objects returned by the C(get_filtered) I(operation).
operation:
description:
- Define the Nitro operation that we want to perform.
choices:
- add
- update
- get
- get_by_args
- get_filtered
- get_all
- delete
- delete_by_args
- count
- mas_login
- save_config
- action
expected_nitro_errorcode:
description:
- A list of numeric values that signify that the operation was successful.
default: [0]
required: true
action:
description:
- The action to perform when the I(operation) value is set to C(action).
- Some common values for this parameter are C(enable), C(disable), C(rename).
instance_ip:
description:
- The IP address of the target Netscaler instance when issuing a Nitro request through a MAS proxy.
instance_name:
description:
- The name of the target Netscaler instance when issuing a Nitro request through a MAS proxy.
instance_id:
description:
- The id of the target Netscaler instance when issuing a Nitro request through a MAS proxy.
'''
EXAMPLES = '''
- name: Add a server
delegate_to: localhost
netscaler_nitro_request:
nsip: "{{ nsip }}"
nitro_user: "{{ nitro_user }}"
nitro_pass: "{{ nitro_pass }}"
operation: add
resource: server
name: test-server-1
attributes:
name: test-server-1
ipaddress: 192.168.1.1
- name: Update server
delegate_to: localhost
netscaler_nitro_request:
nsip: "{{ nsip }}"
nitro_user: "{{ nitro_user }}"
nitro_pass: "{{ nitro_pass }}"
operation: update
resource: server
name: test-server-1
attributes:
name: test-server-1
ipaddress: 192.168.1.2
- name: Get server
delegate_to: localhost
register: result
netscaler_nitro_request:
nsip: "{{ nsip }}"
nitro_user: "{{ nitro_user }}"
nitro_pass: "{{ nitro_pass }}"
operation: get
resource: server
name: test-server-1
- name: Delete server
delegate_to: localhost
register: result
netscaler_nitro_request:
nsip: "{{ nsip }}"
nitro_user: "{{ nitro_user }}"
nitro_pass: "{{ nitro_pass }}"
operation: delete
resource: server
name: test-server-1
- name: Rename server
delegate_to: localhost
netscaler_nitro_request:
nsip: "{{ nsip }}"
nitro_user: "{{ nitro_user }}"
nitro_pass: "{{ nitro_pass }}"
operation: action
action: rename
resource: server
attributes:
name: test-server-1
newname: test-server-2
- name: Get server by args
delegate_to: localhost
register: result
netscaler_nitro_request:
nsip: "{{ nsip }}"
nitro_user: "{{ nitro_user }}"
nitro_pass: "{{ nitro_pass }}"
operation: get_by_args
resource: server
args:
name: test-server-1
- name: Get server by filter
delegate_to: localhost
register: result
netscaler_nitro_request:
nsip: "{{ nsip }}"
nitro_user: "{{ nitro_user }}"
nitro_pass: "{{ nitro_pass }}"
operation: get_filtered
resource: server
filter:
ipaddress: 192.168.1.2
# Doing a NITRO request through MAS.
# Requires to have an authentication token from the mas_login and used as the nitro_auth_token parameter
# Also nsip is the MAS address and the target Netscaler IP must be defined with instance_ip
# The rest of the task arguments remain the same as when issuing the NITRO request directly to a Netscaler instance.
- name: Do mas login
delegate_to: localhost
register: login_result
netscaler_nitro_request:
nsip: "{{ mas_ip }}"
nitro_user: "{{ nitro_user }}"
nitro_pass: "{{ nitro_pass }}"
operation: mas_login
- name: Add resource through MAS proxy
delegate_to: localhost
netscaler_nitro_request:
nsip: "{{ mas_ip }}"
nitro_auth_token: "{{ login_result.nitro_auth_token }}"
instance_ip: "{{ nsip }}"
operation: add
resource: server
name: test-server-1
attributes:
name: test-server-1
ipaddress: 192.168.1.7
'''
RETURN = '''
nitro_errorcode:
description: A numeric value containing the return code of the NITRO operation. When 0 the operation is successful. Any non zero value indicates an error.
returned: always
type: int
sample: 0
nitro_message:
description: A string containing a human readable explanation for the NITRO operation result.
returned: always
type: str
sample: Success
nitro_severity:
description: A string describing the severity of the NITRO operation error or NONE.
returned: always
type: str
sample: NONE
http_response_data:
description: A dictionary that contains all the HTTP response's data.
returned: always
type: dict
sample: "status: 200"
http_response_body:
description: A string with the actual HTTP response body content if existent. If there is no HTTP response body it is an empty string.
returned: always
type: str
sample: "{ errorcode: 0, message: Done, severity: NONE }"
nitro_object:
description: The object returned from the NITRO operation. This is applicable to the various get operations which return an object.
returned: when applicable
type: list
sample:
-
ipaddress: "192.168.1.8"
ipv6address: "NO"
maxbandwidth: "0"
name: "test-server-1"
port: 0
sp: "OFF"
state: "ENABLED"
nitro_auth_token:
description: The token returned by the C(mas_login) operation when successful.
returned: when applicable
type: str
sample: "##E8D7D74DDBD907EE579E8BB8FF4529655F22227C1C82A34BFC93C9539D66"
'''
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.basic import AnsibleModule
import codecs
class NitroAPICaller(object):
_argument_spec = dict(
nsip=dict(
fallback=(env_fallback, ['NETSCALER_NSIP']),
),
nitro_user=dict(
fallback=(env_fallback, ['NETSCALER_NITRO_USER']),
),
nitro_pass=dict(
fallback=(env_fallback, ['NETSCALER_NITRO_PASS']),
no_log=True
),
nitro_protocol=dict(
choices=['http', 'https'],
fallback=(env_fallback, ['NETSCALER_NITRO_PROTOCOL']),
default='http'
),
validate_certs=dict(
default=True,
type='bool'
),
nitro_auth_token=dict(
type='str',
no_log=True
),
resource=dict(type='str'),
name=dict(type='str'),
attributes=dict(type='dict'),
args=dict(type='dict'),
filter=dict(type='dict'),
operation=dict(
type='str',
required=True,
choices=[
'add',
'update',
'get',
'get_by_args',
'get_filtered',
'get_all',
'delete',
'delete_by_args',
'count',
'mas_login',
# Actions
'save_config',
# Generic action handler
'action',
]
),
expected_nitro_errorcode=dict(
type='list',
default=[0],
),
action=dict(type='str'),
instance_ip=dict(type='str'),
instance_name=dict(type='str'),
instance_id=dict(type='str'),
)
def __init__(self):
self._module = AnsibleModule(
argument_spec=self._argument_spec,
supports_check_mode=False,
)
self._module_result = dict(
failed=False,
)
# Prepare the http headers according to module arguments
self._headers = {}
self._headers['Content-Type'] = 'application/json'
# Check for conflicting authentication methods
have_token = self._module.params['nitro_auth_token'] is not None
have_userpass = None not in (self._module.params['nitro_user'], self._module.params['nitro_pass'])
login_operation = self._module.params['operation'] == 'mas_login'
if have_token and have_userpass:
self.fail_module(msg='Cannot define both authentication token and username/password')
if have_token:
self._headers['Cookie'] = "NITRO_AUTH_TOKEN=%s" % self._module.params['nitro_auth_token']
if have_userpass and not login_operation:
self._headers['X-NITRO-USER'] = self._module.params['nitro_user']
self._headers['X-NITRO-PASS'] = self._module.params['nitro_pass']
# Do header manipulation when doing a MAS proxy call
if self._module.params['instance_ip'] is not None:
self._headers['_MPS_API_PROXY_MANAGED_INSTANCE_IP'] = self._module.params['instance_ip']
elif self._module.params['instance_name'] is not None:
self._headers['_MPS_API_PROXY_MANAGED_INSTANCE_NAME'] = self._module.params['instance_name']
elif self._module.params['instance_id'] is not None:
self._headers['_MPS_API_PROXY_MANAGED_INSTANCE_ID'] = self._module.params['instance_id']
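# Illustrative headers built above for a MAS-proxied call (token and IP
# values are hypothetical):
#     {'Content-Type': 'application/json',
#      'Cookie': 'NITRO_AUTH_TOKEN=##E8D7D74DDBD907EE...',
#      '_MPS_API_PROXY_MANAGED_INSTANCE_IP': '192.168.1.7'}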
def edit_response_data(self, r, info, result, success_status):
# Search for body in both http body and http data
if r is not None:
result['http_response_body'] = codecs.decode(r.read(), 'utf-8')
elif 'body' in info:
result['http_response_body'] = codecs.decode(info['body'], 'utf-8')
del info['body']
else:
result['http_response_body'] = ''
result['http_response_data'] = info
# Update the nitro_* parameters according to expected success_status
# Use explicit return values from http response or deduce from http status code
# Nitro return code in http data
result['nitro_errorcode'] = None
result['nitro_message'] = None
result['nitro_severity'] = None
if result['http_response_body'] != '':
try:
data = self._module.from_json(result['http_response_body'])
except ValueError:
data = {}
result['nitro_errorcode'] = data.get('errorcode')
result['nitro_message'] = data.get('message')
result['nitro_severity'] = data.get('severity')
# If we do not have the nitro errorcode from body deduce it from the http status
if result['nitro_errorcode'] is None:
# HTTP status failed
if result['http_response_data'].get('status') != success_status:
result['nitro_errorcode'] = -1
result['nitro_message'] = result['http_response_data'].get('msg', 'HTTP status %s' % result['http_response_data']['status'])
result['nitro_severity'] = 'ERROR'
# HTTP status succeeded
else:
result['nitro_errorcode'] = 0
result['nitro_message'] = 'Success'
result['nitro_severity'] = 'NONE'
def handle_get_return_object(self, result):
result['nitro_object'] = []
if result['nitro_errorcode'] == 0:
if result['http_response_body'] != '':
data = self._module.from_json(result['http_response_body'])
if self._module.params['resource'] in data:
result['nitro_object'] = data[self._module.params['resource']]
else:
del result['nitro_object']
def fail_module(self, msg, **kwargs):
self._module_result['failed'] = True
self._module_result['changed'] = False
self._module_result.update(kwargs)
self._module_result['msg'] = msg
self._module.fail_json(**self._module_result)
def main(self):
if self._module.params['operation'] == 'add':
result = self.add()
if self._module.params['operation'] == 'update':
result = self.update()
if self._module.params['operation'] == 'delete':
result = self.delete()
if self._module.params['operation'] == 'delete_by_args':
result = self.delete_by_args()
if self._module.params['operation'] == 'get':
result = self.get()
if self._module.params['operation'] == 'get_by_args':
result = self.get_by_args()
if self._module.params['operation'] == 'get_filtered':
result = self.get_filtered()
if self._module.params['operation'] == 'get_all':
result = self.get_all()
if self._module.params['operation'] == 'count':
result = self.count()
if self._module.params['operation'] == 'mas_login':
result = self.mas_login()
if self._module.params['operation'] == 'action':
result = self.action()
if self._module.params['operation'] == 'save_config':
result = self.save_config()
if result['nitro_errorcode'] not in self._module.params['expected_nitro_errorcode']:
self.fail_module(msg='NITRO Failure', **result)
self._module_result.update(result)
self._module.exit_json(**self._module_result)
def exit_module(self):
self._module.exit_json()
def add(self):
# Check if required attributes are present
if self._module.params['resource'] is None:
self.fail_module(msg='NITRO resource is undefined.')
if self._module.params['attributes'] is None:
self.fail_module(msg='NITRO resource attributes are undefined.')
url = '%s://%s/nitro/v1/config/%s' % (
self._module.params['nitro_protocol'],
self._module.params['nsip'],
self._module.params['resource'],
)
data = self._module.jsonify({self._module.params['resource']: self._module.params['attributes']})
r, info = fetch_url(
self._module,
url=url,
headers=self._headers,
data=data,
method='POST',
)
result = {}
self.edit_response_data(r, info, result, success_status=201)
if result['nitro_errorcode'] == 0:
self._module_result['changed'] = True
else:
self._module_result['changed'] = False
return result
def update(self):
# Check if required attributes and arguments are present
if self._module.params['resource'] is None:
self.fail_module(msg='NITRO resource is undefined.')
if self._module.params['name'] is None:
self.fail_module(msg='NITRO resource name is undefined.')
if self._module.params['attributes'] is None:
self.fail_module(msg='NITRO resource attributes are undefined.')
url = '%s://%s/nitro/v1/config/%s/%s' % (
self._module.params['nitro_protocol'],
self._module.params['nsip'],
self._module.params['resource'],
self._module.params['name'],
)
data = self._module.jsonify({self._module.params['resource']: self._module.params['attributes']})
r, info = fetch_url(
self._module,
url=url,
headers=self._headers,
data=data,
method='PUT',
)
result = {}
self.edit_response_data(r, info, result, success_status=200)
if result['nitro_errorcode'] == 0:
self._module_result['changed'] = True
else:
self._module_result['changed'] = False
return result
def get(self):
if self._module.params['resource'] is None:
self.fail_module(msg='NITRO resource is undefined.')
if self._module.params['name'] is None:
self.fail_module(msg='NITRO resource name is undefined.')
url = '%s://%s/nitro/v1/config/%s/%s' % (
self._module.params['nitro_protocol'],
self._module.params['nsip'],
self._module.params['resource'],
self._module.params['name'],
)
r, info = fetch_url(
self._module,
url=url,
headers=self._headers,
method='GET',
)
result = {}
self.edit_response_data(r, info, result, success_status=200)
self.handle_get_return_object(result)
self._module_result['changed'] = False
return result
def get_by_args(self):
if self._module.params['resource'] is None:
self.fail_module(msg='NITRO resource is undefined.')
if self._module.params['args'] is None:
self.fail_module(msg='NITRO args is undefined.')
url = '%s://%s/nitro/v1/config/%s' % (
self._module.params['nitro_protocol'],
self._module.params['nsip'],
self._module.params['resource'],
)
args_dict = self._module.params['args']
args = ','.join(['%s:%s' % (k, args_dict[k]) for k in args_dict])
args = 'args=' + args
url = '?'.join([url, args])
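# Illustrative resulting URL (resource and args values are hypothetical):
#     http://192.168.1.1/nitro/v1/config/server?args=name:test-server-1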
r, info = fetch_url(
self._module,
url=url,
headers=self._headers,
method='GET',
)
result = {}
self.edit_response_data(r, info, result, success_status=200)
self.handle_get_return_object(result)
self._module_result['changed'] = False
return result
def get_filtered(self):
if self._module.params['resource'] is None:
self.fail_module(msg='NITRO resource is undefined.')
if self._module.params['filter'] is None:
self.fail_module(msg='NITRO filter is undefined.')
keys = list(self._module.params['filter'].keys())
filter_key = keys[0]
filter_value = self._module.params['filter'][filter_key]
filter_str = '%s:%s' % (filter_key, filter_value)
url = '%s://%s/nitro/v1/config/%s?filter=%s' % (
self._module.params['nitro_protocol'],
self._module.params['nsip'],
self._module.params['resource'],
filter_str,
)
r, info = fetch_url(
self._module,
url=url,
headers=self._headers,
method='GET',
)
result = {}
self.edit_response_data(r, info, result, success_status=200)
self.handle_get_return_object(result)
self._module_result['changed'] = False
return result
def get_all(self):
if self._module.params['resource'] is None:
self.fail_module(msg='NITRO resource is undefined.')
url = '%s://%s/nitro/v1/config/%s' % (
self._module.params['nitro_protocol'],
self._module.params['nsip'],
self._module.params['resource'],
)
r, info = fetch_url(
self._module,
url=url,
headers=self._headers,
method='GET',
)
result = {}
self.edit_response_data(r, info, result, success_status=200)
self.handle_get_return_object(result)
self._module_result['changed'] = False
return result
def delete(self):
if self._module.params['resource'] is None:
self.fail_module(msg='NITRO resource is undefined.')
if self._module.params['name'] is None:
self.fail_module(msg='NITRO resource name is undefined.')
# Deletion by name takes precedence over deletion by attributes
url = '%s://%s/nitro/v1/config/%s/%s' % (
self._module.params['nitro_protocol'],
self._module.params['nsip'],
self._module.params['resource'],
self._module.params['name'],
)
r, info = fetch_url(
self._module,
url=url,
headers=self._headers,
method='DELETE',
)
result = {}
self.edit_response_data(r, info, result, success_status=200)
if result['nitro_errorcode'] == 0:
self._module_result['changed'] = True
else:
self._module_result['changed'] = False
return result
def delete_by_args(self):
if self._module.params['resource'] is None:
self.fail_module(msg='NITRO resource is undefined.')
if self._module.params['args'] is None:
self.fail_module(msg='NITRO args is undefined.')
url = '%s://%s/nitro/v1/config/%s' % (
self._module.params['nitro_protocol'],
self._module.params['nsip'],
self._module.params['resource'],
)
args_dict = self._module.params['args']
args = ','.join(['%s:%s' % (k, args_dict[k]) for k in args_dict])
args = 'args=' + args
url = '?'.join([url, args])
r, info = fetch_url(
self._module,
url=url,
headers=self._headers,
method='DELETE',
)
result = {}
self.edit_response_data(r, info, result, success_status=200)
if result['nitro_errorcode'] == 0:
self._module_result['changed'] = True
else:
self._module_result['changed'] = False
return result
def count(self):
if self._module.params['resource'] is None:
self.fail_module(msg='NITRO resource is undefined.')
url = '%s://%s/nitro/v1/config/%s?count=yes' % (
self._module.params['nitro_protocol'],
self._module.params['nsip'],
self._module.params['resource'],
)
r, info = fetch_url(
self._module,
url=url,
headers=self._headers,
method='GET',
)
result = {}
self.edit_response_data(r, info, result)
if result['http_response_body'] != '':
data = self._module.from_json(result['http_response_body'])
result['nitro_errorcode'] = data['errorcode']
result['nitro_message'] = data['message']
result['nitro_severity'] = data['severity']
if self._module.params['resource'] in data:
result['nitro_count'] = data[self._module.params['resource']][0]['__count']
self._module_result['changed'] = False
return result
def action(self):
# Check if required attributes are present
if self._module.params['resource'] is None:
self.fail_module(msg='NITRO resource is undefined.')
if self._module.params['attributes'] is None:
self.fail_module(msg='NITRO resource attributes are undefined.')
if self._module.params['action'] is None:
self.fail_module(msg='NITRO action is undefined.')
url = '%s://%s/nitro/v1/config/%s?action=%s' % (
self._module.params['nitro_protocol'],
self._module.params['nsip'],
self._module.params['resource'],
self._module.params['action'],
)
data = self._module.jsonify({self._module.params['resource']: self._module.params['attributes']})
r, info = fetch_url(
self._module,
url=url,
headers=self._headers,
data=data,
method='POST',
)
result = {}
self.edit_response_data(r, info, result, success_status=200)
if result['nitro_errorcode'] == 0:
self._module_result['changed'] = True
else:
self._module_result['changed'] = False
return result
def mas_login(self):
url = '%s://%s/nitro/v1/config/login' % (
self._module.params['nitro_protocol'],
self._module.params['nsip'],
)
login_credentials = {
'login': {
'username': self._module.params['nitro_user'],
'password': self._module.params['nitro_pass'],
}
}
data = 'object=\n%s' % self._module.jsonify(login_credentials)
r, info = fetch_url(
self._module,
url=url,
headers=self._headers,
data=data,
method='POST',
)
result = {}
self.edit_response_data(r, info, result, success_status=200)
if result['nitro_errorcode'] == 0:
body_data = self._module.from_json(result['http_response_body'])
result['nitro_auth_token'] = body_data['login'][0]['sessionid']
self._module_result['changed'] = False
return result
def save_config(self):
url = '%s://%s/nitro/v1/config/nsconfig?action=save' % (
self._module.params['nitro_protocol'],
self._module.params['nsip'],
)
data = self._module.jsonify(
{
'nsconfig': {},
}
)
r, info = fetch_url(
self._module,
url=url,
headers=self._headers,
data=data,
method='POST',
)
result = {}
self.edit_response_data(r, info, result, success_status=200)
self._module_result['changed'] = False
return result
def main():
nitro_api_caller = NitroAPICaller()
nitro_api_caller.main()
if __name__ == '__main__':
main()
| gpl-3.0 |
orion-42/numerics-physics-stuff | knapsack.py | 1 | 1976 | from collections import namedtuple
knapsack_item = namedtuple("knapsack_item", ["weight", "value"])
max_weight = 15
items = [
knapsack_item(weight=2, value=2),
knapsack_item(weight=1, value=2),
knapsack_item(weight=12, value=4),
knapsack_item(weight=1, value=1),
knapsack_item(weight=4, value=10),
]
# optimal_solution[w][n]: best value achievable using the first n items with weight limit w
optimal_solution = [[0] * (len(items) + 1) for _ in range(max_weight + 1)]
for used_items in range(1, len(items) + 1):
for max_weight_index in range(1, max_weight + 1):
# consider adding the used_items-th item (items[used_items - 1]) to the knapsack
item_to_add = items[used_items - 1]
if item_to_add.weight <= max_weight_index: # can we add this item?
# yes we can
optimal_value_with_new_item = item_to_add.value + optimal_solution[max_weight_index - item_to_add.weight][used_items - 1]
optimal_value_without_new_item = optimal_solution[max_weight_index][used_items - 1]
optimal_solution[max_weight_index][used_items] = max(optimal_value_with_new_item, optimal_value_without_new_item)
else:
# no it is too heavy
optimal_solution[max_weight_index][used_items] = optimal_solution[max_weight_index][used_items - 1]
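# The table filled in above follows the classic 0/1 knapsack recurrence:
#     best[w][n] = best[w][n-1]                                if weight_n > w
#     best[w][n] = max(best[w][n-1],
#                      value_n + best[w - weight_n][n-1])      otherwise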
# find items
items_to_take = []
value = optimal_solution[max_weight][len(items)]
current_max_weight = max_weight
current_items = len(items)
while value > 0:
while optimal_solution[current_max_weight][current_items] == value:
current_items -= 1
current_max_weight -= items[current_items].weight
value = optimal_solution[current_max_weight][current_items]
items_to_take.append(items[current_items])
print("items:")
for item in items: print(item)
print("max weight:", max_weight)
print("table: max_weight * use n items -> optimal value")
for row in optimal_solution: print(row)
print("optimal value:", optimal_solution[-1][-1])
print("items to take:")
for item in items_to_take: print(item)
| mit |
laosiaudi/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/sugar.py | 174 | 4821 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to make it a bit easier to use LabeledTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import string_types
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import ops
from tensorflow.python.framework import ops as tf_ops
class ReshapeCoder(object):
"""Utility class for mapping to and from another shape.
For example, say you have a function `crop_center` which expects a
LabeledTensor with axes named ['batch', 'row', 'column', 'depth'], and
you have a LabeledTensor `masked_image_lt` with axes ['batch', 'row',
'column', 'channel', 'mask'].
To call `crop_center` with `masked_image_lt` you'd normally have to write:
>>> reshape_lt = lt.reshape(masked_image_lt, ['channel', 'mask'], ['depth'])
>>> crop_lt = crop_center(reshape_lt)
>>> result_lt = lt.reshape(crop_lt, ['depth'],
... [masked_image_lt.axes['channel'], masked_image_lt.axes['mask']])
ReshapeCoder takes care of this renaming logic for you, allowing you to
instead write:
>>> rc = ReshapeCoder(['channel', 'mask'], ['depth'])
>>> result_lt = rc.decode(crop_center(rc.encode(masked_image_lt)))
Here, `decode` restores the original axes 'channel' and 'mask', so
`crop_center` must not have modified the size of the 'depth' axis.
"""
@tc.accepts(object, tc.Collection(str),
tc.Collection(tc.Union(str, core.AxisLike)), tc.Optional(str))
def __init__(self, existing_axis_names, new_axes, name=None):
self._name = name
self._existing_axis_names = existing_axis_names
self._new_axes = new_axes
self._existing_axes = None
@tc.returns(core.LabeledTensor)
@tc.accepts(object, core.LabeledTensorLike)
def encode(self, labeled_tensor):
"""Reshape the input to the target shape.
If called several times, the axes named in existing_axis_names must be
identical.
Args:
labeled_tensor: The input tensor.
Returns:
The input reshaped to the target shape.
Raises:
ValueError: If the axes in existing_axis_names don't match the axes of
a tensor in a previous invocation of this method.
"""
with tf_ops.name_scope(self._name, 'lt_reshape_encode',
[labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
reshape_lt = ops.reshape(labeled_tensor,
self._existing_axis_names,
self._new_axes,
name=scope)
axes = [labeled_tensor.axes[n] for n in self._existing_axis_names]
if self._existing_axes is not None and self._existing_axes != axes:
raise ValueError(
'input axes %r do not match axes from previous method call %r' %
(axes, self._existing_axes))
else:
self._existing_axes = axes
return reshape_lt
@tc.returns(core.LabeledTensor)
@tc.accepts(object, core.LabeledTensorLike)
def decode(self, labeled_tensor):
"""Reshape the input to the original shape.
This is the inverse of encode.
Encode must have been called at least once prior to this method being
called.
Args:
labeled_tensor: The input tensor.
Returns:
The input reshaped to the original shape.
Raises:
ValueError: If this method was called before encode was called.
"""
if self._existing_axes is None:
raise ValueError('decode called before encode')
with tf_ops.name_scope(self._name, 'lt_reshape_decode',
[labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
new_axis_names = [axis if isinstance(axis, string_types) else
core.as_axis(axis).name for axis in self._new_axes]
return ops.reshape(labeled_tensor,
new_axis_names,
self._existing_axes,
name=scope)
| apache-2.0 |
pombredanne/invenio | modules/bibclassify/lib/bibclassify_daemon.py | 1 | 13895 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibClassify daemon.
FIXME: the code below requires collection table to be updated to add column:
clsMETHOD_fk mediumint(9) unsigned NOT NULL,
This is not clean and should be fixed.
"""
import sys
import time
import os
from invenio.dbquery import run_sql
from invenio.bibtask import task_init, write_message, task_update_progress, \
task_set_option, task_get_option, task_sleep_now_if_required, \
task_get_task_param
from invenio.bibclassify_engine import output_keywords_for_local_file
from invenio.config import CFG_BINDIR, CFG_TMPDIR
from invenio.intbitset import intbitset
from invenio.search_engine import get_collection_reclist
from invenio.bibdocfile import BibRecDocs
from invenio.bibclassify_text_extractor import is_pdf
from invenio.config import CFG_VERSION
# Global variables used to retain the progress of the task.
_INDEX = 0
_RECIDS_NUMBER = 0
## INTERFACE
def bibclassify_daemon():
"""Constructs the BibClassify bibtask."""
task_init(authorization_action='runbibclassify',
authorization_msg="BibClassify Task Submission",
description="Extract keywords and create a BibUpload "
"task.\nExamples:\n"
" $ bibclassify\n"
" $ bibclassify -i 79 -k HEP\n"
" $ bibclassify -c 'Articles' -k HEP\n",
help_specific_usage=" -i, --recid\t\tkeywords are extracted from "
"this record\n"
" -c, --collection\t\tkeywords are extracted from this collection\n"
" -k, --taxonomy\t\tkeywords are based on that reference",
version="Invenio v%s" % CFG_VERSION,
specific_params=("i:c:k:f",
[
"recid=",
"collection=",
"taxonomy=",
"force"
]),
task_submit_elaborate_specific_parameter_fnc=
_task_submit_elaborate_specific_parameter,
task_submit_check_options_fnc=_task_submit_check_options,
task_run_fnc=_task_run_core)
## PRIVATE METHODS
def _ontology_exists(ontology_name):
"""Check if the ontology name is registered in the database."""
if run_sql("SELECT name FROM clsMETHOD WHERE name=%s",
(ontology_name,)):
return True
return False
def _collection_exists(collection_name):
"""Check if the collection name is registered in the database."""
if run_sql("SELECT name FROM collection WHERE name=%s",
(collection_name,)):
return True
return False
def _recid_exists(recid):
"""Check if the recid number is registered in the database."""
if run_sql("SELECT id FROM bibrec WHERE id=%s",
(recid,)):
return True
return False
def _get_recids_foreach_ontology(recids=None, collections=None, taxonomy=None):
"""Returns an array containing hash objects containing the
collection, its corresponding ontology and the records belonging to
the given collection."""
rec_onts = []
# User specified record IDs.
if recids:
rec_onts.append({
'ontology': taxonomy,
'collection': None,
'recIDs': recids,
})
return rec_onts
# User specified collections.
if collections:
for collection in collections:
records = get_collection_reclist(collection)
if records:
rec_onts.append({
'ontology': taxonomy,
'collection': collection,
'recIDs': records
})
return rec_onts
# Use rules found in collection_clsMETHOD.
result = run_sql("SELECT clsMETHOD.name, clsMETHOD.last_updated, "
"collection.name FROM clsMETHOD JOIN collection_clsMETHOD ON "
"clsMETHOD.id=id_clsMETHOD JOIN collection ON "
"id_collection=collection.id")
for ontology, date_last_run, collection in result:
records = get_collection_reclist(collection)
if records:
if not date_last_run:
write_message("INFO: Collection %s has not been previously "
"analyzed." % collection, stream=sys.stderr, verbose=3)
modified_records = intbitset(run_sql("SELECT id FROM bibrec"))
elif task_get_option('force'):
write_message("INFO: Analysis is forced for collection %s." %
collection, stream=sys.stderr, verbose=3)
modified_records = intbitset(run_sql("SELECT id FROM bibrec"))
else:
modified_records = intbitset(run_sql("SELECT id FROM bibrec "
"WHERE modification_date >= %s", (date_last_run, )))
records &= modified_records
if records:
rec_onts.append({
'ontology': ontology,
'collection': collection,
'recIDs': records
})
else:
write_message("WARNING: All records from collection '%s' have "
"already been analyzed for keywords with ontology '%s' "
"on %s." % (collection, ontology, date_last_run),
stream=sys.stderr, verbose=2)
else:
write_message("ERROR: Collection '%s' doesn't contain any record. "
"Cannot analyse keywords." % collection, stream=sys.stderr,
verbose=0)
return rec_onts
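# A hedged illustration of the structure returned above; the values are
# placeholders drawn from the task's own help text, not real data:
#   [{'ontology': 'HEP',
#     'collection': 'Articles',
#     'recIDs': intbitset([79])}]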
def _update_date_of_last_run(runtime):
"""Update bibclassify daemon table information about last run time."""
run_sql("UPDATE clsMETHOD SET last_updated=%s", (runtime,))
def _task_submit_elaborate_specific_parameter(key, value, opts, args):
"""Given the string key it checks it's meaning, eventually using the
value. Usually it fills some key in the options dict.
It must return True if it has elaborated the key, False, if it doesn't
know that key.
eg:
if key in ('-n', '--number'):
task_get_option(\1) = value
return True
return False
"""
# Recid option
if key in ("-i", "--recid"):
try:
value = int(value)
        except ValueError:
            write_message("ERROR: The value specified for --recid must be a "
                "valid integer, not '%s'." % value, stream=sys.stderr,
                verbose=0)
            return False
if not _recid_exists(value):
write_message("ERROR: '%s' is not a valid record ID." % value,
stream=sys.stderr, verbose=0)
return False
recids = task_get_option('recids')
if recids is None:
recids = []
recids.append(value)
task_set_option('recids', recids)
# Collection option
elif key in ("-c", "--collection"):
if not _collection_exists(value):
write_message("ERROR: '%s' is not a valid collection." % value,
stream=sys.stderr, verbose=0)
return False
collections = task_get_option("collections")
collections = collections or []
collections.append(value)
task_set_option("collections", collections)
# Taxonomy option
elif key in ("-k", "--taxonomy"):
if not _ontology_exists(value):
write_message("ERROR: '%s' is not a valid taxonomy name." % value,
stream=sys.stderr, verbose=0)
return False
task_set_option("taxonomy", value)
elif key in ("-f", "--force"):
task_set_option("force", True)
else:
return False
return True
def _task_run_core():
"""Runs anayse_documents for each ontology, collection, record ids
set."""
automated_daemon_mode_p = True
recids = task_get_option('recids')
collections = task_get_option('collections')
taxonomy = task_get_option('taxonomy')
if recids or collections:
# We want to run some records/collection only, so we are not
# in the automated daemon mode; this will be useful later.
automated_daemon_mode_p = False
# Check if the user specified which documents to extract keywords from.
if recids:
onto_recids = _get_recids_foreach_ontology(recids=recids,
taxonomy=taxonomy)
elif collections:
onto_recids = _get_recids_foreach_ontology(collections=collections,
taxonomy=taxonomy)
else:
onto_recids = _get_recids_foreach_ontology()
if not onto_recids:
# Nothing to do.
if automated_daemon_mode_p:
_update_date_of_last_run(task_get_task_param('task_starting_time'))
return 1
changes = []
changes.append('<?xml version="1.0" encoding="UTF-8"?>')
changes.append('<collection xmlns="http://www.loc.gov/MARC21/slim">')
# Count the total number of records in order to update the progression.
global _RECIDS_NUMBER
for onto_rec in onto_recids:
_RECIDS_NUMBER += len(onto_rec['recIDs'])
for onto_rec in onto_recids:
task_sleep_now_if_required(can_stop_too=False)
if onto_rec['collection'] is not None:
write_message('INFO: Applying taxonomy %s to collection %s (%s '
'records)' % (onto_rec['ontology'], onto_rec['collection'],
len(onto_rec['recIDs'])), stream=sys.stderr, verbose=3)
else:
write_message('INFO: Applying taxonomy %s to recIDs %s. ' %
(onto_rec['ontology'],
', '.join([str(recid) for recid in onto_rec['recIDs']])),
stream=sys.stderr, verbose=3)
if onto_rec['recIDs']:
changes.append(_analyze_documents(onto_rec['recIDs'],
onto_rec['ontology'], onto_rec['collection']))
changes.append('</collection>')
# Write the changes to a temporary file.
tmp_directory = "%s/bibclassify" % CFG_TMPDIR
filename = "bibclassifyd_%s.xml" % time.strftime("%Y%m%d%H%M%S",
time.localtime())
abs_path = os.path.join(tmp_directory, filename)
if not os.path.isdir(tmp_directory):
os.mkdir(tmp_directory)
file_desc = open(abs_path, "w")
file_desc.write('\n'.join(changes))
file_desc.close()
# Apply the changes.
if changes:
cmd = "%s/bibupload -n -c '%s' " % (CFG_BINDIR, abs_path)
errcode = 0
try:
errcode = os.system(cmd)
except OSError, exc:
write_message('ERROR: Command %s failed [%s].' % (cmd, exc),
stream=sys.stderr, verbose=0)
if errcode != 0:
write_message("ERROR: %s failed, error code is %d." %
(cmd, errcode), stream=sys.stderr, verbose=0)
return 0
# Update the date of last run in the clsMETHOD table, but only if
# we were running in an automated mode.
if automated_daemon_mode_p:
_update_date_of_last_run(task_get_task_param('task_starting_time'))
return 1
def _analyze_documents(records, ontology, collection):
"""For each collection, parse the documents attached to the records
in collection with the corresponding ontology."""
global _INDEX
if not records:
# No records could be found.
write_message("WARNING: No record were found in collection %s." %
collection, stream=sys.stderr, verbose=2)
return False
# Process records:
output = []
for record in records:
bibdocfiles = BibRecDocs(record).list_latest_files()
output.append('<record>')
output.append('<controlfield tag="001">%s</controlfield>' % record)
for doc in bibdocfiles:
# Get the keywords for each PDF document contained in the record.
if is_pdf(doc.get_full_path()):
write_message('INFO: Generating keywords for record %d.' %
record, stream=sys.stderr, verbose=3)
fulltext = doc.get_full_path()
output.append(output_keywords_for_local_file(fulltext,
taxonomy=ontology, output_mode="marcxml", output_limit=3,
match_mode="partial", with_author_keywords=True,
verbose=task_get_option('verbose')))
_INDEX += 1
output.append('</record>')
task_update_progress('Done %d out of %d.' % (_INDEX, _RECIDS_NUMBER))
task_sleep_now_if_required(can_stop_too=False)
return '\n'.join(output)
def _task_submit_check_options():
"""Required by bibtask. Checks the options."""
recids = task_get_option('recids')
collections = task_get_option('collections')
taxonomy = task_get_option('taxonomy')
# If a recid or a collection is specified, check that the taxonomy
# is also specified.
if (recids is not None or collections is not None) and \
taxonomy is None:
write_message("ERROR: When specifying a record ID or a collection, "
"you have to precise which\ntaxonomy to use.", stream=sys.stderr,
verbose=0)
return False
return True
# FIXME: outfiledesc can be multiple files, e.g. when processing
# 100000 records it is good to store results by 1000 records
# (see oaiharvest)
| gpl-2.0 |
Necromoshka/orderHistory | journal/journal/settings.py | 1 | 5285 | """
Django settings for journal project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'dci(y5(-ni&o9#ueipzb21-ho08xdtc8_uu3vvsgjh=-y#b2a^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["192.168.0.51","127.0.0.1"]
# Application definition
INSTALLED_APPS = [
# 'bootstrap3',
# 'demo.apps.DemoConfig',
'userMain.apps.UsermainConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'journal.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'journal.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
# Default settings
#BOOTSTRAP3 = {
# The URL to the jQuery JavaScript file
# 'jquery_url': '//code.jquery.com/jquery.min.js',
# The Bootstrap base URL
# 'base_url': '//maxcdn.bootstrapcdn.com/bootstrap/3.3.7/',
# The complete URL to the Bootstrap CSS file (None means derive it from base_url)
# 'css_url': None,
    # The complete URL to the Bootstrap theme CSS file (None means no theme)
# 'theme_url': None,
# The complete URL to the Bootstrap JavaScript file (None means derive it from base_url)
# 'javascript_url': None,
# Put JavaScript in the HEAD section of the HTML document (only relevant if you use bootstrap3.html)
# 'javascript_in_head': False,
# Include jQuery with Bootstrap JavaScript (affects django-bootstrap3 template tags)
# 'include_jquery': False,
# Label class to use in horizontal forms
# 'horizontal_label_class': 'col-md-3',
# Field class to use in horizontal forms
# 'horizontal_field_class': 'col-md-9',
# Set HTML required attribute on required fields
# 'set_required': True,
# Set HTML disabled attribute on disabled fields
# 'set_disabled': False,
# Set placeholder attributes to label if no placeholder is provided
# 'set_placeholder': True,
# Class to indicate required (better to set this in your Django form)
# 'required_css_class': '',
# Class to indicate error (better to set this in your Django form)
# 'error_css_class': 'has-error',
# Class to indicate success, meaning the field has valid input (better to set this in your Django form)
# 'success_css_class': 'has-success',
# Renderers (only set these if you have studied the source and understand the inner workings)
# 'formset_renderers':{
# 'default': 'bootstrap3.renderers.FormsetRenderer',
# },
# 'form_renderers': {
# 'default': 'bootstrap3.renderers.FormRenderer',
# },
# 'field_renderers': {
# 'default': 'bootstrap3.renderers.FieldRenderer',
# 'inline': 'bootstrap3.renderers.InlineFieldRenderer',
# },
#} | mit |
scollis/iris | lib/iris/fileformats/name_loaders.py | 1 | 31766 | # (C) British Crown Copyright 2013 - 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""NAME file format loading functions."""
import collections
import datetime
from itertools import izip
import re
import warnings
import numpy as np
from iris.coords import AuxCoord, DimCoord, CellMethod
import iris.coord_systems
import iris.cube
from iris.exceptions import TranslationError
import iris.unit
EARTH_RADIUS = 6371229.0
NAMEIII_DATETIME_FORMAT = '%d/%m/%Y %H:%M %Z'
NAMEII_FIELD_DATETIME_FORMAT = '%H%M%Z %d/%m/%Y'
NAMEII_TIMESERIES_DATETIME_FORMAT = '%d/%m/%Y %H:%M:%S'
NAMECoord = collections.namedtuple('NAMECoord', ['name',
'dimension',
'values'])
def _split_name_and_units(name):
units = None
if "(" in name and ")" in name:
split = name.rsplit("(", 1)
try_units = split[1].replace(")", "").strip()
try:
try_units = iris.unit.Unit(try_units)
except ValueError:
pass
else:
name = split[0].strip()
units = try_units
return name, units
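# A hedged example of the helper above (the heading string is an assumption):
#   _split_name_and_units('Temperature (K)') -> ('Temperature', Unit('K'))
# A heading without parseable units is returned unchanged with units=None.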
def read_header(file_handle):
"""
Return a dictionary containing the header information extracted
from the the provided NAME file object.
Args:
* file_handle (file-like object):
A file-like object from which to read the header information.
Returns:
A dictionary containing the extracted header information.
"""
header = {}
header['NAME Version'] = file_handle.next().strip()
for line in file_handle:
words = line.split(':', 1)
if len(words) != 2:
break
key, value = [word.strip() for word in words]
header[key] = value
# Cast some values into floats or integers if they match a
# given name. Set any empty string values to None.
for key, value in header.items():
if value:
if key in ['X grid origin', 'Y grid origin',
'X grid resolution', 'Y grid resolution']:
header[key] = float(value)
elif key in ['X grid size', 'Y grid size',
'Number of preliminary cols',
'Number of field cols',
'Number of fields',
'Number of series']:
header[key] = int(value)
else:
header[key] = None
return header
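# A hedged sketch of the dictionary shape read_header() produces for a
# gridded NAME file; the keys come from the code above, the values are
# placeholders:
#   {'X grid origin': -0.5, 'Y grid origin': 50.0,
#    'X grid size': 100, 'Y grid size': 100,
#    'X grid resolution': 0.1, 'Y grid resolution': 0.1, ...}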
def _read_data_arrays(file_handle, n_arrays, shape):
"""
Return a list of NumPy arrays containing the data extracted from
the provided file object. The number and shape of the arrays
must be specified.
"""
data_arrays = [np.zeros(shape, dtype=np.float32) for
i in range(n_arrays)]
# Iterate over the remaining lines which represent the data in
# a column form.
for line in file_handle:
# Split the line by comma, removing the last empty column
# caused by the trailing comma
vals = line.split(',')[:-1]
# Cast the x and y grid positions to integers and convert
# them to zero based indices
x = int(float(vals[0])) - 1
y = int(float(vals[1])) - 1
# Populate the data arrays (i.e. all columns but the leading 4).
for i, data_array in enumerate(data_arrays):
data_array[y, x] = float(vals[i + 4])
return data_arrays
def _build_lat_lon_for_NAME_field(header):
"""
Return regular latitude and longitude coordinates extracted from
the provided header dictionary.
"""
start = header['X grid origin']
step = header['X grid resolution']
count = header['X grid size']
pts = start + np.arange(count, dtype=np.float64) * step
lon = NAMECoord(name='longitude', dimension=1, values=pts)
start = header['Y grid origin']
step = header['Y grid resolution']
count = header['Y grid size']
pts = start + np.arange(count, dtype=np.float64) * step
lat = NAMECoord(name='latitude', dimension=0, values=pts)
return lat, lon
def _build_lat_lon_for_NAME_timeseries(column_headings):
"""
Return regular latitude and longitude coordinates extracted from
the provided column_headings dictionary.
"""
pattern = re.compile(r'\-?[0-9]*\.[0-9]*')
new_Xlocation_column_header = []
for t in column_headings['X']:
if 'Lat-Long' in t:
matches = pattern.search(t)
new_Xlocation_column_header.append(float(matches.group(0)))
else:
new_Xlocation_column_header.append(t)
column_headings['X'] = new_Xlocation_column_header
lon = NAMECoord(name='longitude', dimension=None,
values=column_headings['X'])
new_Ylocation_column_header = []
for t in column_headings['Y']:
if 'Lat-Long' in t:
matches = pattern.search(t)
new_Ylocation_column_header.append(float(matches.group(0)))
else:
new_Ylocation_column_header.append(t)
column_headings['Y'] = new_Ylocation_column_header
lat = NAMECoord(name='latitude', dimension=None,
values=column_headings['Y'])
return lat, lon
def _calc_integration_period(time_avgs):
"""
Return a list of datetime.timedelta objects determined from the provided
list of averaging/integration period column headings.
"""
integration_periods = []
pattern = re.compile(
r'(\d{0,2})(day)?\s*(\d{1,2})(hr)?\s*(\d{1,2})(min)?\s*(\w*)')
for time_str in time_avgs:
days = 0
hours = 0
minutes = 0
matches = pattern.search(time_str)
if matches:
if len(matches.group(1)) > 0:
days = float(matches.group(1))
if len(matches.group(3)) > 0:
hours = float(matches.group(3))
            if len(matches.group(5)) > 0:
minutes = float(matches.group(5))
total_hours = days * 24.0 + hours + minutes / 60.0
integration_periods.append(datetime.timedelta(hours=total_hours))
return integration_periods
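# A hedged example (the heading string is an assumption about NAME output):
#   _calc_integration_period(['1day 6hr 0min average'])
#   -> [datetime.timedelta(hours=30.0)]  # i.e. 1 day and 6 hours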
def _parse_units(units):
"""
    Return a known :class:`iris.unit.Unit` given a NAME unit.
    .. note::
        Some NAME units are not currently handled. The mapping below fixes:
        * Units which are in the wrong case (case is ignored in NAME)
        * Units where the space between SI units is missing
        * Units where the characters used are non-standard (i.e. 'mc' for
          micro instead of 'u')
Args:
* units (string):
NAME units.
Returns:
An instance of :class:`iris.unit.Unit`.
"""
unit_mapper = {'Risks/m3': '1', # Used for Bluetongue
'TCID50s/m3': '1', # Used for Foot and Mouth
'TCID50/m3': '1', # Used for Foot and Mouth
'N/A': '1', # Used for CHEMET area at risk
'lb': 'pounds', # pounds
'oz': '1', # ounces
'deg': 'degree', # angular degree
'oktas': '1', # oktas
'deg C': 'deg_C', # degrees Celsius
'FL': 'unknown' # flight level
}
units = unit_mapper.get(units, units)
units = units.replace('Kg', 'kg')
units = units.replace('gs', 'g s')
units = units.replace('Bqs', 'Bq s')
units = units.replace('mcBq', 'uBq')
units = units.replace('mcg', 'ug')
try:
units = iris.unit.Unit(units)
except ValueError:
warnings.warn('Unknown units: {!r}'.format(units))
units = iris.unit.Unit(None)
return units
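# Hedged examples of the unit fix-ups above:
#   _parse_units('Bqs/m3') -> Unit('Bq s/m3')
#   _parse_units('mcg')    -> Unit('ug')
# Anything still unrecognised falls back to Unit(None) with a warning.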
def _cf_height_from_name(z_coord):
"""
Parser for the z component of field headings.
This parse is specifically for handling the z component of NAME field
headings, which include height above ground level, height above sea level
and flight level etc. This function returns an iris coordinate
representing this field heading.
Args:
* z_coord (list):
A field heading, specifically the z component.
Returns:
An instance of :class:`iris.coords.AuxCoord` representing the
interpretation of the supplied field heading.
"""
# NAMEII - integer/float support.
# Match against height agl, asl and Pa.
pattern = re.compile(r'^From\s*'
'(?P<lower_bound>[0-9]+(\.[0-9]+)?)'
'\s*-\s*'
'(?P<upper_bound>[0-9]+(\.[0-9]+)?)'
'\s*(?P<type>m\s*asl|m\s*agl|Pa)'
'(?P<extra>.*)')
# Match against flight level.
pattern_fl = re.compile(r'^From\s*'
'(?P<type>FL)'
'(?P<lower_bound>[0-9]+(\.[0-9]+)?)'
'\s*-\s*FL'
'(?P<upper_bound>[0-9]+(\.[0-9]+)?)'
'(?P<extra>.*)')
# NAMEIII - integer/float support.
# Match scalar against height agl, asl, Pa, FL
pattern_scalar = re.compile(r'Z\s*=\s*'
'(?P<point>[0-9]+(\.[0-9]+)?)'
'\s*(?P<type>m\s*agl|m\s*asl|FL|Pa)'
'(?P<extra>.*)')
type_name = {'magl': 'height', 'masl': 'altitude', 'FL': 'flight_level',
'Pa': 'air_pressure'}
patterns = [pattern, pattern_fl, pattern_scalar]
units = 'no-unit'
points = z_coord
bounds = None
standard_name = None
long_name = 'z'
for pattern in patterns:
match = pattern.match(z_coord)
if match:
match = match.groupdict()
# Do not interpret if there is additional information to the match
if match['extra']:
break
units = match['type'].replace(' ', '')
name = type_name[units]
# Interpret points if present.
if 'point' in match:
points = float(match['point'])
# Interpret points from bounds.
else:
bounds = np.array([float(match['lower_bound']),
float(match['upper_bound'])])
points = bounds.sum() / 2.
long_name = None
if name == 'altitude':
units = units[0]
standard_name = name
long_name = 'altitude above sea level'
elif name == 'height':
units = units[0]
standard_name = name
long_name = 'height above ground level'
elif name == 'air_pressure':
standard_name = name
elif name == 'flight_level':
long_name = name
units = _parse_units(units)
break
coord = AuxCoord(points, units=units, standard_name=standard_name,
long_name=long_name, bounds=bounds)
return coord
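# Hedged examples of the z-heading parsing; the heading strings are
# assumptions about typical NAME output:
#   _cf_height_from_name('From 0 - 100m agl')  -> height AuxCoord with
#       points=50.0, bounds=[0.0, 100.0] and units='m'
#   _cf_height_from_name('Z = 50.00000 m agl') -> scalar height coordinate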
def _generate_cubes(header, column_headings, coords, data_arrays,
cell_methods=None):
"""
Yield :class:`iris.cube.Cube` instances given
the headers, column headings, coords and data_arrays extracted
from a NAME file.
"""
for i, data_array in enumerate(data_arrays):
# Turn the dictionary of column headings with a list of header
# information for each field into a dictionary of headings for
# just this field.
field_headings = {k: v[i] for k, v in
column_headings.iteritems()}
# Make a cube.
cube = iris.cube.Cube(data_array)
# Determine the name and units.
name = '{} {}'.format(field_headings['Species'],
field_headings['Quantity'])
name = name.upper().replace(' ', '_')
cube.rename(name)
# Some units are not in SI units, are missing spaces or typed
# in the wrong case. _parse_units returns units that are
# recognised by Iris.
cube.units = _parse_units(field_headings['Unit'])
# Define and add the singular coordinates of the field (flight
# level, time etc.)
z_coord = _cf_height_from_name(field_headings['Z'])
cube.add_aux_coord(z_coord)
# Define the time unit and use it to serialise the datetime for
# the time coordinate.
time_unit = iris.unit.Unit(
'hours since epoch', calendar=iris.unit.CALENDAR_GREGORIAN)
# Build time, latitude and longitude coordinates.
for coord in coords:
pts = coord.values
coord_sys = None
if coord.name == 'latitude' or coord.name == 'longitude':
coord_units = 'degrees'
coord_sys = iris.coord_systems.GeogCS(EARTH_RADIUS)
if coord.name == 'time':
coord_units = time_unit
pts = time_unit.date2num(coord.values)
if coord.dimension is not None:
icoord = DimCoord(points=pts,
standard_name=coord.name,
units=coord_units,
coord_system=coord_sys)
if coord.name == 'time' and 'Av or Int period' in \
field_headings:
dt = coord.values - \
field_headings['Av or Int period']
bnds = time_unit.date2num(
np.vstack((dt, coord.values)).T)
icoord.bounds = bnds
else:
icoord.guess_bounds()
cube.add_dim_coord(icoord, coord.dimension)
else:
icoord = AuxCoord(points=pts[i],
standard_name=coord.name,
coord_system=coord_sys,
units=coord_units)
if coord.name == 'time' and 'Av or Int period' in \
field_headings:
dt = coord.values - \
field_headings['Av or Int period']
bnds = time_unit.date2num(
np.vstack((dt, coord.values)).T)
icoord.bounds = bnds[i, :]
cube.add_aux_coord(icoord)
# Headings/column headings which are encoded elsewhere.
headings = ['X', 'Y', 'Z', 'Time', 'Unit', 'Av or Int period',
'X grid origin', 'Y grid origin',
'X grid size', 'Y grid size',
'X grid resolution', 'Y grid resolution', ]
# Add the Main Headings as attributes.
for key, value in header.iteritems():
if value is not None and value != '' and \
key not in headings:
cube.attributes[key] = value
# Add the Column Headings as attributes
for key, value in field_headings.iteritems():
if value is not None and value != '' and \
key not in headings:
cube.attributes[key] = value
if cell_methods is not None:
cube.add_cell_method(cell_methods[i])
yield cube
def _build_cell_methods(av_or_ints, coord):
"""
Return a list of :class:`iris.coords.CellMethod` instances
based on the provided list of column heading entries and the
associated coordinate. If a given entry does not correspond to a cell
method (e.g. "No time averaging"), a value of None is inserted.
Args:
* av_or_ints (iterable of strings):
An iterable of strings containing the colummn heading entries
to be parsed.
* coord (string or :class:`iris.coords.Coord`):
The coordinate name (or :class:`iris.coords.Coord` instance)
to which the column heading entries refer.
Returns:
A list that is the same length as `av_or_ints` containing
:class:`iris.coords.CellMethod` instances or values of None.
"""
cell_methods = []
no_avg_pattern = re.compile(r'^(no( (.* )?averaging)?)?$', re.IGNORECASE)
for av_or_int in av_or_ints:
if no_avg_pattern.search(av_or_int) is not None:
cell_method = None
elif 'average' in av_or_int or 'averaged' in av_or_int:
cell_method = CellMethod('mean', coord)
elif 'integral' in av_or_int or 'integrated' in av_or_int:
cell_method = CellMethod('sum', coord)
else:
cell_method = None
msg = 'Unknown {} statistic: {!r}. Unable to create cell method.'
warnings.warn(msg.format(coord, av_or_int))
cell_methods.append(cell_method)
return cell_methods
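# A hedged example (the heading strings are assumptions):
#   _build_cell_methods(['1hr 0min average', 'No time averaging'], 'time')
#   -> [CellMethod('mean', 'time'), None]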
def load_NAMEIII_field(filename):
"""
Load a NAME III grid output file returning a
generator of :class:`iris.cube.Cube` instances.
Args:
* filename (string):
Name of file to load.
Returns:
A generator :class:`iris.cube.Cube` instances.
"""
# Loading a file gives a generator of lines which can be progressed using
# the next() method. This will come in handy as we wish to progress
# through the file line by line.
with open(filename, 'r') as file_handle:
# Create a dictionary which can hold the header metadata about this
# file.
header = read_header(file_handle)
# Skip the next line (contains the word Fields:) in the file.
file_handle.next()
# Read the lines of column definitions.
# In this version a fixed order of column headings is assumed (and
# first 4 columns are ignored).
column_headings = {}
for column_header_name in ['Species Category', 'Name', 'Quantity',
'Species', 'Unit', 'Sources', 'Ensemble Av',
'Time Av or Int', 'Horizontal Av or Int',
'Vertical Av or Int', 'Prob Perc',
'Prob Perc Ens', 'Prob Perc Time',
'Time', 'Z', 'D']:
cols = [col.strip() for col in file_handle.next().split(',')]
column_headings[column_header_name] = cols[4:-1]
# Convert the time to python datetimes.
new_time_column_header = []
for i, t in enumerate(column_headings['Time']):
dt = datetime.datetime.strptime(t, NAMEIII_DATETIME_FORMAT)
new_time_column_header.append(dt)
column_headings['Time'] = new_time_column_header
# Convert averaging/integrating period to timedeltas.
column_headings['Av or Int period'] = _calc_integration_period(
column_headings['Time Av or Int'])
# Build a time coordinate.
tdim = NAMECoord(name='time', dimension=None,
values=np.array(column_headings['Time']))
cell_methods = _build_cell_methods(column_headings['Time Av or Int'],
tdim.name)
# Build regular latitude and longitude coordinates.
lat, lon = _build_lat_lon_for_NAME_field(header)
coords = [lon, lat, tdim]
# Skip the line after the column headings.
file_handle.next()
# Create data arrays to hold the data for each column.
n_arrays = header['Number of field cols']
shape = (header['Y grid size'], header['X grid size'])
data_arrays = _read_data_arrays(file_handle, n_arrays, shape)
return _generate_cubes(header, column_headings, coords, data_arrays,
cell_methods)
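# A hedged usage sketch (the file name below is a placeholder, not from the
# source):
#   cubes = list(load_NAMEIII_field('Fields_grid1.txt'))
#   print cubes[0]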
def load_NAMEII_field(filename):
"""
Load a NAME II grid output file returning a
generator of :class:`iris.cube.Cube` instances.
Args:
* filename (string):
Name of file to load.
Returns:
A generator :class:`iris.cube.Cube` instances.
"""
with open(filename, 'r') as file_handle:
# Create a dictionary which can hold the header metadata about this
# file.
header = read_header(file_handle)
# Origin in namever=2 format is bottom-left hand corner so alter this
# to centre of a grid box
header['X grid origin'] = header['X grid origin'] + \
header['X grid resolution'] / 2
header['Y grid origin'] = header['Y grid origin'] + \
header['Y grid resolution'] / 2
# Read the lines of column definitions.
# In this version a fixed order of column headings is assumed (and
# first 4 columns are ignored).
column_headings = {}
for column_header_name in ['Species Category', 'Species',
'Time Av or Int', 'Quantity',
'Unit', 'Z', 'Time']:
cols = [col.strip() for col in file_handle.next().split(',')]
column_headings[column_header_name] = cols[4:-1]
# Convert the time to python datetimes
new_time_column_header = []
for i, t in enumerate(column_headings['Time']):
dt = datetime.datetime.strptime(t, NAMEII_FIELD_DATETIME_FORMAT)
new_time_column_header.append(dt)
column_headings['Time'] = new_time_column_header
# Convert averaging/integrating period to timedeltas.
pattern = re.compile(r'\s*(\d{3})\s*(hr)?\s*(time)\s*(\w*)')
column_headings['Av or Int period'] = []
for i, t in enumerate(column_headings['Time Av or Int']):
matches = pattern.search(t)
hours = 0
if matches:
if len(matches.group(1)) > 0:
hours = float(matches.group(1))
column_headings['Av or Int period'].append(
datetime.timedelta(hours=hours))
# Build a time coordinate.
tdim = NAMECoord(name='time', dimension=None,
values=np.array(column_headings['Time']))
cell_methods = _build_cell_methods(column_headings['Time Av or Int'],
tdim.name)
# Build regular latitude and longitude coordinates.
lat, lon = _build_lat_lon_for_NAME_field(header)
coords = [lon, lat, tdim]
# Skip the blank line after the column headings.
file_handle.next()
# Create data arrays to hold the data for each column.
n_arrays = header['Number of fields']
shape = (header['Y grid size'], header['X grid size'])
data_arrays = _read_data_arrays(file_handle, n_arrays, shape)
return _generate_cubes(header, column_headings, coords, data_arrays,
cell_methods)
def load_NAMEIII_timeseries(filename):
"""
Load a NAME III time series file returning a
generator of :class:`iris.cube.Cube` instances.
Args:
* filename (string):
Name of file to load.
Returns:
A generator :class:`iris.cube.Cube` instances.
"""
with open(filename, 'r') as file_handle:
# Create a dictionary which can hold the header metadata about this
# file.
header = read_header(file_handle)
# skip the next line (contains the word Fields:) in the file.
file_handle.next()
# Read the lines of column definitions - currently hardwired
column_headings = {}
for column_header_name in ['Species Category', 'Name', 'Quantity',
'Species', 'Unit', 'Sources', 'Ens Av',
'Time Av or Int', 'Horizontal Av or Int',
'Vertical Av or Int', 'Prob Perc',
'Prob Perc Ens', 'Prob Perc Time',
'Location', 'X', 'Y', 'Z', 'D']:
cols = [col.strip() for col in file_handle.next().split(',')]
column_headings[column_header_name] = cols[1:-1]
# Determine the coordinates of the data and store in namedtuples.
# Extract latitude and longitude information from X, Y location
# headings.
lat, lon = _build_lat_lon_for_NAME_timeseries(column_headings)
# Convert averaging/integrating period to timedeltas.
column_headings['Av or Int period'] = _calc_integration_period(
column_headings['Time Av or Int'])
# Skip the line after the column headings.
file_handle.next()
# Make a list of data lists to hold the data for each column.
data_lists = [[] for i in range(header['Number of field cols'])]
time_list = []
# Iterate over the remaining lines which represent the data in a
# column form.
for line in file_handle:
# Split the line by comma, removing the last empty column caused
# by the trailing comma.
vals = line.split(',')[:-1]
# Time is stored in the first column.
t = vals[0].strip()
dt = datetime.datetime.strptime(t, NAMEIII_DATETIME_FORMAT)
time_list.append(dt)
# Populate the data arrays.
for i, data_list in enumerate(data_lists):
data_list.append(float(vals[i + 1]))
data_arrays = [np.array(l) for l in data_lists]
time_array = np.array(time_list)
tdim = NAMECoord(name='time', dimension=0, values=time_array)
coords = [lon, lat, tdim]
return _generate_cubes(header, column_headings, coords, data_arrays)
def load_NAMEII_timeseries(filename):
"""
Load a NAME II Time Series file returning a
generator of :class:`iris.cube.Cube` instances.
Args:
* filename (string):
Name of file to load.
Returns:
A generator :class:`iris.cube.Cube` instances.
"""
with open(filename, 'r') as file_handle:
# Create a dictionary which can hold the header metadata about this
# file.
header = read_header(file_handle)
# Read the lines of column definitions.
column_headings = {}
for column_header_name in ['Y', 'X', 'Location',
'Species Category', 'Species',
'Quantity', 'Z', 'Unit']:
cols = [col.strip() for col in file_handle.next().split(',')]
column_headings[column_header_name] = cols[1:-1]
# Determine the coordinates of the data and store in namedtuples.
# Extract latitude and longitude information from X, Y location
# headings.
lat, lon = _build_lat_lon_for_NAME_timeseries(column_headings)
# Skip the blank line after the column headings.
file_handle.next()
# Make a list of data arrays to hold the data for each column.
data_lists = [[] for i in range(header['Number of series'])]
time_list = []
# Iterate over the remaining lines which represent the data in a
# column form.
for line in file_handle:
# Split the line by comma, removing the last empty column caused
# by the trailing comma.
vals = line.split(',')[:-1]
# Time is stored in the first two columns.
t = (vals[0].strip() + ' ' + vals[1].strip())
dt = datetime.datetime.strptime(
t, NAMEII_TIMESERIES_DATETIME_FORMAT)
time_list.append(dt)
# Populate the data arrays.
for i, data_list in enumerate(data_lists):
data_list.append(float(vals[i + 2]))
data_arrays = [np.array(l) for l in data_lists]
time_array = np.array(time_list)
tdim = NAMECoord(name='time', dimension=0, values=time_array)
coords = [lon, lat, tdim]
return _generate_cubes(header, column_headings, coords, data_arrays)
def load_NAMEIII_trajectory(filename):
"""
Load a NAME III trajectory file returning a
generator of :class:`iris.cube.Cube` instances.
Args:
* filename (string):
Name of file to load.
Returns:
A generator :class:`iris.cube.Cube` instances.
"""
time_unit = iris.unit.Unit('hours since epoch',
calendar=iris.unit.CALENDAR_GREGORIAN)
with open(filename, 'r') as infile:
header = read_header(infile)
# read the column headings
for line in infile:
if line.startswith(" "):
break
headings = [heading.strip() for heading in line.split(",")]
# read the columns
columns = [[] for i in range(len(headings))]
for line in infile:
values = [v.strip() for v in line.split(",")]
for c, v in enumerate(values):
if "UTC" in v:
v = v.replace(":00 ", " ") # Strip out milliseconds.
v = datetime.datetime.strptime(v, NAMEIII_DATETIME_FORMAT)
else:
try:
v = float(v)
except ValueError:
pass
columns[c].append(v)
# Where's the Z column?
z_column = None
for i, heading in enumerate(headings):
if heading.startswith("Z "):
z_column = i
break
if z_column is None:
raise iris.exceptions.TranslationError("Expected a Z column")
# Every column up to Z becomes a coordinate.
coords = []
for name, values in izip(headings[:z_column+1], columns[:z_column+1]):
values = np.array(values)
if np.all(np.array(values) == values[0]):
values = [values[0]]
standard_name = long_name = units = None
if isinstance(values[0], datetime.datetime):
values = time_unit.date2num(values)
units = time_unit
if name == "Time":
name = "time"
elif " (Lat-Long)" in name:
if name.startswith("X"):
name = "longitude"
elif name.startswith("Y"):
name = "latitude"
units = "degrees"
elif name == "Z (m asl)":
name = "height"
units = "m"
long_name = "height above sea level"
try:
coord = DimCoord(values, units=units)
except ValueError:
coord = AuxCoord(values, units=units)
coord.rename(name)
if coord.long_name is None and long_name is not None:
coord.long_name = long_name
coords.append(coord)
# Every numerical column after the Z becomes a cube.
for name, values in izip(headings[z_column+1:], columns[z_column+1:]):
try:
float(values[0])
except ValueError:
continue
# units embedded in column heading?
name, units = _split_name_and_units(name)
cube = iris.cube.Cube(values, units=units)
cube.rename(name)
for coord in coords:
dim = 0 if len(coord.points) > 1 else None
if isinstance(coord, DimCoord) and coord.name() == "time":
cube.add_dim_coord(coord.copy(), dim)
else:
cube.add_aux_coord(coord.copy(), dim)
yield cube
| gpl-3.0 |
dav1x/ansible | lib/ansible/utils/module_docs_fragments/junos.py | 101 | 2910 | #
# (c) 2015, Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
provider:
description:
- A dict object containing connection details.
default: null
suboptions:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
required: true
port:
description:
        - Specifies the port to use when building the connection to the remote
          device. The port value will default to the well known SSH port
          of 22 (for C(transport=cli)) or port 830 (for C(transport=netconf)).
default: 22
username:
description:
- Configures the username to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
password:
description:
- Specifies the password to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
default: null
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is
exceeded before the operation is completed, the module will error.
default: 10
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to
the remote device. This value is the path to the key
used to authenticate the SSH session. If the value is not specified in
the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
will be used instead.
"""
| gpl-3.0 |
mintcloud/deep-learning | face_generation/helper.py | 160 | 8114 | import math
import os
import hashlib
from urllib.request import urlretrieve
import zipfile
import gzip
import shutil
import numpy as np
from PIL import Image
from tqdm import tqdm
def _read32(bytestream):
"""
Read 32-bit integer from bytesteam
:param bytestream: A bytestream
:return: 32-bit integer
"""
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
def _unzip(save_path, _, database_name, data_path):
"""
Unzip wrapper with the same interface as _ungzip
    :param save_path: The path of the zip file
    :param database_name: Name of database
    :param data_path: Path to extract to
    :param _: HACK - Used to have the same interface as _ungzip
"""
print('Extracting {}...'.format(database_name))
with zipfile.ZipFile(save_path) as zf:
zf.extractall(data_path)
def _ungzip(save_path, extract_path, database_name, _):
"""
Unzip a gzip file and extract it to extract_path
:param save_path: The path of the gzip files
:param extract_path: The location to extract the data to
:param database_name: Name of database
    :param _: HACK - Used to have the same interface as _unzip
"""
# Get data from save_path
with open(save_path, 'rb') as f:
with gzip.GzipFile(fileobj=f) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError('Invalid magic number {} in file: {}'.format(magic, f.name))
num_images = _read32(bytestream)
rows = _read32(bytestream)
cols = _read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
data = np.frombuffer(buf, dtype=np.uint8)
data = data.reshape(num_images, rows, cols)
# Save data to extract_path
for image_i, image in enumerate(
tqdm(data, unit='File', unit_scale=True, miniters=1, desc='Extracting {}'.format(database_name))):
Image.fromarray(image, 'L').save(os.path.join(extract_path, 'image_{}.jpg'.format(image_i)))
def get_image(image_path, width, height, mode):
"""
Read image from image_path
:param image_path: Path of image
:param width: Width of image
:param height: Height of image
:param mode: Mode of image
:return: Image data
"""
image = Image.open(image_path)
if image.size != (width, height): # HACK - Check if image is from the CELEBA dataset
# Remove most pixels that aren't part of a face
face_width = face_height = 108
j = (image.size[0] - face_width) // 2
i = (image.size[1] - face_height) // 2
image = image.crop([j, i, j + face_width, i + face_height])
image = image.resize([width, height], Image.BILINEAR)
return np.array(image.convert(mode))
def get_batch(image_files, width, height, mode):
    """Read a batch of image files into a single 4-D float32 array."""
data_batch = np.array(
[get_image(sample_file, width, height, mode) for sample_file in image_files]).astype(np.float32)
# Make sure the images are in 4 dimensions
if len(data_batch.shape) < 4:
data_batch = data_batch.reshape(data_batch.shape + (1,))
return data_batch
def images_square_grid(images, mode):
"""
Save images as a square grid
:param images: Images to be used for the grid
:param mode: The mode to use for images
:return: Image of images in a square grid
"""
# Get maximum size for square grid of images
save_size = math.floor(np.sqrt(images.shape[0]))
# Scale to 0-255
images = (((images - images.min()) * 255) / (images.max() - images.min())).astype(np.uint8)
# Put images in a square arrangement
images_in_square = np.reshape(
images[:save_size*save_size],
(save_size, save_size, images.shape[1], images.shape[2], images.shape[3]))
if mode == 'L':
images_in_square = np.squeeze(images_in_square, 4)
# Combine images to grid image
new_im = Image.new(mode, (images.shape[1] * save_size, images.shape[2] * save_size))
for col_i, col_images in enumerate(images_in_square):
for image_i, image in enumerate(col_images):
im = Image.fromarray(image, mode)
new_im.paste(im, (col_i * images.shape[1], image_i * images.shape[2]))
return new_im
def download_extract(database_name, data_path):
"""
Download and extract database
:param database_name: Database name
"""
DATASET_CELEBA_NAME = 'celeba'
DATASET_MNIST_NAME = 'mnist'
if database_name == DATASET_CELEBA_NAME:
url = 'https://s3-us-west-1.amazonaws.com/udacity-dlnfd/datasets/celeba.zip'
hash_code = '00d2c5bc6d35e252742224ab0c1e8fcb'
extract_path = os.path.join(data_path, 'img_align_celeba')
save_path = os.path.join(data_path, 'celeba.zip')
extract_fn = _unzip
elif database_name == DATASET_MNIST_NAME:
url = 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz'
hash_code = 'f68b3c2dcbeaaa9fbdd348bbdeb94873'
extract_path = os.path.join(data_path, 'mnist')
save_path = os.path.join(data_path, 'train-images-idx3-ubyte.gz')
extract_fn = _ungzip
if os.path.exists(extract_path):
print('Found {} Data'.format(database_name))
return
if not os.path.exists(data_path):
os.makedirs(data_path)
if not os.path.exists(save_path):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Downloading {}'.format(database_name)) as pbar:
urlretrieve(
url,
save_path,
pbar.hook)
assert hashlib.md5(open(save_path, 'rb').read()).hexdigest() == hash_code, \
'{} file is corrupted. Remove the file and try again.'.format(save_path)
os.makedirs(extract_path)
try:
extract_fn(save_path, extract_path, database_name, data_path)
except Exception as err:
shutil.rmtree(extract_path) # Remove extraction folder if there is an error
raise err
# Remove compressed data
os.remove(save_path)
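# A hedged usage sketch (the data directory is a placeholder):
#   download_extract('mnist', './data')
# This downloads train-images-idx3-ubyte.gz, verifies its MD5 hash and
# extracts the images to ./data/mnist as jpg files (see _ungzip above).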
class Dataset(object):
"""
Dataset
"""
def __init__(self, dataset_name, data_files):
"""
        Initialize the class
:param dataset_name: Database name
:param data_files: List of files in the database
"""
DATASET_CELEBA_NAME = 'celeba'
DATASET_MNIST_NAME = 'mnist'
IMAGE_WIDTH = 28
IMAGE_HEIGHT = 28
if dataset_name == DATASET_CELEBA_NAME:
self.image_mode = 'RGB'
image_channels = 3
elif dataset_name == DATASET_MNIST_NAME:
self.image_mode = 'L'
image_channels = 1
self.data_files = data_files
self.shape = len(data_files), IMAGE_WIDTH, IMAGE_HEIGHT, image_channels
def get_batches(self, batch_size):
"""
Generate batches
:param batch_size: Batch Size
:return: Batches of data
"""
IMAGE_MAX_VALUE = 255
current_index = 0
while current_index + batch_size <= self.shape[0]:
data_batch = get_batch(
self.data_files[current_index:current_index + batch_size],
*self.shape[1:3],
self.image_mode)
current_index += batch_size
yield data_batch / IMAGE_MAX_VALUE - 0.5
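# A hedged usage sketch (the glob pattern is a placeholder):
#   from glob import glob
#   dataset = Dataset('mnist', glob('./data/mnist/*.jpg'))
#   for batch in dataset.get_batches(64):
#       pass  # each batch is scaled to the range [-0.5, 0.5)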
class DLProgress(tqdm):
"""
Handle Progress Bar while Downloading
"""
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
"""
A hook function that will be called once on establishment of the network connection and
once after each block read thereafter.
:param block_num: A count of blocks transferred so far
:param block_size: Block size in bytes
:param total_size: The total size of the file. This may be -1 on older FTP servers which do not return
a file size in response to a retrieval request.
"""
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
| mit |
guorendong/iridium-browser-ubuntu | tools/telemetry/telemetry/unittest_util/page_test_test_case.py | 5 | 3967 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provide a TestCase base class for PageTest subclasses' unittests."""
import unittest
from telemetry import benchmark
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.internal import story_runner
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
from telemetry.page import page_test
from telemetry.page import test_expectations
from telemetry.results import results_options
from telemetry.unittest_util import options_for_unittests
class BasicTestPage(page_module.Page):
def __init__(self, url, page_set, base_dir):
super(BasicTestPage, self).__init__(url, page_set, base_dir)
def RunPageInteractions(self, action_runner):
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPage()
class EmptyMetadataForTest(benchmark.BenchmarkMetadata):
def __init__(self):
super(EmptyMetadataForTest, self).__init__('')
class PageTestTestCase(unittest.TestCase):
"""A base class to simplify writing unit tests for PageTest subclasses."""
def CreatePageSetFromFileInUnittestDataDir(self, test_filename):
ps = self.CreateEmptyPageSet()
page = BasicTestPage('file://' + test_filename, ps, base_dir=ps.base_dir)
ps.AddUserStory(page)
return ps
def CreateEmptyPageSet(self):
base_dir = util.GetUnittestDataDir()
ps = page_set_module.PageSet(file_path=base_dir)
return ps
def RunMeasurement(self, measurement, ps,
expectations=test_expectations.TestExpectations(),
options=None):
"""Runs a measurement against a pageset, returning the rows its outputs."""
if options is None:
options = options_for_unittests.GetCopy()
assert options
temp_parser = options.CreateParser()
story_runner.AddCommandLineArgs(temp_parser)
defaults = temp_parser.get_default_values()
for k, v in defaults.__dict__.items():
if hasattr(options, k):
continue
setattr(options, k, v)
measurement.CustomizeBrowserOptions(options.browser_options)
options.output_file = None
options.output_formats = ['none']
options.suppress_gtest_report = True
options.output_trace_tag = None
story_runner.ProcessCommandLineArgs(temp_parser, options)
results = results_options.CreateResults(EmptyMetadataForTest(), options)
story_runner.Run(measurement, ps, expectations, options, results)
return results
def TestTracingCleanedUp(self, measurement_class, options=None):
ps = self.CreatePageSetFromFileInUnittestDataDir('blank.html')
start_tracing_called = [False]
stop_tracing_called = [False]
class BuggyMeasurement(measurement_class):
def __init__(self, *args, **kwargs):
measurement_class.__init__(self, *args, **kwargs)
# Inject fake tracing methods to tracing_controller
def TabForPage(self, page, browser):
ActualStartTracing = browser.platform.tracing_controller.Start
def FakeStartTracing(*args, **kwargs):
ActualStartTracing(*args, **kwargs)
start_tracing_called[0] = True
raise exceptions.IntentionalException
browser.StartTracing = FakeStartTracing
ActualStopTracing = browser.platform.tracing_controller.Stop
def FakeStopTracing(*args, **kwargs):
result = ActualStopTracing(*args, **kwargs)
stop_tracing_called[0] = True
return result
browser.platform.tracing_controller.Stop = FakeStopTracing
return measurement_class.TabForPage(self, page, browser)
measurement = BuggyMeasurement()
try:
self.RunMeasurement(measurement, ps, options=options)
except page_test.TestNotSupportedOnPlatformError:
pass
if start_tracing_called[0]:
self.assertTrue(stop_tracing_called[0])
| bsd-3-clause |
fhaoquan/kbengine | kbe/src/lib/python/Lib/gettext.py | 90 | 17661 | """Internationalization and localization support.
This module provides internationalization (I18N) and localization (L10N)
support for your Python programs by providing an interface to the GNU gettext
message catalog library.
I18N refers to the operation by which a program is made aware of multiple
languages. L10N refers to the adaptation of your program, once
internationalized, to the local language and cultural habits.
"""
# This module represents the integration of work, contributions, feedback, and
# suggestions from the following people:
#
# Martin von Loewis, who wrote the initial implementation of the underlying
# C-based libintlmodule (later renamed _gettext), along with a skeletal
# gettext.py implementation.
#
# Peter Funk, who wrote fintl.py, a fairly complete wrapper around intlmodule,
# which also included a pure-Python implementation to read .mo files if
# intlmodule wasn't available.
#
# James Henstridge, who also wrote a gettext.py module, which has some
# interesting, but currently unsupported experimental features: the notion of
# a Catalog class and instances, and the ability to add to a catalog file via
# a Python API.
#
# Barry Warsaw integrated these modules, wrote the .install() API and code,
# and conformed all C and Python code to Python's coding standards.
#
# Francois Pinard and Marc-Andre Lemburg also contributed valuably to this
# module.
#
# J. David Ibanez implemented plural forms. Bruno Haible fixed some bugs.
#
# TODO:
# - Lazy loading of .mo files. Currently the entire catalog is loaded into
# memory, but that's probably bad for large translated programs. Instead,
# the lexical sort of original strings in GNU .mo files should be exploited
# to do binary searches and lazy initializations. Or you might want to use
# the undocumented double-hash algorithm for .mo files with hash tables, but
# you'll need to study the GNU gettext code to do this.
#
# - Support Solaris .mo file formats. Unfortunately, we've been unable to
# find this format documented anywhere.
import locale, copy, io, os, re, struct, sys
from errno import ENOENT
__all__ = ['NullTranslations', 'GNUTranslations', 'Catalog',
'find', 'translation', 'install', 'textdomain', 'bindtextdomain',
'dgettext', 'dngettext', 'gettext', 'ngettext',
]
_default_localedir = os.path.join(sys.base_prefix, 'share', 'locale')
def c2py(plural):
"""Gets a C expression as used in PO files for plural forms and returns a
Python lambda function that implements an equivalent expression.
"""
# Security check, allow only the "n" identifier
import token, tokenize
tokens = tokenize.generate_tokens(io.StringIO(plural).readline)
try:
danger = [x for x in tokens if x[0] == token.NAME and x[1] != 'n']
except tokenize.TokenError:
raise ValueError('plural forms expression error, maybe unbalanced parenthesis')
else:
if danger:
raise ValueError('plural forms expression could be dangerous')
# Replace some C operators by their Python equivalents
plural = plural.replace('&&', ' and ')
plural = plural.replace('||', ' or ')
expr = re.compile(r'\!([^=])')
plural = expr.sub(' not \\1', plural)
# Regular expression and replacement function used to transform
# "a?b:c" to "b if a else c".
expr = re.compile(r'(.*?)\?(.*?):(.*)')
def repl(x):
return "(%s if %s else %s)" % (x.group(2), x.group(1),
expr.sub(repl, x.group(3)))
# Code to transform the plural expression, taking care of parentheses
stack = ['']
for c in plural:
if c == '(':
stack.append('')
elif c == ')':
if len(stack) == 1:
# Actually, we never reach this code, because unbalanced
# parentheses get caught in the security check at the
# beginning.
raise ValueError('unbalanced parenthesis in plural form')
s = expr.sub(repl, stack.pop())
stack[-1] += '(%s)' % s
else:
stack[-1] += c
plural = expr.sub(repl, stack.pop())
return eval('lambda n: int(%s)' % plural)
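# A hedged example of c2py in action (the expression is the common germanic
# plural rule used as the default below):
#   f = c2py('n != 1')
#   f(1) -> 0, f(2) -> 1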
def _expand_lang(loc):
loc = locale.normalize(loc)
COMPONENT_CODESET = 1 << 0
COMPONENT_TERRITORY = 1 << 1
COMPONENT_MODIFIER = 1 << 2
# split up the locale into its base components
mask = 0
pos = loc.find('@')
if pos >= 0:
modifier = loc[pos:]
loc = loc[:pos]
mask |= COMPONENT_MODIFIER
else:
modifier = ''
pos = loc.find('.')
if pos >= 0:
codeset = loc[pos:]
loc = loc[:pos]
mask |= COMPONENT_CODESET
else:
codeset = ''
pos = loc.find('_')
if pos >= 0:
territory = loc[pos:]
loc = loc[:pos]
mask |= COMPONENT_TERRITORY
else:
territory = ''
language = loc
ret = []
for i in range(mask+1):
if not (i & ~mask): # if all components for this combo exist ...
val = language
if i & COMPONENT_TERRITORY: val += territory
if i & COMPONENT_CODESET: val += codeset
if i & COMPONENT_MODIFIER: val += modifier
ret.append(val)
ret.reverse()
return ret
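# A hedged illustration; the exact output depends on locale.normalize():
#   _expand_lang('de_DE.UTF-8')
#   -> ['de_DE.UTF-8', 'de_DE', 'de.UTF-8', 'de']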
class NullTranslations:
def __init__(self, fp=None):
self._info = {}
self._charset = None
self._output_charset = None
self._fallback = None
if fp is not None:
self._parse(fp)
def _parse(self, fp):
pass
def add_fallback(self, fallback):
if self._fallback:
self._fallback.add_fallback(fallback)
else:
self._fallback = fallback
def gettext(self, message):
if self._fallback:
return self._fallback.gettext(message)
return message
def lgettext(self, message):
if self._fallback:
return self._fallback.lgettext(message)
return message
def ngettext(self, msgid1, msgid2, n):
if self._fallback:
return self._fallback.ngettext(msgid1, msgid2, n)
if n == 1:
return msgid1
else:
return msgid2
def lngettext(self, msgid1, msgid2, n):
if self._fallback:
return self._fallback.lngettext(msgid1, msgid2, n)
if n == 1:
return msgid1
else:
return msgid2
def info(self):
return self._info
def charset(self):
return self._charset
def output_charset(self):
return self._output_charset
def set_output_charset(self, charset):
self._output_charset = charset
def install(self, names=None):
import builtins
builtins.__dict__['_'] = self.gettext
if hasattr(names, "__contains__"):
if "gettext" in names:
builtins.__dict__['gettext'] = builtins.__dict__['_']
if "ngettext" in names:
builtins.__dict__['ngettext'] = self.ngettext
if "lgettext" in names:
builtins.__dict__['lgettext'] = self.lgettext
if "lngettext" in names:
builtins.__dict__['lngettext'] = self.lngettext
class GNUTranslations(NullTranslations):
# Magic number of .mo files
LE_MAGIC = 0x950412de
BE_MAGIC = 0xde120495
def _parse(self, fp):
"""Override this method to support alternative .mo formats."""
unpack = struct.unpack
filename = getattr(fp, 'name', '')
# Parse the .mo file header, which consists of 5 little endian 32
# bit words.
self._catalog = catalog = {}
self.plural = lambda n: int(n != 1) # germanic plural by default
buf = fp.read()
buflen = len(buf)
# Are we big endian or little endian?
magic = unpack('<I', buf[:4])[0]
if magic == self.LE_MAGIC:
version, msgcount, masteridx, transidx = unpack('<4I', buf[4:20])
ii = '<II'
elif magic == self.BE_MAGIC:
version, msgcount, masteridx, transidx = unpack('>4I', buf[4:20])
ii = '>II'
else:
raise OSError(0, 'Bad magic number', filename)
# Now put all messages from the .mo file buffer into the catalog
# dictionary.
for i in range(0, msgcount):
mlen, moff = unpack(ii, buf[masteridx:masteridx+8])
mend = moff + mlen
tlen, toff = unpack(ii, buf[transidx:transidx+8])
tend = toff + tlen
if mend < buflen and tend < buflen:
msg = buf[moff:mend]
tmsg = buf[toff:tend]
else:
raise OSError(0, 'File is corrupt', filename)
# See if we're looking at GNU .mo conventions for metadata
if mlen == 0:
# Catalog description
lastk = k = None
for b_item in tmsg.split('\n'.encode("ascii")):
item = b_item.decode().strip()
if not item:
continue
if ':' in item:
k, v = item.split(':', 1)
k = k.strip().lower()
v = v.strip()
self._info[k] = v
lastk = k
elif lastk:
self._info[lastk] += '\n' + item
if k == 'content-type':
self._charset = v.split('charset=')[1]
elif k == 'plural-forms':
v = v.split(';')
plural = v[1].split('plural=')[1]
self.plural = c2py(plural)
# Note: we unconditionally convert both msgids and msgstrs to
# Unicode using the character encoding specified in the charset
# parameter of the Content-Type header. The gettext documentation
# strongly encourages msgids to be us-ascii, but some applications
# require alternative encodings (e.g. Zope's ZCML and ZPT). For
# traditional gettext applications, the msgid conversion will
# cause no problems since us-ascii should always be a subset of
# the charset encoding. We may want to fall back to 8-bit msgids
# if the Unicode conversion fails.
charset = self._charset or 'ascii'
if b'\x00' in msg:
# Plural forms
msgid1, msgid2 = msg.split(b'\x00')
tmsg = tmsg.split(b'\x00')
msgid1 = str(msgid1, charset)
for i, x in enumerate(tmsg):
catalog[(msgid1, i)] = str(x, charset)
else:
catalog[str(msg, charset)] = str(tmsg, charset)
# advance to next entry in the seek tables
masteridx += 8
transidx += 8
def lgettext(self, message):
missing = object()
tmsg = self._catalog.get(message, missing)
if tmsg is missing:
if self._fallback:
return self._fallback.lgettext(message)
return message
if self._output_charset:
return tmsg.encode(self._output_charset)
return tmsg.encode(locale.getpreferredencoding())
def lngettext(self, msgid1, msgid2, n):
try:
tmsg = self._catalog[(msgid1, self.plural(n))]
if self._output_charset:
return tmsg.encode(self._output_charset)
return tmsg.encode(locale.getpreferredencoding())
except KeyError:
if self._fallback:
return self._fallback.lngettext(msgid1, msgid2, n)
if n == 1:
return msgid1
else:
return msgid2
def gettext(self, message):
missing = object()
tmsg = self._catalog.get(message, missing)
if tmsg is missing:
if self._fallback:
return self._fallback.gettext(message)
return message
return tmsg
def ngettext(self, msgid1, msgid2, n):
try:
tmsg = self._catalog[(msgid1, self.plural(n))]
except KeyError:
if self._fallback:
return self._fallback.ngettext(msgid1, msgid2, n)
if n == 1:
tmsg = msgid1
else:
tmsg = msgid2
return tmsg
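# Usage sketch (illustrative; the path is hypothetical): a compiled catalog
# can be parsed directly from an open binary file object:
#
#     with open('locale/de/LC_MESSAGES/myapp.mo', 'rb') as fp:
#         t = GNUTranslations(fp)
#     t.gettext('Hello World')   # looked up in the parsed catalog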
# Locate a .mo file using the gettext strategy
def find(domain, localedir=None, languages=None, all=False):
# Get some reasonable defaults for arguments that were not supplied
if localedir is None:
localedir = _default_localedir
if languages is None:
languages = []
for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
val = os.environ.get(envar)
if val:
languages = val.split(':')
break
if 'C' not in languages:
languages.append('C')
# now normalize and expand the languages
nelangs = []
for lang in languages:
for nelang in _expand_lang(lang):
if nelang not in nelangs:
nelangs.append(nelang)
# select a language
if all:
result = []
else:
result = None
for lang in nelangs:
if lang == 'C':
break
mofile = os.path.join(localedir, lang, 'LC_MESSAGES', '%s.mo' % domain)
if os.path.exists(mofile):
if all:
result.append(mofile)
else:
return mofile
return result
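# Usage sketch (illustrative; domain and paths are hypothetical):
#
#     find('myapp', localedir='locale', languages=['de_DE'])
#     # -> the first existing candidate, e.g. 'locale/de_DE/LC_MESSAGES/myapp.mo'
#     #    or a 'de' fallback; None when nothing matches (a list when all=True)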
# a mapping between absolute .mo file path and Translation object
_translations = {}
def translation(domain, localedir=None, languages=None,
class_=None, fallback=False, codeset=None):
if class_ is None:
class_ = GNUTranslations
mofiles = find(domain, localedir, languages, all=True)
if not mofiles:
if fallback:
return NullTranslations()
raise OSError(ENOENT, 'No translation file found for domain', domain)
# Avoid opening, reading, and parsing the .mo file after it's been done
# once.
result = None
for mofile in mofiles:
key = (class_, os.path.abspath(mofile))
t = _translations.get(key)
if t is None:
with open(mofile, 'rb') as fp:
t = _translations.setdefault(key, class_(fp))
# Copy the translation object to allow setting fallbacks and
# output charset. All other instance data is shared with the
# cached object.
t = copy.copy(t)
if codeset:
t.set_output_charset(codeset)
if result is None:
result = t
else:
result.add_fallback(t)
return result
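# Usage sketch (illustrative; domain and directory are hypothetical):
#
#     t = translation('myapp', localedir='locale', fallback=True)
#     _ = t.gettext
#     print(_('Hello World'))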
def install(domain, localedir=None, codeset=None, names=None):
t = translation(domain, localedir, fallback=True, codeset=codeset)
t.install(names)
# a mapping b/w domains and locale directories
_localedirs = {}
# a mapping b/w domains and codesets
_localecodesets = {}
# current global domain, `messages' used for compatibility w/ GNU gettext
_current_domain = 'messages'
def textdomain(domain=None):
global _current_domain
if domain is not None:
_current_domain = domain
return _current_domain
def bindtextdomain(domain, localedir=None):
global _localedirs
if localedir is not None:
_localedirs[domain] = localedir
return _localedirs.get(domain, _default_localedir)
def bind_textdomain_codeset(domain, codeset=None):
global _localecodesets
if codeset is not None:
_localecodesets[domain] = codeset
return _localecodesets.get(domain)
def dgettext(domain, message):
try:
t = translation(domain, _localedirs.get(domain, None),
codeset=_localecodesets.get(domain))
except OSError:
return message
return t.gettext(message)
def ldgettext(domain, message):
try:
t = translation(domain, _localedirs.get(domain, None),
codeset=_localecodesets.get(domain))
except OSError:
return message
return t.lgettext(message)
def dngettext(domain, msgid1, msgid2, n):
try:
t = translation(domain, _localedirs.get(domain, None),
codeset=_localecodesets.get(domain))
except OSError:
if n == 1:
return msgid1
else:
return msgid2
return t.ngettext(msgid1, msgid2, n)
def ldngettext(domain, msgid1, msgid2, n):
try:
t = translation(domain, _localedirs.get(domain, None),
codeset=_localecodesets.get(domain))
except OSError:
if n == 1:
return msgid1
else:
return msgid2
return t.lngettext(msgid1, msgid2, n)
def gettext(message):
return dgettext(_current_domain, message)
def lgettext(message):
return ldgettext(_current_domain, message)
def ngettext(msgid1, msgid2, n):
return dngettext(_current_domain, msgid1, msgid2, n)
def lngettext(msgid1, msgid2, n):
return ldngettext(_current_domain, msgid1, msgid2, n)
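# Usage sketch (illustrative; names are hypothetical) for the module-level,
# GNU-gettext-compatible API defined above:
#
#     bindtextdomain('myapp', 'locale')
#     textdomain('myapp')
#     gettext('Hello World')               # translated via the 'myapp' catalog
#     ngettext('%d file', '%d files', 3)   # plural-aware lookup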
# dcgettext() has been deemed unnecessary and is not implemented.
# James Henstridge's Catalog constructor from GNOME gettext. Documented usage
# was:
#
# import gettext
# cat = gettext.Catalog(PACKAGE, localedir=LOCALEDIR)
# _ = cat.gettext
# print _('Hello World')
# The resulting catalog object currently doesn't support access through a
# dictionary API, which was supported (but apparently unused) in GNOME
# gettext.
Catalog = translation
| lgpl-3.0 |
projectatomic/atomic-reactor | tests/utils/test_yum.py | 1 | 1699 | """
Copyright (c) 2018 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
Very small subset of tests for the YumRepo class. Most testing
is done in test_add_yum_repo_by_url
"""
from __future__ import absolute_import
from fnmatch import fnmatch
import os
import sys
from atomic_reactor.utils.yum import YumRepo
import pytest
@pytest.mark.parametrize(('repourl', 'add_hash', 'pattern'), (
('http://example.com/a/b/c/myrepo.repo', True, 'myrepo-?????.repo'),
('http://example.com/a/b/c/myrepo', True, 'myrepo-?????.repo'),
('http://example.com/repo-2.repo', True, 'repo-2-?????.repo'),
('http://example.com/repo-2', True, 'repo-2-?????.repo'),
('http://example.com/spam/myrepo.repo', True, 'myrepo-?????.repo'),
('http://example.com/bacon/myrepo', True, 'myrepo-?????.repo'),
('http://example.com/spam/myrepo-608de.repo', False, 'myrepo-?????.repo'),
))
def test_add_repo_to_url(repourl, add_hash, pattern):
repo = YumRepo(repourl, add_hash=add_hash)
assert repo.repourl == repourl
assert fnmatch(repo.filename, pattern)
def test_invalid_config():
repo = YumRepo('http://example.com/a/b/c/myrepo.repo', 'line noise')
    if sys.version_info < (3, 0):
        assert not repo.is_valid()
    else:
        # the invalid config is only detected on Python 2 here, so there is
        # nothing meaningful to assert on Python 3
        assert True
def test_write_content(tmpdir):
test_content = 'test_content'
repo = YumRepo(
repourl='http://example.com/a/b/c/myrepo.repo', content=test_content,
dst_repos_dir=str(tmpdir)
)
repo.write_content()
with open(os.path.join(str(tmpdir), repo.filename)) as f:
assert f.read() == test_content
| bsd-3-clause |
sledz/oe | contrib/source-checker/oe-checksums-sorter.py | 40 | 2995 | #!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# Copyright (C) 2007 OpenedHand
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# OpenEmbedded source checksums sorter
#
# This script parses conf/checksums.ini and sorts it alphabetically by source
# archive name. Duplicate entries are removed.
#
# Run it:
#
# oe-checksums-sorter.py path-to-conf/checksums.ini
#
#
import ConfigParser
import getopt
import os
import sys
import tempfile
def usage(rc):
print """usage: %s [--inplace|-i] conf/checksums.ini
--inplace, -i: update file in place (default is to write to stdout)
If no input file is given, will read from standard input.
""" % sys.argv[0]
sys.exit(rc)
try:
optlist, args = getopt.getopt(sys.argv[1:], "ih", ["inplace", "help"])
except getopt.GetoptError, e:
print >> sys.stderr, "%s: %s" % (sys.argv[0], e)
usage(1)
inplace = False
infp = sys.stdin
filename = None
for opt, val in optlist:
if opt == '-i' or opt == '--inplace':
inplace = True
    elif opt == '-h' or opt == '--help':
usage(0)
else:
print >> sys.stderr, "%s: %s: invalid argument" % (sys.argv[0], opt)
usage(1)
if len(args) == 0:
if inplace:
print >> sys.stderr, "%s: --inplace requires a filename" % sys.argv[0]
usage(1)
elif len(args) == 1:
filename = args[0]
try:
infp = open(filename, "r")
except Exception, e:
print >> sys.stderr, "%s: %s" % (sys.argv[0], e)
sys.exit(1)
else:
print >> sys.stderr, "%s: extra arguments" % sys.argv[0]
usage(1)
out = sys.stdout
tmpfn = None
if inplace:
outfd, tmpfn = tempfile.mkstemp(prefix='cksums',
dir=os.path.dirname(filename) or '.')
os.chmod(tmpfn, os.stat(filename).st_mode)
out = os.fdopen(outfd, 'w')
checksums_parser = ConfigParser.ConfigParser()
checksums_parser.readfp(infp)
new_list = []
seen = {}
for source in checksums_parser.sections():
archive = source.split("/")[-1]
md5 = checksums_parser.get(source, "md5")
sha = checksums_parser.get(source, "sha256")
tup = (archive, source, md5, sha)
if not seen.has_key(tup):
new_list.append(tup)
seen[tup] = 1
new_list.sort()
for entry in new_list:
print >> out, "[%s]\nmd5=%s\nsha256=%s\n" % (entry[1], entry[2], entry[3])
if inplace:
out.close()
os.rename(tmpfn, filename)
| mit |
mrshelly/openerp71313 | openerp/addons/mrp/report/price.py | 19 | 11417 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp import pooler
from openerp.report.interface import report_rml
from openerp.tools import to_xml
from openerp.report import report_sxw
from datetime import datetime
from openerp.tools.translate import _
class report_custom(report_rml):
def create_xml(self, cr, uid, ids, datas, context=None):
number = (datas.get('form', False) and datas['form']['number']) or 1
pool = pooler.get_pool(cr.dbname)
product_pool = pool.get('product.product')
product_uom_pool = pool.get('product.uom')
supplier_info_pool = pool.get('product.supplierinfo')
workcenter_pool = pool.get('mrp.workcenter')
user_pool = pool.get('res.users')
bom_pool = pool.get('mrp.bom')
pricelist_pool = pool.get('product.pricelist')
rml_obj=report_sxw.rml_parse(cr, uid, product_pool._name,context)
rml_obj.localcontext.update({'lang':context.get('lang',False)})
company_currency = user_pool.browse(cr, uid, uid).company_id.currency_id
company_currency_symbol = company_currency.symbol or company_currency.name
def process_bom(bom, currency_id, factor=1):
xml = '<row>'
sum = 0
sum_strd = 0
prod = product_pool.browse(cr, uid, bom['product_id'])
prod_name = to_xml(bom['name'])
prod_qtty = factor * bom['product_qty']
product_uom = product_uom_pool.browse(cr, uid, bom['product_uom'], context=context)
product_uom_name = to_xml(product_uom.name)
main_sp_price, main_sp_name , main_strd_price = '','',''
sellers, sellers_price = '',''
if prod.seller_id:
main_sp_name = '- <b>'+ to_xml(prod.seller_id.name) +'</b>\r\n'
pricelist = prod.seller_id.property_product_pricelist_purchase
price = pricelist_pool.price_get(cr,uid,[pricelist.id],
prod.id, number*prod_qtty or 1.0, prod.seller_id.id, {
'uom': prod.uom_po_id.id,
'date': time.strftime('%Y-%m-%d'),
})[pricelist.id]
main_sp_price = """<b>"""+rml_obj.formatLang(price)+' '+ (company_currency_symbol)+"""</b>\r\n"""
sum += prod_qtty*price
std_price = product_uom_pool._compute_price(cr, uid, prod.uom_id.id, prod.standard_price, to_uom_id=product_uom.id)
main_strd_price = str(std_price) + '\r\n'
sum_strd = prod_qtty*std_price
for seller_id in prod.seller_ids:
sellers += '- <i>'+ to_xml(seller_id.name.name) +'</i>\r\n'
pricelist = seller_id.name.property_product_pricelist_purchase
price = pricelist_pool.price_get(cr,uid,[pricelist.id],
prod.id, number*prod_qtty or 1.0, seller_id.name.id, {
'uom': prod.uom_po_id.id,
'date': time.strftime('%Y-%m-%d'),
})[pricelist.id]
sellers_price += """<i>"""+rml_obj.formatLang(price) +' '+ (company_currency_symbol) +"""</i>\r\n"""
xml += """<col para='yes'> """+ prod_name +""" </col>
<col para='yes'> """+ main_sp_name + sellers + """ </col>
<col f='yes'>"""+ rml_obj.formatLang(prod_qtty) +' '+ product_uom_name +"""</col>
<col f='yes'>"""+ rml_obj.formatLang(float(main_strd_price)) +' '+ (company_currency_symbol) +"""</col>
<col f='yes'>""" + main_sp_price + sellers_price + """</col>'"""
xml += '</row>'
return xml, sum, sum_strd
def process_workcenter(wrk):
workcenter = workcenter_pool.browse(cr, uid, wrk['workcenter_id'])
cost_cycle = wrk['cycle']*workcenter.costs_cycle
cost_hour = wrk['hour']*workcenter.costs_hour
total = cost_cycle + cost_hour
xml = '<row>'
xml += "<col para='yes'>" + to_xml(workcenter.name) + '</col>'
xml += "<col/>"
xml += """<col f='yes'>"""+rml_obj.formatLang(cost_cycle)+' '+ (company_currency_symbol) + """</col>"""
xml += """<col f='yes'>"""+rml_obj.formatLang(cost_hour)+' '+ (company_currency_symbol) + """</col>"""
xml += """<col f='yes'>"""+rml_obj.formatLang(cost_hour + cost_cycle)+' '+ (company_currency_symbol) + """</col>"""
xml += '</row>'
return xml, total
xml = ''
config_start = """
<config>
<date>""" + to_xml(rml_obj.formatLang(datetime.now().strftime('%Y-%m-%d %H:%M:%S'),date_time=True)) + """</date>
<company>%s</company>
<PageSize>210.00mm,297.00mm</PageSize>
<PageWidth>595.27</PageWidth>
<PageHeight>841.88</PageHeight>
<tableSize>55.00mm,58.00mm,29.00mm,29.00mm,29.00mm</tableSize>
""" % to_xml(user_pool.browse(cr, uid, uid).company_id.name)
config_stop = """
<report-footer>Generated by OpenERP</report-footer>
</config>
"""
workcenter_header = """
<lines style='header'>
<row>
<col>%s</col>
<col t='yes'/>
<col t='yes'>%s</col>
<col t='yes'>%s</col>
<col t='yes'>%s</col>
</row>
</lines>
""" % (_('Work Center name'), _('Cycles Cost'), _('Hourly Cost'),_('Work Cost'))
prod_header = """
<row>
<col>%s</col>
<col>%s</col>
<col t='yes'>%s</col>
<col t='yes'>%s</col>
<col t='yes'>%s</col>
</row>
""" % (_('Components'), _('Components suppliers'), _('Quantity'),_('Cost Price per Unit of Measure'), _('Supplier Price per Unit of Measure'))
purchase_price_digits = rml_obj.get_digits(dp='Product Price')
for product in product_pool.browse(cr, uid, ids, context=context):
product_uom_name = to_xml(product.uom_id.name)
bom_id = bom_pool._bom_find(cr, uid, product.id, product.uom_id.id)
title = "<title>%s</title>" %(_("Cost Structure"))
title += "<title>%s</title>" % (to_xml(product.name))
xml += "<lines style='header'>" + title + prod_header + "</lines>"
if not bom_id:
total_strd = number * product.standard_price
total = number * product_pool.price_get(cr, uid, [product.id], 'standard_price')[product.id]
xml += """<lines style='lines'><row>
<col para='yes'>-</col>
<col para='yes'>-</col>
<col para='yes'>-</col>
<col para='yes'>-</col>
<col para='yes'>-</col>
</row></lines>"""
xml += """<lines style='total'> <row>
<col> """ + _('Total Cost of %s %s') % (str(number), product_uom_name) + """: </col>
<col/>
<col f='yes'/>
<col t='yes'>"""+ rml_obj.formatLang(total_strd, digits=purchase_price_digits) +' '+ (company_currency_symbol) + """</col>
<col t='yes'>"""+ rml_obj.formatLang(total, digits=purchase_price_digits) +' '+ (company_currency_symbol) + """</col>
</row></lines>'"""
else:
bom = bom_pool.browse(cr, uid, bom_id, context=context)
factor = number * product.uom_id.factor / bom.product_uom.factor
sub_boms = bom_pool._bom_explode(cr, uid, bom, factor / bom.product_qty)
total = 0
total_strd = 0
parent_bom = {
'product_qty': bom.product_qty,
'name': bom.product_id.name,
'product_uom': bom.product_uom.id,
'product_id': bom.product_id.id
}
xml_tmp = ''
for sub_bom in (sub_boms and sub_boms[0]) or [parent_bom]:
txt, sum, sum_strd = process_bom(sub_bom, company_currency.id)
xml_tmp += txt
total += sum
total_strd += sum_strd
xml += "<lines style='lines'>" + xml_tmp + '</lines>'
xml += """<lines style='sub_total'> <row>
<col> """ + _('Components Cost of %s %s') % (str(number), product_uom_name) + """: </col>
<col/>
<col t='yes'/>
<col t='yes'>"""+ rml_obj.formatLang(total_strd, digits=purchase_price_digits) +' '+ (company_currency_symbol) + """</col>
<col t='yes'></col>
</row></lines>'"""
total2 = 0
xml_tmp = ''
for wrk in (sub_boms and sub_boms[1]):
txt, sum = process_workcenter(wrk)
xml_tmp += txt
total2 += sum
if xml_tmp:
xml += workcenter_header
xml += "<lines style='lines'>" + xml_tmp + '</lines>'
xml += """<lines style='sub_total'> <row>
<col> """ + _('Work Cost of %s %s') % (str(number), product_uom_name) +""": </col>
<col/>
<col/>
<col/>
<col t='yes'>"""+ rml_obj.formatLang(total2, digits=purchase_price_digits) +' '+ (company_currency_symbol) +"""</col>
</row></lines>'"""
xml += """<lines style='total'> <row>
<col> """ + _('Total Cost of %s %s') % (str(number), product_uom_name) + """: </col>
<col/>
<col t='yes'/>
<col t='yes'>"""+ rml_obj.formatLang(total_strd+total2, digits=purchase_price_digits) +' '+ (company_currency_symbol) + """</col>
<col t='yes'></col>
</row></lines>'"""
xml = '<?xml version="1.0" ?><report>' + config_start + config_stop + xml + '</report>'
return xml
report_custom('report.product.price', 'product.product', '', 'addons/mrp/report/price.xsl')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
YingHsuan/termite_data_server | web2py/gluon/tests/test_contribs.py | 18 | 1745 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Unit tests for contribs """
import sys
import os
import unittest
def fix_sys_path():
"""
logic to have always the correct sys.path
'', web2py/gluon, web2py/site-packages, web2py/ ...
"""
def add_path_first(path):
sys.path = [path] + [p for p in sys.path if (
not p == path and not p == (path + '/'))]
path = os.path.dirname(os.path.abspath(__file__))
if not os.path.isfile(os.path.join(path,'web2py.py')):
i = 0
while i<10:
i += 1
if os.path.exists(os.path.join(path,'web2py.py')):
break
path = os.path.abspath(os.path.join(path, '..'))
paths = [path,
os.path.abspath(os.path.join(path, 'site-packages')),
os.path.abspath(os.path.join(path, 'gluon')),
'']
[add_path_first(path) for path in paths]
fix_sys_path()
from utils import md5_hash
import contrib.fpdf as fpdf
import contrib.pyfpdf as pyfpdf
class TestContribs(unittest.TestCase):
""" Tests the contrib package """
def test_fpdf(self):
""" Basic PDF test and sanity checks """
self.assertEqual(
            fpdf.FPDF_VERSION, pyfpdf.FPDF_VERSION, 'version mismatch')
        self.assertEqual(fpdf.FPDF, pyfpdf.FPDF, 'class mismatch')
pdf = fpdf.FPDF()
pdf.add_page()
pdf.compress = False
pdf.set_font('Arial', '', 14)
pdf.ln(10)
pdf.write(5, 'hello world')
pdf_out = pdf.output('', 'S')
self.assertTrue(fpdf.FPDF_VERSION in pdf_out, 'version string')
self.assertTrue('hello world' in pdf_out, 'sample message')
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
udacity/course-front-end-frameworks | lesson3/quizNewFeature2/unit_tests.py | 1 | 2220 | is_correct = False
router = widget_inputs["check1"]
template = widget_inputs["check2"]
service = widget_inputs["check3"]
controller = widget_inputs["check4"]
module = widget_inputs["check5"]
comments = []
def commentizer(new):
if new not in comments:
comments.append(new)
if not router:
is_correct = is_correct and False
commentizer("A router is needed for a number of reasons, but the most important reason is to manage the application's state via the URL. Does this feature need a specific page?")
else:
is_correct = True
if not template:
is_correct = is_correct and False
commentizer("Is anything going to display visually? Yep! Which component is used to house the feature's HTML?")
else:
is_correct = is_correct and True
if not service:
is_correct = is_correct and False
commentizer("Remember that services let different parts of the app share data? Will this feature be sharing data with other parts of the app?")
else:
is_correct = is_correct and True
if not controller:
is_correct = is_correct and False
commentizer("This feature will need to know some information about which items are saved for later. Which component provides data to a template?")
else:
is_correct = is_correct and True
if module:
is_correct = is_correct and False
commentizer("Modules help organize and package together a lot of functionality. There's no need to create an entirely new module just for this little feature. Since our app already has a module, we'll just add to that one. ")
else:
is_correct = is_correct and True
# if they're all unchecked
if not any([router, template, service, controller, module]):
is_correct = False
comments = []
comments.append("It would be *awesome* if we didn't need any of these and things would just magically work. Unfortunately, that's not the case!\n\nThink about what each of these components does - which one(s) are needed to achieve this functionality?")
if is_correct:
commentizer("Great job! There are a number of moving parts to Angular, so it's good to have a firm grasp on the responsibilities of each.")
grade_result["comment"] = "\n\n".join(comments)
grade_result["correct"] = is_correct
| mit |
stefanfoulis/django-multilingual | testproject/inline_registrations/admin.py | 5 | 1190 | from django.contrib import admin
from testproject.inline_registrations.models import (ArticleWithSimpleRegistration,
ArticleWithExternalInline,
ArticleWithInternalInline)
import multilingual
##########################################
# for ArticleWithSimpleRegistration
admin.site.register(ArticleWithSimpleRegistration)
########################################
# for ArticleWithExternalInline
class TranslationAdmin(multilingual.TranslationModelAdmin):
model = ArticleWithExternalInline._meta.translation_model
prepopulated_fields = {'slug_local': ('title',)}
class ArticleWithExternalInlineAdmin(multilingual.ModelAdmin):
inlines = [TranslationAdmin]
admin.site.register(ArticleWithExternalInline, ArticleWithExternalInlineAdmin)
########################################
# for ArticleWithInternalInline
class ArticleWithInternalInlineAdmin(multilingual.ModelAdmin):
class Translation(multilingual.TranslationModelAdmin):
prepopulated_fields = {'slug_local': ('title',)}
admin.site.register(ArticleWithInternalInline, ArticleWithInternalInlineAdmin)
| mit |
macks22/dblp | api/dblp_sql.py | 2 | 3801 | import argparse
import logging
import sys
import sqlalchemy as sa
import db
import dblp
def insert(conn, ins):
"""Attempt to run an insertion statement; return results, None if error."""
try:
ins_res = conn.execute(ins)
except sa.exc.IntegrityError as err:
# a paper already exists with this id
logging.error(str(err))
return None
except Exception as e:
logging.error('unexpected exception\n%s', str(e))
return None
else:
return ins_res
def person_insert(conn, name):
sel = sa.sql.text("SELECT id FROM person WHERE LOWER(name)=LOWER(:n)")
res = conn.execute(sel, n=name)
p = res.first()
if p is not None:
return p['id']
ins = db.person.insert().values(name=name)
try:
res = conn.execute(ins)
except sa.exc.IntegrityError: # concurrency issue
res = conn.execute(sel, n=name)
p = res.first()
if p is None:
raise
else:
return p['id']
return res.inserted_primary_key[0]
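# Usage sketch (illustrative; the name is hypothetical): person_insert is a
# race-tolerant get-or-create -- it selects first, inserts on a miss, and
# re-selects if a concurrent insert trips the unique constraint, so every
# caller converges on the same row id:
#
#     conn = db.engine.connect()
#     pid = person_insert(conn, 'Ada Lovelace')
#     conn.close()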
def process_record(record):
"""Update the database with the contents of the record."""
logging.debug('processing record\n%s' % record);
conn = db.engine.connect()
paper_id = record.id
ins = db.papers.insert().\
values(id=paper_id, title=record.title,
venue=record.venue, year=record.year,
abstract=record.abstract)
# attempt to insert a new paper into the db
result = insert(conn, ins)
if result is None:
# since ids come from data, we've already processed this record
conn.close()
return False
# make new records for each author
for author in record.authors:
person_id = person_insert(conn, author)
ins = db.authors.insert().values(paper=paper_id, person=person_id)
insert(conn, ins) # may fail, but we don't really care
for ref in record.refs:
ins = db.refs.insert().values(paper=paper_id, ref=ref)
insert(conn, ins)
conn.close()
return True # success
def process_records(fpath):
"""Process all records in data file."""
processed = 0
successful = 0
    # iterrecords is assumed to live in the dblp helper module imported above
    for record in dblp.iterrecords(fpath):
try:
success = process_record(record)
except Exception as e:
logging.info('unexpected exception in `process_record`')
logging.error(str(e))
success = False
processed += 1
if success:
successful += 1
if processed % 20 == 0:
logging.info('processed: %d records' % processed)
logging.info('successful: %d' % successful)
def make_parser():
parser = argparse.ArgumentParser(
description="parse dblp data")
parser.add_argument(
'fpath', action='store',
help='file to parse data from')
parser.add_argument(
'-v', '--verbose', action='store_true',
help="turn on verbose logging")
parser.add_argument(
'-vv', '--very-verbose', action='store_true',
help='turn on very verbose logging')
return parser
if __name__ == "__main__":
parser = make_parser()
args = parser.parse_args()
if args.verbose:
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s][%(levelname)s]: %(message)s')
elif args.very_verbose:
logging.basicConfig(
level=logging.DEBUG,
format='[%(asctime)s][%(levelname)s]: %(message)s')
db.engine.echo = True
else:
logging.basicConfig(level=logging.CRITICAL)
db.engine.echo = False
try:
process_records(args.fpath)
except Exception as err:
logging.info('ERROR OCCURED IN `process_records`')
logging.error(str(err))
sys.exit(-1)
sys.exit(0)
| mit |
tqtran7/horizon | openstack_dashboard/dashboards/project/volumes/backups/forms.py | 57 | 4363 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing backups.
"""
import operator
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.containers \
import forms as containers_forms
class CreateBackupForm(forms.SelfHandlingForm):
name = forms.CharField(max_length=255, label=_("Backup Name"))
description = forms.CharField(widget=forms.Textarea(attrs={'rows': 4}),
label=_("Description"),
required=False)
container_name = forms.CharField(
max_length=255,
label=_("Container Name"),
validators=[containers_forms.no_slash_validator],
required=False)
volume_id = forms.CharField(widget=forms.HiddenInput())
def handle(self, request, data):
# Create a container for the user if no input is given
if not data['container_name']:
data['container_name'] = 'volumebackups'
try:
backup = api.cinder.volume_backup_create(request,
data['volume_id'],
data['container_name'],
data['name'],
data['description'])
message = _('Creating volume backup "%s"') % data['name']
messages.success(request, message)
return backup
except Exception:
redirect = reverse('horizon:project:volumes:index')
exceptions.handle(request,
_('Unable to create volume backup.'),
redirect=redirect)
class RestoreBackupForm(forms.SelfHandlingForm):
volume_id = forms.ChoiceField(label=_('Select Volume'), required=False)
backup_id = forms.CharField(widget=forms.HiddenInput())
backup_name = forms.CharField(widget=forms.HiddenInput())
def __init__(self, request, *args, **kwargs):
super(RestoreBackupForm, self).__init__(request, *args, **kwargs)
try:
volumes = api.cinder.volume_list(request)
except Exception:
msg = _('Unable to lookup volume or backup information.')
redirect = reverse('horizon:project:volumes:index')
exceptions.handle(request, msg, redirect=redirect)
raise exceptions.Http302(redirect)
volumes.sort(key=operator.attrgetter('name', 'created_at'))
choices = [('', _('Create a New Volume'))]
choices.extend((volume.id, volume.name) for volume in volumes)
self.fields['volume_id'].choices = choices
def handle(self, request, data):
backup_id = data['backup_id']
backup_name = data['backup_name'] or None
volume_id = data['volume_id'] or None
try:
restore = api.cinder.volume_backup_restore(request,
backup_id,
volume_id)
# Needed for cases when a new volume is created.
volume_id = restore.volume_id
message = _('Successfully restored backup %(backup_name)s '
'to volume with id: %(volume_id)s')
messages.success(request, message % {'backup_name': backup_name,
'volume_id': volume_id})
return restore
except Exception:
msg = _('Unable to restore backup.')
redirect = reverse('horizon:project:volumes:index')
exceptions.handle(request, msg, redirect=redirect)
| apache-2.0 |
UpSea/thirdParty | pyqtgraph-0.9.10/examples/PlotWidget.py | 6 | 2553 | # -*- coding: utf-8 -*-
"""
Demonstrates use of PlotWidget class. This is little more than a
GraphicsView with a PlotItem placed in its center.
"""
import initExample ## Add path to library (just for examples; you do not need this)
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
#QtGui.QApplication.setGraphicsSystem('raster')
app = QtGui.QApplication([])
mw = QtGui.QMainWindow()
mw.setWindowTitle('pyqtgraph example: PlotWidget')
mw.resize(800,800)
cw = QtGui.QWidget()
mw.setCentralWidget(cw)
l = QtGui.QVBoxLayout()
cw.setLayout(l)
pw = pg.PlotWidget(name='Plot1') ## giving the plots names allows us to link their axes together
l.addWidget(pw)
pw2 = pg.PlotWidget(name='Plot2')
l.addWidget(pw2)
pw3 = pg.PlotWidget()
l.addWidget(pw3)
mw.show()
## Create an empty plot curve to be filled later, set its pen
p1 = pw.plot()
p1.setPen((200,200,100))
## Add in some extra graphics
rect = QtGui.QGraphicsRectItem(QtCore.QRectF(0, 0, 1, 5e-11))
rect.setPen(QtGui.QPen(QtGui.QColor(100, 200, 100)))
pw.addItem(rect)
pw.setLabel('left', 'Value', units='V')
pw.setLabel('bottom', 'Time', units='s')
pw.setXRange(0, 2)
pw.setYRange(0, 1e-10)
def rand(n):
data = np.random.random(n)
data[int(n*0.1):int(n*0.13)] += .5
data[int(n*0.18)] += 2
data[int(n*0.1):int(n*0.13)] *= 5
data[int(n*0.18)] *= 20
data *= 1e-12
return data, np.arange(n, n+len(data)) / float(n)
def updateData():
yd, xd = rand(10000)
p1.setData(y=yd, x=xd)
## Start a timer to rapidly update the plot in pw
t = QtCore.QTimer()
t.timeout.connect(updateData)
t.start(50)
#updateData()
## Multiple parameterized plots--we can autogenerate averages for these.
for i in range(0, 5):
for j in range(0, 3):
yd, xd = rand(10000)
pw2.plot(y=yd*(j+1), x=xd, params={'iter': i, 'val': j})
## Test large numbers
curve = pw3.plot(np.random.normal(size=100)*1e0, clickable=True)
curve.curve.setClickable(True)
curve.setPen('w') ## white pen
curve.setShadowPen(pg.mkPen((70,70,30), width=6, cosmetic=True))
def clicked():
print("curve clicked")
curve.sigClicked.connect(clicked)
lr = pg.LinearRegionItem([1, 30], bounds=[0,100], movable=True)
pw3.addItem(lr)
line = pg.InfiniteLine(angle=90, movable=True)
pw3.addItem(line)
line.setBounds([0,200])
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| mit |
d/hamster-applet | src/hamster/utils/graphics.py | 1 | 57654 | # - coding: utf-8 -
# Copyright (C) 2008-2010 Toms Bauģis <toms.baugis at gmail.com>
# Dual licensed under the MIT or GPL Version 2 licenses.
# See http://github.com/tbaugis/hamster_experiments/blob/master/README.textile
import math
import datetime as dt
import gtk, gobject
import pango, cairo
import re
try:
import pytweener
except ImportError:  # we can live without the tweener; Scene.animate will not work
pytweener = None
import colorsys
from collections import deque
class Colors(object):
hex_color_normal = re.compile("#([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})")
hex_color_short = re.compile("#([a-fA-F0-9])([a-fA-F0-9])([a-fA-F0-9])")
hex_color_long = re.compile("#([a-fA-F0-9]{4})([a-fA-F0-9]{4})([a-fA-F0-9]{4})")
def parse(self, color):
assert color is not None
#parse color into rgb values
if isinstance(color, basestring):
match = self.hex_color_long.match(color)
if match:
color = [int(color, 16) / 65535.0 for color in match.groups()]
else:
match = self.hex_color_normal.match(color)
if match:
color = [int(color, 16) / 255.0 for color in match.groups()]
else:
match = self.hex_color_short.match(color)
color = [int(color + color, 16) / 255.0 for color in match.groups()]
elif isinstance(color, gtk.gdk.Color):
color = [color.red / 65535.0,
color.green / 65535.0,
color.blue / 65535.0]
else:
# otherwise we assume we have color components in 0..255 range
if color[0] > 1 or color[1] > 1 or color[2] > 1:
color = [c / 255.0 for c in color]
return color
def rgb(self, color):
return [c * 255 for c in self.parse(color)]
def gdk(self, color):
c = self.parse(color)
return gtk.gdk.Color(int(c[0] * 65535.0), int(c[1] * 65535.0), int(c[2] * 65535.0))
def is_light(self, color):
# tells you if color is dark or light, so you can up or down the
# scale for improved contrast
return colorsys.rgb_to_hls(*self.rgb(color))[1] > 150
def darker(self, color, step):
# returns color darker by step (where step is in range 0..255)
hls = colorsys.rgb_to_hls(*self.rgb(color))
return colorsys.hls_to_rgb(hls[0], hls[1] - step, hls[2])
def contrast(self, color, step):
"""if color is dark, will return a lighter one, otherwise darker"""
hls = colorsys.rgb_to_hls(*self.rgb(color))
if self.is_light(color):
return colorsys.hls_to_rgb(hls[0], hls[1] - step, hls[2])
else:
return colorsys.hls_to_rgb(hls[0], hls[1] + step, hls[2])
# returns color darker by step (where step is in range 0..255)
Colors = Colors() # this is a static class, so an instance will do
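# Usage sketch (illustrative): the singleton accepts hex strings, gtk.gdk
# colors or 0..255 triplets and normalizes them to 0..1 floats:
#
#     Colors.parse("#fff")      # -> [1.0, 1.0, 1.0]
#     Colors.rgb("#ff0000")     # -> [255.0, 0.0, 0.0]
#     Colors.is_light("#eee")   # -> True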
class Graphics(object):
"""If context is given upon contruction, will perform drawing
operations on context instantly. Otherwise queues up the drawing
instructions and performs them in passed-in order when _draw is called
with context.
Most of instructions are mapped to cairo functions by the same name.
    Where there are differences, documentation is provided.
See http://cairographics.org/documentation/pycairo/2/reference/context.html
for detailed description of the cairo drawing functions.
"""
def __init__(self, context = None):
self.context = context
self.colors = Colors # pointer to the color utilities instance
self.extents = None # bounds of the object, only if interactive
self.paths = None # paths for mouse hit checks
self._last_matrix = None
self.__new_instructions = deque() # instruction set until it is converted into path-based instructions
self.__instruction_cache = None
self.cache_surface = None
def clear(self):
"""clear all instructions"""
self.__new_instructions = deque()
self.__instruction_cache = None
self.paths = []
@staticmethod
def _stroke(context): context.stroke()
def stroke(self, color = None, alpha = 1):
if color or alpha < 1:self.set_color(color, alpha)
self._add_instruction(self._stroke,)
@staticmethod
def _fill(context): context.fill()
def fill(self, color = None, alpha = 1):
if color or alpha < 1:self.set_color(color, alpha)
self._add_instruction(self._fill,)
@staticmethod
def _stroke_preserve(context): context.stroke_preserve()
def stroke_preserve(self, color = None, alpha = 1):
if color or alpha < 1:self.set_color(color, alpha)
self._add_instruction(self._stroke_preserve,)
@staticmethod
def _fill_preserve(context): context.fill_preserve()
def fill_preserve(self, color = None, alpha = 1):
if color or alpha < 1:self.set_color(color, alpha)
self._add_instruction(self._fill_preserve,)
@staticmethod
def _new_path(context): context.new_path()
def new_path(self):
self._add_instruction(self._new_path,)
@staticmethod
def _paint(context): context.paint()
def paint(self):
self._add_instruction(self._paint,)
@staticmethod
def _set_source(context, image):
context.set_source(image)
def set_source(self, image, x = 0, y = 0):
self._add_instruction(self._set_source, image)
@staticmethod
def _set_source_surface(context, surface, x, y):
context.set_source_surface(surface, x, y)
def set_source_surface(self, surface, x = 0, y = 0):
self._add_instruction(self._set_source_surface, surface, x, y)
@staticmethod
def _set_source_pixbuf(context, pixbuf, x, y):
context.set_source_pixbuf(pixbuf, x, y)
def set_source_pixbuf(self, pixbuf, x = 0, y = 0):
self._add_instruction(self._set_source_pixbuf, pixbuf, x, y)
@staticmethod
def _save_context(context): context.save()
def save_context(self):
self._add_instruction(self._save_context)
@staticmethod
def _restore_context(context): context.restore()
def restore_context(self):
self._add_instruction(self._restore_context)
@staticmethod
def _translate(context, x, y): context.translate(x, y)
def translate(self, x, y):
self._add_instruction(self._translate, x, y)
@staticmethod
def _rotate(context, radians): context.rotate(radians)
def rotate(self, radians):
self._add_instruction(self._rotate, radians)
@staticmethod
def _move_to(context, x, y): context.move_to(x, y)
def move_to(self, x, y):
self._add_instruction(self._move_to, x, y)
@staticmethod
def _line_to(context, x, y): context.line_to(x, y)
def line_to(self, x, y = None):
if y is not None:
self._add_instruction(self._line_to, x, y)
elif isinstance(x, list) and y is None:
for x2, y2 in x:
self._add_instruction(self._line_to, x2, y2)
@staticmethod
def _rel_line_to(context, x, y): context.rel_line_to(x, y)
def rel_line_to(self, x, y = None):
if x and y:
self._add_instruction(self._rel_line_to, x, y)
elif isinstance(x, list) and y is None:
for x2, y2 in x:
self._add_instruction(self._rel_line_to, x2, y2)
@staticmethod
def _curve_to(context, x, y, x2, y2, x3, y3):
context.curve_to(x, y, x2, y2, x3, y3)
def curve_to(self, x, y, x2, y2, x3, y3):
"""draw a curve. (x2, y2) is the middle point of the curve"""
self._add_instruction(self._curve_to, x, y, x2, y2, x3, y3)
@staticmethod
def _close_path(context): context.close_path()
def close_path(self):
self._add_instruction(self._close_path,)
@staticmethod
def _set_line_width(context, width):
context.set_line_width(width)
@staticmethod
def _set_dash(context, dash, dash_offset = 0):
context.set_dash(dash, dash_offset)
def set_line_style(self, width = None, dash = None, dash_offset = 0):
"""change width and dash of a line"""
if width is not None:
self._add_instruction(self._set_line_width, width)
if dash is not None:
self._add_instruction(self._set_dash, dash, dash_offset)
def _set_color(self, context, r, g, b, a):
if a < 1:
context.set_source_rgba(r, g, b, a)
else:
context.set_source_rgb(r, g, b)
def set_color(self, color, alpha = 1):
"""set active color. You can use hex colors like "#aaa", or you can use
        normalized RGB triplets (where every value is in range 0..1), or
you can do the same thing in range 0..65535.
also consider skipping this operation and specify the color on stroke and
fill.
"""
color = self.colors.parse(color) # parse whatever we have there into a normalized triplet
if len(color) == 4 and alpha is None:
alpha = color[3]
r, g, b = color[:3]
self._add_instruction(self._set_color, r, g, b, alpha)
@staticmethod
def _arc(context, x, y, radius, start_angle, end_angle):
context.arc(x, y, radius, start_angle, end_angle)
def arc(self, x, y, radius, start_angle, end_angle):
"""draw arc going counter-clockwise from start_angle to end_angle"""
self._add_instruction(self._arc, x, y, radius, start_angle, end_angle)
def circle(self, x, y, radius):
"""draw circle"""
self._add_instruction(self._arc, x, y, radius, 0, math.pi * 2)
def ellipse(self, x, y, width, height, edges = None):
"""draw 'perfect' ellipse, opposed to squashed circle. works also for
equilateral polygons"""
# the automatic edge case is somewhat arbitrary
steps = edges or max((32, width, height)) / 2
angle = 0
step = math.pi * 2 / steps
points = []
while angle < math.pi * 2:
points.append((width / 2.0 * math.cos(angle),
height / 2.0 * math.sin(angle)))
angle += step
min_x = min((point[0] for point in points))
min_y = min((point[1] for point in points))
self.move_to(points[0][0] - min_x + x, points[0][1] - min_y + y)
for p_x, p_y in points:
self.line_to(p_x - min_x + x, p_y - min_y + y)
self.line_to(points[0][0] - min_x + x, points[0][1] - min_y + y)
@staticmethod
def _arc_negative(context, x, y, radius, start_angle, end_angle):
context.arc_negative(x, y, radius, start_angle, end_angle)
def arc_negative(self, x, y, radius, start_angle, end_angle):
"""draw arc going clockwise from start_angle to end_angle"""
self._add_instruction(self._arc_negative, x, y, radius, start_angle, end_angle)
@staticmethod
def _rounded_rectangle(context, x, y, x2, y2, corner_radius):
half_corner = corner_radius / 2
context.move_to(x + corner_radius, y)
context.line_to(x2 - corner_radius, y)
context.curve_to(x2 - half_corner, y, x2, y + half_corner, x2, y + corner_radius)
context.line_to(x2, y2 - corner_radius)
context.curve_to(x2, y2 - half_corner, x2 - half_corner, y2, x2 - corner_radius, y2)
context.line_to(x + corner_radius, y2)
context.curve_to(x + half_corner, y2, x, y2 - half_corner, x, y2 - corner_radius)
context.line_to(x, y + corner_radius)
context.curve_to(x, y + half_corner, x + half_corner, y, x + corner_radius, y)
@staticmethod
def _rectangle(context, x, y, w, h): context.rectangle(x, y, w, h)
def rectangle(self, x, y, width, height, corner_radius = 0):
"draw a rectangle. if corner_radius is specified, will draw rounded corners"
if corner_radius <= 0:
self._add_instruction(self._rectangle, x, y, width, height)
return
# make sure that w + h are larger than 2 * corner_radius
corner_radius = min(corner_radius, min(width, height) / 2)
x2, y2 = x + width, y + height
self._add_instruction(self._rounded_rectangle, x, y, x2, y2, corner_radius)
def fill_area(self, x, y, width, height, color, opacity = 1):
"""fill rectangular area with specified color"""
self.rectangle(x, y, width, height)
self.fill(color, opacity)
def fill_stroke(self, fill = None, stroke = None, line_width = None):
"""fill and stroke the drawn area in one go"""
if line_width: self.set_line_style(line_width)
if fill and stroke:
self.fill_preserve(fill)
elif fill:
self.fill(fill)
if stroke:
self.stroke(stroke)
@staticmethod
def _show_layout(context, text, font_desc, alignment, width, wrap, ellipsize):
layout = context.create_layout()
layout.set_font_description(font_desc)
layout.set_markup(text)
layout.set_width(width)
layout.set_alignment(alignment)
if width > 0:
if wrap is not None:
layout.set_wrap(wrap)
else:
layout.set_ellipsize(ellipsize or pango.ELLIPSIZE_END)
context.show_layout(layout)
def create_layout(self, size = None):
"""utility function to create layout with the default font. Size and
alignment parameters are shortcuts to according functions of the
pango.Layout"""
if not self.context:
            # TODO - a more specific exception class would be better here
            raise Exception("Cannot create layout without an existing context!")
layout = self.context.create_layout()
font_desc = pango.FontDescription(gtk.Style().font_desc.to_string())
if size: font_desc.set_size(size * pango.SCALE)
layout.set_font_description(font_desc)
return layout
def show_text(self, text, size = None, color = None):
"""display text with system's default font"""
font_desc = pango.FontDescription(gtk.Style().font_desc.to_string())
if color: self.set_color(color)
if size: font_desc.set_size(size * pango.SCALE)
self.show_layout(text, font_desc)
def show_layout(self, text, font_desc, alignment = pango.ALIGN_LEFT, width = -1, wrap = None, ellipsize = None):
"""display text. font_desc is string of pango font description
often handier than calling this function directly, is to create
a class:Label object
"""
self._add_instruction(self._show_layout, text, font_desc, alignment, width, wrap, ellipsize)
def _remember_path(self, context, instruction):
context.save()
context.identity_matrix()
if instruction in (self._fill, self._fill_preserve):
new_extents = context.path_extents()
else:
new_extents = context.stroke_extents()
self.extents = self.extents or new_extents
self.extents = (min(self.extents[0], new_extents[0]),
min(self.extents[1], new_extents[1]),
max(self.extents[2], new_extents[2]),
max(self.extents[3], new_extents[3]))
self.paths.append(context.copy_path())
context.restore()
def _add_instruction(self, function, *params):
if self.context:
function(self.context, *params)
else:
self.paths = None
self.__new_instructions.append((function, params))
def _draw(self, context, opacity, with_extents = False):
"""draw accumulated instructions in context"""
# if we have been moved around, we should update bounds
check_extents = with_extents and (context.get_matrix() != self._last_matrix or not self.paths)
if check_extents:
self.paths = deque()
self.extents = None
if self.__new_instructions: #new stuff!
self.__instruction_cache = deque()
current_color = None
current_line = None
instruction_cache = []
while self.__new_instructions:
instruction, args = self.__new_instructions.popleft()
if instruction in (self._set_source,
self._set_source_surface,
self._set_source_pixbuf,
self._paint,
self._translate,
self._save_context,
self._restore_context):
self.__instruction_cache.append((None, None, None, instruction, args))
elif instruction == self._show_layout:
self.__instruction_cache.append((None, current_color, None, instruction, args))
elif instruction == self._set_color:
current_color = args
elif instruction == self._set_line_width:
current_line = args
elif instruction in (self._new_path, self._stroke, self._fill,
self._stroke_preserve,
self._fill_preserve):
self.__instruction_cache.append((context.copy_path(),
current_color,
current_line,
instruction, ()))
instruction_cache = []
if check_extents:
self._remember_path(context, instruction)
else:
# the rest are non-special
instruction_cache.append((instruction, args))
if opacity < 1 and instruction == self._set_color:
self._set_color(context, args[0], args[1], args[2], args[3] * opacity)
elif opacity < 1 and instruction == self._paint:
context.paint_with_alpha(opacity)
else:
instruction(context, *args) # reset even on preserve as the instruction will preserve it instead
# last one
if check_extents and instruction not in (self._fill, self._stroke, self._fill_preserve, self._stroke_preserve):
self._remember_path(context, self._fill)
# also empty the temporary cache that was not met by a stroke at the end
while instruction_cache: # stroke is missing so we just cache
instruction, args = instruction_cache.pop(0)
self.__instruction_cache.append((None, None, None, instruction, args))
else:
if not self.__instruction_cache:
return
for path, color, line, instruction, args in self.__instruction_cache:
if color:
if opacity < 1:
self._set_color(context, color[0], color[1], color[2], color[3] * opacity)
else:
self._set_color(context, *color)
if line: self._set_line_width(context, *line)
if path:
context.append_path(path)
if check_extents:
self._remember_path(context, self._fill)
if instruction:
if instruction == self._paint and opacity < 1:
context.paint_with_alpha(opacity)
else:
instruction(context, *args)
if check_extents and instruction not in (self._fill, self._stroke, self._fill_preserve, self._stroke_preserve):
# last one
self._remember_path(context, self._fill)
self._last_matrix = context.get_matrix()
def _draw_as_bitmap(self, context, opacity):
"""
instead of caching paths, this function caches the whole drawn thing
use cache_as_bitmap on sprite to enable this mode
"""
matrix = context.get_matrix()
if self.__new_instructions or matrix != self._last_matrix:
if self.__new_instructions:
self.__instruction_cache = list(self.__new_instructions)
self.__new_instructions = deque()
self.paths = deque()
self.extents = None
if not self.__instruction_cache:
# no instructions - nothing to do
return
# instructions that end path
path_end_instructions = (self._new_path, self._stroke, self._fill, self._stroke_preserve, self._fill_preserve)
# measure the path extents so we know the size of surface
# also to save some time use the context to paint for the first time
for instruction, args in self.__instruction_cache:
if instruction in path_end_instructions:
self._remember_path(context, instruction)
if instruction in (self._set_source_pixbuf, self._set_source_surface):
# draw a rectangle around the pathless instructions so that the extents are correct
pixbuf = args[0]
x = args[1] if len(args) > 1 else 0
y = args[2] if len(args) > 2 else 0
self._rectangle(context, x, y, pixbuf.get_width(), pixbuf.get_height())
if instruction == self._paint and opacity < 1:
context.paint_with_alpha(opacity)
elif instruction == self._set_color and opacity < 1:
self._set_color(context, args[0], args[1], args[2], args[3] * opacity)
else:
instruction(context, *args)
if instruction not in path_end_instructions: # last one
self._remember_path(context, self._fill)
# now draw the instructions on the caching surface
w = int(self.extents[2] - self.extents[0]) + 1
h = int(self.extents[3] - self.extents[1]) + 1
self.cache_surface = context.get_target().create_similar(cairo.CONTENT_COLOR_ALPHA, w, h)
ctx = gtk.gdk.CairoContext(cairo.Context(self.cache_surface))
ctx.translate(-self.extents[0], -self.extents[1])
ctx.transform(matrix)
for instruction, args in self.__instruction_cache:
instruction(ctx, *args)
self._last_matrix = matrix
else:
context.save()
context.identity_matrix()
context.translate(self.extents[0], self.extents[1])
context.set_source_surface(self.cache_surface)
if opacity < 1:
context.paint_with_alpha(opacity)
else:
context.paint()
context.restore()
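# Usage sketch (illustrative): a Graphics object queues drawing instructions
# and replays them once a cairo context is available, typically from a
# sprite's "on-render" handler:
#
#     g = Graphics()
#     g.rectangle(10, 10, 100, 50, corner_radius=4)
#     g.fill_stroke(fill="#eee", stroke="#333", line_width=1)
#     # the scene later calls g._draw(context, opacity) during its redraw pass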
class Sprite(gtk.Object):
"""The Sprite class is a basic display list building block: a display list
node that can display graphics and can also contain children.
Once you have created the sprite, use Scene's add_child to add it to
scene
"""
__gsignals__ = {
"on-mouse-over": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"on-mouse-out": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"on-mouse-down": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-mouse-up": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-click": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-drag-start": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-drag": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-drag-finish": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-render": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
}
def __init__(self, x = 0, y = 0,
opacity = 1, visible = True,
rotation = 0, pivot_x = 0, pivot_y = 0,
scale_x = 1, scale_y = 1,
interactive = False, draggable = False,
z_order = 0, mouse_cursor = None,
cache_as_bitmap = False, snap_to_pixel = True):
gtk.Object.__init__(self)
#: list of children sprites. Use :func:`add_child` to add sprites
self.sprites = []
#: instance of :ref:`graphics` for this sprite
self.graphics = Graphics()
#: boolean denoting whether the sprite responds to mouse events
self.interactive = interactive
#: boolean marking if sprite can be automatically dragged
self.draggable = draggable
#: relative x coordinate of the sprites' rotation point
self.pivot_x = pivot_x
#: relative y coordinates of the sprites' rotation point
self.pivot_y = pivot_y
#: sprite opacity
self.opacity = opacity
#: boolean visibility flag
self.visible = visible
#: pointer to parent :class:`Sprite` or :class:`Scene`
self.parent = None
#: sprite coordinates
self.x, self.y = x, y
#: rotation of the sprite in radians (use :func:`math.degrees` to convert to degrees if necessary)
self.rotation = rotation
#: scale X
self.scale_x = scale_x
#: scale Y
self.scale_y = scale_y
#: drawing order between siblings. The one with the highest z_order will be on top.
self.z_order = z_order
#: mouse-over cursor of the sprite. See :meth:`Scene.mouse_cursor`
#: for possible values
self.mouse_cursor = mouse_cursor
#: x position of the cursor within mouse upon drag. change this value
#: in on-drag-start to adjust drag point
self.drag_x = None
#: y position of the cursor within mouse upon drag. change this value
#: in on-drag-start to adjust drag point
self.drag_y = None
        #: Whether the sprite should be cached as a bitmap. Default: False.
        #: Generally good when you have many static sprites
self.cache_as_bitmap = cache_as_bitmap
#: Should the sprite coordinates always rounded to full pixel. Default: true
#: Mostly this is good for performance but in some cases that can lead
#: to rounding errors in positioning.
self.snap_to_pixel = snap_to_pixel
self.__dict__["_sprite_dirty"] = True # flag that indicates that the graphics object of the sprite should be rendered
def __setattr__(self, name, val):
if self.__dict__.get(name, "hamster_graphics_no_value_really") == val:
return
self.__dict__[name] = val
if name not in ('x', 'y', 'rotation', 'scale_x', 'scale_y', 'opacity', 'visible', 'z_order'):
self.__dict__["_sprite_dirty"] = True
if name == 'opacity' and self.__dict__.get("cache_as_bitmap") and self.__dict__.get("graphics"):
# invalidating cache for the bitmap version as that paints opacity in the image
self.graphics._last_matrix = None
elif name == 'interactive' and self.__dict__.get("graphics"):
            # when an item suddenly becomes interactive, it may well be that
            # the extents have not been calculated yet
self.graphics._last_matrix = None
elif name == 'z_order' and self.__dict__.get('parent'):
self.parent._sort()
self.redraw()
def _sort(self):
"""sort sprites by z_order"""
self.sprites = sorted(self.sprites, key=lambda sprite:sprite.z_order)
def add_child(self, *sprites):
"""Add child sprite. Child will be nested within parent"""
for sprite in sprites:
if sprite.parent:
sprite.parent.remove_child(sprite)
self.sprites.append(sprite)
sprite.parent = self
self._sort()
def remove_child(self, *sprites):
for sprite in sprites:
self.sprites.remove(sprite)
sprite.parent = None
def check_hit(self, x, y):
"""check if the given coordinates are inside the sprite's fill or stroke
path"""
if not self.graphics.extents:
return False
sprite_x, sprite_y, sprite_x2, sprite_y2 = self.graphics.extents
if sprite_x <= x <= sprite_x2 and sprite_y <= y <= sprite_y2:
paths = self.graphics.paths
if not paths:
return True
context = cairo.Context(cairo.ImageSurface(cairo.FORMAT_A1, 0, 0))
for path in paths:
context.append_path(path)
return context.in_fill(x, y)
else:
return False
def get_scene(self):
"""returns class:`Scene` the sprite belongs to"""
if hasattr(self, 'parent') and self.parent:
if isinstance(self.parent, Scene):
return self.parent
else:
return self.parent.get_scene()
return None
def redraw(self):
"""queue redraw of the sprite. this function is called automatically
whenever a sprite attribute changes. sprite changes that happen
during scene redraw are ignored in order to avoid echoes.
Call scene.redraw() explicitly if you need to redraw in these cases.
"""
scene = self.get_scene()
if scene and scene._redraw_in_progress == False:
self.parent.redraw()
def animate(self, duration = None, easing = None, on_complete = None, on_update = None, **kwargs):
"""Request paretn Scene to Interpolate attributes using the internal tweener.
Specify sprite's attributes that need changing.
`duration` defaults to 0.4 seconds and `easing` to cubic in-out
(for others see pytweener.Easing class).
Example::
# tween some_sprite to coordinates (50,100) using default duration and easing
self.animate(x = 50, y = 100)
"""
scene = self.get_scene()
if scene:
scene.animate(self, duration, easing, on_complete, on_update, **kwargs)
def _draw(self, context, opacity = 1):
if self.visible is False:
return
context.new_path()
if (self._sprite_dirty): # send signal to redo the drawing when sprite is dirty
self.emit("on-render")
self.__dict__["_sprite_dirty"] = False
if any((self.x, self.y, self.rotation, self.scale_x, self.scale_y)):
context.save()
if any((self.x, self.y, self.pivot_x, self.pivot_y)):
if self.snap_to_pixel:
context.translate(int(self.x) + int(self.pivot_x), int(self.y) + int(self.pivot_y))
else:
context.translate(self.x + self.pivot_x, self.y + self.pivot_y)
if self.rotation:
context.rotate(self.rotation)
if self.pivot_x or self.pivot_y:
if self.snap_to_pixel:
context.translate(int(-self.pivot_x), int(-self.pivot_y))
else:
context.translate(-self.pivot_x, -self.pivot_y)
if self.scale_x != 1 or self.scale_y != 1:
context.scale(self.scale_x, self.scale_y)
if self.cache_as_bitmap:
self.graphics._draw_as_bitmap(context, self.opacity * opacity)
else:
self.graphics._draw(context, self.opacity * opacity, self.interactive or self.draggable)
for sprite in self.sprites:
sprite._draw(context, self.opacity * opacity)
if any((self.x, self.y, self.rotation, self.scale_x, self.scale_y)):
context.restore()
context.new_path() #forget about us
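# A minimal, illustrative sketch (not part of the original module): a custom
# Sprite subclass that draws itself in the "on-render" handler. The class
# name _DemoBox and its colors are hypothetical placeholders.
class _DemoBox(Sprite):
    def __init__(self, size = 50, **kwargs):
        Sprite.__init__(self, interactive = True, **kwargs)
        self.size = size
        self.connect("on-render", self.on_render)

    def on_render(self, sprite):
        # instructions are recorded by Graphics and replayed on each draw
        # until the sprite is marked dirty again
        self.graphics.rectangle(0, 0, self.size, self.size)
        self.graphics.fill_stroke("#ff0000", "#333333", 1)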
class BitmapSprite(Sprite):
"""Caches given image data in a surface similar to targets, which ensures
that drawing it will be quick and low on CPU.
Image data can be either :class:`cairo.ImageSurface` or :class:`gtk.gdk.Pixbuf`
"""
def __init__(self, image_data = None, **kwargs):
Sprite.__init__(self, **kwargs)
#: image data
self.image_data = image_data
self._surface = None
def __setattr__(self, name, val):
Sprite.__setattr__(self, name, val)
if name == 'image_data':
self.__dict__['_surface'] = None
if self.image_data:
self.__dict__['width'] = self.image_data.get_width()
self.__dict__['height'] = self.image_data.get_height()
def _draw(self, context, opacity = 1):
if self.image_data is None or self.width is None or self.height is None:
return
if not self._surface:
# caching image on surface similar to the target
self._surface = context.get_target().create_similar(cairo.CONTENT_COLOR_ALPHA,
self.width,
self.height)
local_context = gtk.gdk.CairoContext(cairo.Context(self._surface))
if isinstance(self.image_data, gtk.gdk.Pixbuf):
local_context.set_source_pixbuf(self.image_data, 0, 0)
else:
local_context.set_source_surface(self.image_data)
local_context.paint()
# add instructions with the resulting surface
self.graphics.set_source_surface(self._surface)
self.graphics.paint()
self.graphics.rectangle(0, 0, self.width, self.height)
Sprite._draw(self, context, opacity)
class Image(BitmapSprite):
"""Displays image by path. Currently supports only PNG images."""
def __init__(self, path, **kwargs):
BitmapSprite.__init__(self, **kwargs)
#: path to the image
self.path = path
def __setattr__(self, name, val):
BitmapSprite.__setattr__(self, name, val)
if name == 'path': # load when the value is set to avoid penalty on render
self.image_data = cairo.ImageSurface.create_from_png(self.path)
class Icon(BitmapSprite):
"""Displays icon by name and size in the theme"""
def __init__(self, name, size=24, **kwargs):
BitmapSprite.__init__(self, **kwargs)
self.theme = gtk.icon_theme_get_default()
#: icon name from theme
self.name = name
#: icon size in pixels
self.size = size
def __setattr__(self, name, val):
BitmapSprite.__setattr__(self, name, val)
if name in ('name', 'size'): # discard the cached image only when the name or size changes
if self.__dict__.get('name') and self.__dict__.get('size'):
self.image_data = self.theme.load_icon(self.name, self.size, 0)
else:
self.image_data = None
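# Illustrative sketch: constructing the bitmap-backed sprites defined above.
# The file path and icon name are hypothetical placeholders.
def _example_bitmap_sprites():
    picture = Image("/tmp/background.png", x = 10, y = 10)
    folder = Icon("folder", size = 24, x = 120, y = 40)
    return picture, folder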
class Label(Sprite):
def __init__(self, text = "", size = 10, color = None,
alignment = pango.ALIGN_LEFT, **kwargs):
Sprite.__init__(self, **kwargs)
self.width, self.height = None, None
#: pango.FontDescription, default is the system's font
self.font_desc = pango.FontDescription(gtk.Style().font_desc.to_string())
self.font_desc.set_size(size * pango.SCALE)
#: color of label either as hex string or an (r,g,b) tuple
self.color = color
self._bounds_width = -1
#: wrapping method. Can be set to pango.[WRAP_WORD, WRAP_CHAR,
#: WRAP_WORD_CHAR]
self.wrap = None
#: Ellipsize mode. Can be set to pango.[ELLIPSIZE_NONE,
#: ELLIPSIZE_START, ELLIPSIZE_MIDDLE, ELLIPSIZE_END]
self.ellipsize = None
#: alignment. one of pango.[ALIGN_LEFT, ALIGN_RIGHT, ALIGN_CENTER]
self.alignment = alignment
#: label text
self.text = text
#: font size
self.size = size
self.__surface = None
self.connect("on-render", self.on_render)
def __setattr__(self, name, val):
if self.__dict__.get(name, "hamster_graphics_no_value_really") != val:
if name == "width" and val and self.__dict__.get('_bounds_width') and val * pango.SCALE == self.__dict__['_bounds_width']:
return
Sprite.__setattr__(self, name, val)
if name == "width":
# setting width means the consumer wants to constrain the label
if val is None or val == -1:
self.__dict__['_bounds_width'] = -1
else:
self.__dict__['_bounds_width'] = val * pango.SCALE
if name in ("width", "text", "size", "font_desc", "wrap", "ellipsize"):
# avoid chicken and egg
if "text" in self.__dict__ and "size" in self.__dict__:
self._set_dimensions()
def on_render(self, sprite):
if not self.text:
self.graphics.clear()
return
self.graphics.set_color(self.color)
self.graphics.show_layout(self.text, self.font_desc,
self.alignment,
self._bounds_width,
self.wrap,
self.ellipsize)
if self._bounds_width != -1:
rect_width = self._bounds_width / pango.SCALE
else:
rect_width = self.width
self.graphics.rectangle(0, 0, rect_width, self.height)
def _set_dimensions(self):
context = gtk.gdk.CairoContext(cairo.Context(cairo.ImageSurface(cairo.FORMAT_A1, 0, 0)))
layout = context.create_layout()
layout.set_font_description(self.font_desc)
layout.set_markup(self.text)
layout.set_width(self._bounds_width)
layout.set_ellipsize(pango.ELLIPSIZE_NONE)
if self.wrap is not None:
layout.set_wrap(self.wrap)
else:
layout.set_ellipsize(self.ellipsize or pango.ELLIPSIZE_END)
# TODO - the __dict__ part looks rather lame but allows circumventing the setattr
self.__dict__['width'], self.height = layout.get_pixel_size()
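# Illustrative sketch: constraining a Label's width, which triggers the
# wrap/ellipsize handling in __setattr__ above. Values are arbitrary.
def _example_label():
    caption = Label("a rather long caption", size = 10, color = "#333")
    caption.ellipsize = pango.ELLIPSIZE_END
    caption.width = 120    # constrain; recomputes dimensions via _set_dimensions
    return caption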
class Rectangle(Sprite):
def __init__(self, w, h, corner_radius = 0, fill = None, stroke = None, **kwargs):
Sprite.__init__(self, **kwargs)
#: width
self.width = w
#: height
self.height = h
#: fill color
self.fill = fill
#: stroke color
self.stroke = stroke
#: stroke line width
self.line_width = 1
#: corner radius. Set bigger than 0 for rounded corners
self.corner_radius = corner_radius
self.connect("on-render", self.on_render)
def on_render(self, sprite):
x, y = 0, 0
if self.snap_to_pixel:
x, y = 0.5, 0.5
self.graphics.rectangle(x, y, self.width, self.height, self.corner_radius)
self.graphics.fill_stroke(self.fill, self.stroke, self.line_width)
class Polygon(Sprite):
def __init__(self, points, fill = None, stroke = None, line_width = 1, **kwargs):
Sprite.__init__(self, **kwargs)
#: list of (x,y) tuples that the line should go through. Polygon
#: will automatically close path.
self.points = points
#: fill color
self.fill = fill
#: stroke color
self.stroke = stroke
#: stroke line width
self.line_width = line_width
self.connect("on-render", self.on_render)
def on_render(self, sprite):
if not self.points: return
self.graphics.move_to(*self.points[0])
self.graphics.line_to(self.points)
self.graphics.close_path()
self.graphics.fill_stroke(self.fill, self.stroke, self.line_width)
class Circle(Sprite):
def __init__(self, width, height, fill = None, stroke = None, line_width = 1, **kwargs):
Sprite.__init__(self, **kwargs)
#: circle width
self.width = width
#: circle height
self.height = height
#: fill color
self.fill = fill
#: stroke color
self.stroke = stroke
#: stroke line width
self.line_width = line_width
self.connect("on-render", self.on_render)
def on_render(self, sprite):
if self.width == self.height:
radius = self.width / 2.0
self.graphics.circle(radius, radius, radius)
else:
self.graphics.ellipse(0, 0, self.width, self.height)
self.graphics.fill_stroke(self.fill, self.stroke, self.line_width)
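# Illustrative sketch: the primitive shape sprites above, with arbitrary
# placeholder coordinates and colors.
def _example_shapes():
    return [
        Rectangle(100, 50, corner_radius = 4, fill = "#eee", stroke = "#333", x = 10, y = 10),
        Polygon([(0, 0), (40, 10), (20, 40)], fill = "#fa0", x = 150, y = 10),
        Circle(30, 30, stroke = "#00f", x = 220, y = 10),
    ]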
class Scene(gtk.DrawingArea):
""" Drawing area for displaying sprites.
Add sprites to the Scene by calling :func:`add_child`.
Scene is a descendant of `gtk.DrawingArea <http://www.pygtk.org/docs/pygtk/class-gtkdrawingarea.html>`_
and thus inherits all of its methods.
"""
__gsignals__ = {
"expose-event": "override",
"configure_event": "override",
"on-enter-frame": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, )),
"on-finish-frame": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, )),
"on-click": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
"on-drag": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
"on-drag-start": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
"on-drag-finish": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
"on-mouse-move": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-mouse-down": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-mouse-up": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-mouse-over": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-mouse-out": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"on-scroll": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
}
def __init__(self, interactive = True, framerate = 60,
background_color = None, scale = False, keep_aspect = True):
gtk.DrawingArea.__init__(self)
if interactive:
self.set_events(gtk.gdk.POINTER_MOTION_MASK
| gtk.gdk.LEAVE_NOTIFY_MASK | gtk.gdk.ENTER_NOTIFY_MASK
| gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK
| gtk.gdk.SCROLL_MASK)
self.connect("motion_notify_event", self.__on_mouse_move)
self.connect("enter_notify_event", self.__on_mouse_enter)
self.connect("leave_notify_event", self.__on_mouse_leave)
self.connect("button_press_event", self.__on_button_press)
self.connect("button_release_event", self.__on_button_release)
self.connect("scroll-event", self.__on_scroll)
#: list of sprites in scene. use :func:`add_child` to add sprites
self.sprites = []
#: framerate of animation. This limits how often redraws are
#: performed (that is, not more often than the framerate). It also
#: influences the smoothness of tweeners.
self.framerate = framerate
#: Scene width. Will be `None` until first expose (that is until first
#: on-enter-frame signal below).
self.width = None
#: Scene height. Will be `None` until first expose (that is until first
#: on-enter-frame signal below).
self.height = None
#: instance of :class:`pytweener.Tweener` that is used by
#: :func:`animate` function, but can be also accessed directly for advanced control.
self.tweener = None
if pytweener:
self.tweener = pytweener.Tweener(0.4, pytweener.Easing.Cubic.ease_in_out)
#: instance of :class:`Colors` class for color parsing
self.colors = Colors
#: read only info about current framerate (frames per second)
self.fps = 0 # inner frames per second counter
#: Last known x position of the mouse (set on expose event)
self.mouse_x = None
#: Last known y position of the mouse (set on expose event)
self.mouse_y = None
#: Background color of the scene. Use either a string with hex color or an RGB triplet.
self.background_color = background_color
#: Mouse cursor appearance.
#: Replace with your own cursor or set to False to have no cursor.
#: None reverts to the default behavior
self.mouse_cursor = None
blank_pixmap = gtk.gdk.Pixmap(None, 1, 1, 1)
self._blank_cursor = gtk.gdk.Cursor(blank_pixmap, blank_pixmap, gtk.gdk.Color(), gtk.gdk.Color(), 0, 0)
#: Minimum distance in pixels for a drag to occur
self.drag_distance = 1
self._last_frame_time = None
self._mouse_sprite = None
self._drag_sprite = None
self.__drag_started = False
self.__drag_start_x, self.__drag_start_y = None, None
self._mouse_in = False
self.__last_cursor = None
self.__drawing_queued = False
self._redraw_in_progress = True
#: When set, upon window resize the content will be scaled
#: relative to the original window size. Defaults to False.
self.scale = scale
#: Should the stage maintain aspect ratio upon scale if
#: :attr:`Scene.scale` is enabled. Defaults to true.
self.keep_aspect = keep_aspect
self._original_width, self._original_height = None, None
def add_child(self, *sprites):
"""Add one or several :class:`Sprite` objects to the scene"""
for sprite in sprites:
if sprite.parent:
sprite.parent.remove_child(sprite)
self.sprites.append(sprite)
sprite.parent = self
self._sort()
def _sort(self):
"""sort sprites by z_order"""
self.sprites = sorted(self.sprites, key=lambda sprite:sprite.z_order)
def remove_child(self, *sprites):
"""Remove one or several :class:`Sprite` sprites from scene """
for sprite in sprites:
self.sprites.remove(sprite)
sprite.parent = None
def clear(self):
"""Remove all sprites from scene"""
self.remove_child(*self.sprites)
def animate(self, sprite, duration = None, easing = None, on_complete = None, on_update = None, **kwargs):
"""Interpolate attributes of the given object using the internal tweener
and redrawing scene after every tweener update.
Specify the sprite and sprite's attributes that need changing.
`duration` defaults to 0.4 seconds and `easing` to cubic in-out
(for others see pytweener.Easing class).
Redraw is requested right after creating the animation.
Example::
# tween some_sprite to coordinates (50,100) using default duration and easing
scene.animate(some_sprite, x = 50, y = 100)
"""
if not self.tweener: # here we complain
raise Exception("pytweener was not found. Include it to enable animations")
tween = self.tweener.add_tween(sprite,
duration=duration,
easing=easing,
on_complete=on_complete,
on_update=on_update,
**kwargs)
self.redraw()
return tween
def redraw(self):
"""Queue redraw. The redraw will be performed not more often than
the `framerate` allows"""
if self.__drawing_queued == False: #if we are moving, then there is a timeout somewhere already
self.__drawing_queued = True
self._last_frame_time = dt.datetime.now()
gobject.timeout_add(1000 / self.framerate, self.__redraw_loop)
def __redraw_loop(self):
"""loop until there is nothing more to tween"""
self.queue_draw() # this will trigger do_expose_event when the current events have been flushed
self.__drawing_queued = self.tweener and self.tweener.has_tweens()
return self.__drawing_queued
def do_expose_event(self, event):
context = self.window.cairo_create()
# clip to the visible part
context.rectangle(event.area.x, event.area.y,
event.area.width, event.area.height)
if self.background_color:
color = self.colors.parse(self.background_color)
context.set_source_rgb(*color)
context.fill_preserve()
context.clip()
if self.scale:
aspect_x = self.width / self._original_width
aspect_y = self.height / self._original_height
if self.keep_aspect:
aspect_x = aspect_y = min(aspect_x, aspect_y)
context.scale(aspect_x, aspect_y)
self.mouse_x, self.mouse_y, mods = self.get_window().get_pointer()
self._redraw_in_progress = True
# update tweens
now = dt.datetime.now()
delta = (now - (self._last_frame_time or dt.datetime.now())).microseconds / 1000000.0
self._last_frame_time = now
if self.tweener:
self.tweener.update(delta)
if delta: # guard against a zero delta on very fast consecutive frames
    self.fps = 1 / delta
# start drawing
self.emit("on-enter-frame", context)
for sprite in self.sprites:
sprite._draw(context)
self.__check_mouse(self.mouse_x, self.mouse_y)
self.emit("on-finish-frame", context)
self._redraw_in_progress = False
def do_configure_event(self, event):
if self._original_width is None:
self._original_width = float(event.width)
self._original_height = float(event.height)
self.width, self.height = event.width, event.height
def all_sprites(self, sprites = None):
"""Returns flat list of the sprite tree for simplified iteration"""
if sprites is None:
sprites = self.sprites
for sprite in sprites:
yield sprite
if sprite.sprites:
for child in self.all_sprites(sprite.sprites):
yield child
def all_visible_sprites(self, sprites = None):
"""Returns flat list of just the visible sprites - avoid children whos
parents are not displayed"""
if sprites is None:
sprites = self.sprites
for sprite in sprites:
if sprite.visible:
yield sprite
if sprite.sprites:
for child in self.all_visible_sprites(sprite.sprites):
yield child
def get_sprite_at_position(self, x, y):
"""Returns the topmost visible interactive sprite for given coordinates"""
over = None
for sprite in self.all_visible_sprites():
if (sprite.interactive or sprite.draggable) and self.__check_hit(sprite, x, y):
over = sprite
return over
def __check_hit(self, sprite, x, y):
if sprite == self._drag_sprite:
return True
return sprite.check_hit(x, y)
def __check_mouse(self, x, y):
if x is None or self._mouse_in == False:
return
cursor = gtk.gdk.ARROW # default
if self.mouse_cursor is not None:
cursor = self.mouse_cursor
#check if we have a mouse over
over = self.get_sprite_at_position(x, y)
if over:
if over.mouse_cursor is not None:
cursor = over.mouse_cursor
elif self.mouse_cursor is None:
# resort to defaults
if over.draggable:
cursor = gtk.gdk.FLEUR
else:
cursor = gtk.gdk.HAND2
if over != self._mouse_sprite:
over.emit("on-mouse-over")
self.emit("on-mouse-over", over)
self.redraw()
if self._mouse_sprite and self._mouse_sprite != over:
self._mouse_sprite.emit("on-mouse-out")
self.emit("on-mouse-out", self._mouse_sprite)
self.redraw()
self._mouse_sprite = over
if cursor == False:
cursor = self._blank_cursor
if not self.__last_cursor or cursor != self.__last_cursor:
if isinstance(cursor, gtk.gdk.Cursor):
self.window.set_cursor(cursor)
else:
self.window.set_cursor(gtk.gdk.Cursor(cursor))
self.__last_cursor = cursor
""" mouse events """
def __on_mouse_move(self, area, event):
state = event.state
if self._drag_sprite and self._drag_sprite.draggable \
and gtk.gdk.BUTTON1_MASK & event.state:
# dragging around
drag_started = (self.__drag_start_x is not None and \
(self.__drag_start_x - event.x) ** 2 + \
(self.__drag_start_y - event.y) ** 2 > self.drag_distance ** 2)
if drag_started and not self.__drag_started:
matrix = cairo.Matrix()
if self._drag_sprite.parent and isinstance(self._drag_sprite.parent, Sprite):
# TODO - this currently works only until second level
# should take all parents into account
matrix.rotate(self._drag_sprite.parent.rotation)
matrix.invert()
x1,y1 = matrix.transform_point(self.__drag_start_x,
self.__drag_start_y)
self._drag_sprite.drag_x = x1 - self._drag_sprite.x
self._drag_sprite.drag_y = y1 - self._drag_sprite.y
self._drag_sprite.emit("on-drag-start", event)
self.emit("on-drag-start", self._drag_sprite, event)
self.redraw()
self.__drag_started = self.__drag_started or drag_started
if self.__drag_started:
matrix = cairo.Matrix()
if self._drag_sprite.parent and isinstance(self._drag_sprite.parent, Sprite):
# TODO - this currently works only until second level
# should take all parents into account
matrix.rotate(self._drag_sprite.parent.rotation)
matrix.invert()
mouse_x, mouse_y = matrix.transform_point(event.x, event.y)
new_x = mouse_x - self._drag_sprite.drag_x
new_y = mouse_y - self._drag_sprite.drag_y
self._drag_sprite.x, self._drag_sprite.y = new_x, new_y
self._drag_sprite.emit("on-drag", event)
self.emit("on-drag", self._drag_sprite, event)
self.redraw()
return
else:
# avoid double mouse checks - the redraw will also check for mouse!
if not self.__drawing_queued:
self.__check_mouse(event.x, event.y)
self.emit("on-mouse-move", event)
def __on_mouse_enter(self, area, event):
self._mouse_in = True
def __on_mouse_leave(self, area, event):
self._mouse_in = False
if self._mouse_sprite:
self.emit("on-mouse-out", self._mouse_sprite)
self.redraw()
self._mouse_sprite = None
def __on_button_press(self, area, event):
target = self.get_sprite_at_position(event.x, event.y)
self.__drag_start_x, self.__drag_start_y = event.x, event.y
self._drag_sprite = target
if self._drag_sprite and self._drag_sprite.draggable == False:
self._drag_sprite = None
if target:
target.emit("on-mouse-down", event)
self.emit("on-mouse-down", event)
def __on_button_release(self, area, event):
# trying to not emit click and drag-finish at the same time
target = self.get_sprite_at_position(event.x, event.y)
click = not self.__drag_started or (event.x - self.__drag_start_x) ** 2 + \
(event.y - self.__drag_start_y) ** 2 < self.drag_distance ** 2
if (click and self.__drag_started == False) or not self._drag_sprite:
if target:
target.emit("on-click", event)
self.emit("on-click", event, target)
self.redraw()
if self._drag_sprite:
self._drag_sprite.emit("on-drag-finish", event)
self.emit("on-drag-finish", self._drag_sprite, event)
self.redraw()
self._drag_sprite.drag_x, self._drag_sprite.drag_y = None, None
self._drag_sprite = None
self.__drag_started = False
self.__drag_start_x, self.__drag_start_y = None, None
if target:
target.emit("on-mouse-up", event)
self.emit("on-mouse-up", event)
def __on_scroll(self, area, event):
self.emit("on-scroll", event)
| gpl-3.0 |
PabloPiaggi/lammps | tools/i-pi/ipi/inputs/normalmodes.py | 41 | 3951 | """Deals with creating the normal mode representation arrays.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Classes:
InputNormalModes: Deals with creating the normal mode objects.
"""
import numpy as np
from copy import copy
from ipi.engine.normalmodes import *
from ipi.utils.inputvalue import *
from ipi.utils.units import *
__all__ = ['InputNormalModes']
class InputNormalModes(InputArray):
""" Storage class for NormalModes engine.
Describes how normal-modes transformation and integration should be
performed.
Attributes:
mode: Specifies the method by which the dynamical masses are created.
transform: Specifies whether the normal mode calculation will be
done using an FFT transform or a matrix multiplication.
"""
attribs = copy(InputArray.attribs)
attribs["mode"] = (InputAttribute, {"dtype" : str,
"default" : "rpmd",
"help" : "Specifies the technique to be used to calculate the dynamical masses. 'rpmd' simply assigns the bead masses the physical mass. 'manual' sets all the normal mode frequencies except the centroid normal mode manually. 'pa-cmd' takes an argument giving the frequency to set all the non-centroid normal modes to. 'wmax-cmd' is similar to 'pa-cmd', except instead of taking one argument it takes two ([wmax,wtarget]). The lowest-lying normal mode will be set to wtarget for a free particle, and all the normal modes will coincide at frequency wmax. ",
"options" : ['pa-cmd', 'wmax-cmd', 'manual', 'rpmd']})
attribs["transform"] = (InputValue,{"dtype" : str,
"default" : "fft",
"help" : "Specifies whether to calculate the normal mode transform using a fast Fourier transform or a matrix multiplication. For small numbers of beads the matrix multiplication may be faster.",
"options" : ['fft', 'matrix']})
default_help = "Deals with the normal mode transformations, including the adjustment of bead masses to give the desired ring polymer normal mode frequencies if appropriate. Takes as arguments frequencies, of which different numbers must be specified and which are used to scale the normal mode frequencies in different ways depending on which 'mode' is specified."
default_label = "NORMALMODES"
def __init__(self, help=None, dimension=None, default=None, dtype=None):
""" Initializes InputNormalModes.
Just calls the parent initialization function with appropriate arguments.
"""
super(InputNormalModes,self).__init__(help=help, default=default, dtype=float, dimension="frequency")
def store(self, nm):
"""Takes a normal modes instance and stores a minimal representation
of it.
Args:
nm: A normal modes object.
"""
super(InputNormalModes,self).store(nm.nm_freqs)
self.mode.store(nm.mode)
self.transform.store(nm.transform_method)
def fetch(self):
"""Creates a normal modes object.
Returns:
A normal modes object.
"""
super(InputNormalModes,self).check()
return NormalModes(self.mode.fetch(), self.transform.fetch(), super(InputNormalModes,self).fetch() )
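# Illustrative sketch (assumed usage, mirroring the fetch() call above):
# round-tripping a NormalModes object through this input wrapper. The
# frequency array is an arbitrary placeholder.
def _example_roundtrip():
    nm = NormalModes('rpmd', 'fft', np.zeros(0))  # assumed argument order, as in fetch()
    inp = InputNormalModes()
    inp.store(nm)     # keeps mode, transform method and frequencies
    return inp.fetch()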
| gpl-2.0 |
noslenfa/tdjangorest | uw/lib/python2.7/site-packages/django/db/models/sql/compiler.py | 52 | 49792 | from django.utils.six.moves import zip
from django.core.exceptions import FieldError
from django.db import transaction
from django.db.backends.util import truncate_name
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import select_related_descend
from django.db.models.sql.constants import (SINGLE, MULTI, ORDER_DIR,
GET_ITERATOR_CHUNK_SIZE)
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import get_order_dir, Query
from django.db.utils import DatabaseError
from django.utils import six
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {}
def pre_sql_setup(self):
"""
Does any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
# TODO: after the query has been executed, the altered state should be
# cleaned. We are not using a clone() of the query here.
"""
if not self.query.tables:
self.query.join((None, self.query.model._meta.db_table, None, None))
if (not self.query.select and self.query.default_cols and not
self.query.included_inherited_models):
self.query.setup_inherited_models()
if self.query.select_related and not self.query.related_select_cols:
self.fill_related_selections()
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
self.pre_sql_setup()
# After executing the query, we must get rid of any joins the query
# setup created. So, take note of alias counts before the query ran.
# However we do not want to get rid of stuff done in pre_sql_setup(),
# as the pre_sql_setup will modify query state in a way that forbids
# another run of it.
self.refcounts_before = self.query.alias_refcount.copy()
out_cols = self.get_columns(with_col_aliases)
ordering, ordering_group_by = self.get_ordering()
distinct_fields = self.get_distinct()
# This must come after 'select', 'ordering' and 'distinct' -- see
# docstring of get_from_clause() for details.
from_, f_params = self.get_from_clause()
qn = self.quote_name_unless_alias
where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection)
having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection)
params = []
for val in six.itervalues(self.query.extra_select):
params.extend(val[1])
result = ['SELECT']
if self.query.distinct:
result.append(self.connection.ops.distinct_sql(distinct_fields))
result.append(', '.join(out_cols + self.query.ordering_aliases))
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping, gb_params = self.get_grouping(ordering_group_by)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) not implemented.")
if not ordering:
ordering = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
params.extend(gb_params)
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if ordering:
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
if self.query.select_for_update and self.connection.features.has_select_for_update:
# If we've been asked for a NOWAIT query but the backend does not support it,
# raise a DatabaseError otherwise we could get an unexpected deadlock.
nowait = self.query.select_for_update_nowait
if nowait and not self.connection.features.has_select_for_update_nowait:
raise DatabaseError('NOWAIT is not supported on this database backend.')
result.append(self.connection.ops.for_update_sql(nowait=nowait))
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(self.refcounts_before)
return ' '.join(result), tuple(params)
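# Illustrative note (comment only, not part of this class): the usual
# entry point is Query.get_compiler(), e.g.
#   compiler = some_queryset.query.get_compiler(using='default')
#   sql, params = compiler.as_sql()
# which yields the SQL string with '%s' placeholders plus its parameters.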
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
obj.bump_prefix()
return obj.get_compiler(connection=self.connection).as_sql()
def get_columns(self, with_aliases=False):
"""
Returns the list of columns to use in the select statement. If no
columns have been specified, returns all columns relating to fields in
the model.
If 'with_aliases' is true, any column names that are duplicated
(without the table names) are given unique aliases. This is needed in
some cases to avoid ambiguity with nested queries.
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in six.iteritems(self.query.extra_select)]
aliases = set(self.query.extra_select.keys())
if with_aliases:
col_aliases = aliases.copy()
else:
col_aliases = set()
if self.query.select:
only_load = self.deferred_to_columns()
for col in self.query.select:
if isinstance(col, (list, tuple)):
alias, column = col
table = self.query.alias_map[alias].table_name
if table in only_load and column not in only_load[table]:
continue
r = '%s.%s' % (qn(alias), qn(column))
if with_aliases:
if col[1] in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append('%s AS %s' % (r, qn2(col[1])))
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(r)
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(col.as_sql(qn, self.connection))
if hasattr(col, 'alias'):
aliases.add(col.alias)
col_aliases.add(col.alias)
elif self.query.default_cols:
cols, new_aliases = self.get_default_columns(with_aliases,
col_aliases)
result.extend(cols)
aliases.update(new_aliases)
max_name_length = self.connection.ops.max_name_length()
result.extend([
'%s%s' % (
aggregate.as_sql(qn, self.connection),
alias is not None
and ' AS %s' % qn(truncate_name(alias, max_name_length))
or ''
)
for alias, aggregate in self.query.aggregate_select.items()
])
for table, col in self.query.related_select_cols:
r = '%s.%s' % (qn(table), qn(col))
if with_aliases and col in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append(r)
aliases.add(r)
col_aliases.add(col)
self._select_aliases = aliases
return result
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False, local_only=False):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
if opts is None:
opts = self.query.model._meta
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
aliases = set()
only_load = self.deferred_to_columns()
if start_alias:
seen = {None: start_alias}
for field, model in opts.get_fields_with_model():
# For local fields (even if through proxy) the model should
# be None.
if model == opts.concrete_model:
model = None
if local_only and model is not None:
continue
if start_alias:
try:
alias = seen[model]
except KeyError:
link_field = opts.get_ancestor_link(model)
alias = self.query.join((start_alias, model._meta.db_table,
link_field.column, model._meta.pk.column))
seen[model] = alias
else:
# If we're starting from the base model of the queryset, the
# aliases will have already been set up in pre_sql_setup(), so
# we can save time here.
alias = self.query.included_inherited_models[model]
table = self.query.alias_map[alias].table_name
if table in only_load and field.column not in only_load[table]:
continue
if as_pairs:
result.append((alias, field.column))
aliases.add(alias)
continue
if with_aliases and field.column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s.%s AS %s' % (qn(alias),
qn2(field.column), c_alias))
col_aliases.add(c_alias)
aliases.add(c_alias)
else:
r = '%s.%s' % (qn(alias), qn2(field.column))
result.append(r)
aliases.add(r)
if with_aliases:
col_aliases.add(field.column)
return result, aliases
def get_distinct(self):
"""
Returns a quoted list of fields to use in DISTINCT ON part of the query.
Note that this method can alter the tables in the query, and thus it
must be called before get_from_clause().
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.model._meta
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
field, col, alias, _, _ = self._setup_joins(parts, opts, None)
col, alias = self._final_join_removal(col, alias)
result.append("%s.%s" % (qn(alias), qn2(col)))
return result
def get_ordering(self):
"""
Returns a tuple containing a list representing the SQL elements in the
"order by" clause, and the list of SQL elements that need to be added
to the GROUP BY clause as a result of the ordering.
Also sets the ordering_aliases attribute on this instance to a list of
extra aliases needed in the select.
Determining the ordering SQL can change the tables we need to include,
so this should be run *before* get_from_clause().
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = (self.query.order_by
or self.query.model._meta.ordering
or [])
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
distinct = self.query.distinct
select_aliases = self._select_aliases
result = []
group_by = []
ordering_aliases = []
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
# It's possible, due to model inheritance, that normal usage might try
# to include the same field more than once in the ordering. We track
# the table/column pairs we use and discard any after the first use.
processed_pairs = set()
for field in ordering:
if field == '?':
result.append(self.connection.ops.random_function_sql())
continue
if isinstance(field, int):
if field < 0:
order = desc
field = -field
else:
order = asc
result.append('%s %s' % (field, order))
group_by.append((str(field), []))
continue
col, order = get_order_dir(field, asc)
if col in self.query.aggregate_select:
result.append('%s %s' % (qn(col), order))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), col)
processed_pairs.add((table, col))
if not distinct or elt in select_aliases:
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
elif get_order_dir(field)[0] not in self.query.extra_select:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
for table, col, order in self.find_ordering_name(field,
self.query.model._meta, default_order=asc):
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), qn2(col))
processed_pairs.add((table, col))
if distinct and elt not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
else:
elt = qn2(col)
if distinct and col not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append(self.query.extra_select[col])
self.query.ordering_aliases = ordering_aliases
return result, group_by
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
pieces = name.split(LOOKUP_SEP)
field, col, alias, joins, opts = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model.
if field.rel and len(joins) > 1 and opts.ordering:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple([self.query.alias_map[j].table_name for j in joins])
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
col, alias = self._final_join_removal(col, alias)
return [(alias, col, order)]
def _setup_joins(self, pieces, opts, alias):
"""
A helper method for get_ordering and get_distinct. This method will
call query.setup_joins, handle refcounts and then promote the joins.
Note that get_ordering and get_distinct must produce the same target
columns on the same input, as the prefixes of get_ordering and get_distinct
must match. Executing SQL where this is not true is an error.
"""
if not alias:
alias = self.query.get_initial_alias()
field, target, opts, joins, _, _ = self.query.setup_joins(pieces,
opts, alias, False)
# We will later on need to promote those joins that were added to the
# query afresh above.
joins_to_promote = [j for j in joins if self.query.alias_refcount[j] < 2]
alias = joins[-1]
col = target.column
if not field.rel:
# To avoid inadvertent trimming of a necessary alias, use the
# refcount to show that we are referencing a non-relation field on
# the model.
self.query.ref_alias(alias)
# Must use left outer joins for nullable fields and their relations.
# Ordering or distinct must not affect the returned set, and INNER
# JOINS for nullable fields could do this.
self.query.promote_joins(joins_to_promote)
return field, col, alias, joins, opts
def _final_join_removal(self, col, alias):
"""
A helper method for get_distinct and get_ordering. This method will
trim extra not-needed joins from the tail of the join chain.
This is very similar to what is done in trim_joins, but we will
trim LEFT JOINS here. It would be a good idea to consolidate this
method and query.trim_joins().
"""
if alias:
while 1:
join = self.query.alias_map[alias]
if col != join.rhs_join_col:
break
self.query.unref_alias(alias)
alias = join.lhs_alias
col = join.lhs_join_col
return col, alias
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Sub-classes, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns,
ordering and distinct must be done first.
"""
result = []
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
first = True
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
name, alias, join_type, lhs, lhs_col, col, nullable = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
alias_str = (alias != name and ' %s' % alias or '')
if join_type and not first:
result.append('%s %s%s ON (%s.%s = %s.%s)'
% (join_type, qn(name), alias_str, qn(lhs),
qn2(lhs_col), qn(alias), qn2(col)))
else:
connector = not first and ', ' or ''
result.append('%s%s%s' % (connector, qn(name), alias_str))
first = False
for t in self.query.extra_tables:
alias, unused = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
connector = not first and ', ' or ''
result.append('%s%s' % (connector, qn(alias)))
first = False
return result, []
def get_grouping(self, ordering_group_by):
"""
Returns a tuple representing the SQL elements in the "group by" clause.
"""
qn = self.quote_name_unless_alias
result, params = [], []
if self.query.group_by is not None:
select_cols = self.query.select + self.query.related_select_cols
if (len(self.query.model._meta.fields) == len(self.query.select)
and self.connection.features.allows_group_by_pk):
self.query.group_by = [
(self.query.model._meta.db_table, self.query.model._meta.pk.column)
]
select_cols = []
seen = set()
cols = self.query.group_by + select_cols
for col in cols:
if isinstance(col, (list, tuple)):
sql = '%s.%s' % (qn(col[0]), qn(col[1]))
elif hasattr(col, 'as_sql'):
sql = col.as_sql(qn, self.connection)
else:
sql = '(%s)' % str(col)
if sql not in seen:
result.append(sql)
seen.add(sql)
# Still, we need to add all the ordering entries (except if the backend
# can group by just the PK).
if ordering_group_by and not self.connection.features.allows_group_by_pk:
for order, order_params in ordering_group_by:
# Even if we have seen the same SQL string, it might have
# different params, so, we add same SQL in "has params" case.
if order not in seen or params:
result.append(order)
params.extend(order_params)
seen.add(order)
# Unconditionally add the extra_select items.
for extra_select, extra_params in self.query.extra_select.values():
sql = '(%s)' % str(extra_select)
result.append(sql)
params.extend(extra_params)
return result, params
def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
used=None, requested=None, restricted=None, nullable=None,
dupe_set=None, avoid_set=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
self.query.related_select_cols = []
self.query.related_select_fields = []
if not used:
used = set()
if dupe_set is None:
dupe_set = set()
if avoid_set is None:
avoid_set = set()
orig_dupe_set = dupe_set
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
for f, model in opts.get_fields_with_model():
# The get_fields_with_model() returns None for fields that live
# in the field's local model. So, for those fields we want to use
# the f.model - that is the field's local model.
field_model = model or f.model
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
# The "avoid" set is aliases we want to avoid just for this
# particular branch of the recursion. They aren't permanently
# forbidden from reuse in the related selection tables (which is
# what "used" specifies).
avoid = avoid_set.copy()
dupe_set = orig_dupe_set.copy()
table = f.rel.to._meta.db_table
promote = nullable or f.null
if model:
int_opts = opts
alias = root_alias
alias_chain = []
for int_model in opts.get_base_chain(model):
# Proxy models have elements in the base chain
# with no parents; assign the new options
# object and skip to the next base in that
# case
if not int_opts.parents[int_model]:
int_opts = int_model._meta
continue
lhs_col = int_opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), lhs_col),
()))
dupe_set.add((opts, lhs_col))
int_opts = int_model._meta
alias = self.query.join((alias, int_opts.db_table, lhs_col,
int_opts.pk.column), exclusions=used,
promote=promote)
alias_chain.append(alias)
for (dupe_opts, dupe_col) in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
else:
alias = root_alias
dedupe = f.column in opts.duplicate_targets
if dupe_set or dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), f.column), ()))
if dedupe:
dupe_set.add((opts, f.column))
alias = self.query.join((alias, table, f.column,
f.rel.get_related_field().column),
exclusions=used.union(avoid), promote=promote)
used.add(alias)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=f.rel.to._meta, as_pairs=True)
self.query.related_select_cols.extend(columns)
self.query.related_select_fields.extend(f.rel.to._meta.fields)
if restricted:
next = requested.get(f.name, {})
else:
next = False
new_nullable = f.null or promote
for dupe_opts, dupe_col in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
used, next, restricted, new_nullable, dupe_set, avoid)
if restricted:
related_fields = [
(o.field, o.model)
for o in opts.get_all_related_objects()
if o.field.unique
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
# The "avoid" set is aliases we want to avoid just for this
# particular branch of the recursion. They aren't permanently
# forbidden from reuse in the related selection tables (which is
# what "used" specifies).
avoid = avoid_set.copy()
dupe_set = orig_dupe_set.copy()
table = model._meta.db_table
int_opts = opts
alias = root_alias
alias_chain = []
chain = opts.get_base_chain(f.rel.to)
if chain is not None:
for int_model in chain:
# Proxy models have elements in the base chain
# with no parents; assign the new options
# object and skip to the next base in that
# case
if not int_opts.parents[int_model]:
int_opts = int_model._meta
continue
lhs_col = int_opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
avoid.update((self.query.dupe_avoidance.get(id(opts), lhs_col),
()))
dupe_set.add((opts, lhs_col))
int_opts = int_model._meta
alias = self.query.join(
(alias, int_opts.db_table, lhs_col, int_opts.pk.column),
exclusions=used, promote=True, reuse=used
)
alias_chain.append(alias)
for dupe_opts, dupe_col in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
dedupe = f.column in opts.duplicate_targets
if dupe_set or dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), f.column), ()))
if dedupe:
dupe_set.add((opts, f.column))
alias = self.query.join(
(alias, table, f.rel.get_related_field().column, f.column),
exclusions=used.union(avoid),
promote=True
)
used.add(alias)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=model._meta, as_pairs=True, local_only=True)
self.query.related_select_cols.extend(columns)
self.query.related_select_fields.extend(model._meta.fields)
next = requested.get(f.related_query_name(), {})
# Use True here because we are looking at the _reverse_ side of
# the relation, which is always nullable.
new_nullable = True
self.fill_related_selections(model._meta, table, cur_depth+1,
used, next, restricted, new_nullable)
def deferred_to_columns(self):
"""
Converts the self.deferred_loading data structure to a mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
return columns
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
fields = None
has_aggregate_select = bool(self.query.aggregate_select)
# Set transaction dirty if we're using SELECT FOR UPDATE to ensure
# a subsequent commit/rollback is executed, so any database locks
# are released.
if self.query.select_for_update and transaction.is_managed(self.using):
transaction.set_dirty(self.using)
for rows in self.execute_sql(MULTI):
for row in rows:
if resolve_columns:
if fields is None:
# We only set this up here because
# related_select_fields isn't populated until
# execute_sql() has been called.
# We also include types of fields of related models that
# will be included via select_related() for the benefit
# of MySQL/MySQLdb when boolean fields are involved
# (#15040).
# This code duplicates the logic for the order of fields
# found in get_columns(). It would be nice to clean this up.
if self.query.select_fields:
fields = self.query.select_fields
else:
fields = self.query.model._meta.fields
fields = fields + self.query.related_select_fields
# If the field was deferred, exclude it from being passed
# into `resolve_columns` because it wasn't selected.
only_load = self.deferred_to_columns()
if only_load:
db_table = self.query.model._meta.db_table
fields = [f for f in fields if db_table in only_load and
f.column in only_load[db_table]]
row = self.resolve_columns(row, fields)
if has_aggregate_select:
aggregate_start = len(self.query.extra_select) + len(self.query.select)
aggregate_end = aggregate_start + len(self.query.aggregate_select)
row = tuple(row[:aggregate_start]) + tuple([
self.query.resolve_aggregate(value, aggregate, self.connection)
for (alias, aggregate), value
in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
]) + tuple(row[aggregate_end:])
yield row
def execute_sql(self, result_type=MULTI):
"""
Run the query against the database and returns the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
cursor = self.connection.cursor()
cursor.execute(sql, params)
if not result_type:
return cursor
if result_type == SINGLE:
if self.query.ordering_aliases:
return cursor.fetchone()[:-len(self.query.ordering_aliases)]
return cursor.fetchone()
# The MULTI case.
if self.query.ordering_aliases:
result = order_modified_iter(cursor, len(self.query.ordering_aliases),
self.connection.features.empty_fetchmany_value)
else:
result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
self.connection.features.empty_fetchmany_value)
if not self.connection.features.can_use_chunked_reads:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further.
return list(result)
return result
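# Illustrative sketch (not public API): driving the compiler by hand the way
# QuerySet does internally. `queryset` stands for any QuerySet instance;
# MULTI is imported at the top of this module.
def _example_execute(queryset):
    compiler = queryset.query.get_compiler(using=queryset.db)
    for rows in compiler.execute_sql(MULTI):  # chunks of row tuples from fetchmany()
        for row in rows:
            print(row)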
class SQLInsertCompiler(SQLCompiler):
def placeholder(self, field, val):
if field is None:
# A field value of None means the value is raw.
return val
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
return field.get_placeholder(val, self.connection)
else:
# Return the common case for the placeholder
return '%s'
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.model._meta
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
fields = self.query.fields if has_fields else [opts.pk]
result.append('(%s)' % ', '.join([qn(f.column) for f in fields]))
if has_fields:
params = values = [
[
f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
for f in fields
]
for obj in self.query.objs
]
else:
values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
params = [[]]
fields = [None]
can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
not self.return_id and self.connection.features.has_bulk_insert)
if can_bulk:
placeholders = [["%s"] * len(fields)]
else:
placeholders = [
[self.placeholder(field, v) for field, v in zip(fields, val)]
for val in values
]
# Oracle Spatial needs to remove some values due to #10888
params = self.connection.ops.modify_insert_params(placeholders, params)
if self.return_id and self.connection.features.can_return_id_from_insert:
params = params[0]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
result.append("VALUES (%s)" % ", ".join(placeholders[0]))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behaviour for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += r_params
return [(" ".join(result), tuple(params))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
return [(" ".join(result), tuple([v for val in values for v in val]))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholders, params)
]
def execute_sql(self, return_id=False):
assert not (return_id and len(self.query.objs) != 1)
self.return_id = return_id
cursor = self.connection.cursor()
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not (return_id and cursor):
return
if self.connection.features.can_return_id_from_insert:
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(cursor,
self.query.model._meta.db_table, self.query.model._meta.pk.column)
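# Editor's note: execute_sql() above chooses between two id-retrieval
# strategies: backends with can_return_id_from_insert read the new id from a
# RETURNING-style clause via fetch_returned_insert_id(), while all others
# fall back to last_insert_id() (cursor.lastrowid / sequence-currval style
# lookups on the table's primary key column).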
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
assert len(self.query.tables) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.tables[0])]
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self.quote_name_unless_alias
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'prepare_database_save'):
val = val.prepare_database_save(field)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self.connection)
else:
placeholder = '%s'
if hasattr(val, 'evaluate'):
val = SQLEvaluator(val, self.query, allow_joins=False)
name = field.column
if hasattr(val, 'as_sql'):
sql, params = val.as_sql(qn, self.connection)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Returns the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
rows = cursor and cursor.rowcount or 0
is_empty = cursor is None
del cursor
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
"""
self.query.select_related = False
self.query.clear_ordering(True)
super(SQLUpdateCompiler, self).pre_sql_setup()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
# We need to use a sub-select in the where clause to filter on things
# from other tables.
query = self.query.clone(klass=Query)
query.bump_prefix()
query.extra = {}
query.select = []
query.add_fields([query.model._meta.pk.name])
# Recheck the count - it is possible that fiddling with the select
# fields above removes tables from the query. Refs #18304.
count = query.count_active_tables()
if not self.query.related_updates and count == 1:
return
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend([r[0] for r in rows])
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
for alias in self.query.tables[1:]:
self.query.alias_refcount[alias] = 0
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self, qn=None):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
if qn is None:
qn = self.quote_name_unless_alias
sql = ('SELECT %s FROM (%s) subquery' % (
', '.join([
aggregate.as_sql(qn, self.connection)
for aggregate in self.query.aggregate_select.values()
]),
self.query.subquery)
)
params = self.query.sub_params
return (sql, params)
class SQLDateCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateTimeField
fields = [DateTimeField()]
else:
from django.db.backends.util import typecast_timestamp
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
date = row[offset]
if resolve_columns:
date = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
date = typecast_timestamp(str(date))
yield date
def order_modified_iter(cursor, trim, sentinel):
"""
Yields blocks of rows from a cursor. We use this iterator in the special
case when extra output columns have been added to support ordering
requirements. We must trim those extra columns before anything else can use
the results, since they're only needed to make the SQL valid.
"""
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield [r[:-trim] for r in rows]
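# Editor's note: tiny worked example (hypothetical row) of the slicing
# performed above -- with trim=1, a fetched row such as
# ('alice', 'bob', 'ordering_col') is yielded as ('alice', 'bob'); the
# ordering-only columns exist solely to keep the SQL valid.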
| apache-2.0 |
frankk00/realtor | atom/mock_http.py | 278 | 4474 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import atom.http_interface
import atom.url
class Error(Exception):
pass
class NoRecordingFound(Error):
pass
class MockRequest(object):
"""Holds parameters of an HTTP request for matching against future requests.
"""
def __init__(self, operation, url, data=None, headers=None):
self.operation = operation
if isinstance(url, (str, unicode)):
url = atom.url.parse_url(url)
self.url = url
self.data = data
self.headers = headers
class MockResponse(atom.http_interface.HttpResponse):
"""Simulates an httplib.HTTPResponse object."""
def __init__(self, body=None, status=None, reason=None, headers=None):
if body and hasattr(body, 'read'):
self.body = body.read()
else:
self.body = body
if status is not None:
self.status = int(status)
else:
self.status = None
self.reason = reason
self._headers = headers or {}
def read(self):
return self.body
class MockHttpClient(atom.http_interface.GenericHttpClient):
def __init__(self, headers=None, recordings=None, real_client=None):
"""An HttpClient which responds to request with stored data.
The request-response pairs are stored as tuples in a member list named
recordings.
The MockHttpClient can be switched from replay mode to record mode by
setting the real_client member to an instance of an HttpClient which will
make real HTTP requests and store the server's response in list of
recordings.
Args:
headers: dict containing HTTP headers which should be included in all
HTTP requests.
recordings: The initial recordings to be used for responses. This list
contains tuples in the form: (MockRequest, MockResponse)
real_client: An HttpClient which will make a real HTTP request. The
response will be converted into a MockResponse and stored in
recordings.
"""
self.recordings = recordings or []
self.real_client = real_client
self.headers = headers or {}
def add_response(self, response, operation, url, data=None, headers=None):
"""Adds a request-response pair to the recordings list.
After the recording is added, future matching requests will receive the
response.
Args:
response: MockResponse
operation: str
url: str
data: str, Currently the data is ignored when looking for matching
requests.
headers: dict of strings: Currently the headers are ignored when
looking for matching requests.
"""
request = MockRequest(operation, url, data=data, headers=headers)
self.recordings.append((request, response))
def request(self, operation, url, data=None, headers=None):
"""Returns a matching MockResponse from the recordings.
If the real_client is set, the request will be passed along and the
server's response will be added to the recordings and also returned.
If there is no match, a NoRecordingFound error will be raised.
"""
if self.real_client is None:
if isinstance(url, (str, unicode)):
url = atom.url.parse_url(url)
for recording in self.recordings:
if recording[0].operation == operation and recording[0].url == url:
return recording[1]
            raise NoRecordingFound('No recordings found for %s %s' % (
operation, url))
else:
# There is a real HTTP client, so make the request, and record the
# response.
response = self.real_client.request(operation, url, data=data,
headers=headers)
# TODO: copy the headers
stored_response = MockResponse(body=response, status=response.status,
reason=response.reason)
self.add_response(stored_response, operation, url, data=data,
headers=headers)
return stored_response
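# Editor's note: a minimal replay-mode usage sketch (URL and body values
# hypothetical). A canned response is recorded once, then any matching GET
# is answered from the recordings without touching the network:
#
#     client = MockHttpClient()
#     client.add_response(MockResponse(body='ok', status=200, reason='OK'),
#                         'GET', 'http://example.com/feed')
#     response = client.request('GET', 'http://example.com/feed')
#     assert response.status == 200 and response.read() == 'ok'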
| bsd-3-clause |
assamite/creamas | creamas/serializers.py | 1 | 1074 | """
.. py:module:: serializers
:platform: Unix
Predefined serializers for routers.
"""
import pickle
from numpy import array, ndarray
from creamas.core.artifact import Artifact
def get_serializers():
"""Get all basic serializers defined in this module as a list.
"""
return [artifact_serializer, array_serializer, ndarray_serializer]
def artifact_serializer():
"""Basic serializer for :class¨:`~creamas.core.artifact.Artifact` objects
using pickle.
    This serializer requires the :class:`~aiomas.codecs.MsgPack` codec to work.
"""
return Artifact, pickle.dumps, pickle.loads
def array_serializer():
"""Basic serializer for :class¨:`~numpy.array` objects using pickle.
This serializer requires attr:`~aiomas.codecs.MsgPack` codec to work.
"""
return array, pickle.dumps, pickle.loads
def ndarray_serializer():
"""Basic serializer for :class¨:`~numpy.ndarray` objects using pickle.
This serializer requires attr:`~aiomas.codecs.MsgPack` codec to work.
"""
return ndarray, pickle.dumps, pickle.loads
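# Editor's note: hedged usage sketch. Each function above returns a
# (type, serialize, deserialize) triple, so with an aiomas MsgPack codec the
# intended wiring is presumably along these lines (aiomas API assumed, not
# verified here):
#
#     codec = aiomas.codecs.MsgPack()
#     for serializer in get_serializers():
#         codec.add_serializer(*serializer())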
| gpl-2.0 |
nomaro/SickBeard_Backup | sickbeard/notifiers/pushbullet.py | 6 | 5572 | # Author: Pedro Correia (http://github.com/pedrocorreia/)
# Based on pushalot.py by Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import socket
import base64
from httplib import HTTPSConnection, HTTPException
import json
from ssl import SSLError
import sickbeard
from sickbeard import logger, common
class PushbulletNotifier:
def test_notify(self, pushbullet_api):
return self._sendPushbullet(pushbullet_api, event="Test", message="Testing Pushbullet settings from Sick Beard", method="POST", notificationType="note", force=True)
def get_devices(self, pushbullet_api):
return self._sendPushbullet(pushbullet_api, method="GET", force=True)
def get_channels(self, pushbullet_api):
return self._sendPushbullet(pushbullet_api, method="GET", force=True, event="getChannels")
def notify_snatch(self, ep_name):
if sickbeard.PUSHBULLET_NOTIFY_ONSNATCH:
self._sendPushbullet(pushbullet_api=None, event=common.notifyStrings[common.NOTIFY_SNATCH], message=ep_name, notificationType="note", method="POST")
def notify_download(self, ep_name):
if sickbeard.PUSHBULLET_NOTIFY_ONDOWNLOAD:
self._sendPushbullet(pushbullet_api=None, event=common.notifyStrings[common.NOTIFY_DOWNLOAD], message=ep_name, notificationType="note", method="POST")
def notify_subtitle_download(self, ep_name, lang):
if sickbeard.PUSHBULLET_NOTIFY_ONSUBTITLEDOWNLOAD:
self._sendPushbullet(pushbullet_api=None, event=common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD], message=ep_name + ": " + lang, notificationType="note", method="POST")
def _sendPushbullet(self, pushbullet_api=None, pushbullet_device=None, event=None, message=None, notificationType=None, method=None, force=False):
if not sickbeard.USE_PUSHBULLET and not force:
return False
        if pushbullet_api is None:
            pushbullet_api = sickbeard.PUSHBULLET_API
        if pushbullet_device is None:
            pushbullet_device = sickbeard.PUSHBULLET_DEVICE
if method == 'POST':
uri = '/v2/pushes'
else:
uri = '/v2/devices'
if event == 'getChannels':
uri = '/v2/channels'
logger.log(u"Pushbullet event: " + str(event), logger.DEBUG)
logger.log(u"Pushbullet message: " + str(message), logger.DEBUG)
logger.log(u"Pushbullet api: " + str(pushbullet_api), logger.DEBUG)
logger.log(u"Pushbullet devices: " + str(pushbullet_device), logger.DEBUG)
logger.log(u"Pushbullet notification type: " + str(notificationType), logger.DEBUG)
http_handler = HTTPSConnection("api.pushbullet.com")
        if notificationType is None:
testMessage = True
try:
logger.log(u"Testing Pushbullet authentication and retrieving the device list.", logger.DEBUG)
http_handler.request(method, uri, None, headers={'Authorization': 'Bearer %s' % pushbullet_api})
except (SSLError, HTTPException, socket.error):
logger.log(u"Pushbullet notification failed.", logger.ERROR)
return False
else:
testMessage = False
try:
            device = pushbullet_device.split(':')
if device[0] == 'device':
data = {
'title': event.encode('utf-8'),
'body': message.encode('utf-8'),
'device_iden': device[1],
'type': notificationType}
else:
data = {
'title': event.encode('utf-8'),
'body': message.encode('utf-8'),
'channel_tag': device[1],
'type': notificationType}
data = json.dumps(data)
http_handler.request(method, uri, body=data,
headers={'Content-Type': 'application/json', 'Authorization': 'Bearer %s' % pushbullet_api})
pass
except (SSLError, HTTPException, socket.error):
return False
response = http_handler.getresponse()
request_body = response.read()
request_status = response.status
logger.log(u"Pushbullet response: %s" % request_body, logger.DEBUG)
if request_status == 200:
if testMessage:
return request_body
else:
logger.log(u"Pushbullet notifications sent.", logger.DEBUG)
return True
elif request_status == 410:
logger.log(u"Pushbullet auth failed: %s" % response.reason, logger.ERROR)
return False
else:
logger.log(u"Pushbullet notification failed.", logger.ERROR)
return False
notifier = PushbulletNotifier
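# Editor's note: _sendPushbullet() above splits pushbullet_device on ':',
# so the stored setting is expected to look like "device:<device_iden>" or
# "channel:<channel_tag>" (identifiers hypothetical), selecting between the
# device_iden and channel_tag payloads it builds.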
| gpl-3.0 |
sorenk/ansible | lib/ansible/plugins/action/ironware.py | 28 | 3772 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
import json
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.network.common.utils import load_provider
from ansible.module_utils.network.ironware.ironware import ironware_provider_spec
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
if self._play_context.connection != 'local':
return dict(
failed=True,
msg='invalid connection specified, expected connection=local, '
'got %s' % self._play_context.connection
)
provider = load_provider(ironware_provider_spec, self._task.args)
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'ironware'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
pc.become = provider['authorize'] or False
if pc.become:
pc.become_method = 'enable'
pc.become_pass = provider['auth_pass']
display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
# make sure we are in the right cli context which should be
# enable mode and not config module
conn = Connection(socket_path)
out = conn.get_prompt()
if to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
display.vvvv('wrong context, sending end to device', self._play_context.remote_addr)
conn.send_command('end')
task_vars['ansible_socket'] = socket_path
if self._play_context.become_method == 'enable':
self._play_context.become = False
self._play_context.become_method = None
result = super(ActionModule, self).run(task_vars=task_vars)
return result
| gpl-3.0 |
cosenal/osf.io | website/oauth/views.py | 7 | 1558 | # -*- coding: utf-8 -*-
import httplib as http
from flask import redirect
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from website.oauth.models import ExternalAccount
from website.oauth.utils import get_service
from website.oauth.signals import oauth_complete
@must_be_logged_in
def oauth_disconnect(external_account_id, auth):
account = ExternalAccount.load(external_account_id)
user = auth.user
if account is None:
    if account is None:
        raise HTTPError(http.NOT_FOUND)
    if account not in user.external_accounts:
        raise HTTPError(http.FORBIDDEN)
# iterate AddonUserSettings for addons
for user_settings in user.get_oauth_addons():
user_settings.revoke_oauth_access(account)
user_settings.save()
# ExternalAccount.remove_one(account)
# # only after all addons have been dealt with can we remove it from the user
user.external_accounts.remove(account)
user.save()
@must_be_logged_in
def oauth_connect(service_name, auth):
service = get_service(service_name)
return redirect(service.auth_url)
@must_be_logged_in
def oauth_callback(service_name, auth):
user = auth.user
provider = get_service(service_name)
# Retrieve permanent credentials from provider
if not provider.auth_callback(user=user):
return {}
if provider.account not in user.external_accounts:
user.external_accounts.append(provider.account)
user.save()
oauth_complete.send(provider, account=provider.account, user=user)
return {}
| apache-2.0 |
ccccccccccc/personal-file-sharing-center | web/debugerror.py | 68 | 12346 | """
pretty debug errors
(part of web.py)
portions adapted from Django <djangoproject.com>
Copyright (c) 2005, the Lawrence Journal-World
Used under the modified BSD license:
http://www.xfree86.org/3.3.6/COPYRIGHT2.html#5
"""
__all__ = ["debugerror", "djangoerror", "emailerrors"]
import sys, urlparse, pprint, traceback
from template import Template
from net import websafe
from utils import sendmail, safestr
import webapi as web
import os, os.path
whereami = os.path.join(os.getcwd(), __file__)
whereami = os.path.sep.join(whereami.split(os.path.sep)[:-1])
djangoerror_t = """\
$def with (exception_type, exception_value, frames)
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>$exception_type at $ctx.path</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table {
border:1px solid #ccc; border-collapse: collapse; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%;}
table td.code div { overflow:hidden; }
table.source th { color:#666; }
table.source td {
font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; }
ul.traceback li.frame { margin-bottom:1em; }
div.context { margin: 10px 0; }
div.context ol {
padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li {
font-family:monospace; white-space:pre; color:#666; cursor:pointer; }
div.context ol.context-line li { color:black; background-color:#ccc; }
div.context ol.context-line li span { float: right; }
div.commands { margin-left: 40px; }
div.commands a { color:black; text-decoration:none; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 0 20px; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
</style>
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon;
// Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block' : 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
//-->
</script>
</head>
<body>
$def dicttable (d, kls='req', id=None):
$ items = d and d.items() or []
$items.sort()
$:dicttable_items(items, kls, id)
$def dicttable_items(items, kls='req', id=None):
$if items:
<table class="$kls"
$if id: id="$id"
><thead><tr><th>Variable</th><th>Value</th></tr></thead>
<tbody>
$for k, v in items:
<tr><td>$k</td><td class="code"><div>$prettify(v)</div></td></tr>
</tbody>
</table>
$else:
<p>No data.</p>
<div id="summary">
<h1>$exception_type at $ctx.path</h1>
<h2>$exception_value</h2>
<table><tr>
<th>Python</th>
<td>$frames[0].filename in $frames[0].function, line $frames[0].lineno</td>
</tr><tr>
<th>Web</th>
<td>$ctx.method $ctx.home$ctx.path</td>
</tr></table>
</div>
<div id="traceback">
<h2>Traceback <span>(innermost first)</span></h2>
<ul class="traceback">
$for frame in frames:
<li class="frame">
<code>$frame.filename</code> in <code>$frame.function</code>
$if frame.context_line is not None:
<div class="context" id="c$frame.id">
$if frame.pre_context:
<ol start="$frame.pre_context_lineno" class="pre-context" id="pre$frame.id">
$for line in frame.pre_context:
<li onclick="toggle('pre$frame.id', 'post$frame.id')">$line</li>
</ol>
<ol start="$frame.lineno" class="context-line"><li onclick="toggle('pre$frame.id', 'post$frame.id')">$frame.context_line <span>...</span></li></ol>
$if frame.post_context:
<ol start='${frame.lineno + 1}' class="post-context" id="post$frame.id">
$for line in frame.post_context:
<li onclick="toggle('pre$frame.id', 'post$frame.id')">$line</li>
</ol>
</div>
$if frame.vars:
<div class="commands">
<a href='#' onclick="return varToggle(this, '$frame.id')"><span>▶</span> Local vars</a>
$# $inspect.formatargvalues(*inspect.getargvalues(frame['tb'].tb_frame))
</div>
$:dicttable(frame.vars, kls='vars', id=('v' + str(frame.id)))
</li>
</ul>
</div>
<div id="requestinfo">
$if ctx.output or ctx.headers:
<h2>Response so far</h2>
<h3>HEADERS</h3>
$:dicttable_items(ctx.headers)
<h3>BODY</h3>
<p class="req" style="padding-bottom: 2em"><code>
$ctx.output
</code></p>
<h2>Request information</h2>
<h3>INPUT</h3>
$:dicttable(web.input(_unicode=False))
<h3 id="cookie-info">COOKIES</h3>
$:dicttable(web.cookies())
<h3 id="meta-info">META</h3>
$ newctx = [(k, v) for (k, v) in ctx.iteritems() if not k.startswith('_') and not isinstance(v, dict)]
$:dicttable(dict(newctx))
<h3 id="meta-info">ENVIRONMENT</h3>
$:dicttable(ctx.env)
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>web.config.debug</code>
set to <code>True</code>. Set that to <code>False</code> if you don't want to see this.
</p>
</div>
</body>
</html>
"""
djangoerror_r = None
def djangoerror():
def _get_lines_from_file(filename, lineno, context_lines):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
try:
source = open(filename).readlines()
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = \
[line.strip('\n') for line in source[lower_bound:lineno]]
context_line = source[lineno].strip('\n')
post_context = \
[line.strip('\n') for line in source[lineno + 1:upper_bound]]
return lower_bound, pre_context, context_line, post_context
except (OSError, IOError, IndexError):
return None, [], None, []
exception_type, exception_value, tback = sys.exc_info()
frames = []
while tback is not None:
filename = tback.tb_frame.f_code.co_filename
function = tback.tb_frame.f_code.co_name
lineno = tback.tb_lineno - 1
# hack to get correct line number for templates
lineno += tback.tb_frame.f_locals.get("__lineoffset__", 0)
pre_context_lineno, pre_context, context_line, post_context = \
_get_lines_from_file(filename, lineno, 7)
if '__hidetraceback__' not in tback.tb_frame.f_locals:
frames.append(web.storage({
'tback': tback,
'filename': filename,
'function': function,
'lineno': lineno,
'vars': tback.tb_frame.f_locals,
'id': id(tback),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno,
}))
tback = tback.tb_next
frames.reverse()
urljoin = urlparse.urljoin
def prettify(x):
try:
out = pprint.pformat(x)
except Exception, e:
out = '[could not display: <' + e.__class__.__name__ + \
': '+str(e)+'>]'
return out
global djangoerror_r
if djangoerror_r is None:
djangoerror_r = Template(djangoerror_t, filename=__file__, filter=websafe)
t = djangoerror_r
globals = {'ctx': web.ctx, 'web':web, 'dict':dict, 'str':str, 'prettify': prettify}
t.t.func_globals.update(globals)
return t(exception_type, exception_value, frames)
def debugerror():
"""
A replacement for `internalerror` that presents a nice page with lots
of debug information for the programmer.
(Based on the beautiful 500 page from [Django](http://djangoproject.com/),
designed by [Wilson Miner](http://wilsonminer.com/).)
"""
return web._InternalError(djangoerror())
def emailerrors(to_address, olderror, from_address=None):
"""
Wraps the old `internalerror` handler (pass as `olderror`) to
additionally email all errors to `to_address`, to aid in
debugging production websites.
Emails contain a normal text traceback as well as an
attachment containing the nice `debugerror` page.
"""
from_address = from_address or to_address
def emailerrors_internal():
error = olderror()
tb = sys.exc_info()
error_name = tb[0]
error_value = tb[1]
tb_txt = ''.join(traceback.format_exception(*tb))
path = web.ctx.path
request = web.ctx.method + ' ' + web.ctx.home + web.ctx.fullpath
message = "\n%s\n\n%s\n\n" % (request, tb_txt)
sendmail(
"your buggy site <%s>" % from_address,
"the bugfixer <%s>" % to_address,
"bug: %(error_name)s: %(error_value)s (%(path)s)" % locals(),
message,
attachments=[
dict(filename="bug.html", content=safestr(djangoerror()))
],
)
return error
return emailerrors_internal
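# Editor's note: hedged wiring sketch -- the wrapped handler returned by
# emailerrors() is installed the same way debugerror is below, e.g.
#
#     app.internalerror = emailerrors('bugs@example.com', debugerror)
#
# (address hypothetical).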
if __name__ == "__main__":
urls = (
'/', 'index'
)
from application import application
app = application(urls, globals())
app.internalerror = debugerror
class index:
def GET(self):
thisdoesnotexist
app.run()
| gpl-2.0 |
akash1808/nova | nova/wsgi/nova-ec2-api.py | 50 | 1284 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""WSGI script for Nova EC2 API
EXPERIMENTAL support script for running Nova EC2 API under Apache2 etc.
"""
from oslo_config import cfg
from oslo_log import log as logging
from paste import deploy
from nova import config
from nova import objects
from nova import service # noqa
from nova import utils
CONF = cfg.CONF
config_files = ['/etc/nova/api-paste.ini', '/etc/nova/nova.conf']
config.parse_args([], default_config_files=config_files)
LOG = logging.getLogger(__name__)
logging.setup(CONF, "nova")
utils.monkey_patch()
objects.register_all()
conf = config_files[0]
name = "ec2"
options = deploy.appconfig('config:%s' % conf, name=name)
application = deploy.loadapp('config:%s' % conf, name=name)
| apache-2.0 |
antotodd/lab5 | main/lib/werkzeug/contrib/atom.py | 311 | 15281 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.atom
~~~~~~~~~~~~~~~~~~~~~
This module provides a class called :class:`AtomFeed` which can be
used to generate feeds in the Atom syndication format (see :rfc:`4287`).
Example::
def atom_feed(request):
feed = AtomFeed("My Blog", feed_url=request.url,
url=request.host_url,
subtitle="My example blog for a feed test.")
for post in Post.query.limit(10).all():
feed.add(post.title, post.body, content_type='html',
author=post.author, url=post.url, id=post.uid,
updated=post.last_update, published=post.pub_date)
return feed.get_response()
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from werkzeug.utils import escape
from werkzeug.wrappers import BaseResponse
from werkzeug._compat import implements_to_string, string_types
XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'
def _make_text_block(name, content, content_type=None):
"""Helper function for the builder that creates an XML text block."""
if content_type == 'xhtml':
return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % \
(name, XHTML_NAMESPACE, content, name)
if not content_type:
return u'<%s>%s</%s>\n' % (name, escape(content), name)
return u'<%s type="%s">%s</%s>\n' % (name, content_type,
escape(content), name)
def format_iso8601(obj):
"""Format a datetime object for iso8601"""
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
@implements_to_string
class AtomFeed(object):
"""A helper class that creates Atom feeds.
:param title: the title of the feed. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the feed (not the url *of* the feed)
:param id: a globally unique id for the feed. Must be an URI. If
not present the `feed_url` is used, but one of both is
required.
:param updated: the time the feed was modified the last time. Must
be a :class:`datetime.datetime` object. If not
present the latest entry's `updated` is used.
:param feed_url: the URL to the feed. Should be the URL that was
requested.
:param author: the author of the feed. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can be a list of (may be
mixed, too) strings and dicts, too, if there are
multiple authors. Required if not every entry has an
author element.
:param icon: an icon for the feed.
:param logo: a logo for the feed.
:param rights: copyright information for the feed.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param subtitle: a short description of the feed.
:param subtitle_type: the type attribute for the subtitle element.
                          One of ``'text'``, ``'html'`` or ``'xhtml'``.
                          Default is ``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param generator: the software that generated this feed. This must be
a tuple in the form ``(name, url, version)``. If
you don't want to specify one of them, set the item
to `None`.
:param entries: a list with the entries for the feed. Entries can also
be added later with :meth:`add`.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
"""
default_generator = ('Werkzeug', None, None)
def __init__(self, title=None, entries=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.url = kwargs.get('url')
self.feed_url = kwargs.get('feed_url', self.url)
self.id = kwargs.get('id', self.feed_url)
self.updated = kwargs.get('updated')
self.author = kwargs.get('author', ())
self.icon = kwargs.get('icon')
self.logo = kwargs.get('logo')
self.rights = kwargs.get('rights')
self.rights_type = kwargs.get('rights_type')
self.subtitle = kwargs.get('subtitle')
self.subtitle_type = kwargs.get('subtitle_type', 'text')
self.generator = kwargs.get('generator')
if self.generator is None:
self.generator = self.default_generator
self.links = kwargs.get('links', [])
self.entries = entries and list(entries) or []
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, string_types + (dict,)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
for author in self.author:
if 'name' not in author:
raise TypeError('author must contain at least a name')
def add(self, *args, **kwargs):
"""Add a new entry to the feed. This function can either be called
with a :class:`FeedEntry` or some keyword and positional arguments
that are forwarded to the :class:`FeedEntry` constructor.
"""
if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
self.entries.append(args[0])
else:
kwargs['feed_url'] = self.feed_url
self.entries.append(FeedEntry(*args, **kwargs))
def __repr__(self):
return '<%s %r (%d entries)>' % (
self.__class__.__name__,
self.title,
len(self.entries)
)
def generate(self):
"""Return a generator that yields pieces of XML."""
# atom demands either an author element in every entry or a global one
if not self.author:
if False in map(lambda e: bool(e.author), self.entries):
self.author = ({'name': 'Unknown author'},)
if not self.updated:
dates = sorted([entry.updated for entry in self.entries])
self.updated = dates and dates[-1] or datetime.utcnow()
yield u'<?xml version="1.0" encoding="utf-8"?>\n'
yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
yield ' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url)
if self.feed_url:
yield u' <link href="%s" rel="self" />\n' % \
escape(self.feed_url)
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(link[k])) for k in link)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield ' <email>%s</email>\n' % escape(author['email'])
yield ' </author>\n'
if self.subtitle:
yield ' ' + _make_text_block('subtitle', self.subtitle,
self.subtitle_type)
if self.icon:
yield u' <icon>%s</icon>\n' % escape(self.icon)
if self.logo:
yield u' <logo>%s</logo>\n' % escape(self.logo)
if self.rights:
yield ' ' + _make_text_block('rights', self.rights,
self.rights_type)
generator_name, generator_url, generator_version = self.generator
if generator_name or generator_url or generator_version:
tmp = [u' <generator']
if generator_url:
tmp.append(u' uri="%s"' % escape(generator_url))
if generator_version:
tmp.append(u' version="%s"' % escape(generator_version))
tmp.append(u'>%s</generator>\n' % escape(generator_name))
yield u''.join(tmp)
for entry in self.entries:
for line in entry.generate():
yield u' ' + line
yield u'</feed>\n'
def to_string(self):
"""Convert the feed into a string."""
return u''.join(self.generate())
def get_response(self):
"""Return a response object for the feed."""
return BaseResponse(self.to_string(), mimetype='application/atom+xml')
def __call__(self, environ, start_response):
"""Use the class as WSGI response object."""
return self.get_response()(environ, start_response)
def __str__(self):
return self.to_string()
@implements_to_string
class FeedEntry(object):
"""Represents a single entry in a feed.
:param title: the title of the entry. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param content: the content of the entry.
:param content_type: the type attribute for the content element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param summary: a summary of the entry's content.
:param summary_type: the type attribute for the summary element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the entry.
:param id: a globally unique id for the entry. Must be an URI. If
not present the URL is used, but one of both is required.
:param updated: the time the entry was modified the last time. Must
be a :class:`datetime.datetime` object. Required.
:param author: the author of the entry. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can be a list of (may be
mixed, too) strings and dicts, too, if there are
multiple authors. Required if the feed does not have an
author element.
:param published: the time the entry was initially published. Must
be a :class:`datetime.datetime` object.
:param rights: copyright information for the entry.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param categories: categories for the entry. Must be a list of dictionaries
with term (required), scheme and label (all optional)
:param xml_base: The xml base (url) for this feed item. If not provided
it will default to the item url.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
"""
def __init__(self, title=None, content=None, feed_url=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.content = content
self.content_type = kwargs.get('content_type', 'html')
self.url = kwargs.get('url')
self.id = kwargs.get('id', self.url)
self.updated = kwargs.get('updated')
self.summary = kwargs.get('summary')
self.summary_type = kwargs.get('summary_type', 'html')
self.author = kwargs.get('author', ())
self.published = kwargs.get('published')
self.rights = kwargs.get('rights')
self.links = kwargs.get('links', [])
self.categories = kwargs.get('categories', [])
self.xml_base = kwargs.get('xml_base', feed_url)
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, string_types + (dict,)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
if not self.updated:
raise ValueError('updated is required')
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.title
)
def generate(self):
"""Yields pieces of ATOM XML."""
base = ''
if self.xml_base:
base = ' xml:base="%s"' % escape(self.xml_base)
yield u'<entry%s>\n' % base
yield u' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.published:
yield u' <published>%s</published>\n' % \
format_iso8601(self.published)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield u' <email>%s</email>\n' % escape(author['email'])
yield u' </author>\n'
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(link[k])) for k in link)
for category in self.categories:
yield u' <category %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(category[k])) for k in category)
if self.summary:
yield u' ' + _make_text_block('summary', self.summary,
self.summary_type)
if self.content:
yield u' ' + _make_text_block('content', self.content,
self.content_type)
yield u'</entry>\n'
def to_string(self):
"""Convert the feed item into a unicode object."""
return u''.join(self.generate())
def __str__(self):
return self.to_string()
| mit |
SunghanKim/numpy | numpy/distutils/command/build_scripts.py | 264 | 1731 | """ Modified version of build_scripts that handles building scripts from functions.
"""
from __future__ import division, absolute_import, print_function
from distutils.command.build_scripts import build_scripts as old_build_scripts
from numpy.distutils import log
from numpy.distutils.misc_util import is_string
class build_scripts(old_build_scripts):
def generate_scripts(self, scripts):
new_scripts = []
func_scripts = []
for script in scripts:
if is_string(script):
new_scripts.append(script)
else:
func_scripts.append(script)
if not func_scripts:
return new_scripts
build_dir = self.build_dir
self.mkpath(build_dir)
for func in func_scripts:
script = func(build_dir)
if not script:
continue
if is_string(script):
log.info(" adding '%s' to scripts" % (script,))
new_scripts.append(script)
else:
[log.info(" adding '%s' to scripts" % (s,)) for s in script]
new_scripts.extend(list(script))
return new_scripts
    def run(self):
if not self.scripts:
return
self.scripts = self.generate_scripts(self.scripts)
# Now make sure that the distribution object has this list of scripts.
# setuptools' develop command requires that this be a list of filenames,
# not functions.
self.distribution.scripts = self.scripts
return old_build_scripts.run(self)
def get_source_files(self):
from numpy.distutils.misc_util import get_script_files
return get_script_files(self.scripts)
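# Editor's note: a minimal sketch (name and contents hypothetical) of the
# "script from a function" form accepted by generate_scripts() above: a
# callable that receives the build directory and returns the path(s) it
# wrote, or a false value to skip it.
def _make_hello_script_sketch(build_dir):
    import os
    path = os.path.join(build_dir, 'hello.py')
    with open(path, 'w') as f:
        f.write('#!/usr/bin/env python\nprint("hello")\n')
    return path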
| bsd-3-clause |
ndardenne/pymatgen | pymatgen/analysis/defects/dilute_solution_model.py | 4 | 53811 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
from __future__ import division
"""
Evaluate the defect concentration based on composition, temperature,
and defect energies using "Dilute Solution Model"
Reference: Phys Rev B, 63, 094103, 2001,
"Density of constitutional and thermal point defects in L12 Al3Sc",
C. Woodward, M. Asta, G. Kresse and J. Hafner.
Manual and citation for the code, DOI: 10.1016/j.cpc.2015.03.015
"""
__author__ = 'Bharat Medasani'
__version__ = "0.2"
__maintainer__ = "Bharat Medasani"
__email__ = "mbkumar@gmail.com"
__status__ = "Alpha"
__date__ = "6/4/14"
import math
import copy
import numpy as np
from six.moves import zip
from monty.dev import requires
from monty.fractions import gcd
try:
from sympy import Symbol, nsolve, Integer, Float, Matrix, exp, solve, Eq
sympy_found = True
except ImportError:
sympy_found = False
# physical consts
k_B=8.6173324e-5 # eV/K
# Check the inputs
def check_input(def_list):
flag = True
for defect in def_list:
if not defect:
flag = False
break
return flag
@requires(sympy_found,
"dilute_solution_model requires Sympy module. Please install it.")
def dilute_solution_model(structure, e0, vac_defs, antisite_defs, T,
trial_chem_pot = None, generate='plot'):
"""
    Compute the defect densities using the dilute solution model.
Args:
structure: pymatgen.core.structure.Structure object representing the
primitive or unitcell of the crystal.
e0: The total energy of the undefected system.
This is E0 from VASP calculation.
vac_defs: List of vacancy defect parameters in the dictionary format.
The keys of the dict associated with each vacancy defect are
1) site_index, 2) site_specie, 3) site_multiplicity, and
4) energy. 1-3 can be obtained from
pymatgen.analysis.defects.point_defects.Vacancy class.
Site index is expected to start with 1 (fortran index).
antisite_defs: List of antisite defect parameters in the dictionary
format. The keys of the dict associated with each antisite defect
are 1) site_index, 2) site_specie, 3) site_multiplicity,
4) substitution_specie, and 5) energy. 1-3 can be obtained
from pymatgen.analysis.defects.point_defects.Vacancy class.
T: Temperature in Kelvin
        trial_chem_pot (optional): Trial chemical potentials to speed up
the plot generation. Format is {el1:mu1,...}
generate (string): Options are plot or energy
Chemical potentials are also returned with energy option.
If energy option is not chosen, plot is generated.
Returns:
If generate=plot, the plot data is generated and returned in
HighCharts format.
If generate=energy, defect formation enthalpies and chemical
potentials are returned.
"""
if not check_input(vac_defs):
raise ValueError('Vacancy energy is not defined')
if not check_input(antisite_defs):
raise ValueError('Antisite energy is not defined')
formation_energies = {}
formation_energies['vacancies'] = copy.deepcopy(vac_defs)
formation_energies['antisites'] = copy.deepcopy(antisite_defs)
for vac in formation_energies['vacancies']:
del vac['energy']
for asite in formation_energies['antisites']:
del asite['energy']
# Setup the system
site_species = [vac_def['site_specie'] for vac_def in vac_defs]
multiplicity = [vac_def['site_multiplicity'] for vac_def in vac_defs]
m = len(set(site_species)) # distinct species
n = len(vac_defs) # inequivalent sites
# Reduce the system and associated parameters such that only distinctive
# atoms are retained
comm_div = gcd(*tuple(multiplicity))
multiplicity = [val/comm_div for val in multiplicity]
e0 = e0/comm_div
T = Float(T)
#c0 = np.diag(multiplicity)
c0 = np.diag(np.ones(n))
mu = [Symbol('mu'+i.__str__()) for i in range(m)]
# Generate maps for hashing
# Generate specie->mu map and use it for site->mu map
specie_order = [] # Contains hash for site->mu map Eg: [Al, Ni]
site_specie_set = set() # Eg: {Ni, Al}
for i in range(n):
site_specie = site_species[i]
if site_specie not in site_specie_set:
site_specie_set.add(site_specie)
specie_order.append(site_specie)
site_mu_map = [] # Eg: [mu0,mu0,mu0,mu1] where mu0->Al, and mu1->Ni
for i in range(n):
site_specie = site_species[i]
j = specie_order.index(site_specie)
site_mu_map.append(j)
specie_site_index_map = [] # Eg: [(0,3),(3,4)] for Al & Ni
for i in range(m):
low_ind = site_species.index(specie_order[i])
if i < m-1:
hgh_ind = site_species.index(specie_order[i+1])
else:
hgh_ind = n
specie_site_index_map.append((low_ind,hgh_ind))
"""
dC: delta concentration matrix:
dC[i,j,k]: Concentration change of atom i, due to presence of atom
j on lattice site k
Special case is [i,i,i] which is considered as vacancy
Few cases: dC[i,i,i] = -1 due to being vacancy special case
dC[k,k,i] = +1 due to increment in k at i lattice if i
lattice type is of different element
dC[i,k,i] = -1 due to decrement of ith type atom due to
presence of kth type atom on ith sublattice and kth type
atom specie is different from ith sublattice atom specie
dC[i,k,k] = 0 due to no effect on ith type atom
dC[i,j,k] = 0 if i!=j!=k
"""
dC = np.zeros((n,n,n), dtype=np.int)
for i in range(n):
for j in range(n):
for k in range(n):
if i == j and site_species[j] != site_species[k] and \
site_species[i] != site_species[k]:
dC[i,j,k] = 1
for j in range(n):
for k in range(n):
if i == k:
dC[i,j,k] = -1
for k in range(n):
for j in range(n):
for i in range(n):
if i != j:
if site_species[j] == site_species[k]:
dC[i,j,k] = 0
for ind_map in specie_site_index_map:
if ind_map[1]-ind_map[0] > 1:
for index1 in range(ind_map[0]+1,ind_map[1]):
for index2 in range(ind_map[0]):
for i in range(n):
dC[i,index1,index2] = 0
for index2 in range(ind_map[1],n):
for i in range(n):
dC[i,index1,index2] = 0
# dE matrix: Flip energies (or raw defect energies)
els = [vac_def['site_specie'] for vac_def in vac_defs]
dE = []
for i in range(n):
dE.append([])
for i in range(n):
for j in range(n):
dE[i].append(0)
for j in range(n):
for i in range(n):
if i == j:
dE[i][j] = vac_defs[i]['energy']
else:
sub_specie = vac_defs[i]['site_specie']
site_specie = vac_defs[j]['site_specie']
if site_specie == sub_specie:
dE[i][j] = 0
else:
for as_def in antisite_defs:
if int(as_def['site_index']) == j+1 and \
sub_specie == as_def['substitution_specie']:
dE[i][j] = as_def['energy']
break
dE = np.array(dE)
# Initialization for concentrations
# c(i,p) == presence of ith type atom on pth type site
c = Matrix(n,n,[0]*n**2)
for i in range(n):
for p in range(n):
c[i,p] = Integer(c0[i,p])
site_flip_contribs = []
for epi in range(n):
sum_mu = sum([mu[site_mu_map[j]]*Integer(dC[j,epi,p]) \
for j in range(n)])
flip = Integer(dC[i,epi,p]) * \
exp(-(dE[epi,p]-sum_mu)/(k_B*T))
if flip not in site_flip_contribs:
site_flip_contribs.append(flip)
c[i,p] += flip
total_c = []
for ind in specie_site_index_map:
val = 0
for i in range(*ind):
sum_i = sum([c[i,j]*multiplicity[j] for j in range(n)])
val += sum_i
total_c.append(val)
c_ratio = [total_c[-1]/total_c[i] for i in range(m)]
# Expression for Omega, the Grand Potential
omega1 = e0 - sum([mu[site_mu_map[i]]*sum(c0[i,:])*multiplicity[i] \
for i in range(n)])
omega2 = []
fm_en_eff = []
used_dEs = []
for p_r in range(n):
for epi in range(n):
sum_mu = sum([mu[site_mu_map[j]]*dC[j,epi,p_r] \
for j in range(n)])
if p_r != epi and site_mu_map[p_r] == site_mu_map[epi]:
continue
if dE[epi,p_r] not in used_dEs:
omega2.append(k_B*T*multiplicity[p_r] * \
exp(-(dE[epi,p_r]-sum_mu)/(k_B*T)))
fm_en_eff.append(dE[epi,p_r]-sum_mu)
used_dEs.append(dE[epi, p_r])
omega = omega1-sum(omega2)
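    # Editor's note: in the dilute limit this assembles the grand potential
    # of Woodward et al. (PRB 63, 094103), schematically
    #     Omega = E0 - sum_i mu_i*N_i
    #             - kT * sum_p m_p * exp(-(dE_p - sum_j mu_j*dC_jp)/(kT)),
    # with omega1 the defect-free part and omega2 collecting one Boltzmann
    # term per distinct defect flip.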
# Compute composition range
li = specie_site_index_map[0][0]
hi = specie_site_index_map[0][1]
comp1_min = sum(multiplicity[li:hi])/sum(multiplicity)*100-1
comp1_max = sum(multiplicity[li:hi])/sum(multiplicity)*100+1
delta = float(comp1_max-comp1_min)/120.0
yvals = []
for comp1 in np.arange(comp1_min,comp1_max+delta,delta):
comp2 = 100-comp1
y = comp2/comp1
yvals.append(y)
def reduce_mu():
omega = [e0 - sum([mu[site_mu_map[i]]*sum(c0[i,:]) for i in range(n)])]
x = solve(omega)
return x
def compute_mus_by_search():
# Compute trial mu
mu_red = reduce_mu()
mult = multiplicity
specie_concen = [sum(mult[ind[0]:ind[1]]) for ind in specie_site_index_map]
y_vect = [specie_concen[-1]/specie_concen[i] for i in range(m)]
vector_func = [y_vect[i]-c_ratio[i] for i in range(m-1)]
vector_func.append(omega)
min_diff = 1e10
mu_vals = None
c_val = None
m1_min = -20.0
if e0 > 0:
m1_max = 10 # Search space needs to be modified
else:
m1_max = 0
for m1 in np.arange(m1_min,m1_max,0.01):
m0 = mu_red[mu[0]].subs(mu[-1],m1)
try:
x = nsolve(vector_func,mu,[m0,m1],module="numpy")
except:
continue
c_val = c.subs(dict(zip(mu,x)))
#if all(x >= 0 for x in c_val):
specie_concen = []
for ind in specie_site_index_map:
specie_concen.append(sum([sum(c_val[i,:]) for i in range(*ind)]))
y_comp = [specie_concen[-1]/specie_concen[i] for i in range(m)]
diff = math.sqrt(sum([pow(abs(y_comp[i]-y_vect[i]),2) for i in range(m)]))
if diff < min_diff:
min_diff = diff
mu_vals = x
if mu_vals:
mu_vals = [float(mu_val) for mu_val in mu_vals]
else:
raise ValueError()
return mu_vals
def compute_def_formation_energies():
i = 0
for vac_def in vac_defs:
site_specie = vac_def['site_specie']
ind = specie_order.index(site_specie)
uncor_energy = vac_def['energy']
formation_energy = uncor_energy + mu_vals[ind]
formation_energies['vacancies'][i]['formation_energy'] = formation_energy
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
if not specie_ind_del-1:
label = '$V_{'+site_specie+'}$'
else:
label = '$V_{'+site_specie+'_'+str(cur_ind)+'}$'
formation_energies['vacancies'][i]['label'] = label
i += 1
i = 0
for as_def in antisite_defs:
site_specie = as_def['site_specie']
sub_specie = as_def['substitution_specie']
ind1 = specie_order.index(site_specie)
ind2 = specie_order.index(sub_specie)
uncor_energy = as_def['energy']
formation_energy = uncor_energy + mu_vals[ind1] - mu_vals[ind2]
formation_energies['antisites'][i]['formation_energy'] = formation_energy
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
if not specie_ind_del-1:
label = '$'+sub_specie+'_{'+site_specie+'}$'
else:
label = '$'+sub_specie+'_{'+site_specie+'_'+str(cur_ind)+'}$'
formation_energies['antisites'][i]['label'] = label
i += 1
return formation_energies
    # If the generate option is 'energy', compute effective formation energies
    # at ideal stoichiometry and return the formation energies and chemical
    # potentials.
if generate == 'energy':
if not trial_chem_pot:
mu_vals = compute_mus_by_search()
else:
try:
mu_vals = [trial_chem_pot[element] for element in specie_order]
except:
            mu_vals = compute_mus_by_search()
formation_energies = compute_def_formation_energies()
mu_dict = dict(zip(specie_order,mu_vals))
return formation_energies, mu_dict
if not trial_chem_pot:
# Try computing mus by assuming one of the defects is dominant at 0.01
# concen. First vacancy is tried and then antisite
# Generate trial mus assuming vacancy as dominant defect
#for specie-0 at lower yval
li = specie_site_index_map[0][0]
hi = specie_site_index_map[0][1]
li1 = specie_site_index_map[1][0]
hi1 = specie_site_index_map[1][1]
spec_mult = [sum(multiplicity[li:hi]), sum(multiplicity[li1:hi1])]
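        # 4.60517 == ln(100); with mu = ln(100)*k_B*T - E_flip the vacancy
        # Boltzmann factor exp(-(mu + E_flip)/(k_B*T)) evaluates to 0.01,
        # i.e. the assumed 1% dominant-defect concentration.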
ln_def_conc = 4.60517
for i in range(li,hi):
vac_flip_en = vac_defs[i]['energy']
mu_vals = [ln_def_conc*k_B*T -vac_flip_en]
mu_vals.append((e0 - spec_mult[0]*mu_vals[0]) / spec_mult[1])
comp_ratio = yvals[0]
# Test if the trial mus are good
vector_func = [comp_ratio - c_ratio[0]]
vector_func.append(omega)
try:
mu_vals = nsolve(vector_func,mu,mu_vals)
if mu_vals:
mu_vals = [float(mu_val) for mu_val in mu_vals]
break
except: # Go for antisite as dominant defect
mu_gs = [Symbol('mu_gs'+j.__str__()) for j in range(m)]
eqs = [mu_gs[0]-mu_gs[1] - (ln_def_conc*k_B*T-antisite_defs[i][
'energy'])]
eqs.append(spec_mult[0]*mu_gs[0] + spec_mult[1]*mu_gs[1] - e0)
x = solve(eqs, mu_gs)
#mu_names = sorted([key.name for key in x.keys()])
mu_vals = []
for key in sorted(x.keys(),key=lambda inp: inp.name):
mu_vals.append(x[key])
vector_func = [comp_ratio - c_ratio[0]]
vector_func.append(omega)
try:
mu_vals = nsolve(vector_func,mu,mu_vals)
if mu_vals:
mu_vals = [float(mu_val) for mu_val in mu_vals]
break
except: # Go to the default option (search the space)
pass
else:
mu_vals = compute_mus_by_search()
else:
try:
mu_vals = [trial_chem_pot[element] for element in specie_order]
except:
mu_vals = compute_mus_by_search()
# Compile mu's for all composition ratios in the range
#+/- 1% from the stoichiometry
result = {}
i = 0
len_y = len(yvals)
failed_y, failed_i = [], []
for y in yvals:
vector_func = [y-c_ratio[0]]
vector_func.append(omega)
try:
x = nsolve(vector_func,mu,mu_vals,module="numpy")
if x:
mu_vals = [float(mu_val) for mu_val in x]
except:
failed_y.append(y)
failed_i.append(i)
continue
result[y] = list(mu_vals)
x = None
i += 1
def get_next_mu_val(i):
        if i + 1 >= len(yvals):
return None
y = yvals[i+1]
x = result.get(y,None)
if x:
mu_vals = [float(mu_val) for mu_val in x]
return mu_vals
else:
return get_next_mu_val(i+1)
def get_prev_mu_val(i):
if i <= 0:
return None
y = yvals[i-1]
x = result.get(y,None)
if x:
mu_vals = [float(mu_val) for mu_val in x]
return mu_vals
else:
            return get_prev_mu_val(i-1)
# Try to get better trial mus for failed cases
for j in range(len(failed_y)):
i = failed_i[j]
prev_mu_val = get_prev_mu_val(i)
if not prev_mu_val:
continue
next_mu_val = get_next_mu_val(i)
if not next_mu_val:
continue
y = failed_y[j]
vector_func = [y-c_ratio[0]]
vector_func.append(omega)
trial_mu = list(map(lambda x: float(sum(x))/len(x), \
zip(prev_mu_val,next_mu_val)))
try:
x = nsolve(vector_func,mu,trial_mu,module="numpy")
if x:
mu_vals = [float(mu_val) for mu_val in x]
except:
continue
result[y] = mu_vals
x = None
# Alternate way of calculating trial mus for failed cases
# by taking average of trial mus at extremes.
#for j in range(len(failed_y)):
# y = yvals[0]
# prev_mu_val = result[y]
# y = yvals[-1]
# next_mu_val = result[y]
#
# trial_mu = list(map(lambda x: float(sum(x))/len(x), \
# zip(prev_mu_val,next_mu_val)))
# y = failed_y[j]
# vector_func = [y-c_ratio[0]]
# vector_func.append(omega)
# try:
# x = nsolve(vector_func,mu,trial_mu,module="numpy")
# if x:
# mu_vals = [float(mu_val) for mu_val in x]
# except:
# continue
# result[y] = list(mu_vals)
if len(result.keys()) < len(yvals)/2:
raise ValueError('Not sufficient data')
res = []
new_mu_dict = {}
# Compute the concentrations for all the compositions
for key in sorted(result.keys()):
mu_val = result[key]
total_c_val = [total_c[i].subs(dict(zip(mu,mu_val))) \
for i in range(len(total_c))]
c_val = c.subs(dict(zip(mu,mu_val)))
res1 = []
# Concentration of first element/over total concen
res1.append(float(total_c_val[0]/sum(total_c_val)))
new_mu_dict[res1[0]] = mu_val
sum_c0 = sum([c0[i,i] for i in range(n)])
for i in range(n):
for j in range(n):
if i == j: # Vacancy
vac_conc = float(exp(-(mu_val[site_mu_map[i]]+dE[i,i])/(k_B*T)))
res1.append(vac_conc)
else: # Antisite
res1.append(float(c_val[i,j]/c0[j,j]))
res.append(res1)
res = np.array(res)
dtype = [(str('x'),np.float64)]+[(str('y%d%d' % (i, j)), np.float64) \
for i in range(n) for j in range(n)]
res1 = np.sort(res.view(dtype), order=[str('x')],axis=0)
conc_data = {}
"""Because all the plots have identical x-points storing it in a
single array"""
conc_data['x'] = [dat[0][0] for dat in res1] # x-axis data
# Element whose composition is varied. For x-label
conc_data['x_label'] = els[0]+ " mole fraction"
conc_data['y_label'] = "Point defect concentration"
conc = []
for i in range(n):
conc.append([])
for j in range(n):
conc[i].append([])
for i in range(n):
for j in range(n):
y1 = [dat[0][i*n+j+1] for dat in res1]
conc[i][j] = y1
y_data = []
for i in range(n):
data = conc[i][i]
specie = els[i]
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
vac_string = "$Vac_{"
if not specie_ind_del-1:
label = vac_string+specie+'}$'
else:
label = vac_string+specie+'_'+str(cur_ind)+'}$'
# Plot data and legend info
y_data.append({'data':data,'name':label})
for i in range(n):
site_specie = els[i]
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
        for j in range(m): # Antisite plot data
sub_specie = specie_order[j]
if sub_specie == site_specie:
continue
if not specie_ind_del-1:
label = '$'+sub_specie+'_{'+site_specie+'}$'
else:
label = '$'+sub_specie+'_{'+site_specie+'_'+str(cur_ind)+'}$'
inds = specie_site_index_map[j]
# TODO: Investigate the value below
data = np.sum([conc[ind][i] for ind in range(*inds)],axis=0)
data = data.tolist()
y_data.append({'data':data,'name':label})
conc_data['y'] = y_data
# Compute the formation energies
def compute_vac_formation_energies(mu_vals):
en = []
for vac_def in vac_defs:
site_specie = vac_def['site_specie']
ind = specie_order.index(site_specie)
uncor_energy = vac_def['energy']
formation_energy = uncor_energy + mu_vals[ind]
en.append(float(formation_energy))
return en
en_res = []
for key in sorted(new_mu_dict.keys()):
mu_val = new_mu_dict[key]
en_res.append(compute_vac_formation_energies(mu_val))
en_data = {'x_label':els[0]+' mole fraction', 'x':[]}
en_data['x'] = [dat[0][0] for dat in res1] # x-axis data
i = 0
y_data = []
for vac_def in vac_defs:
data = [data[i] for data in en_res]
site_specie = vac_def['site_specie']
ind = specie_order.index(site_specie)
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
vac_string = "$Vac_{"
if not specie_ind_del-1:
label = vac_string+site_specie+'}$'
else:
label = vac_string+site_specie+'_'+str(cur_ind)+'}$'
y_data.append({'data':data,'name':label})
i += 1
def compute_as_formation_energies(mu_vals):
en = []
for as_def in antisite_defs:
site_specie = as_def['site_specie']
sub_specie = as_def['substitution_specie']
ind1 = specie_order.index(site_specie)
ind2 = specie_order.index(sub_specie)
uncor_energy = as_def['energy']
form_en = uncor_energy + mu_vals[ind1] - mu_vals[ind2]
en.append(form_en)
return en
en_res = []
for key in sorted(new_mu_dict.keys()):
mu_val = new_mu_dict[key]
en_res.append(compute_as_formation_energies(mu_val))
i = 0
for as_def in antisite_defs:
data = [data[i] for data in en_res]
site_specie = as_def['site_specie']
sub_specie = as_def['substitution_specie']
ind1 = specie_order.index(site_specie)
ind2 = specie_order.index(sub_specie)
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
if not specie_ind_del-1:
label = '$'+sub_specie+'_{'+site_specie+'}$'
else:
label = '$'+sub_specie+'_{'+site_specie+'_'+str(cur_ind)+'}$'
y_data.append({'data':data,'name':label})
i += 1
en_data['y'] = y_data
# Return chem potential as well
mu_data = {'x_label':els[0]+' mole fraction', 'x':[]}
mu_data['x'] = [dat[0][0] for dat in res1] # x-axis data
y_data = []
for j in range(m):
specie = specie_order[j]
mus = [new_mu_dict[key][j] for key in sorted(new_mu_dict.keys())]
y_data.append({'data':mus, 'name':specie})
mu_data['y'] = y_data
return conc_data, en_data, mu_data
@requires(sympy_found,
"comute_defect_density requires Sympy module. Please install it.")
def compute_defect_density(structure, e0, vac_defs, antisite_defs, T=800,
trial_chem_pot=None, plot_style="highcharts"):
"""
Wrapper for the dilute_solution_model.
The computed plot data is prepared based on plot_style.
Args:
structure: pymatgen.core.structure.Structure object representing the
primitive or unitcell of the crystal.
e0: The total energy of the undefected system.
This is E0 from VASP calculation.
vac_defs: List of vacancy defect parameters in the dictionary format.
The keys of the dict associated with each vacancy defect are
1) site_index, 2) site_specie, 3) site_multiplicity, and
4) energy. 1-3 can be obtained from
pymatgen.analysis.defects.point_defects.Vacancy class.
Site index is expected to start with 1 (fortran index).
antisite_defs: List of antisite defect parameters in the dictionary
format. The keys of the dict associated with each antisite defect
are 1) site_index, 2) site_specie, 3) site_multiplicity,
4) substitution_specie, and 5) energy. 1-3 can be obtained
from pymatgen.analysis.defects.point_defects.Vacancy class.
T: Temperature in Kelvin
trial_chem_pot (optional): Trial chemical potentials to speedup
the plot generation. Format is {el1:mu1,...}
plot_style (string): Allowed options are
1) highcharts (default)
2) gnuplot
Returns:
        The plot data is generated and returned in the requested format.
"""
conc_data, en_data, mu_data = dilute_solution_model(
structure,e0,vac_defs,antisite_defs,T,
trial_chem_pot=trial_chem_pot)
if plot_style == 'highcharts':
"Energy data is ignored in this mode"
hgh_chrt_data = {}
hgh_chrt_data['xAxis'] = conc_data['x_label']
hgh_chrt_data['yAxis'] = conc_data['y_label']
series = []
x = conc_data['x']
for y_data in conc_data['y']:
y = y_data['data']
xy = zip(x,y)
xy = [list(el) for el in xy]
name = y_data['name'].strip('$')
            flds = name.split('_')
def_string = flds[0]
site_string = flds[1].strip('{}')
name = def_string+"<sub>"+site_string+"</sub>"
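            # e.g. '$Ni_{Al}$' becomes 'Ni<sub>Al</sub>' for the HTML legend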
#series.append({'data':xy, 'name':y_data['name']})
series.append({'data':xy, 'name':name})
hgh_chrt_data['series'] = series
return hgh_chrt_data
elif plot_style == 'gnuplot':
def data_to_rows(inp_data):
rows = []
labels = []
labels.append(inp_data['x_label'])
labels += [y['name'] for y in inp_data['y']]
#labels.sort()
rows.append('#'+'\t'.join(labels))
m = len(inp_data['x'])
for i in range(m):
data = []
data.append(inp_data['x'][i])
data += [y['data'][i] for y in inp_data['y']]
data = [float(x) for x in data]
rows.append('\t'.join(list(map(str,data))))
return rows
conc_rows = data_to_rows(conc_data)
en_rows = data_to_rows(en_data)
mu_rows = data_to_rows(mu_data)
return conc_rows, en_rows, mu_rows
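# A minimal usage sketch (illustrative only; the structure file, energies and
# defect dictionaries below are hypothetical placeholders in the documented
# format, not values from a real calculation):
#
#     from pymatgen.core.structure import Structure
#     structure = Structure.from_file('POSCAR')  # hypothetical NiAl cell
#     vac_defs = [
#         {'site_index': 1, 'site_specie': 'Ni', 'site_multiplicity': 1,
#          'energy': 1.5},
#         {'site_index': 2, 'site_specie': 'Al', 'site_multiplicity': 1,
#          'energy': 1.9}]
#     antisite_defs = [
#         {'site_index': 1, 'site_specie': 'Ni', 'site_multiplicity': 1,
#          'substitution_specie': 'Al', 'energy': 0.8},
#         {'site_index': 2, 'site_specie': 'Al', 'site_multiplicity': 1,
#          'substitution_specie': 'Ni', 'energy': 1.1}]
#     data = compute_defect_density(structure, -100.0, vac_defs, antisite_defs,
#                                   T=1000, plot_style='highcharts')
#     # data['series'] then holds [x, y] pairs ready for a Highcharts plot.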
# solute_site_preference_finder is based on dilute_solution_model and so most
# of the code is the same. However, differences exist in the setup and
# processing, hence a new function.
@requires(sympy_found, "solute_site_preference_finder requires Sympy module. "\
"Please install it.")
def solute_site_preference_finder(
structure, e0, T, vac_defs, antisite_defs, solute_defs,
solute_concen=0.01, trial_chem_pot = None):
"""
    Compute the solute defect densities using the dilute solution model.
Args:
structure: pymatgen.core.structure.Structure object representing the
primitive or unitcell of the crystal.
e0: The total energy of the undefected system.
This is E0 from VASP calculation.
T: Temperature in Kelvin
vac_defs: List of vacancy defect parameters in the dictionary format.
The keys of the dict associated with each vacancy defect are
1) site_index, 2) site_specie, 3) site_multiplicity, and
4) energy. 1-3 can be obtained from
pymatgen.analysis.defects.point_defects.Vacancy class.
Site index is expected to start with 1 (fortran index).
antisite_defs: List of antisite defect parameters in the dictionary
format. The keys of the dict associated with each antisite
defect are 1) site_index, 2) site_specie, 3) site_multiplicity,
4) substitution_specie, and 5) energy. 1-3 can be obtained
from pymatgen.analysis.defects.point_defects.Vacancy class.
solute_defs: List of solute defect parameters in the dictionary
            format. Similar to that of antisite defs, with the solute specie
            specified in substitution_specie
solute_concen: Solute concentration (in fractional value)
trial_chem_pot: Trial chemical potentials to speedup the plot
generation. Format is {el1:mu1,...}
Returns:
plot_data: The data for plotting the solute defect concentration.
"""
if not check_input(vac_defs):
raise ValueError('Vacancy energy is not defined')
if not check_input(antisite_defs):
raise ValueError('Antisite energy is not defined')
formation_energies = {}
formation_energies['vacancies'] = copy.deepcopy(vac_defs)
formation_energies['antisites'] = copy.deepcopy(antisite_defs)
formation_energies['solute'] = copy.deepcopy(solute_defs)
for vac in formation_energies['vacancies']:
del vac['energy']
for asite in formation_energies['antisites']:
del asite['energy']
for solute in formation_energies['solute']:
del solute['energy']
# Setup the system
site_species = [vac_def['site_specie'] for vac_def in vac_defs]
solute_specie = solute_defs[0]['substitution_specie']
site_species.append(solute_specie)
multiplicity = [vac_def['site_multiplicity'] for vac_def in vac_defs]
m = len(set(site_species)) # distinct species
n = len(vac_defs) # inequivalent sites
# Reduce the system and associated parameters such that only distinctive
# atoms are retained
comm_div = gcd(*tuple(multiplicity))
multiplicity = [val/comm_div for val in multiplicity]
multiplicity.append(0)
e0 = e0/comm_div
T = Float(T)
#c0 = np.diag(multiplicity)
c0 = np.diag(np.ones(n+1))
c0[n,n] = 0
mu = [Symbol('mu'+str(i)) for i in range(m)]
# Generate maps for hashing
# Generate specie->mu map and use it for site->mu map
specie_order = [] # Contains hash for site->mu map Eg: [Al, Ni]
site_specie_set = set() # Eg: {Ni, Al}
for i in range(len(site_species)):
site_specie = site_species[i]
if site_specie not in site_specie_set:
site_specie_set.add(site_specie)
specie_order.append(site_specie)
site_mu_map = [] # Eg: [mu0,mu0,mu0,mu1] where mu0->Al, and mu1->Ni
for i in range(len(site_species)):
site_specie = site_species[i]
j = specie_order.index(site_specie)
site_mu_map.append(j)
specie_site_index_map = [] # Eg: [(0,3),(3,4)] for Al & Ni
for i in range(m):
low_ind = site_species.index(specie_order[i])
if i < m-1:
hgh_ind = site_species.index(specie_order[i+1])
else:
hgh_ind = len(site_species)
specie_site_index_map.append((low_ind,hgh_ind))
"""
dC: delta concentration matrix:
dC[i,j,k]: Concentration change of atom i, due to presence of atom
j on lattice site k
Special case is [i,i,i] which is considered as vacancy
Few cases: dC[i,i,i] = -1 due to being vacancy special case
dC[k,k,i] = +1 due to increment in k at i lattice if i
lattice type is of different element
dC[i,k,i] = -1 due to decrement of ith type atom due to
presence of kth type atom on ith sublattice and kth type
atom specie is different from ith sublattice atom specie
dC[i,k,k] = 0 due to no effect on ith type atom
dC[i,j,k] = 0 if i!=j!=k
"""
dC = np.zeros((n+1,n+1,n), dtype=np.int)
for i in range(n):
for j in range(n):
for k in range(n):
                if i == j and site_species[j] != site_species[k] and \
                        site_species[i] != site_species[k]:
dC[i,j,k] = 1
for j in range(n+1):
for k in range(n):
if i == k:
dC[i,j,k] = -1
for k in range(n):
dC[n,n,k] = 1
for k in range(n):
for j in range(n):
if i != j:
if site_species[i] == site_species[k]:
dC[i,j,k] = 0
for ind_map in specie_site_index_map:
if ind_map[1]-ind_map[0] > 1:
for index1 in range(ind_map[0]+1,ind_map[1]):
for index2 in range(ind_map[0]):
for i in range(n):
dC[i,index1,index2] = 0
for index2 in range(ind_map[1],n):
for i in range(n):
dC[i,index1,index2] = 0
# dE matrix: Flip energies (or raw defect energies)
els = [vac_def['site_specie'] for vac_def in vac_defs]
dE = []
for i in range(n+1):
dE.append([])
for i in range(n+1):
for j in range(n):
dE[i].append(0)
for j in range(n):
for i in range(n):
if i == j:
dE[i][j] = vac_defs[i]['energy']
else:
sub_specie = vac_defs[i]['site_specie']
site_specie = vac_defs[j]['site_specie']
if site_specie == sub_specie:
dE[i][j] = 0
else:
for as_def in antisite_defs:
if int(as_def['site_index']) == j+1 and \
sub_specie == as_def['substitution_specie']:
dE[i][j] = as_def['energy']
break
# Solute
site_specie = vac_defs[j]['site_specie']
for solute_def in solute_defs:
def_site_ind = int(solute_def['site_index'])
def_site_specie = solute_def['site_specie']
if def_site_specie == site_specie and def_site_ind == j+1:
dE[n][j] = solute_def['energy']
break
dE = np.array(dE)
#np.where(dE == np.array(None), 0, dE)
# Initialization for concentrations
# c(i,p) == presence of ith type atom on pth type site
c = Matrix(n+1,n,[0]*n*(n+1))
for i in range(n+1):
for p in range(n):
c[i,p] = Integer(c0[i,p])
site_flip_contribs = []
for epi in range(n+1):
sum_mu = sum([mu[site_mu_map[j]]*Integer(
dC[j,epi,p]) for j in range(n+1)])
flip = dC[i,epi,p] * exp(-(dE[epi,p]-sum_mu)/(k_B*T))
if flip not in site_flip_contribs:
site_flip_contribs.append(flip)
c[i,p] += flip
host_c = Matrix(n,n,[0]*n*n)
for i in range(n):
for p in range(n):
host_c[i,p] = Integer(c0[i,p])
site_flip_contribs = []
for epi in range(n):
sum_mu = sum([mu[site_mu_map[j]]*Integer(
dC[j,epi,p]) for j in range(n)])
flip = dC[i,epi,p] * exp(-(dE[epi,p]-sum_mu)/(k_B*T))
if flip not in site_flip_contribs:
site_flip_contribs.append(flip)
host_c[i,p] += flip
#specie_concen = [sum(mult[ind[0]:ind[1]]) for ind in specie_site_index_map]
#total_c = [sum(c[ind[0]:ind[1]]) for ind in specie_site_index_map]
total_c = []
for ind in specie_site_index_map:
val = 0
for i in range(*ind):
sum_i = sum([c[i,j]*multiplicity[j] for j in range(n)])
val += sum_i
total_c.append(val)
c_ratio = [total_c[i]/sum(total_c) for i in range(m)]
host_total_c = []
for ind in specie_site_index_map[:-1]:
val = 0
for i in range(*ind):
sum_i = sum([host_c[i,j]*multiplicity[j] for j in range(n)])
val += sum_i
host_total_c.append(val)
host_c_ratio = [host_total_c[i]/sum(host_total_c) for i in range(m-1)]
# Expression for Omega, the Grand Potential
omega1 = e0 - sum([mu[site_mu_map[i]]*sum(c0[i,:])*multiplicity[i] \
for i in range(n)])
omega = omega1
used_dEs = []
for p_r in range(n):
for epi in range(n):
sum_mu1 = sum([mu[site_mu_map[j]]*Integer(
dC[j,epi,p_r]) for j in range(n)])
sum_mu = sum_mu1 - mu[site_mu_map[n]]* dC[n,epi,p_r]
if p_r != epi and site_mu_map[p_r] == site_mu_map[epi]:
continue
if dE[epi,p_r] not in used_dEs:
omega1 -= k_B*T*multiplicity[p_r] * \
exp(-(dE[epi,p_r]-sum_mu1)/(k_B*T))
omega -= k_B*T*multiplicity[p_r] * \
exp(-(dE[epi,p_r]-sum_mu)/(k_B*T))
used_dEs.append(dE[epi,p_r])
# Compute composition ranges
max_host_specie_concen = 1-solute_concen
mult = multiplicity
specie_concen = [
sum(mult[ind[0]:ind[1]]) for ind in specie_site_index_map]
host_specie_concen_ratio = [specie_concen[i]/sum(specie_concen)* \
max_host_specie_concen for i in range(m)]
host_specie_concen_ratio[-1] = solute_concen
li = specie_site_index_map[0][0]
hi = specie_site_index_map[0][1]
comp1_min = sum(multiplicity[li:hi])/sum(multiplicity)* \
max_host_specie_concen - 0.01
comp1_max = sum(multiplicity[li:hi])/sum(multiplicity)* \
max_host_specie_concen + 0.01
delta = (comp1_max - comp1_min)/50.0
#def reduce_mu():
# omega = [e0 - sum([mu[site_mu_map[i]]*sum(c0[i,:]) for i in range(n)])]
# x = solve(omega)
# return x
def reduce_mu():
host_concen = 1-solute_concen
new_c0 = c0.astype(float)
for i in range(n):
new_c0[i,i] = host_concen*c0[i,i]
new_c0[n,n] = 2*solute_concen
omega = [
e0-sum([mu[site_mu_map[i]]*sum(new_c0[i,:])
for i in range(n+1)])]
x = solve(omega)
return x
def compute_solute_mu_by_lin_search(host_mu_vals):
# Compute trial mu
mu_red = reduce_mu()
mult = multiplicity
specie_concen = [sum(mult[ind[0]:ind[1]]) for ind in specie_site_index_map]
max_host_specie_concen = 1-solute_concen
host_specie_concen_ratio = [specie_concen[i]/sum(specie_concen)* \
max_host_specie_concen for i in range(m)]
host_specie_concen_ratio[-1] = solute_concen
y_vect = host_specie_concen_ratio
vector_func = [y_vect[i]-c_ratio[i] for i in range(m)]
vector_func.append(omega)
min_diff = 1e10
mu_vals = None
c_val = None
m1_min = -20.0
if e0 > 0:
m1_max = 10 # Search space needs to be modified
else:
m1_max = 0
for m1 in np.arange(m1_min,m1_max,0.1):
trial_mus = host_mu_vals+[m1]
try:
x = nsolve(vector_func,mu,trial_mus,module="numpy")
if x:
mu_vals = [float(mu_val) for mu_val in x]
break
except:
continue
else:
            raise ValueError("Could not find solute chemical potential by search")
return mu_vals
def compute_mus():
# Compute trial mu
mu_red = reduce_mu()
mult = multiplicity
specie_concen = [
sum(mult[ind[0]:ind[1]]) for ind in specie_site_index_map]
max_host_specie_concen = 1-solute_concen
host_specie_concen_ratio = [specie_concen[i]/sum(specie_concen)* \
max_host_specie_concen for i in range(m)]
host_specie_concen_ratio[-1] = solute_concen
y_vect = host_specie_concen_ratio
vector_func = [y_vect[i]-c_ratio[i] for i in range(m)]
vector_func.append(omega)
mu_vals = None
c_val = None
m_min = -15.0
if e0 > 0:
m_max = 10 # Search space needs to be modified
else:
m_max = 0
for m1 in np.arange(m_min,m_max,0.3):
for m2 in np.arange(m_min,m_max,0.3):
m0 = mu_red[mu[0]].subs([(mu[1],m1),(mu[2],m2)])
try:
mu_vals = nsolve(vector_func,mu,[m0,m1,m2],module="numpy")
# Line needs to be modified to include all mus when n > 2
except:
continue
break
if mu_vals:
mu_vals = [float(mu_val) for mu_val in mu_vals]
break
else:
raise ValueError("Couldn't find mus")
return mu_vals
if not trial_chem_pot:
# Try computing mus by assuming one of the defects is dominant at 0.01
# concen. First vacancy is tried and then antisite
# Generate trial mus assuming vacancy as dominant defect
#for specie-0 at lower yval
li = specie_site_index_map[0][0]
hi = specie_site_index_map[0][1]
li1 = specie_site_index_map[1][0]
hi1 = specie_site_index_map[1][1]
spec_mult = [sum(multiplicity[li:hi]), sum(multiplicity[li1:hi1])]
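        # 4.60517 == ln(100); with mu = ln(100)*k_B*T - E_flip the vacancy
        # Boltzmann factor exp(-(mu + E_flip)/(k_B*T)) evaluates to 0.01,
        # i.e. the assumed 1% dominant-defect concentration.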
ln_def_conc = 4.60517
for i in range(li,hi):
vac_flip_en = vac_defs[i]['energy']
mu_vals = [ln_def_conc*k_B*T -vac_flip_en]
mu_vals.append((e0 - spec_mult[0]*mu_vals[0]) / spec_mult[1])
comp_ratio = comp1_min
# Test if the trial mus are good
vector_func = [comp_ratio - host_c_ratio[0]]
vector_func.append(omega1)
try:
host_mu_vals = nsolve(vector_func,mu[:-1],mu_vals)
if host_mu_vals:
host_mu_vals = [float(mu_val) for mu_val in host_mu_vals]
                    mu_vals = compute_solute_mu_by_lin_search(host_mu_vals)
break
except: # Go for antisite as dominant defect
mu_gs = [Symbol('mu_gs'+j.__str__()) for j in range(m-1)]
eqs = [mu_gs[0]-mu_gs[1] - (ln_def_conc*k_B*T-antisite_defs[i][
'energy'])]
eqs.append(spec_mult[0]*mu_gs[0] + spec_mult[1]*mu_gs[1] - e0)
x = solve(eqs, mu_gs)
host_mu_vals = []
for key in sorted(x.keys(),key=lambda inp: inp.name):
host_mu_vals.append(x[key])
vector_func = [comp_ratio - host_c_ratio[0]]
vector_func.append(omega1)
try:
host_mu_vals = nsolve(vector_func,mu[:-1],host_mu_vals)
if host_mu_vals:
host_mu_vals = [float(mu_val) for mu_val in host_mu_vals]
mu_vals = compute_solute_mu_by_lin_search(host_mu_vals)
break
except: # Go to the default option (search the space)
pass
else:
mu_vals = compute_mus()
else:
try:
mu_vals = [trial_chem_pot[element] for element in specie_order]
except:
mu_vals = compute_mus()
# Compile mu's for all composition ratios in the range
#+/- 1% from the stoichiometry
result = {}
for y in np.arange(comp1_min,comp1_max+delta,delta):
y_vect = []
y_vect.append(y)
y2 = max_host_specie_concen - y
y_vect.append(y2)
y_vect.append(solute_concen)
vector_func = [y_vect[i]-c_ratio[i] for i in range(1,m)]
vector_func.append(omega)
try:
x = nsolve(vector_func,mu,mu_vals)
if x:
mu_vals = [float(mu_val) for mu_val in x]
except:
continue
result[y] = mu_vals
res = []
# Compute the concentrations for all the compositions
for key in sorted(result.keys()):
mu_val = result[key]
total_c_val = [total_c[i].subs(dict(zip(mu,mu_val))) \
for i in range(len(total_c))]
c_val = c.subs(dict(zip(mu,mu_val)))
# Concentration of first element/over total concen
res1 = []
res1.append(float(total_c_val[0]/sum(total_c_val)))
sum_c0 = sum([c0[i,i] for i in range(n)])
for i in range(n+1):
for j in range(n):
if i == j: # Vacancy
vac_conc = float(exp(-(mu_val[site_mu_map[i]]+dE[i,i])/(k_B*T)))
res1.append(vac_conc)
else: # Antisite
res1.append(float(c_val[i,j]/c0[j,j]))
res.append(res1)
res = np.array(res)
dtype = [(str('x'),np.float64)]+[(str('y%d%d' % (i, j)), np.float64) \
for i in range(n+1) for j in range(n)]
res1 = np.sort(res.view(dtype),order=[str('x')],axis=0)
conc = []
for i in range(n+1):
conc.append([])
for j in range(n):
conc[i].append([])
for i in range(n+1): # Append vacancies
for j in range(n):
y1 = [dat[0][i*n+j+1] for dat in res1]
conc[i][j] = y1
# Compute solute site preference
# Removing the functionality
#site_pref_data = {}
"""Because all the plots have identical x-points storing it in a
single array"""
#site_pref_data['x'] = [dat[0][0] for dat in res1] # x-axis data
# Element whose composition is varied. For x-label
#site_pref_data['x_label'] = els[0]+ "_mole_fraction"
#site_pref_data['y_label'] = "$"+solute_specie+"_{"+els[0]+"}/("+\
# solute_specie+"_{"+els[0]+"}+"+solute_specie+"_{"+els[1]+"})$"
#y_data = []
#inds = specie_site_index_map[m-1]
#data1 = np.sum([multiplicity[0]*conc[ind][0] for ind in range(*inds)],axis=0)
#data2 = np.sum([multiplicity[1]*conc[ind][1] for ind in range(*inds)],axis=0)
#frac_data = data1/(data1+data2)
#frac_data = frac_data.tolist()
#y_data.append({'data':frac_data})
#site_pref_data['y'] = y_data
# Return all defect concentrations
conc_data = {}
"""Because all the plots have identical x-points storing it in a
single array"""
conc_data['x'] = [dat[0][0] for dat in res1] # x-axis data
# Element whose composition is varied. For x-label
conc_data['x_label'] = els[0]+ " mole fraction"
conc_data['y_label'] = "Point defect concentration"
y_data = []
# Vacancy
for i in range(n):
data = conc[i][i]
specie = els[i]
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
vac_string = "$Vac_{"
if not specie_ind_del-1:
label = vac_string+specie+'}$'
else:
label = vac_string+specie+'_'+str(cur_ind)+'}$'
# Plot data and legend info
y_data.append({'data':data,'name':label})
# Antisites and solute
for i in range(n):
site_specie = els[i]
specie_ind = site_mu_map[i]
indices = specie_site_index_map[specie_ind]
specie_ind_del = indices[1]-indices[0]
cur_ind = i - indices[0] + 1
for j in range(m):
sub_specie = specie_order[j]
if sub_specie == site_specie:
continue
if not specie_ind_del-1:
label = '$'+sub_specie+'_{'+site_specie+'}$'
else:
label = '$'+sub_specie+'_{'+site_specie+'_'+str(cur_ind)+'}$'
inds = specie_site_index_map[j]
# TODO: Investigate the value below
data = np.sum([conc[ind][i] for ind in range(*inds)],axis=0)
data = data.tolist()
y_data.append({'data':data,'name':label})
conc_data['y'] = y_data
#return site_pref_data, conc_data
return conc_data
@requires(sympy_found,
"solute_defect_density requires Sympy module. Please install it.")
def solute_defect_density(structure, e0, vac_defs, antisite_defs, solute_defs,
solute_concen=0.01, T=800, trial_chem_pot = None,
plot_style="highchargs"):
"""
Wrapper for the solute_site_preference_finder.
The computed plot data is prepared based on plot_style.
Args:
structure: pymatgen.core.structure.Structure object representing the
primitive or unitcell of the crystal.
e0: The total energy of the undefected system.
This is E0 from VASP calculation.
vac_defs: List of vacancy defect parameters in the dictionary format.
The keys of the dict associated with each vacancy defect are
1) site_index, 2) site_specie, 3) site_multiplicity, and
4) energy. 1-3 can be obtained from
pymatgen.analysis.defects.point_defects.Vacancy class.
Site index is expected to start with 1 (fortran index).
antisite_defs: List of antisite defect parameters in the dictionary
format. The keys of the dict associated with each antisite defect
are 1) site_index, 2) site_specie, 3) site_multiplicity,
4) substitution_specie, and 5) energy. 1-3 can be obtained
from pymatgen.analysis.defects.point_defects.Vacancy class.
solute_defs: List of solute defect parameters in the dictionary
            format. Similar to that of antisite defs, with the solute specie
            specified in substitution_specie
solute_concen: Solute concentration (in fractional value)
T: Temperature in Kelvin
trial_chem_pot (optional): Trial chemical potentials to speedup
the plot generation. Format is {el1:mu1,...}
plot_style (string): Allowed options are
1) highcharts (default)
2) gnuplot
Returns:
        The plot data is generated and returned in the requested format.
"""
#solute_site_pref_data, def_conc_data = solute_site_preference_finder(
def_conc_data = solute_site_preference_finder(
structure, e0, T, vac_defs, antisite_defs, solute_defs,
solute_concen=solute_concen, trial_chem_pot=trial_chem_pot)
if plot_style == 'highcharts':
"Energy data is ignored in this mode"
hgh_chrt_data = {}
hgh_chrt_data['xAxis'] = def_conc_data['x_label']
hgh_chrt_data['yAxis'] = def_conc_data['y_label']
series = []
x = def_conc_data['x']
for y_data in def_conc_data['y']:
y = y_data['data']
xy = zip(x,y)
xy = [list(el) for el in xy]
name = y_data['name'].strip('$')
            flds = name.split('_')
def_string = flds[0]
site_string = flds[1].strip('{}')
name = def_string+"<sub>"+site_string+"</sub>"
#series.append({'data':xy, 'name':y_data['name']})
series.append({'data':xy, 'name':name})
hgh_chrt_data['series'] = series
return hgh_chrt_data
elif plot_style == 'gnuplot':
def data_to_rows(inp_data, y_lbl_flg):
rows = []
labels = []
labels.append(inp_data['x_label'])
if y_lbl_flg:
labels.append(inp_data['y_label'])
else:
labels += [y['name'] for y in inp_data['y']]
rows.append('#'+'\t'.join(labels))
m = len(inp_data['x'])
for i in range(m):
data = []
data.append(inp_data['x'][i])
data += [y['data'][i] for y in inp_data['y']]
data = [float(x) for x in data]
rows.append('\t'.join(list(map(str,data))))
return rows
#solute_site_pref_rows = data_to_rows(solute_site_pref_data, True)
pt_def_conc_rows = data_to_rows(def_conc_data, False)
#return solute_site_pref_rows, pt_def_conc_rows
return pt_def_conc_rows
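# A minimal gnuplot-mode sketch (illustrative; the inputs follow the
# documented dict format and are assumed to be prepared as for
# solute_site_preference_finder):
#
#     rows = solute_defect_density(structure, e0, vac_defs, antisite_defs,
#                                  solute_defs, solute_concen=0.01, T=1000,
#                                  plot_style='gnuplot')
#     with open('solute_defect_conc.dat', 'w') as f:
#         f.write('\n'.join(rows))
#     # The first (commented) row carries the column labels for gnuplot.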
| mit |
murgatroid99/grpc | examples/python/interceptors/headers/request_header_validator_interceptor.py | 31 | 1367 | # Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interceptor that ensures a specific header is present."""
import grpc
def _unary_unary_rpc_terminator(code, details):
def terminate(ignored_request, context):
context.abort(code, details)
return grpc.unary_unary_rpc_method_handler(terminate)
class RequestHeaderValidatorInterceptor(grpc.ServerInterceptor):
def __init__(self, header, value, code, details):
self._header = header
self._value = value
self._terminator = _unary_unary_rpc_terminator(code, details)
def intercept_service(self, continuation, handler_call_details):
if (self._header,
self._value) in handler_call_details.invocation_metadata:
return continuation(handler_call_details)
else:
return self._terminator
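# A minimal server-wiring sketch (illustrative; the header/value pair, port
# and thread pool size are placeholders, not part of this module):
#
#     from concurrent import futures
#
#     validator = RequestHeaderValidatorInterceptor(
#         'one-time-password', '42', grpc.StatusCode.UNAUTHENTICATED,
#         'Access denied!')
#     server = grpc.server(futures.ThreadPoolExecutor(max_workers=10),
#                          interceptors=(validator,))
#     server.add_insecure_port('[::]:50051')
#     server.start()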
| apache-2.0 |
yamt/tempest | tempest/manager.py | 26 | 2954 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib import auth
from tempest.common import cred_provider
from tempest import config
from tempest import exceptions
CONF = config.CONF
class Manager(object):
"""
Base manager class
Manager objects are responsible for providing a configuration object
and a client object for a test case to use in performing actions.
"""
def __init__(self, credentials=None):
"""
We allow overriding of the credentials used within the various
client classes managed by the Manager object. Left as None, the
standard username/password/tenant_name[/domain_name] is used.
:param credentials: Override of the credentials
"""
self.auth_version = CONF.identity.auth_version
if credentials is None:
self.credentials = cred_provider.get_configured_credentials('user')
else:
self.credentials = credentials
# Check if passed or default credentials are valid
if not self.credentials.is_valid():
raise exceptions.InvalidCredentials()
# Tenant isolation creates TestResources, but Accounts and some tests
        # create Credentials
if isinstance(credentials, cred_provider.TestResources):
creds = self.credentials.credentials
else:
creds = self.credentials
# Creates an auth provider for the credentials
self.auth_provider = get_auth_provider(creds)
# FIXME(andreaf) unused
self.client_attr_names = []
def get_auth_provider_class(credentials):
if isinstance(credentials, auth.KeystoneV3Credentials):
return auth.KeystoneV3AuthProvider, CONF.identity.uri_v3
else:
return auth.KeystoneV2AuthProvider, CONF.identity.uri
def get_auth_provider(credentials):
default_params = {
'disable_ssl_certificate_validation':
CONF.identity.disable_ssl_certificate_validation,
'ca_certs': CONF.identity.ca_certificates_file,
'trace_requests': CONF.debug.trace_requests
}
if credentials is None:
raise exceptions.InvalidCredentials(
'Credentials must be specified')
auth_provider_class, auth_url = get_auth_provider_class(
credentials)
return auth_provider_class(credentials, auth_url, **default_params)
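# A minimal usage sketch (illustrative; assumes a configured tempest
# deployment so that CONF and the credential provider resolve):
#
#     creds = cred_provider.get_configured_credentials('user')
#     provider = get_auth_provider(creds)
#     # provider is a KeystoneV2/V3 auth provider bound to the matching
#     # CONF.identity URI, as selected by get_auth_provider_class().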
| apache-2.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/setuptools-18.0.1/setuptools/tests/test_easy_install.py | 4 | 18326 | # -*- coding: utf-8 -*-
"""Easy install Tests
"""
from __future__ import absolute_import
import sys
import os
import shutil
import tempfile
import site
import contextlib
import tarfile
import logging
import itertools
import distutils.errors
import pytest
try:
from unittest import mock
except ImportError:
import mock
from setuptools import sandbox
from setuptools import compat
from setuptools.compat import StringIO, BytesIO, urlparse
from setuptools.sandbox import run_setup
import setuptools.command.easy_install as ei
from setuptools.command.easy_install import PthDistributions
from setuptools.command import easy_install as easy_install_pkg
from setuptools.dist import Distribution
from pkg_resources import working_set
from pkg_resources import Distribution as PRDistribution
import setuptools.tests.server
import pkg_resources
from .py26compat import tarfile_open
from . import contexts
from .textwrap import DALS
class FakeDist(object):
def get_entry_map(self, group):
if group != 'console_scripts':
return {}
return {'name': 'ep'}
def as_requirement(self):
return 'spec'
SETUP_PY = DALS("""
from setuptools import setup
setup(name='foo')
""")
class TestEasyInstallTest:
def test_install_site_py(self):
dist = Distribution()
cmd = ei.easy_install(dist)
cmd.sitepy_installed = False
cmd.install_dir = tempfile.mkdtemp()
try:
cmd.install_site_py()
sitepy = os.path.join(cmd.install_dir, 'site.py')
assert os.path.exists(sitepy)
finally:
shutil.rmtree(cmd.install_dir)
def test_get_script_args(self):
header = ei.CommandSpec.best().from_environment().as_header()
expected = header + DALS("""
# EASY-INSTALL-ENTRY-SCRIPT: 'spec','console_scripts','name'
__requires__ = 'spec'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('spec', 'console_scripts', 'name')()
)
""")
dist = FakeDist()
args = next(ei.ScriptWriter.get_args(dist))
name, script = itertools.islice(args, 2)
assert script == expected
def test_no_find_links(self):
# new option '--no-find-links', that blocks find-links added at
# the project level
dist = Distribution()
cmd = ei.easy_install(dist)
cmd.check_pth_processing = lambda: True
cmd.no_find_links = True
cmd.find_links = ['link1', 'link2']
cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok')
cmd.args = ['ok']
cmd.ensure_finalized()
assert cmd.package_index.scanned_urls == {}
# let's try without it (default behavior)
cmd = ei.easy_install(dist)
cmd.check_pth_processing = lambda: True
cmd.find_links = ['link1', 'link2']
cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok')
cmd.args = ['ok']
cmd.ensure_finalized()
keys = sorted(cmd.package_index.scanned_urls.keys())
assert keys == ['link1', 'link2']
def test_write_exception(self):
"""
Test that `cant_write_to_target` is rendered as a DistutilsError.
"""
dist = Distribution()
cmd = ei.easy_install(dist)
cmd.install_dir = os.getcwd()
with pytest.raises(distutils.errors.DistutilsError):
cmd.cant_write_to_target()
class TestPTHFileWriter:
def test_add_from_cwd_site_sets_dirty(self):
'''a pth file manager should set dirty
if a distribution is in site but also the cwd
'''
pth = PthDistributions('does-not_exist', [os.getcwd()])
assert not pth.dirty
pth.add(PRDistribution(os.getcwd()))
assert pth.dirty
def test_add_from_site_is_ignored(self):
location = '/test/location/does-not-have-to-exist'
# PthDistributions expects all locations to be normalized
location = pkg_resources.normalize_path(location)
pth = PthDistributions('does-not_exist', [location, ])
assert not pth.dirty
pth.add(PRDistribution(location))
assert not pth.dirty
@pytest.yield_fixture
def setup_context(tmpdir):
with (tmpdir/'setup.py').open('w') as f:
f.write(SETUP_PY)
with tmpdir.as_cwd():
yield tmpdir
@pytest.mark.usefixtures("user_override")
@pytest.mark.usefixtures("setup_context")
class TestUserInstallTest:
# prevent check that site-packages is writable. easy_install
# shouldn't be writing to system site-packages during finalize
# options, but while it does, bypass the behavior.
prev_sp_write = mock.patch(
'setuptools.command.easy_install.easy_install.check_site_dir',
mock.Mock(),
)
# simulate setuptools installed in user site packages
@mock.patch('setuptools.command.easy_install.__file__', site.USER_SITE)
@mock.patch('site.ENABLE_USER_SITE', True)
@prev_sp_write
def test_user_install_not_implied_user_site_enabled(self):
self.assert_not_user_site()
@mock.patch('site.ENABLE_USER_SITE', False)
@prev_sp_write
def test_user_install_not_implied_user_site_disabled(self):
self.assert_not_user_site()
@staticmethod
def assert_not_user_site():
# create a finalized easy_install command
dist = Distribution()
dist.script_name = 'setup.py'
cmd = ei.easy_install(dist)
cmd.args = ['py']
cmd.ensure_finalized()
assert not cmd.user, 'user should not be implied'
def test_multiproc_atexit(self):
pytest.importorskip('multiprocessing')
log = logging.getLogger('test_easy_install')
logging.basicConfig(level=logging.INFO, stream=sys.stderr)
log.info('this should not break')
@pytest.fixture()
def foo_package(self, tmpdir):
egg_file = tmpdir / 'foo-1.0.egg-info'
with egg_file.open('w') as f:
f.write('Name: foo\n')
return str(tmpdir)
@pytest.yield_fixture()
def install_target(self, tmpdir):
target = str(tmpdir)
with mock.patch('sys.path', sys.path + [target]):
python_path = os.path.pathsep.join(sys.path)
with mock.patch.dict(os.environ, PYTHONPATH=python_path):
yield target
def test_local_index(self, foo_package, install_target):
"""
The local index must be used when easy_install locates installed
packages.
"""
dist = Distribution()
dist.script_name = 'setup.py'
cmd = ei.easy_install(dist)
cmd.install_dir = install_target
cmd.args = ['foo']
cmd.ensure_finalized()
cmd.local_index.scan([foo_package])
res = cmd.easy_install('foo')
actual = os.path.normcase(os.path.realpath(res.location))
expected = os.path.normcase(os.path.realpath(foo_package))
assert actual == expected
@contextlib.contextmanager
def user_install_setup_context(self, *args, **kwargs):
"""
Wrap sandbox.setup_context to patch easy_install in that context to
appear as user-installed.
"""
with self.orig_context(*args, **kwargs):
import setuptools.command.easy_install as ei
ei.__file__ = site.USER_SITE
yield
def patched_setup_context(self):
self.orig_context = sandbox.setup_context
return mock.patch(
'setuptools.sandbox.setup_context',
self.user_install_setup_context,
)
@pytest.yield_fixture
def distutils_package():
distutils_setup_py = SETUP_PY.replace(
'from setuptools import setup',
'from distutils.core import setup',
)
with contexts.tempdir(cd=os.chdir):
with open('setup.py', 'w') as f:
f.write(distutils_setup_py)
yield
class TestDistutilsPackage:
def test_bdist_egg_available_on_distutils_pkg(self, distutils_package):
run_setup('setup.py', ['bdist_egg'])
class TestSetupRequires:
def test_setup_requires_honors_fetch_params(self):
"""
When easy_install installs a source distribution which specifies
setup_requires, it should honor the fetch parameters (such as
allow-hosts, index-url, and find-links).
"""
# set up a server which will simulate an alternate package index.
p_index = setuptools.tests.server.MockServer()
p_index.start()
netloc = 1
p_index_loc = urlparse(p_index.url)[netloc]
if p_index_loc.endswith(':0'):
# Some platforms (Jython) don't find a port to which to bind,
# so skip this test for them.
return
with contexts.quiet():
# create an sdist that has a build-time dependency.
with TestSetupRequires.create_sdist() as dist_file:
with contexts.tempdir() as temp_install_dir:
with contexts.environment(PYTHONPATH=temp_install_dir):
ei_params = [
'--index-url', p_index.url,
'--allow-hosts', p_index_loc,
'--exclude-scripts',
'--install-dir', temp_install_dir,
dist_file,
]
with sandbox.save_argv(['easy_install']):
# attempt to install the dist. It should fail because
# it doesn't exist.
with pytest.raises(SystemExit):
easy_install_pkg.main(ei_params)
# there should have been two or three requests to the server
# (three happens on Python 3.3a)
assert 2 <= len(p_index.requests) <= 3
assert p_index.requests[0].path == '/does-not-exist/'
@staticmethod
@contextlib.contextmanager
def create_sdist():
"""
Return an sdist with a setup_requires dependency (of something that
doesn't exist)
"""
with contexts.tempdir() as dir:
dist_path = os.path.join(dir, 'setuptools-test-fetcher-1.0.tar.gz')
script = DALS("""
import setuptools
setuptools.setup(
name="setuptools-test-fetcher",
version="1.0",
setup_requires = ['does-not-exist'],
)
""")
make_trivial_sdist(dist_path, script)
yield dist_path
def test_setup_requires_overrides_version_conflict(self):
"""
Regression test for issue #323.
Ensures that a distribution's setup_requires requirements can still be
installed and used locally even if a conflicting version of that
requirement is already on the path.
"""
pr_state = pkg_resources.__getstate__()
fake_dist = PRDistribution('does-not-matter', project_name='foobar',
version='0.0')
working_set.add(fake_dist)
try:
with contexts.tempdir() as temp_dir:
test_pkg = create_setup_requires_package(temp_dir)
test_setup_py = os.path.join(test_pkg, 'setup.py')
with contexts.quiet() as (stdout, stderr):
# Don't even need to install the package, just
# running the setup.py at all is sufficient
run_setup(test_setup_py, ['--name'])
lines = stdout.readlines()
assert len(lines) > 0
assert lines[-1].strip(), 'test_pkg'
finally:
pkg_resources.__setstate__(pr_state)
def create_setup_requires_package(path):
"""Creates a source tree under path for a trivial test package that has a
single requirement in setup_requires--a tarball for that requirement is
also created and added to the dependency_links argument.
"""
test_setup_attrs = {
'name': 'test_pkg', 'version': '0.0',
'setup_requires': ['foobar==0.1'],
'dependency_links': [os.path.abspath(path)]
}
test_pkg = os.path.join(path, 'test_pkg')
test_setup_py = os.path.join(test_pkg, 'setup.py')
os.mkdir(test_pkg)
with open(test_setup_py, 'w') as f:
f.write(DALS("""
import setuptools
setuptools.setup(**%r)
""" % test_setup_attrs))
foobar_path = os.path.join(path, 'foobar-0.1.tar.gz')
make_trivial_sdist(
foobar_path,
DALS("""
import setuptools
setuptools.setup(
name='foobar',
version='0.1'
)
"""))
return test_pkg
def make_trivial_sdist(dist_path, setup_py):
"""Create a simple sdist tarball at dist_path, containing just a
setup.py, the contents of which are provided by the setup_py string.
"""
setup_py_file = tarfile.TarInfo(name='setup.py')
try:
# Python 3 (StringIO gets converted to io module)
MemFile = BytesIO
except AttributeError:
MemFile = StringIO
setup_py_bytes = MemFile(setup_py.encode('utf-8'))
setup_py_file.size = len(setup_py_bytes.getvalue())
with tarfile_open(dist_path, 'w:gz') as dist:
dist.addfile(setup_py_file, fileobj=setup_py_bytes)
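# A minimal usage sketch (illustrative; the package name and path are
# hypothetical):
#
#     with contexts.tempdir() as dir:
#         dist_path = os.path.join(dir, 'dummy-0.1.tar.gz')
#         make_trivial_sdist(dist_path, DALS("""
#             import setuptools
#             setuptools.setup(name='dummy', version='0.1')
#             """))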
class TestScriptHeader:
non_ascii_exe = '/Users/José/bin/python'
exe_with_spaces = r'C:\Program Files\Python33\python.exe'
@pytest.mark.skipif(
sys.platform.startswith('java') and ei.is_sh(sys.executable),
reason="Test cannot run under java when executable is sh"
)
def test_get_script_header(self):
expected = '#!%s\n' % ei.nt_quote_arg(os.path.normpath(sys.executable))
actual = ei.ScriptWriter.get_script_header('#!/usr/local/bin/python')
assert actual == expected
expected = '#!%s -x\n' % ei.nt_quote_arg(os.path.normpath
(sys.executable))
actual = ei.ScriptWriter.get_script_header('#!/usr/bin/python -x')
assert actual == expected
actual = ei.ScriptWriter.get_script_header('#!/usr/bin/python',
executable=self.non_ascii_exe)
expected = '#!%s -x\n' % self.non_ascii_exe
assert actual == expected
actual = ei.ScriptWriter.get_script_header('#!/usr/bin/python',
executable='"'+self.exe_with_spaces+'"')
expected = '#!"%s"\n' % self.exe_with_spaces
assert actual == expected
@pytest.mark.xfail(
compat.PY3 and os.environ.get("LC_CTYPE") in ("C", "POSIX"),
reason="Test fails in this locale on Python 3"
)
@mock.patch.dict(sys.modules, java=mock.Mock(lang=mock.Mock(System=
mock.Mock(getProperty=mock.Mock(return_value="")))))
@mock.patch('sys.platform', 'java1.5.0_13')
def test_get_script_header_jython_workaround(self, tmpdir):
# Create a mock sys.executable that uses a shebang line
header = DALS("""
#!/usr/bin/python
# -*- coding: utf-8 -*-
""")
exe = tmpdir / 'exe.py'
with exe.open('w') as f:
f.write(header)
exe = str(exe)
header = ei.ScriptWriter.get_script_header('#!/usr/local/bin/python',
executable=exe)
assert header == '#!/usr/bin/env %s\n' % exe
expect_out = 'stdout' if sys.version_info < (2,7) else 'stderr'
with contexts.quiet() as (stdout, stderr):
# When options are included, generate a broken shebang line
# with a warning emitted
candidate = ei.ScriptWriter.get_script_header('#!/usr/bin/python -x',
executable=exe)
assert candidate == '#!%s -x\n' % exe
output = locals()[expect_out]
assert 'Unable to adapt shebang line' in output.getvalue()
with contexts.quiet() as (stdout, stderr):
candidate = ei.ScriptWriter.get_script_header('#!/usr/bin/python',
executable=self.non_ascii_exe)
assert candidate == '#!%s -x\n' % self.non_ascii_exe
output = locals()[expect_out]
assert 'Unable to adapt shebang line' in output.getvalue()
class TestCommandSpec:
def test_custom_launch_command(self):
"""
Show how a custom CommandSpec could be used to specify a #! executable
which takes parameters.
"""
cmd = ei.CommandSpec(['/usr/bin/env', 'python3'])
assert cmd.as_header() == '#!/usr/bin/env python3\n'
def test_from_param_for_CommandSpec_is_passthrough(self):
"""
from_param should return an instance of a CommandSpec
"""
cmd = ei.CommandSpec(['python'])
cmd_new = ei.CommandSpec.from_param(cmd)
assert cmd is cmd_new
@mock.patch('sys.executable', TestScriptHeader.exe_with_spaces)
@mock.patch.dict(os.environ)
def test_from_environment_with_spaces_in_executable(self):
os.environ.pop('__PYVENV_LAUNCHER__', None)
cmd = ei.CommandSpec.from_environment()
assert len(cmd) == 1
assert cmd.as_header().startswith('#!"')
def test_from_simple_string_uses_shlex(self):
"""
In order to support `executable = /usr/bin/env my-python`, make sure
from_param invokes shlex on that input.
"""
cmd = ei.CommandSpec.from_param('/usr/bin/env my-python')
assert len(cmd) == 2
assert '"' not in cmd.as_header()
def test_sys_executable(self):
"""
CommandSpec.from_string(sys.executable) should contain just that param.
"""
writer = ei.ScriptWriter.best()
cmd = writer.command_spec_class.from_string(sys.executable)
assert len(cmd) == 1
assert cmd[0] == sys.executable
class TestWindowsScriptWriter:
def test_header(self):
hdr = ei.WindowsScriptWriter.get_script_header('')
assert hdr.startswith('#!')
assert hdr.endswith('\n')
hdr = hdr.lstrip('#!')
hdr = hdr.rstrip('\n')
# header should not start with an escaped quote
assert not hdr.startswith('\\"')
| mit |
abought/osf.io | website/addons/wiki/exceptions.py | 52 | 1131 | from modularodm.exceptions import ValidationValueError
from website.addons.base.exceptions import AddonError
class WikiError(AddonError):
"""Base exception class for Wiki-related error."""
pass
class NameEmptyError(WikiError, ValidationValueError):
"""Raised if user tries to provide an empty name value."""
pass
class NameInvalidError(WikiError, ValidationValueError):
"""Raised if user tries to provide a string containing an invalid character."""
pass
class NameMaximumLengthError(WikiError, ValidationValueError):
"""Raised if user tries to provide a name which exceeds the maximum accepted length."""
pass
class PageCannotRenameError(WikiError):
"""Raised if user tried to rename special wiki pages, e.g. home."""
pass
class PageConflictError(WikiError):
"""Raised if user tries to use an existing wiki page name."""
pass
class PageNotFoundError(WikiError):
"""Raised if user tries to access a wiki page that does not exist."""
pass
class InvalidVersionError(WikiError):
"""Raised if user tries to access a wiki page version that does not exist."""
pass
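# A minimal validation sketch (hypothetical helper, not part of this module;
# the 100-character limit is assumed for illustration):
#
#     def validate_page_name(name):
#         if not name:
#             raise NameEmptyError('Page name cannot be blank.')
#         if len(name) > 100:
#             raise NameMaximumLengthError('Page name is too long.')
#         if ':' in name:
#             raise NameInvalidError('Page name contains an invalid character.')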
| apache-2.0 |
vrenaville/OCB | addons/stock_dropshipping/wizard/stock_invoice_onshipping.py | 51 | 2115 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class stock_invoice_onshipping(osv.osv_memory):
_inherit = "stock.invoice.onshipping"
def _get_journal_type(self, cr, uid, context=None):
if context is None:
context = {}
res_ids = context and context.get('active_ids', [])
pick_obj = self.pool.get('stock.picking')
pickings = pick_obj.browse(cr, uid, res_ids, context=context)
pick = pickings and pickings[0]
src_usage = pick.move_lines[0].location_id.usage
dest_usage = pick.move_lines[0].location_dest_id.usage
pick_purchase = pick.move_lines and pick.move_lines[0].purchase_line_id and pick.move_lines[0].purchase_line_id.order_id.invoice_method == 'picking'
if pick.picking_type_id.code == 'outgoing' and src_usage == 'supplier' and dest_usage == 'customer' and pick_purchase:
return 'purchase'
else:
return super(stock_invoice_onshipping, self)._get_journal_type(cr, uid, context=context)
_defaults = {
'journal_type': _get_journal_type,
} | agpl-3.0 |
signed/intellij-community | python/lib/Lib/macpath.py | 87 | 7601 | """Pathname and path-related operations for the Macintosh."""
import os
from stat import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"walk","expanduser","expandvars","normpath","abspath",
"curdir","pardir","sep","pathsep","defpath","altsep","extsep",
"devnull","realpath","supports_unicode_filenames"]
# strings representing various path-related bits and pieces
curdir = ':'
pardir = '::'
extsep = '.'
sep = ':'
pathsep = '\n'
defpath = ':'
altsep = None
devnull = 'Dev:Null'
# Normalize the case of a pathname. Dummy in Posix, but <s>.lower() here.
def normcase(path):
return path.lower()
def isabs(s):
"""Return true if a path is absolute.
On the Mac, relative paths begin with a colon,
but as a special case, paths with no colons at all are also relative.
Anything else is absolute (the string up to the first colon is the
volume name)."""
return ':' in s and s[0] != ':'
def join(s, *p):
path = s
for t in p:
if (not s) or isabs(t):
path = t
continue
if t[:1] == ':':
t = t[1:]
if ':' not in path:
path = ':' + path
if path[-1:] != ':':
path = path + ':'
path = path + t
return path
def split(s):
"""Split a pathname into two parts: the directory leading up to the final
bit, and the basename (the filename, without colons, in that directory).
The result (s, t) is such that join(s, t) yields the original argument."""
if ':' not in s: return '', s
colon = 0
for i in range(len(s)):
if s[i] == ':': colon = i + 1
path, file = s[:colon-1], s[colon:]
if path and not ':' in path:
path = path + ':'
return path, file
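# Illustrative behaviour of the colon-based path rules above (doctest-style,
# added as documentation):
#
#     >>> split('HD:System:Finder')
#     ('HD:System', 'Finder')
#     >>> isabs(':relative:path'), isabs('HD:absolute')
#     (False, True)
#     >>> join('HD:System', 'Finder')
#     'HD:System:Finder'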
def splitext(p):
"""Split a path into root and extension.
The extension is everything starting at the last dot in the last
pathname component; the root is everything before that.
It is always true that root + ext == p."""
i = p.rfind('.')
if i<=p.rfind(':'):
return p, ''
else:
return p[:i], p[i:]
def splitdrive(p):
"""Split a pathname into a drive specification and the rest of the
path. Useful on DOS/Windows/NT; on the Mac, the drive is always
empty (don't use the volume name -- it doesn't have the same
syntactic and semantic oddities as DOS drive letters, such as there
being a separate current directory per drive)."""
return '', p
# Short interfaces to split()
def dirname(s): return split(s)[0]
def basename(s): return split(s)[1]
def ismount(s):
if not isabs(s):
return False
components = split(s)
return len(components) == 2 and components[1] == ''
def isdir(s):
"""Return true if the pathname refers to an existing directory."""
try:
st = os.stat(s)
except os.error:
return 0
return S_ISDIR(st.st_mode)
# Get size, mtime, atime of files.
def getsize(filename):
"""Return the size of a file, reported by os.stat()."""
return os.stat(filename).st_size
def getmtime(filename):
"""Return the last modification time of a file, reported by os.stat()."""
return os.stat(filename).st_mtime
def getatime(filename):
"""Return the last access time of a file, reported by os.stat()."""
return os.stat(filename).st_atime
def islink(s):
"""Return true if the pathname refers to a symbolic link."""
try:
import Carbon.File
return Carbon.File.ResolveAliasFile(s, 0)[2]
except:
return False
def isfile(s):
"""Return true if the pathname refers to an existing regular file."""
try:
st = os.stat(s)
except os.error:
return False
return S_ISREG(st.st_mode)
def getctime(filename):
"""Return the creation time of a file, reported by os.stat()."""
return os.stat(filename).st_ctime
def exists(s):
"""Test whether a path exists. Returns False for broken symbolic links"""
try:
st = os.stat(s)
except os.error:
return False
return True
# Is `stat`/`lstat` a meaningful difference on the Mac? This is safe in any
# case.
def lexists(path):
"""Test whether a path exists. Returns True for broken symbolic links"""
try:
st = os.lstat(path)
except os.error:
return False
return True
# Return the longest prefix of all list elements.
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
s1 = min(m)
s2 = max(m)
n = min(len(s1), len(s2))
for i in xrange(n):
if s1[i] != s2[i]:
return s1[:i]
return s1[:n]
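# Note the comparison is character-wise, not component-wise, so the result
# need not be a valid path. Illustrative results:
#   commonprefix(['HD:Users:foo', 'HD:Users:bar']) -> 'HD:Users:'
#   commonprefix(['spam', 'spare'])                -> 'spa'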
def expandvars(path):
"""Dummy to retain interface-compatibility with other operating systems."""
return path
def expanduser(path):
"""Dummy to retain interface-compatibility with other operating systems."""
return path
class norm_error(Exception):
"""Path cannot be normalized"""
def normpath(s):
"""Normalize a pathname. Will return the same result for
equivalent paths."""
if ":" not in s:
return ":"+s
comps = s.split(":")
i = 1
while i < len(comps)-1:
if comps[i] == "" and comps[i-1] != "":
if i > 1:
del comps[i-1:i+1]
i = i - 1
else:
# best way to handle this is to raise an exception
raise norm_error, 'Cannot use :: immediately after volume name'
else:
i = i + 1
s = ":".join(comps)
# remove trailing ":" except for ":" and "Volume:"
if s[-1] == ":" and len(comps) > 2 and s != ":"*len(s):
s = s[:-1]
return s
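# Illustrative results (traced against the code above; not from the original):
#   normpath('abc')     -> ':abc'   (bare names are made explicitly relative)
#   normpath('a:b::c')  -> 'a:c'    ('::' steps up one directory)
#   normpath('a:b:')    -> 'a:b'    (trailing colon dropped)
#   normpath('HD::x')   raises norm_error ('::' right after the volume name)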
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and '..'). func
may modify the fnames list in-place (e.g. via del or slice assignment),
and walk will only recurse into the subdirectories whose names remain in
fnames; this can be used to implement a filter, or to impose a specific
order of visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used, e.g., to pass
a filename pattern, or a mutable object designed to accumulate
statistics. Passing None for arg is common."""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name) and not islink(name):
walk(name, func, arg)
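# Usage sketch (hypothetical callback that records each directory visited):
#   def visit(arg, dirname, fnames):
#       arg.append(dirname)
#   seen = []
#   walk('HD:Projects', visit, seen)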
def abspath(path):
"""Return an absolute path."""
if not isabs(path):
path = join(os.getcwd(), path)
return normpath(path)
# realpath is a no-op on systems without islink support
def realpath(path):
path = abspath(path)
try:
import Carbon.File
except ImportError:
return path
if not path:
return path
components = path.split(':')
path = components[0] + ':'
for c in components[1:]:
path = join(path, c)
path = Carbon.File.FSResolveAliasFile(path, 1)[0].as_pathname()
return path
supports_unicode_filenames = False
| apache-2.0 |
sudosurootdev/external_chromium_org | chrome/test/mini_installer/registry_verifier.py | 36 | 5088 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import _winreg
import verifier
class RegistryVerifier(verifier.Verifier):
"""Verifies that the current registry matches the specified criteria."""
def _RootKeyConstant(self, root_key):
"""Converts a root registry key string into a _winreg.HKEY_* constant."""
root_key_mapping = {
'HKEY_CLASSES_ROOT': _winreg.HKEY_CLASSES_ROOT,
'HKEY_CURRENT_USER': _winreg.HKEY_CURRENT_USER,
'HKEY_LOCAL_MACHINE': _winreg.HKEY_LOCAL_MACHINE,
'HKEY_USERS': _winreg.HKEY_USERS,
}
if root_key not in root_key_mapping:
raise KeyError("Unknown root registry key '%s'" % root_key)
return root_key_mapping[root_key]
def _ValueTypeConstant(self, value_type):
"""Converts a registry value type string into a _winreg.REG_* constant."""
value_type_mapping = {
'BINARY': _winreg.REG_BINARY,
'DWORD': _winreg.REG_DWORD,
'DWORD_LITTLE_ENDIAN': _winreg.REG_DWORD_LITTLE_ENDIAN,
'DWORD_BIG_ENDIAN': _winreg.REG_DWORD_BIG_ENDIAN,
'EXPAND_SZ': _winreg.REG_EXPAND_SZ,
'LINK': _winreg.REG_LINK,
'MULTI_SZ': _winreg.REG_MULTI_SZ,
'NONE': _winreg.REG_NONE,
'SZ': _winreg.REG_SZ,
}
if value_type not in value_type_mapping:
raise KeyError("Unknown registry value type '%s'" % value_type)
return value_type_mapping[value_type]
def _VerifyExpectation(self, expectation_name, expectation,
variable_expander):
"""Overridden from verifier.Verifier.
Verifies a registry key according to the |expectation|.
Args:
expectation_name: The registry key being verified. It is expanded using
Expand.
expectation: A dictionary with the following keys and values:
'exists' a string indicating whether the registry key's existence is
'required', 'optional', or 'forbidden'. Values are not checked if
an 'optional' key is not present in the registry.
'values' (optional) a dictionary where each key is a registry value
and its associated value is a dictionary with the following key
and values:
'type' (optional) a string indicating the type of the registry
value. If not present, the corresponding value is expected
to be absent in the registry.
'data' the associated data of the registry value if 'type' is
specified. If it is a string, it is expanded using Expand.
variable_expander: A VariableExpander object.
"""
key = variable_expander.Expand(expectation_name)
root_key, sub_key = key.split('\\', 1)
try:
# Query the Windows registry for the registry key. It will throw a
# WindowsError if the key doesn't exist.
key_handle = _winreg.OpenKey(self._RootKeyConstant(root_key), sub_key, 0,
_winreg.KEY_QUERY_VALUE)
except WindowsError:
# Key doesn't exist. See that it matches the expectation.
      assert expectation['exists'] != 'required', (
          'Registry key %s is missing' % key)
# Values are not checked if the missing key's existence is optional.
return
# The key exists, see that it matches the expectation.
    assert expectation['exists'] != 'forbidden', (
        'Registry key %s exists' % key)
# Verify the expected values.
if 'values' not in expectation:
return
for value, value_expectation in expectation['values'].iteritems():
# Query the value. It will throw a WindowsError if the value doesn't
# exist.
try:
data, value_type = _winreg.QueryValueEx(key_handle, value)
except WindowsError:
# The value does not exist. See that this matches the expectation.
assert 'type' not in value_expectation, ('Value %s of registry key %s '
'is missing' % (value, key))
continue
assert 'type' in value_expectation, ('Value %s of registry key %s exists '
'with value %s' % (value, key, data))
# Verify the type of the value.
expected_value_type = value_expectation['type']
    assert self._ValueTypeConstant(expected_value_type) == value_type, \
        "Value '%s' of registry key %s does not have the expected type '%s'" % (
            value, key, expected_value_type)
# Verify the associated data of the value.
expected_data = value_expectation['data']
if isinstance(expected_data, basestring):
expected_data = variable_expander.Expand(expected_data)
assert expected_data == data, \
("Value '%s' of registry key %s has unexpected data.\n"
" Expected: %s\n"
" Actual: %s" % (value, key, expected_data, data))
| bsd-3-clause |
ryfeus/lambda-packs | Tensorflow/source/numpy/f2py/__init__.py | 63 | 2038 | #!/usr/bin/env python
"""Fortran to Python Interface Generator.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['run_main', 'compile', 'f2py_testing']
import sys
from . import f2py2e
from . import f2py_testing
from . import diagnose
run_main = f2py2e.run_main
main = f2py2e.main
def compile(source,
modulename='untitled',
extra_args='',
verbose=True,
source_fn=None,
extension='.f'
):
"""
Build extension module from processing source with f2py.
Parameters
----------
source : str
Fortran source of module / subroutine to compile
modulename : str, optional
The name of the compiled python module
extra_args : str, optional
Additional parameters passed to f2py
verbose : bool, optional
Print f2py output to screen
source_fn : str, optional
Name of the file where the fortran source is written.
The default is to use a temporary file with the extension
provided by the `extension` parameter
extension : {'.f', '.f90'}, optional
Filename extension if `source_fn` is not provided.
The extension tells which fortran standard is used.
The default is `.f`, which implies F77 standard.
.. versionadded:: 1.11.0
"""
from numpy.distutils.exec_command import exec_command
import tempfile
if source_fn is None:
f = tempfile.NamedTemporaryFile(suffix=extension)
else:
f = open(source_fn, 'w')
try:
f.write(source)
f.flush()
args = ' -c -m {} {} {}'.format(modulename, f.name, extra_args)
c = '{} -c "import numpy.f2py as f2py2e;f2py2e.main()" {}'
c = c.format(sys.executable, args)
status, output = exec_command(c)
if verbose:
print(output)
finally:
f.close()
return status
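# Illustrative use of compile() (assumes a Fortran compiler is on the PATH;
# not part of the original module):
#     fsource = '''
#           subroutine hello
#           print *, "hello from fortran"
#           end
#     '''
#     status = compile(fsource, modulename='hello', verbose=False)
#     if status == 0:
#         import hello
#         hello.hello()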
from numpy.testing.nosetester import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
| mit |
tensorflow/probability | tensorflow_probability/python/distributions/blockwise_test.py | 1 | 17698 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
def _set_seed(seed):
"""Helper which uses graph seed if using eager."""
# TODO(b/68017812): Deprecate once eager correctly supports seed.
if tf.executing_eagerly():
tf.random.set_seed(seed)
return None
return seed
@test_util.test_all_tf_execution_regimes
class BlockwiseTest(test_util.TestCase):
def testDocstring1(self):
d = tfd.Blockwise(
[
tfd.Independent(
tfd.Normal(
loc=tf1.placeholder_with_default(
tf.zeros(4, dtype=tf.float64),
shape=None,
),
scale=1),
reinterpreted_batch_ndims=1),
tfd.MultivariateNormalTriL(
scale_tril=tf1.placeholder_with_default(
tf.eye(2, dtype=tf.float32), shape=None)),
],
dtype_override=tf.float32,
validate_args=True,
)
x = d.sample([2, 1], seed=test_util.test_seed())
y = d.log_prob(x)
x_, y_ = self.evaluate([x, y])
self.assertEqual((2, 1, 4 + 2), x_.shape)
self.assertDTypeEqual(x, np.float32)
self.assertEqual((2, 1), y_.shape)
self.assertDTypeEqual(y, np.float32)
self.assertAllClose(
np.zeros((6,), dtype=np.float32), self.evaluate(d.mean()))
def testDocstring2(self):
Root = tfd.JointDistributionCoroutine.Root # pylint: disable=invalid-name
def model():
e = yield Root(tfd.Independent(tfd.Exponential(rate=[100, 120]), 1))
g = yield tfd.Gamma(concentration=e[..., 0], rate=e[..., 1])
n = yield Root(tfd.Normal(loc=0, scale=2.))
yield tfd.Normal(loc=n, scale=g)
joint = tfd.JointDistributionCoroutine(model)
d = tfd.Blockwise(joint, validate_args=True)
x = d.sample([2, 1], seed=test_util.test_seed())
y = d.log_prob(x)
x_, y_ = self.evaluate([x, y])
self.assertEqual((2, 1, 2 + 1 + 1 + 1), x_.shape)
self.assertDTypeEqual(x, np.float32)
self.assertEqual((2, 1), y_.shape)
self.assertDTypeEqual(y, np.float32)
def testSampleReproducible(self):
Root = tfd.JointDistributionCoroutine.Root # pylint: disable=invalid-name
def model():
e = yield Root(tfd.Independent(tfd.Exponential(rate=[100, 120]), 1))
g = yield tfd.Gamma(concentration=e[..., 0], rate=e[..., 1])
n = yield Root(tfd.Normal(loc=0, scale=2.))
yield tfd.Normal(loc=n, scale=g)
joint = tfd.JointDistributionCoroutine(model)
d = tfd.Blockwise(joint, validate_args=True)
seed = test_util.test_seed()
tf.random.set_seed(seed)
x = d.sample([2, 1], seed=seed)
tf.random.set_seed(seed)
y = d.sample([2, 1], seed=seed)
x_, y_ = self.evaluate([x, y])
self.assertAllClose(x_, y_)
def testVaryingBatchShapeErrorStatic(self):
with self.assertRaisesRegexp(
ValueError, 'Distributions must have the same `batch_shape`'):
tfd.Blockwise(
[
tfd.Normal(tf.zeros(2), tf.ones(2)),
tfd.Normal(0., 1.),
],
validate_args=True,
)
def testVaryingBatchShapeErrorDynamicRank(self):
if tf.executing_eagerly():
return
with self.assertRaisesOpError(
'Distributions must have the same `batch_shape`'):
loc = tf1.placeholder_with_default(tf.zeros([2]), shape=None)
dist = tfd.Blockwise(
[
tfd.Normal(loc, tf.ones_like(loc)),
tfd.Independent(tfd.Normal(loc, tf.ones_like(loc)), 1),
],
validate_args=True,
)
self.evaluate(dist.mean())
def testVaryingBatchShapeErrorDynamicDims(self):
if tf.executing_eagerly():
return
with self.assertRaisesOpError(
'Distributions must have the same `batch_shape`'):
loc1 = tf1.placeholder_with_default(tf.zeros([1]), shape=None)
loc2 = tf1.placeholder_with_default(tf.zeros([2]), shape=None)
dist = tfd.Blockwise(
[
tfd.Normal(loc1, tf.ones_like(loc1)),
tfd.Normal(loc2, tf.ones_like(loc2)),
],
validate_args=True,
)
self.evaluate(dist.mean())
def testAssertValidSample(self):
loc1 = tf1.placeholder_with_default(tf.zeros([2]), shape=None)
loc2 = tf1.placeholder_with_default(tf.zeros([2]), shape=None)
dist = tfd.Blockwise(
[
tfd.Normal(loc1, tf.ones_like(loc1)),
tfd.Normal(loc2, tf.ones_like(loc2)),
],
validate_args=True,
)
with self.assertRaisesRegexp(
ValueError, 'must have at least one dimension'):
self.evaluate(dist.prob(3.))
def testKlBlockwiseIsSum(self):
gamma0 = tfd.Gamma(concentration=[1., 2., 3.], rate=1.)
gamma1 = tfd.Gamma(concentration=[3., 4., 5.], rate=1.)
normal0 = tfd.Normal(loc=tf.zeros(3), scale=2.)
normal1 = tfd.Normal(loc=tf.ones(3), scale=[2., 3., 4.])
d0 = tfd.Blockwise([
tfd.Independent(gamma0, reinterpreted_batch_ndims=1),
tfd.Independent(normal0, reinterpreted_batch_ndims=1)
],
validate_args=True)
d1 = tfd.Blockwise([
tfd.Independent(gamma1, reinterpreted_batch_ndims=1),
tfd.Independent(normal1, reinterpreted_batch_ndims=1)
],
validate_args=True)
kl_sum = tf.reduce_sum(
(tfd.kl_divergence(gamma0, gamma1) +
tfd.kl_divergence(normal0, normal1)))
blockwise_kl = tfd.kl_divergence(d0, d1)
kl_sum_, blockwise_kl_ = self.evaluate([kl_sum, blockwise_kl])
self.assertAllClose(kl_sum_, blockwise_kl_)
def testKLBlockwise(self):
    # d0 and d1 are two MVNs that are 6-dimensional. Construct the
# corresponding MVNs, and ensure that the KL between the MVNs is close to
# the Blockwise ones.
# In both cases the scale matrix has a block diag structure, owing to
# independence of the component distributions.
d0 = tfd.Blockwise([
tfd.Independent(
tfd.Normal(loc=tf.zeros(4, dtype=tf.float64), scale=1.),
reinterpreted_batch_ndims=1),
tfd.MultivariateNormalTriL(
scale_tril=tf1.placeholder_with_default(
tf.eye(2, dtype=tf.float64), shape=None)),
],
validate_args=True)
d0_mvn = tfd.MultivariateNormalLinearOperator(
loc=np.float64([0.] * 6),
scale=tf.linalg.LinearOperatorBlockDiag([
tf.linalg.LinearOperatorIdentity(num_rows=4, dtype=tf.float64),
tf.linalg.LinearOperatorLowerTriangular(
tf.eye(2, dtype=tf.float64))
]))
d1 = tfd.Blockwise([
tfd.Independent(
tfd.Normal(loc=tf.ones(4, dtype=tf.float64), scale=1),
reinterpreted_batch_ndims=1),
tfd.MultivariateNormalTriL(
loc=tf.ones(2, dtype=tf.float64),
scale_tril=tf1.placeholder_with_default(
np.float64([[1., 0.], [2., 3.]]), shape=None)),
],
validate_args=True)
d1_mvn = tfd.MultivariateNormalLinearOperator(
loc=np.float64([1.] * 6),
scale=tf.linalg.LinearOperatorBlockDiag([
tf.linalg.LinearOperatorIdentity(num_rows=4, dtype=tf.float64),
tf.linalg.LinearOperatorLowerTriangular(
np.float64([[1., 0.], [2., 3.]]))
]))
blockwise_kl = tfd.kl_divergence(d0, d1)
mvn_kl = tfd.kl_divergence(d0_mvn, d1_mvn)
blockwise_kl_, mvn_kl_ = self.evaluate([blockwise_kl, mvn_kl])
self.assertAllClose(blockwise_kl_, mvn_kl_)
def testUnconstrainingBijector(self):
dist = tfd.Exponential(rate=[1., 2., 6.], validate_args=True)
blockwise_dist = tfd.Blockwise(dist, validate_args=True)
x = self.evaluate(
dist.experimental_default_event_space_bijector()(
tf.ones(dist.batch_shape)))
x_blockwise = self.evaluate(
blockwise_dist.experimental_default_event_space_bijector()(
tf.ones(blockwise_dist.batch_shape)))
self.assertAllEqual(x, x_blockwise)
@test_util.test_all_tf_execution_regimes
class BlockwiseTestStaticParams(test_util.TestCase):
use_static_shape = True
def _input(self, value):
"""Helper to create inputs with optional static shapes."""
value = tf.convert_to_tensor(value)
return tf1.placeholder_with_default(
value, shape=value.shape if self.use_static_shape else None)
@parameterized.named_parameters(
(
'Scalar',
lambda self: tfd.Normal(self._input(0.), self._input(1.)),
1,
[],
),
(
'Vector',
lambda self: tfd.Independent( # pylint: disable=g-long-lambda
tfd.Normal(self._input(tf.zeros(2)), self._input(tf.ones(2))), 1),
2,
[],
),
(
'Matrix',
lambda self: tfd.Independent( # pylint: disable=g-long-lambda
tfd.Normal(
self._input(tf.zeros([2, 3])), self._input(tf.ones([2, 3]))),
2),
6,
[],
),
(
'VectorBatch',
lambda self: tfd.Independent( # pylint: disable=g-long-lambda
tfd.Normal(
self._input(tf.zeros([2, 2])), self._input(tf.ones([2, 2]))),
1),
2,
[2],
),
)
def testSingleTensor(self, dist_fn, num_elements, batch_shape):
"""Checks that basic properties work with single Tensor distributions."""
base = dist_fn(self)
flat = tfd.Blockwise(base, validate_args=True)
if self.use_static_shape:
self.assertAllEqual([num_elements], flat.event_shape)
self.assertAllEqual([num_elements],
self.evaluate(flat.event_shape_tensor()))
if self.use_static_shape:
self.assertAllEqual(batch_shape, flat.batch_shape)
self.assertAllEqual(batch_shape, self.evaluate(flat.batch_shape_tensor()))
base_sample = self.evaluate(base.sample(3, seed=test_util.test_seed()))
flat_sample = self.evaluate(flat.sample(3, seed=test_util.test_seed()))
self.assertAllEqual([3] + batch_shape + [num_elements], flat_sample.shape)
base_sample = flat_sample.reshape(base_sample.shape)
base_log_prob = self.evaluate(base.log_prob(base_sample))
flat_log_prob = self.evaluate(flat.log_prob(flat_sample))
self.assertAllEqual([3] + batch_shape, flat_log_prob.shape)
self.assertAllClose(base_log_prob, flat_log_prob)
def _MakeModelFn(self):
Root = tfd.JointDistributionCoroutine.Root # pylint: disable=invalid-name
def model_fn():
yield Root(tfd.Normal(self._input(0.), self._input(1.)))
yield Root(
tfd.Independent(
tfd.Normal(self._input(tf.zeros(2)), self._input(tf.ones(2))), 1))
return model_fn
@parameterized.named_parameters(
(
'Sequential',
lambda self: tfd.JointDistributionSequential([ # pylint: disable=g-long-lambda
tfd.Normal(self._input(0.), self._input(1.)),
tfd.Independent(
tfd.Normal(self._input(tf.zeros(2)), self._input(tf.ones(2))),
1),
]),
[1, 2],
[],
),
(
'Named',
lambda self: tfd.JointDistributionNamed({ # pylint: disable=g-long-lambda
'a':
tfd.Normal(self._input(0.), self._input(1.)),
'b':
tfd.Independent(
tfd.Normal(
self._input(tf.zeros(2)), self._input(tf.ones(2))), 1
),
}),
[1, 2],
[],
),
(
'Coroutine',
lambda self: tfd.JointDistributionCoroutine(self._MakeModelFn()),
[1, 2],
[],
),
(
'SequentialMixedStaticShape',
lambda self: tfd.JointDistributionSequential([ # pylint: disable=g-long-lambda
tfd.Normal(0., 1.),
tfd.Independent(
tfd.Normal(self._input(tf.zeros(2)), self._input(tf.ones(2))),
1),
]),
[1, 2],
[],
),
(
'SequentialBatch',
lambda self: tfd.JointDistributionSequential([ # pylint: disable=g-long-lambda
tfd.Normal(tf.zeros(2), tf.ones(2)),
tfd.Independent(
tfd.Normal(
self._input(tf.zeros([2, 2])), self._input(
tf.ones([2, 2]))), 1),
]),
[1, 2],
[2],
),
)
def testJointDistribution(self, dist_fn, nums_elements, batch_shape):
"""Checks that basic properties work with JointDistribution."""
base = dist_fn(self)
num_elements = sum(nums_elements)
flat = tfd.Blockwise(base, validate_args=True)
if self.use_static_shape:
self.assertAllEqual([num_elements], flat.event_shape)
self.assertAllEqual([num_elements],
self.evaluate(flat.event_shape_tensor()))
if self.use_static_shape:
self.assertAllEqual(batch_shape, flat.batch_shape)
self.assertAllEqual(batch_shape, self.evaluate(flat.batch_shape_tensor()))
base_sample = self.evaluate(base.sample(3, seed=test_util.test_seed()))
base_sample_list = tf.nest.flatten(base_sample)
flat_sample = self.evaluate(flat.sample(3, seed=test_util.test_seed()))
self.assertAllEqual([3] + batch_shape + [num_elements], flat_sample.shape)
split_points = np.cumsum([0] + nums_elements)
base_sample_list = [
flat_sample[..., start:end].reshape(base_sample_part.shape)
for start, end, base_sample_part in zip(
split_points[:-1], split_points[1:], base_sample_list)
]
base_sample = tf.nest.pack_sequence_as(base_sample, base_sample_list)
base_log_prob = self.evaluate(base.log_prob(base_sample))
flat_log_prob = self.evaluate(flat.log_prob(flat_sample))
self.assertAllEqual([3] + batch_shape, flat_log_prob.shape)
self.assertAllClose(base_log_prob, flat_log_prob)
@parameterized.named_parameters(
(
'NoBatch',
lambda self: [ # pylint: disable=g-long-lambda
tfd.Normal(self._input(0.), self._input(1.)),
tfd.Independent(
tfd.Normal(self._input(tf.zeros(2)), self._input(tf.ones(2))),
1),
],
[1, 2],
[],
),
(
'MixedStaticShape',
lambda self: [ # pylint: disable=g-long-lambda
tfd.Normal(0., 1.),
tfd.Independent(
tfd.Normal(self._input(tf.zeros(2)), self._input(tf.ones(2))),
1),
],
[1, 2],
[],
),
(
'Batch',
lambda self: [ # pylint: disable=g-long-lambda
tfd.Normal(tf.zeros(2), tf.ones(2)),
tfd.Independent(
tfd.Normal(
self._input(tf.zeros([2, 2])), self._input(
tf.ones([2, 2]))), 1),
],
[1, 2],
[2],
),
)
def testDistributionList(self, dists_fn, nums_elements, batch_shape):
"""Checks that basic properties work with a list of distributions."""
bases = dists_fn(self)
num_elements = sum(nums_elements)
flat = tfd.Blockwise(bases, validate_args=True)
if self.use_static_shape:
self.assertAllEqual([num_elements], flat.event_shape)
self.assertAllEqual([num_elements],
self.evaluate(flat.event_shape_tensor()))
if self.use_static_shape:
self.assertAllEqual(batch_shape, flat.batch_shape)
self.assertAllEqual(batch_shape, self.evaluate(flat.batch_shape_tensor()))
base_sample_list = self.evaluate(
[base.sample(3, seed=test_util.test_seed()) for base in bases])
flat_sample = self.evaluate(flat.sample(3, seed=test_util.test_seed()))
self.assertAllEqual([3] + batch_shape + [num_elements], flat_sample.shape)
split_points = np.cumsum([0] + nums_elements)
base_sample_list = [
flat_sample[..., start:end].reshape(base_sample_part.shape)
for start, end, base_sample_part in zip(
split_points[:-1], split_points[1:], base_sample_list)
]
base_log_prob = sum(
self.evaluate([
base.log_prob(base_sample)
for base, base_sample in zip(bases, base_sample_list)
]))
flat_log_prob = self.evaluate(flat.log_prob(flat_sample))
self.assertAllEqual([3] + batch_shape, flat_log_prob.shape)
self.assertAllClose(base_log_prob, flat_log_prob)
class BlockwiseTestDynamicParams(BlockwiseTestStaticParams):
use_static_shape = False
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
wenderen/servo | tests/wpt/css-tests/tools/manifest/utils.py | 113 | 1370 | import os
import urlparse
from StringIO import StringIO
blacklist = ["/", "/tools/", "/resources/", "/common/", "/conformance-checkers/", "_certs"]
def rel_path_to_url(rel_path, url_base="/"):
assert not os.path.isabs(rel_path)
if url_base[0] != "/":
url_base = "/" + url_base
if url_base[-1] != "/":
url_base += "/"
return url_base + rel_path.replace(os.sep, "/")
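# Illustrative results (hypothetical inputs; os.sep is '\\' on Windows):
#   rel_path_to_url("css" + os.sep + "test.html")          -> "/css/test.html"
#   rel_path_to_url("css" + os.sep + "test.html", "base")  -> "/base/css/test.html"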
def is_blacklisted(url):
for item in blacklist:
if item == "/":
if "/" not in url[1:]:
return True
elif url.startswith(item):
return True
return False
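# Illustrative checks (traced against the rules above):
#   is_blacklisted("/index.html")     -> True   ("/" matches top-level files only)
#   is_blacklisted("/dir/index.html") -> False
#   is_blacklisted("/tools/foo.py")   -> True   (prefix match)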
def from_os_path(path):
return path.replace(os.path.sep, "/")
def to_os_path(path):
return path.replace("/", os.path.sep)
class ContextManagerStringIO(StringIO):
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.close()
class cached_property(object):
def __init__(self, func):
self.func = func
self.__doc__ = getattr(func, "__doc__")
self.name = func.__name__
def __get__(self, obj, cls=None):
if obj is None:
return self
if self.name not in obj.__dict__:
obj.__dict__[self.name] = self.func(obj)
obj.__dict__.setdefault("__cached_properties__", set()).add(self.name)
return obj.__dict__[self.name]
| mpl-2.0 |
mcsalgado/ansible | lib/ansible/cli/playbook.py | 71 | 7832 | #!/usr/bin/env python
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
########################################################
import os
import stat
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.inventory import Inventory
from ansible.parsing import DataLoader
from ansible.utils.vars import load_extra_vars
from ansible.vars import VariableManager
#---------------------------------------------------------------------------------------------------
class PlaybookCLI(CLI):
''' code behind ansible playbook cli'''
def parse(self):
# create parser for CLI options
parser = CLI.base_parser(
usage = "%prog playbook.yml",
connect_opts=True,
meta_opts=True,
runas_opts=True,
subset_opts=True,
check_opts=True,
inventory_opts=True,
runtask_opts=True,
vault_opts=True,
fork_opts=True,
module_opts=True,
)
# ansible playbook specific opts
parser.add_option('--list-tasks', dest='listtasks', action='store_true',
help="list all tasks that would be executed")
parser.add_option('--list-tags', dest='listtags', action='store_true',
help="list all available tags")
parser.add_option('--step', dest='step', action='store_true',
help="one-step-at-a-time: confirm each task before running")
parser.add_option('--start-at-task', dest='start_at_task',
help="start the playbook at the task matching this name")
self.options, self.args = parser.parse_args()
self.parser = parser
if len(self.args) == 0:
raise AnsibleOptionsError("You must specify a playbook file to run")
self.display.verbosity = self.options.verbosity
self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True)
def run(self):
super(PlaybookCLI, self).run()
# Manage passwords
sshpass = None
becomepass = None
vault_pass = None
passwords = {}
# don't deal with privilege escalation or passwords when we don't need to
if not self.options.listhosts and not self.options.listtasks and not self.options.listtags and not self.options.syntax:
self.normalize_become_options()
(sshpass, becomepass) = self.ask_passwords()
passwords = { 'conn_pass': sshpass, 'become_pass': becomepass }
loader = DataLoader()
if self.options.vault_password_file:
# read vault_pass from a file
vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=loader)
loader.set_vault_password(vault_pass)
elif self.options.ask_vault_pass:
vault_pass = self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)[0]
loader.set_vault_password(vault_pass)
# initial error check, to make sure all specified playbooks are accessible
# before we start running anything through the playbook executor
for playbook in self.args:
if not os.path.exists(playbook):
raise AnsibleError("the playbook: %s could not be found" % playbook)
if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
raise AnsibleError("the playbook: %s does not appear to be a file" % playbook)
# create the variable manager, which will be shared throughout
# the code, ensuring a consistent view of global variables
variable_manager = VariableManager()
variable_manager.extra_vars = load_extra_vars(loader=loader, options=self.options)
# create the inventory, and filter it based on the subset specified (if any)
inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory)
variable_manager.set_inventory(inventory)
        # Note: slightly wrong, this is written so that implicit localhost
        # (which is not returned in list_hosts()) is taken into account for
# warning if inventory is empty. But it can't be taken into account for
# checking if limit doesn't match any hosts. Instead we don't worry about
# limit if only implicit localhost was in inventory to start with.
#
# Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
no_hosts = False
if len(inventory.list_hosts()) == 0:
# Empty inventory
self.display.warning("provided hosts list is empty, only localhost is available")
no_hosts = True
inventory.subset(self.options.subset)
if len(inventory.list_hosts()) == 0 and no_hosts is False:
# Invalid limit
raise AnsibleError("Specified --limit does not match any hosts")
# create the playbook executor, which manages running the plays via a task queue manager
pbex = PlaybookExecutor(playbooks=self.args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=self.display, options=self.options, passwords=passwords)
results = pbex.run()
if isinstance(results, list):
for p in results:
self.display.display('\nplaybook: %s' % p['playbook'])
i = 1
for play in p['plays']:
if play.name:
playname = play.name
else:
playname = '#' + str(i)
msg = "\n PLAY: %s" % (playname)
mytags = set()
if self.options.listtags and play.tags:
mytags = mytags.union(set(play.tags))
msg += ' TAGS: [%s]' % (','.join(mytags))
if self.options.listhosts:
playhosts = set(inventory.get_hosts(play.hosts))
msg += "\n pattern: %s\n hosts (%d):" % (play.hosts, len(playhosts))
for host in playhosts:
msg += "\n %s" % host
self.display.display(msg)
if self.options.listtags or self.options.listtasks:
taskmsg = ' tasks:'
for block in play.compile():
if not block.has_tasks():
continue
j = 1
for task in block.block:
taskmsg += "\n %s" % task
if self.options.listtags and task.tags:
taskmsg += " TAGS: [%s]" % ','.join(mytags.union(set(task.tags)))
j = j + 1
self.display.display(taskmsg)
i = i + 1
return 0
else:
return results
| gpl-3.0 |
kracekumar/crawlit | crawlit/crawlit.py | 1 | 8527 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import reppy.cache
import tldextract
import requests
from BeautifulSoup import BeautifulSoup
from time import sleep
import datetime
import platform
import sys
import os
import logging
import json
import Queue
from urlparse import urlparse
#http://stackoverflow.com/questions/16506429/check-if-element-is-already-in-a-queue
class SetQueue(Queue.Queue):
def _init(self, maxsize):
self.queue = set()
def _put(self, item):
self.queue.add(item)
def _get(self):
return self.queue.pop()
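# Queue.Queue delegates storage to _init/_put/_get, so overriding them swaps
# the backing store for a set: duplicate puts collapse into one entry and FIFO
# order is not preserved (set.pop order is arbitrary). Example:
#   q = SetQueue()
#   q.put_nowait('a'); q.put_nowait('a')
#   q.qsize()  -> 1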
def get_logger(name=u'crawlit'):
logger = logging.getLogger(name)
hdlr = logging.FileHandler(name + u'.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
return logger
MAJOR_VERSION, MINOR_VERSION, PATCH_VERSION = '0', '1', '3'
visited_urls = set([])
urls_to_visit = SetQueue()
sess = requests.Session()
# effectively "no limit"; main() lowers this when --count is given
count_to_stop = 10 ** 9
logger = get_logger()
def get_version():
return u'.'.join([MAJOR_VERSION, MINOR_VERSION, PATCH_VERSION])
def default_user_agent(name="crawlit"):
"""Return a string representing the default user agent."""
#https://github.com/kennethreitz/requests/blob/master/requests/utils.py#L440
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return u" ".join(['{0}/{1}'.format(name, get_version()),
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
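# Example result (illustrative only; components vary by interpreter and OS):
#   'crawlit/0.1.3 CPython/2.7.6 Linux/3.13.0-24-generic'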
def fetch_robots_rules(url):
robots = reppy.cache.RobotsCache()
rules = robots.fetch(url)
return rules
def is_same_domain(url1, url2):
"""Check seedurl and other url belongs to same domain.
>>>is_same_domain("http://kracekumar.wordpress.com", "http://wordpress.com")
True
>>>is_same_domain("http://kracekumar.com", "http://tumblr.com")
False
"""
return tldextract.extract(url1).domain == tldextract.extract(url2).domain
def extract_links(data):
"""
parse the page and extract all links in the data.
"""
soup = BeautifulSoup(data)
for link in soup.findAll("a"):
for pair in link.attrs:
if pair[0] == u'href':
yield pair[1]
def update_queue(url, links):
"""Add the extracted links to queue"""
rules = fetch_robots_rules(url)
for link in links:
full_link = link
# Relative url, so make it full url
if link.startswith("/"):
full_link = make_abs_url(url, link)
# Check if url is already visited
# Check if crawler is allowed to fetch
# Allow only same domain request
# filter same page url like #disqus_thread
        if (full_link not in visited_urls
                and rules.allowed(full_link, user_agent)
                and is_same_domain(url, full_link)
                and not full_link.count("#")):
            urls_to_visit.put_nowait(full_link)
def make_abs_url(seed_url, link):
parsed_url = urlparse(seed_url)
return parsed_url.scheme + "://" + parsed_url.netloc + link
def write_to_disk(url, data, dir_name, encoding="utf-8-sig"):
try:
# if not in right directory, change directory
if not os.getcwdu().endswith(dir_name):
os.chdir(dir_name)
with open(url.replace("/", "_") + ".html", "w") as f:
f.write(data)
except OSError, e:
print(e)
logger.error(e)
exit(1)
def crawl(url):
"""Crawl the page and extract links"""
if not url in visited_urls:
try:
r = sess.get(url, headers={'User-Agent': user_agent}, stream=True)
visited_urls.add(url)
# Don't download non html files
if r.headers['content-type'].startswith("text/html"):
# TODO: Use console logger
print url, datetime.datetime.now()
links = extract_links(r.text.encode("utf-8"))
update_queue(url, links)
return r.text.encode('utf-8', 'ignore')
    # e.g. a mailto: or otherwise schemeless link extracted from a page
except requests.exceptions.MissingSchema, e:
print(e)
return ""
except requests.ConnectionError, e:
# Any requests exception, log and don't quit.
print(e)
logger.error(e)
def crawl_and_store(url, dir_name):
data = crawl(url)
if data:
write_to_disk(url, data, dir_name)
user_agent = default_user_agent()
def main():
global count_to_stop
import argparse
parser = argparse.ArgumentParser(
version=get_version(),
description='crawl a given url')
parser.add_argument('url', metavar='url',
type=unicode, nargs='*', help='seed url')
parser.add_argument('--count', metavar='count',
type=int, help='max number of pages to fetch')
args = parser.parse_args()
    if len(args.url) == 0:
parser.print_usage()
exit(1)
# Focus only on one site for time being
url = args.url[0]
    # honor --count when given; otherwise keep the "no limit" default
    count_to_stop = args.count or count_to_stop
total_pages_crawled = 0
recovery_file = "crawlit_queue.json"
# check if recovery_file exists and recover data
try:
if os.path.exists(recovery_file):
with open(recovery_file) as f:
data = json.load(f)
if 'seedurl' in data and data['seedurl'] == url:
if 'seedurl' in data:
for item in data['queue']:
urls_to_visit.put_nowait(item)
if 'count' in data:
total_pages_crawled = data['count']
except OSError, e:
print(e)
logger.error(e)
exit(1)
try:
# Create a directory to store all html files
dir_name = url.replace("/", "_")
if not os.path.isdir(dir_name):
try:
# file, directory name cannot contain /
os.mkdir(dir_name)
os.chdir(dir_name)
except OSError, e:
print(e)
logger.error(e)
exit(1)
crawl_and_store(url, dir_name=dir_name)
total_pages_crawled += 1
# Now create a gevent pool and start fetching url
rules = fetch_robots_rules(url)
delay = rules.delay(user_agent) or 0
while not urls_to_visit.empty():
start = datetime.datetime.now()
crawl_and_store(urls_to_visit.get_nowait(), dir_name=dir_name)
total_pages_crawled += 1
            # sleep out whatever remains of the robots.txt crawl-delay
            remaining = delay - (datetime.datetime.now() - start).total_seconds()
            if remaining > 0:
                sleep(remaining)
if total_pages_crawled >= count_to_stop:
msg = u"Maximum crawl count ({0}) hit.".format(count_to_stop)
logger.info(msg)
print(msg)
exit(1)
except KeyboardInterrupt, e:
# write all items to json file, so next time recover
d = {'seedurl': url, 'queue': [], 'count': total_pages_crawled}
os.chdir("..")
while not urls_to_visit.empty():
d['queue'].append(urls_to_visit.get_nowait().encode("utf-8"))
try:
with open(recovery_file, "w") as f:
json.dump(d, f)
logger.info(u"Dumped data to recovery file: {0}".format(recovery_file))
except OSError, e:
print(e)
logger.error(e)
exit(1)
if __name__ == "__main__":
main()
| bsd-3-clause |
pougounias/codecombat | scripts/analytics/mixpanelLevelRates.py | 97 | 6288 | # Calculate level completion rates via mixpanel export API
# TODO: why are our 'time' fields in PST time?
targetLevels = ['dungeons-of-kithgard', 'the-raised-sword', 'endangered-burl']
eventFunnel = ['Started Level', 'Saw Victory']
import sys
from datetime import datetime, timedelta
from mixpanel import Mixpanel
try:
import json
except ImportError:
import simplejson as json
# NOTE: mixpanel dates are by day and inclusive
# E.g. '2014-12-08' is any date that day, up to 2014-12-09 12am
if __name__ == '__main__':
    if len(sys.argv) != 3:
print "Script format: <script> <api_key> <api_secret>"
else:
scriptStart = datetime.now()
api_key = sys.argv[1]
api_secret = sys.argv[2]
api = Mixpanel(
api_key = api_key,
api_secret = api_secret
)
# startDate = '2015-01-11'
# endDate = '2015-01-17'
startDate = '2015-01-23'
endDate = '2015-01-23'
# endDate = '2015-01-28'
startEvent = eventFunnel[0]
endEvent = eventFunnel[-1]
print("Requesting data for {0} to {1}".format(startDate, endDate))
data = api.request(['export'], {
'event' : eventFunnel,
'from_date' : startDate,
'to_date' : endDate
})
# Map ordering: level, user, event, day
userDataMap = {}
lines = data.split('\n')
print "Received %d entries" % len(lines)
for line in lines:
try:
                if len(line) == 0: continue
eventData = json.loads(line)
eventName = eventData['event']
if not eventName in eventFunnel:
print 'Unexpected event ' + eventName
break
if not 'properties' in eventData: continue
properties = eventData['properties']
if not 'distinct_id' in properties: continue
user = properties['distinct_id']
if not 'time' in properties: continue
time = properties['time']
pst = datetime.fromtimestamp(int(properties['time']))
utc = pst + timedelta(0, 8 * 60 * 60)
dateCreated = utc.isoformat()
day = dateCreated[0:10]
if day < startDate or day > endDate:
print "Skipping {0}".format(day)
continue
if 'levelID' in properties:
level = properties['levelID']
elif 'level' in properties:
level = properties['level'].lower().replace(' ', '-')
else:
print("Unkonwn level for", eventName)
print(properties)
break
if not level in targetLevels:
continue
# print level
if not level in userDataMap: userDataMap[level] = {}
if not user in userDataMap[level]: userDataMap[level][user] = {}
if not eventName in userDataMap[level][user] or userDataMap[level][user][eventName] > day:
userDataMap[level][user][eventName] = day
except:
print "Unexpected error:", sys.exc_info()[0]
print line
break
# print(userDataMap)
levelFunnelData = {}
for level in userDataMap:
for user in userDataMap[level]:
funnelStartDay = None
for event in userDataMap[level][user]:
day = userDataMap[level][user][event]
if not level in levelFunnelData: levelFunnelData[level] = {}
if not day in levelFunnelData[level]: levelFunnelData[level][day] = {}
if not event in levelFunnelData[level][day]: levelFunnelData[level][day][event] = 0
if eventFunnel[0] == event:
levelFunnelData[level][day][event] += 1
funnelStartDay = day
break
if funnelStartDay:
for event in userDataMap[level][user]:
if not event in levelFunnelData[level][funnelStartDay]:
levelFunnelData[level][funnelStartDay][event] = 0
if not eventFunnel[0] == event:
levelFunnelData[level][funnelStartDay][event] += 1
for i in range(1, len(eventFunnel)):
event = eventFunnel[i]
if not event in levelFunnelData[level][funnelStartDay]:
levelFunnelData[level][funnelStartDay][event] = 0
# print(levelFunnelData)
totals = {}
for level in levelFunnelData:
for day in levelFunnelData[level]:
if startEvent in levelFunnelData[level][day]:
started = levelFunnelData[level][day][startEvent]
else:
started = 0
if endEvent in levelFunnelData[level][day]:
finished = levelFunnelData[level][day][endEvent]
else:
finished = 0
if not level in totals: totals[level] = {}
if not startEvent in totals[level]: totals[level][startEvent] = 0
if not endEvent in totals[level]: totals[level][endEvent] = 0
totals[level][startEvent] += started
totals[level][endEvent] += finished
if started > 0:
print("{0}\t{1}\t{2}\t{3}\t{4}%".format(level, day, started, finished, float(finished) / started * 100))
else:
print("{0}\t{1}\t{2}\t{3}\t".format(level, day, started, finished))
for level in totals:
started = totals[level][startEvent]
finished = totals[level][endEvent]
if started > 0:
print("{0}\t{1}\t{2}\t{3}%".format(level, started, finished, float(finished) / started * 100))
else:
print("{0}\t{1}\t{2}\t".format(level, started, finished))
print("Script runtime: {0}".format(datetime.now() - scriptStart))
| mit |
jittat/cafe-grader-web | lib/assets/Lib/html/parser.py | 13 | 20137 | """A parser for HTML and XHTML."""
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import _markupbase
import re
import warnings
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*')
# Note:
# 1) the strict attrfind isn't really strict, but we can't make it
# correctly strict without breaking backward compatibility;
# 2) if you change attrfind remember to update locatestarttagend too;
# 3) if you change attrfind and/or locatestarttagend the parser will
# explode, so don't do it.
attrfind = re.compile(
r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[^\s"\'=<>`]*))?')
attrfind_tolerant = re.compile(
r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
locatestarttagend_tolerant = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:[\s/]* # optional whitespace before attribute name
(?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name
(?:\s*=+\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|"[^"]*" # LIT-enclosed value
|(?!['"])[^>\s]* # bare value
)
(?:\s*,)* # possibly followed by a comma
)?(?:\s|/(?!>))*
)*
)?
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
# </ and the tag name, so maybe this should be fixed
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParseError(Exception):
"""Exception raised for all parse errors."""
def __init__(self, msg, position=(None, None)):
assert msg
self.msg = msg
self.lineno = position[0]
self.offset = position[1]
def __str__(self):
result = self.msg
if self.lineno is not None:
result = result + ", at line %d" % self.lineno
if self.offset is not None:
result = result + ", column %d" % (self.offset + 1)
return result
class HTMLParser(_markupbase.ParserBase):
"""Find tags and other markup and call handler functions.
Usage:
p = HTMLParser()
p.feed(data)
...
p.close()
Start tags are handled by calling self.handle_starttag() or
self.handle_startendtag(); end tags by self.handle_endtag(). The
data between tags is passed from the parser to the derived class
by calling self.handle_data() with the data as argument (the data
may be split up in arbitrary chunks). Entity references are
passed by calling self.handle_entityref() with the entity
reference as the argument. Numeric character references are
passed to self.handle_charref() with the string containing the
reference as the argument.
"""
CDATA_CONTENT_ELEMENTS = ("script", "style")
def __init__(self, strict=False):
"""Initialize and reset this instance.
If strict is set to False (the default) the parser will parse invalid
markup, otherwise it will raise an error. Note that the strict mode
is deprecated.
"""
if strict:
warnings.warn("The strict mode is deprecated.",
DeprecationWarning, stacklevel=2)
self.strict = strict
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.rawdata = ''
self.lasttag = '???'
self.interesting = interesting_normal
self.cdata_elem = None
_markupbase.ParserBase.reset(self)
def feed(self, data):
r"""Feed data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n').
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle any buffered data."""
self.goahead(1)
def error(self, message):
raise HTMLParseError(message, self.getpos())
__starttag_text = None
def get_starttag_text(self):
"""Return full source of start tag: '<...>'."""
return self.__starttag_text
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
def clear_cdata_mode(self):
self.interesting = interesting_normal
self.cdata_elem = None
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
match = self.interesting.search(rawdata, i) # < or &
if match:
j = match.start()
else:
if self.cdata_elem:
break
j = n
if i < j: self.handle_data(rawdata[i:j])
i = self.updatepos(i, j)
if i == n: break
startswith = rawdata.startswith
if startswith('<', i):
if starttagopen.match(rawdata, i): # < + letter
k = self.parse_starttag(i)
elif startswith("</", i):
k = self.parse_endtag(i)
elif startswith("<!--", i):
k = self.parse_comment(i)
elif startswith("<?", i):
k = self.parse_pi(i)
elif startswith("<!", i):
if self.strict:
k = self.parse_declaration(i)
else:
k = self.parse_html_declaration(i)
elif (i + 1) < n:
self.handle_data("<")
k = i + 1
else:
break
if k < 0:
if not end:
break
if self.strict:
self.error("EOF in middle of construct")
k = rawdata.find('>', i + 1)
if k < 0:
k = rawdata.find('<', i + 1)
if k < 0:
k = i + 1
else:
k += 1
self.handle_data(rawdata[i:k])
i = self.updatepos(i, k)
elif startswith("&#", i):
match = charref.match(rawdata, i)
if match:
name = match.group()[2:-1]
self.handle_charref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
else:
if ";" in rawdata[i:]: #bail by consuming &#
self.handle_data(rawdata[0:2])
i = self.updatepos(i, 2)
break
elif startswith('&', i):
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
match = incomplete.match(rawdata, i)
if match:
# match.group() will contain at least 2 chars
if end and match.group() == rawdata[i:]:
if self.strict:
self.error("EOF in middle of entity or char ref")
else:
k = match.end()
if k <= i:
k = n
i = self.updatepos(i, i + 1)
# incomplete
break
elif (i + 1) < n:
# not the end of the buffer, and can't be confused
# with some other construct
self.handle_data("&")
i = self.updatepos(i, i + 1)
else:
break
else:
assert 0, "interesting.search() lied"
# end while
if end and i < n and not self.cdata_elem:
self.handle_data(rawdata[i:n])
i = self.updatepos(i, n)
self.rawdata = rawdata[i:]
# Internal -- parse html declarations, return length or -1 if not terminated
# See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
# See also parse_declaration in _markupbase
def parse_html_declaration(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<!', ('unexpected call to '
'parse_html_declaration()')
if rawdata[i:i+4] == '<!--':
# this case is actually already handled in goahead()
return self.parse_comment(i)
elif rawdata[i:i+3] == '<![':
return self.parse_marked_section(i)
elif rawdata[i:i+9].lower() == '<!doctype':
# find the closing >
gtpos = rawdata.find('>', i+9)
if gtpos == -1:
return -1
self.handle_decl(rawdata[i+2:gtpos])
return gtpos+1
else:
return self.parse_bogus_comment(i)
# Internal -- parse bogus comment, return length or -1 if not terminated
# see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
def parse_bogus_comment(self, i, report=1):
rawdata = self.rawdata
        assert rawdata[i:i+2] in ('<!', '</'), ('unexpected call to '
                                                'parse_bogus_comment()')
pos = rawdata.find('>', i+2)
if pos == -1:
return -1
if report:
self.handle_comment(rawdata[i+2:pos])
return pos + 1
# Internal -- parse processing instr, return end or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
match = piclose.search(rawdata, i+2) # >
if not match:
return -1
j = match.start()
self.handle_pi(rawdata[i+2: j])
j = match.end()
return j
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = match.group(1).lower()
while k < endpos:
if self.strict:
m = attrfind.match(rawdata, k)
else:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
if self.strict:
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
# Internal -- check to see if we have a complete starttag; return end
# or -1 if incomplete.
def check_for_whole_start_tag(self, i):
rawdata = self.rawdata
if self.strict:
m = locatestarttagend.match(rawdata, i)
else:
m = locatestarttagend_tolerant.match(rawdata, i)
if m:
j = m.end()
next = rawdata[j:j+1]
if next == ">":
return j + 1
if next == "/":
if rawdata.startswith("/>", j):
return j + 2
if rawdata.startswith("/", j):
# buffer boundary
return -1
# else bogus input
if self.strict:
self.updatepos(i, j + 1)
self.error("malformed empty start tag")
if j > i:
return j
else:
return i + 1
if next == "":
# end of input
return -1
if next in ("abcdefghijklmnopqrstuvwxyz=/"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
# end of input in or before attribute value, or we have the
# '/' from a '/>' ending
return -1
if self.strict:
self.updatepos(i, j)
self.error("malformed start tag")
if j > i:
return j
else:
return i + 1
raise AssertionError("we should not get here!")
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
match = endendtag.search(rawdata, i+1) # >
if not match:
return -1
gtpos = match.end()
match = endtagfind.match(rawdata, i) # </ + tag + >
if not match:
if self.cdata_elem is not None:
self.handle_data(rawdata[i:gtpos])
return gtpos
if self.strict:
self.error("bad end tag: %r" % (rawdata[i:gtpos],))
# find the name: w3.org/TR/html5/tokenization.html#tag-name-state
namematch = tagfind_tolerant.match(rawdata, i+2)
if not namematch:
# w3.org/TR/html5/tokenization.html#end-tag-open-state
if rawdata[i:i+3] == '</>':
return i+3
else:
return self.parse_bogus_comment(i)
tagname = namematch.group().lower()
# consume and ignore other stuff between the name and the >
# Note: this is not 100% correct, since we might have things like
            # </tag attr=">">, but looking for > after the name should cover
# most of the cases and is much simpler
gtpos = rawdata.find('>', namematch.end())
self.handle_endtag(tagname)
return gtpos+1
elem = match.group(1).lower() # script or style
if self.cdata_elem is not None:
if elem != self.cdata_elem:
self.handle_data(rawdata[i:gtpos])
return gtpos
self.handle_endtag(elem.lower())
self.clear_cdata_mode()
return gtpos
# Overridable -- finish processing of start+end tag: <tag.../>
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs)
self.handle_endtag(tag)
# Overridable -- handle start tag
def handle_starttag(self, tag, attrs):
pass
# Overridable -- handle end tag
def handle_endtag(self, tag):
pass
# Overridable -- handle character reference
def handle_charref(self, name):
pass
# Overridable -- handle entity reference
def handle_entityref(self, name):
pass
# Overridable -- handle data
def handle_data(self, data):
pass
# Overridable -- handle comment
def handle_comment(self, data):
pass
# Overridable -- handle declaration
def handle_decl(self, decl):
pass
# Overridable -- handle processing instruction
def handle_pi(self, data):
pass
def unknown_decl(self, data):
if self.strict:
self.error("unknown declaration: %r" % (data,))
# Internal -- helper to remove special character quoting
def unescape(self, s):
if '&' not in s:
return s
def replaceEntities(s):
s = s.groups()[0]
try:
if s[0] == "#":
s = s[1:]
if s[0] in ['x','X']:
c = int(s[1:].rstrip(';'), 16)
else:
c = int(s.rstrip(';'))
return chr(c)
except ValueError:
return '&#' + s
else:
from html.entities import html5
if s in html5:
return html5[s]
elif s.endswith(';'):
return '&' + s
for x in range(2, len(s)):
if s[:x] in html5:
return html5[s[:x]] + s[x:]
else:
return '&' + s
return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+;|\w{1,32};?))",
replaceEntities, s, flags=re.ASCII)
| mit |
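The overridable handle_* hooks above are the extension surface of this parser. A minimal, hypothetical subclass — written against the stdlib html.parser.HTMLParser, which this vendored copy mirrors — could collect link targets like this:

from html.parser import HTMLParser

class LinkCollector(HTMLParser):
    """Collect href values from <a> tags via the handle_starttag hook."""
    def __init__(self):
        HTMLParser.__init__(self)
        self.links = []

    def handle_starttag(self, tag, attrs):
        # attrs is delivered as a list of (name, value) pairs
        if tag == 'a':
            self.links.extend(v for k, v in attrs if k == 'href')

collector = LinkCollector()
collector.feed('<a href="https://example.com">example</a>')
print(collector.links)  # ['https://example.com']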
arifsetiawan/edx-platform | lms/djangoapps/instructor/management/commands/openended_stats.py | 86 | 5361 | """
Command to get statistics about open ended problems.
"""
import csv
import time
from django.core.management.base import BaseCommand
from optparse import make_option
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from courseware.courses import get_course
from courseware.models import StudentModule
from student.models import anonymous_id_for_user, CourseEnrollment
from instructor.utils import get_module_for_student
class Command(BaseCommand):
"""
Command to get statistics about open ended problems.
"""
help = "Usage: openended_stats <course_id> <problem_location> --task-number=<task_number>\n"
option_list = BaseCommand.option_list + (
make_option('--task-number',
type='int', default=0,
help="Task number to get statistics about."),
)
def handle(self, *args, **options):
"""Handler for command."""
task_number = options['task_number']
if len(args) == 2:
course_id = SlashSeparatedCourseKey.from_deprecated_string(args[0])
usage_key = course_id.make_usage_key_from_deprecated_string(args[1])
else:
print self.help
return
try:
course = get_course(course_id)
except ValueError as err:
print err
return
descriptor = modulestore().get_item(usage_key, depth=0)
if descriptor is None:
print "Location {0} not found in course".format(usage_key)
return
try:
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
print "Total students enrolled in {0}: {1}".format(course_id, enrolled_students.count())
calculate_task_statistics(enrolled_students, course, usage_key, task_number)
except KeyboardInterrupt:
print "\nOperation Cancelled"
def calculate_task_statistics(students, course, location, task_number, write_to_file=True):
"""Print stats of students."""
stats = {
OpenEndedChild.INITIAL: 0,
OpenEndedChild.ASSESSING: 0,
OpenEndedChild.POST_ASSESSMENT: 0,
OpenEndedChild.DONE: 0
}
students_with_saved_answers = []
students_with_ungraded_submissions = [] # pylint: disable=invalid-name
students_with_graded_submissions = [] # pylint: disable=invalid-name
students_with_no_state = []
student_modules = StudentModule.objects.filter(module_state_key=location, student__in=students).order_by('student')
print "Total student modules: {0}".format(student_modules.count())
for index, student_module in enumerate(student_modules):
if index % 100 == 0:
print "--- {0} students processed ---".format(index)
student = student_module.student
print "{0}:{1}".format(student.id, student.username)
module = get_module_for_student(student, location, course=course)
if module is None:
print " WARNING: No state found"
students_with_no_state.append(student)
continue
latest_task = module.child_module.get_task_number(task_number)
if latest_task is None:
print " No task state found"
students_with_no_state.append(student)
continue
task_state = latest_task.child_state
stats[task_state] += 1
print " State: {0}".format(task_state)
if task_state == OpenEndedChild.INITIAL:
if latest_task.stored_answer is not None:
students_with_saved_answers.append(student)
elif task_state == OpenEndedChild.ASSESSING:
students_with_ungraded_submissions.append(student)
elif task_state == OpenEndedChild.POST_ASSESSMENT or task_state == OpenEndedChild.DONE:
students_with_graded_submissions.append(student)
print "----------------------------------"
print "Time: {0}".format(time.strftime("%Y %b %d %H:%M:%S +0000", time.gmtime()))
print "Course: {0}".format(course.id)
print "Location: {0}".format(location)
print "No state: {0}".format(len(students_with_no_state))
print "Initial State: {0}".format(stats[OpenEndedChild.INITIAL] - len(students_with_saved_answers))
print "Saved answers: {0}".format(len(students_with_saved_answers))
print "Submitted answers: {0}".format(stats[OpenEndedChild.ASSESSING])
print "Received grades: {0}".format(stats[OpenEndedChild.POST_ASSESSMENT] + stats[OpenEndedChild.DONE])
print "----------------------------------"
if write_to_file:
filename = "stats.{0}.{1}".format(location.course, location.name)
time_stamp = time.strftime("%Y%m%d-%H%M%S")
with open('{0}.{1}.csv'.format(filename, time_stamp), 'wb') as csv_file:
writer = csv.writer(csv_file, delimiter=' ', quoting=csv.QUOTE_MINIMAL)
for student in students_with_ungraded_submissions:
writer.writerow(("ungraded", student.id, anonymous_id_for_user(student, None), student.username))
for student in students_with_graded_submissions:
writer.writerow(("graded", student.id, anonymous_id_for_user(student, None), student.username))
return stats
| agpl-3.0 |
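A hypothetical invocation of this management command — the course id and problem location below are placeholders in the deprecated slash/i4x formats the command parses, and the manage.py entry point may differ per deployment:

python manage.py openended_stats "MITx/6.002x/2012_Fall" "i4x://MITx/6.002x/combinedopenended/Problem1" --task-number=0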
Zimmi48/coq | doc/tools/coqrst/regen_readme.py | 8 | 2329 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Rebuild sphinx/README.rst from sphinx/README.template.rst."""
import re
from os import sys, path
SCRIPT_DIR = path.dirname(path.abspath(__file__))
if __name__ == "__main__" and __package__ is None:
sys.path.append(path.dirname(SCRIPT_DIR))
SPHINX_DIR = path.join(SCRIPT_DIR, "../../sphinx/")
README_TEMPLATE_PATH = path.join(SPHINX_DIR, "README.template.rst")
if len(sys.argv) == 1:
README_PATH = path.join(SPHINX_DIR, "README.rst")
elif len(sys.argv) == 2:
README_PATH = sys.argv[1]
else:
print("usage: {} [FILE]".format(sys.argv[0]))
sys.exit(1)
import sphinx
from coqrst import coqdomain
README_ROLES_MARKER = "[ROLES]"
README_OBJECTS_MARKER = "[OBJECTS]"
README_DIRECTIVES_MARKER = "[DIRECTIVES]"
FIRST_LINE_BLANKS = re.compile("^(.*)\n *\n")
def format_docstring(template, obj, *strs):
docstring = obj.__doc__.strip()
strs = strs + (FIRST_LINE_BLANKS.sub(r"\1\n", docstring),)
return template.format(*strs)
def notation_symbol(d):
return " :black_nib:" if issubclass(d, coqdomain.NotationObject) else ""
def regen_readme():
objects_docs = [format_docstring("``.. {}::``{} {}", obj, objname, notation_symbol(obj))
for objname, obj in sorted(coqdomain.CoqDomain.directives.items())]
roles = ([(name, cls)
for name, cls in sorted(coqdomain.CoqDomain.roles.items())
if not isinstance(cls, (sphinx.roles.XRefRole, coqdomain.IndexXRefRole))] +
[(fn.role_name, fn)
for fn in coqdomain.COQ_ADDITIONAL_ROLES])
roles_docs = [format_docstring("``:{}:`` {}", role, name)
for (name, role) in roles]
directives_docs = [format_docstring("``.. {}::`` {}", d, d.directive_name)
for d in coqdomain.COQ_ADDITIONAL_DIRECTIVES]
with open(README_TEMPLATE_PATH, encoding="utf-8") as readme:
contents = readme.read()
with open(README_PATH, mode="w", encoding="utf-8") as readme:
readme.write(contents
.replace(README_ROLES_MARKER, "\n\n".join(roles_docs))
.replace(README_OBJECTS_MARKER, "\n\n".join(objects_docs))
.replace(README_DIRECTIVES_MARKER, "\n\n".join(directives_docs)))
if __name__ == '__main__':
regen_readme()
| lgpl-2.1 |
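Per the argv handling above, the script regenerates the README in place when given no argument, or writes to an explicit target file:

./regen_readme.py                  # rewrites sphinx/README.rst from the template
./regen_readme.py /tmp/README.rst  # renders the template to the given path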
vprime/puuuu | env/lib/python2.7/site-packages/django/contrib/contenttypes/tests.py | 113 | 11127 | from __future__ import unicode_literals
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.views import shortcut
from django.contrib.sites.models import Site, get_current_site
from django.http import HttpRequest, Http404
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.http import urlquote
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class ConcreteModel(models.Model):
name = models.CharField(max_length=10)
class ProxyModel(ConcreteModel):
class Meta:
proxy = True
@python_2_unicode_compatible
class FooWithoutUrl(models.Model):
"""
Fake model not defining ``get_absolute_url`` for
:meth:`ContentTypesTests.test_shortcut_view_without_get_absolute_url`"""
name = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.name
class FooWithUrl(FooWithoutUrl):
"""
Fake model defining ``get_absolute_url`` for
:meth:`ContentTypesTests.test_shortcut_view`
"""
def get_absolute_url(self):
return "/users/%s/" % urlquote(self.name)
class FooWithBrokenAbsoluteUrl(FooWithoutUrl):
"""
Fake model defining a ``get_absolute_url`` method containing an error
"""
def get_absolute_url(self):
return "/users/%s/" % self.unknown_field
class ContentTypesTests(TestCase):
def setUp(self):
self.old_Site_meta_installed = Site._meta.installed
ContentType.objects.clear_cache()
def tearDown(self):
Site._meta.installed = self.old_Site_meta_installed
ContentType.objects.clear_cache()
def test_lookup_cache(self):
"""
Make sure that the content type cache (see ContentTypeManager)
works correctly. Lookups for a particular content type -- by model, ID
or natural key -- should hit the database only on the first lookup.
"""
# At this point, a lookup for a ContentType should hit the DB
with self.assertNumQueries(1):
ContentType.objects.get_for_model(ContentType)
# A second hit, though, won't hit the DB, nor will a lookup by ID
# or natural key
with self.assertNumQueries(0):
ct = ContentType.objects.get_for_model(ContentType)
with self.assertNumQueries(0):
ContentType.objects.get_for_id(ct.id)
with self.assertNumQueries(0):
ContentType.objects.get_by_natural_key('contenttypes',
'contenttype')
# Once we clear the cache, another lookup will again hit the DB
ContentType.objects.clear_cache()
with self.assertNumQueries(1):
ContentType.objects.get_for_model(ContentType)
# The same should happen with a lookup by natural key
ContentType.objects.clear_cache()
with self.assertNumQueries(1):
ContentType.objects.get_by_natural_key('contenttypes',
'contenttype')
# And a second hit shouldn't hit the DB
with self.assertNumQueries(0):
ContentType.objects.get_by_natural_key('contenttypes',
'contenttype')
def test_get_for_models_empty_cache(self):
# Empty cache.
with self.assertNumQueries(1):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
})
def test_get_for_models_partial_cache(self):
# Partial cache
ContentType.objects.get_for_model(ContentType)
with self.assertNumQueries(1):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
})
def test_get_for_models_full_cache(self):
# Full cache
ContentType.objects.get_for_model(ContentType)
ContentType.objects.get_for_model(FooWithUrl)
with self.assertNumQueries(0):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
})
def test_get_for_concrete_model(self):
"""
Make sure the `for_concrete_model` kwarg correctly works
with concrete, proxy and deferred models
"""
concrete_model_ct = ContentType.objects.get_for_model(ConcreteModel)
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(ProxyModel))
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(ConcreteModel,
for_concrete_model=False))
proxy_model_ct = ContentType.objects.get_for_model(ProxyModel,
for_concrete_model=False)
self.assertNotEqual(concrete_model_ct, proxy_model_ct)
# Make sure deferred models are correctly handled
ConcreteModel.objects.create(name="Concrete")
DeferredConcreteModel = ConcreteModel.objects.only('pk').get().__class__
DeferredProxyModel = ProxyModel.objects.only('pk').get().__class__
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(DeferredConcreteModel))
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(DeferredConcreteModel,
for_concrete_model=False))
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(DeferredProxyModel))
self.assertEqual(proxy_model_ct,
ContentType.objects.get_for_model(DeferredProxyModel,
for_concrete_model=False))
def test_get_for_concrete_models(self):
"""
Make sure the `for_concrete_models` kwarg correctly works
with concrete, proxy and deferred models.
"""
concrete_model_ct = ContentType.objects.get_for_model(ConcreteModel)
cts = ContentType.objects.get_for_models(ConcreteModel, ProxyModel)
self.assertEqual(cts, {
ConcreteModel: concrete_model_ct,
ProxyModel: concrete_model_ct,
})
proxy_model_ct = ContentType.objects.get_for_model(ProxyModel,
for_concrete_model=False)
cts = ContentType.objects.get_for_models(ConcreteModel, ProxyModel,
for_concrete_models=False)
self.assertEqual(cts, {
ConcreteModel: concrete_model_ct,
ProxyModel: proxy_model_ct,
})
# Make sure deferred models are correctly handled
ConcreteModel.objects.create(name="Concrete")
DeferredConcreteModel = ConcreteModel.objects.only('pk').get().__class__
DeferredProxyModel = ProxyModel.objects.only('pk').get().__class__
cts = ContentType.objects.get_for_models(DeferredConcreteModel,
DeferredProxyModel)
self.assertEqual(cts, {
DeferredConcreteModel: concrete_model_ct,
DeferredProxyModel: concrete_model_ct,
})
cts = ContentType.objects.get_for_models(DeferredConcreteModel,
DeferredProxyModel,
for_concrete_models=False)
self.assertEqual(cts, {
DeferredConcreteModel: concrete_model_ct,
DeferredProxyModel: proxy_model_ct,
})
@override_settings(ALLOWED_HOSTS=['example.com'])
def test_shortcut_view(self):
"""
Check that the shortcut view (used for the admin "view on site"
functionality) returns a complete URL regardless of whether the sites
framework is installed
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
user_ct = ContentType.objects.get_for_model(FooWithUrl)
obj = FooWithUrl.objects.create(name="john")
if Site._meta.installed:
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://%s/users/john/" % get_current_site(request).domain,
response._headers.get("location")[1])
Site._meta.installed = False
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://Example.com/users/john/",
response._headers.get("location")[1])
def test_shortcut_view_without_get_absolute_url(self):
"""
Check that the shortcut view (used for the admin "view on site"
functionality) returns 404 when get_absolute_url is not defined.
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
user_ct = ContentType.objects.get_for_model(FooWithoutUrl)
obj = FooWithoutUrl.objects.create(name="john")
self.assertRaises(Http404, shortcut, request, user_ct.id, obj.id)
def test_shortcut_view_with_broken_get_absolute_url(self):
"""
Check that the shortcut view does not catch an AttributeError raised
by the model's get_absolute_url method.
Refs #8997.
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
user_ct = ContentType.objects.get_for_model(FooWithBrokenAbsoluteUrl)
obj = FooWithBrokenAbsoluteUrl.objects.create(name="john")
self.assertRaises(AttributeError, shortcut, request, user_ct.id, obj.id)
def test_missing_model(self):
"""
Ensures that displaying content types in admin (or anywhere) doesn't
break on leftover content type records in the DB for which no model
is defined anymore.
"""
ct = ContentType.objects.create(
name = 'Old model',
app_label = 'contenttypes',
model = 'OldModel',
)
self.assertEqual(six.text_type(ct), 'Old model')
self.assertIsNone(ct.model_class())
# Make sure stale ContentTypes can be fetched like any other object.
# Before Django 1.6 this caused a NoneType error in the caching mechanism.
# Instead, just return the ContentType object and let the app detect stale states.
ct_fetched = ContentType.objects.get_for_id(ct.pk)
self.assertIsNone(ct_fetched.model_class())
| mit |
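A minimal sketch of the cached lookups these tests exercise, assuming a hypothetical SomeModel registered in an installed app:

from django.contrib.contenttypes.models import ContentType

ct = ContentType.objects.get_for_model(SomeModel)  # first lookup hits the DB
ContentType.objects.get_for_id(ct.id)              # served from the manager cache
ContentType.objects.clear_cache()                  # next get_for_model hits the DB again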
geektophe/shinken | libexec/notify_by_email.py | 2 | 20418 | #!/usr/bin/env python
#-*-coding:utf-8-*-
# Copyright (C) 2012:
# Romain Forlot, rforlot@yahoo.com
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import socket
import logging
import getpass
import smtplib
import urllib
from optparse import OptionParser, OptionGroup
from email.mime.text import MIMEText
from email.MIMEImage import MIMEImage
from email.mime.multipart import MIMEMultipart
# Global var
shinken_image_dir = '/var/lib/shinken/share/images'
shinken_customer_logo = 'customer_logo.jpg'
webui_config_file = '/etc/shinken/modules/webui.cfg'
webui2_config_file = '/etc/shinken/modules/webui2.cfg'
webui2_image_dir = '/var/lib/shinken/share/photos'
# Set up root logging
def setup_logging():
log_level = logging.INFO
if opts.debug:
log_level = logging.DEBUG
if opts.logfile:
logging.basicConfig(filename=opts.logfile, level=log_level, format='%(asctime)s:%(levelname)s: %(message)s')
else:
logging.basicConfig(level=log_level, format='%(asctime)s:%(levelname)s: %(message)s')
def overload_test_variable():
shinken_notification_object_var = {
'service': {
'Service description': 'Test_Service',
'Service state': 'TEST',
'Service output': 'Houston, we got a problem here! Oh, wait. No. It\'s just a test.',
'Service state duration': '00h 00min 10s'
},
'host': {
'Hostname': 'Test_Host',
'Host state': 'TEST',
'Host state duration': '00h 00min 20s'
}
}
shinken_var = {
'Hostname': 'shinken',
'Host address': '127.0.0.1',
'Notification type': 'TEST',
'Date': 'Now, test'
}
return (shinken_notification_object_var, shinken_var)
def get_webui_logo():
company_logo=''
try:
webui_config_fh = open(webui2_config_file)
except IOError:
# WebUI2 not installed ...
full_logo_path = os.path.join(shinken_image_dir, shinken_customer_logo)
if os.path.isfile(full_logo_path):
    return full_logo_path
# bail out early: without a WebUI2 config or a fallback logo, the
# webui_config_fh reads below would raise NameError
return ''
if opts.webui:
# WebUI2 installed
logging.debug('Webui2 is installed')
webui_config = webui_config_fh.readlines()
for line in webui_config:
if 'company_logo' in line:
company_logo = line.rsplit('company_logo')[1].strip()
company_logo += '.png'
logging.debug('Found company logo property: %s', company_logo)
if company_logo:
full_logo_path = os.path.join(webui2_image_dir, company_logo)
if os.path.isfile(full_logo_path):
logging.debug('Found company logo file: %s', full_logo_path)
return full_logo_path
else:
logging.debug('File %s does not exist!', full_logo_path)
return ''
return company_logo
def get_webui_port():
port=''
try:
webui_config_fh = open(webui2_config_file)
except IOError:
# WebUI2 not installed, try WebUI1
try:
webui_config_fh = open(webui_config_file)
except IOError:
# No WebUI
return ''
else:
# WebUI1 installed
logging.debug('Webui1 is installed')
else:
# WebUI2 installed
logging.debug('Webui2 is installed')
logging.debug('Webui file handler: %s' % (webui_config_fh))
webui_config = webui_config_fh.readlines()
logging.debug('Webui config: %s' % (webui_config))
for line in webui_config:
if 'port' in line:
port = line.rsplit('port')[1].strip()
return port
def get_shinken_url():
if opts.webui:
hostname = socket.getfqdn()
webui_port = get_webui_port()
if not webui_port:
return
if opts.webui_url:
url = '%s/%s/%s' % (opts.webui_url, opts.notification_object, urllib.quote(shinken_var['Hostname']))
else:
url = 'http://%s:%s/%s/%s' % (hostname, webui_port, opts.notification_object, urllib.quote(shinken_var['Hostname']))
# Append service if we notify a service object
if opts.notification_object == 'service':
url += '/%s' % (urllib.quote(shinken_notification_object_var['service']['Service description']))
return url
# Get current process user that will be the mail sender
def get_user():
if opts.sender:
return opts.sender
else:
return '@'.join((getpass.getuser(), socket.getfqdn()))
#############################################################################
# Common mail functions and var
#############################################################################
mail_welcome = 'Shinken Monitoring System Notification'
mail_format = { 'html': MIMEMultipart(), 'txt': MIMEMultipart('alternative') }
# Construct mail subject field based on which object we notify
def get_mail_subject(object):
mail_subject = {
'host': 'Host %s alert for %s since %s' % (
shinken_notification_object_var['host']['Host state'],
shinken_var['Hostname'],
shinken_notification_object_var['host']['Host state duration']
),
'service': '%s on Host: %s about service %s since %s' % (
shinken_notification_object_var['service']['Service state'],
shinken_var['Hostname'],
shinken_notification_object_var['service']['Service description'],
shinken_notification_object_var['service']['Service state duration']
)
}
return mail_subject[object]
def get_content_to_send():
shinken_var.update(shinken_notification_object_var[opts.notification_object])
# Translate a comma-separated list of mail recipients into a Python list
def make_receivers_list(receivers):
if ',' in receivers:
ret = receivers.split(',')
else:
ret = [receivers]
return ret
# This just creates the mail skeleton and doesn't add any content.
# It can be used to attach multiple, different content parts.
def create_mail(format):
# Fill SMTP header and body.
# It has to be multipart since we can include an image in it.
logging.debug('Mail format: %s' % (format))
msg = mail_format[format]
logging.debug('From: %s' % (get_user()))
msg['From'] = get_user()
logging.debug('To: %s' % (opts.receivers))
msg['To'] = opts.receivers
logging.debug('Subject: %s' % (opts.prefix + get_mail_subject(opts.notification_object)))
msg['Subject'] = opts.prefix + get_mail_subject(opts.notification_object)
return msg
#############################################################################
# Txt creation lair
#############################################################################
def create_txt_message(msg):
txt_content = [mail_welcome]
get_content_to_send()
for k,v in sorted(shinken_var.iteritems()):
txt_content.append(k + ': ' + v)
# Add url at the end
url = get_shinken_url()
if url != None:
txt_content.append('More details on : %s' % url)
txt_content = '\r\n'.join(txt_content)
msgText = MIMEText(txt_content, 'plain')
msg.attach(msgText)
return msg
#############################################################################
# Html creation lair
#############################################################################
# Process customer logo into mail message so it can be referenced in it later
def add_image2mail(img, mail):
fp = open(img, 'rb')
try:
msgLogo = MIMEImage(fp.read())
msgLogo.add_header('Content-ID', '<customer_logo>')
mail.attach(msgLogo)
except:
pass
fp.close()
return mail
def create_html_message(msg):
# Get url and add it in footer
url = get_shinken_url()
logging.debug('Grabbed Shinken URL : %s' % url)
# Header part
html_content = ['''
<html>\r
<head>\r
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">\r
<style type="text/css">\r
body {text-align: center; font-family: Verdana, sans-serif; font-size: 10pt;}\r
img.logo {float: left; margin: 10px 10px 10px; vertical-align: middle}\r
span {font-family: Verdana, sans-serif; font-size: 12pt;}\r
table {text-align:center; margin-left: auto; margin-right: auto;}\r
th {white-space: nowrap;}\r
th.even {background-color: #D9D9D9;}\r
td.even {background-color: #F2F2F2;}\r
th.odd {background-color: #F2F2F2;}\r
td.odd {background-color: #FFFFFF;}\r
th,td {font-family: Verdana, sans-serif; font-size: 10pt; text-align:left;}\r
th.customer {width: 600px; background-color: #004488; color: #ffffff;}\r
</style>\r
</head>\r
<body>\r'''
]
full_logo_path = get_webui_logo()
if full_logo_path:
msg = add_image2mail(full_logo_path, msg)
html_content.append('<img src="cid:customer_logo">')
html_content.append('<table width="600px"><tr><th colspan="2"><span>%s</span></th></tr>' % mail_welcome)
else:
html_content.append('<table width="600px"><tr><th colspan="2"><span>%s</span></th></tr>' % mail_welcome)
# Update shinken_var dict with the appropriate dict depending on which
# object is notified, then we can fill the mail content.
odd=True
get_content_to_send()
logging.debug('Content to send: %s' % shinken_var)
for k,v in sorted(shinken_var.iteritems()):
logging.debug('type %s : %s' % (k, type(v)))
if odd:
html_content.append('<tr><th class="odd">' + str(k) + '</th><td class="odd">' + str(v) + '</td></tr>')
odd=False
else:
html_content.append('<tr><th class="even">' + str(k) + '</th><td class="even">' + str(v) + '</td></tr>')
odd=True
html_content.append('</table>')
if url != None:
html_content.append('More details on Shinken WebUI at : <a href="%s">%s</a></body></html>' % (url, url))
else:
html_content.append('</body></html>')
# Make final string var to send and encode it to stdout encoding
# avoiding decoding error.
html_content = '\r\n'.join(html_content)
try:
if sys.stdout.encoding is not None:
encoding = sys.stdout.encoding
else:
encoding = 'utf-8'
html_msg = html_content.encode(encoding)
except UnicodeDecodeError:
logging.debug('Content is Unicode encoded.')
html_msg = html_content.decode('utf-8').encode(encoding)
logging.debug('HTML string: %s' % html_msg)
msgText = MIMEText(html_msg, 'html', encoding)
logging.debug('MIMEText: %s' % msgText)
msg.attach(msgText)
logging.debug('Mail object: %s' % msg)
return msg
if __name__ == "__main__":
parser = OptionParser(description='Notify by email receivers of Shinken alerts. Message will be formatted in html and can embed a customer logo. To include a customer logo, just place an image named '+shinken_customer_logo+' in '+shinken_image_dir)
group_debug = OptionGroup(parser, 'Debugging and test options', 'Useful to debug the script when run under shinken processes, or to make a standalone test of the script to see what the mail looks like.')
group_general = OptionGroup(parser, 'General options', 'Default options to setup')
group_shinken = OptionGroup(parser, 'Shinken macros to specify.', 'Used to specify usual shinken macros in notifications, if not specified then it will try to get them from environment variables. You need to enable_environment_macros in shinken.cfg if you want to use them. It isn\'t recommended to use environment macros for large environments. You\'d better use options -c and -s or -h depending on which object you\'ll notify for.')
group_shinken_details = OptionGroup(parser, 'Details and additional information', 'You can include some useful additional information to notifications using these options. Good practice is to add HOST or SERVICE macros with these details and provide them to the script')
group_shinken_webui = OptionGroup(parser, 'Shinken WebUI.', 'Used to include some Shinken WebUI information in the notifications.')
# Debug and test options
group_debug.add_option('-D', '--debug', dest='debug', default=False,
action='store_true', help='Enable debug logging')
group_debug.add_option('-t', '--test', dest='test', default=False,
action='store_true', help='Generate a test mail message')
group_debug.add_option('-l', '--logfile', dest='logfile',
help='Specify a log file. Default: log to stdout.')
# General options
group_general.add_option('-f', '--format', dest='format', type='choice', choices=['txt', 'html'],
default='html', help='Mail format "html" or "txt". Default: html')
group_general.add_option('-r', '--receivers', dest='receivers',
help='Mail recipients comma-separated list')
group_general.add_option('-F', '--sender', dest='sender',
help='Sender email address, default is system user')
group_general.add_option('-S', '--SMTP', dest='smtp', default='localhost',
help='Target SMTP hostname. None for just a sendmail launch. Default: localhost')
group_general.add_option('-U', '--smtp-user', dest='smtp_user', default=None,
help='SMTP username. Default: None')
group_general.add_option('-P', '--smtp-password', dest='smtp_password', default=None,
help='SMTP password. Default: None')
group_general.add_option('-T', '--smtp-starttls', dest='smtp_starttls', default=False,
action='store_true', help='Connect to smtp using starttls')
group_general.add_option('-p', '--prefix', dest='prefix', default='',
help='Mail subject prefix. Default is no prefix')
# Shinken options
group_shinken.add_option('-n', '--notification-object', dest='notification_object', type='choice', default='host',
choices=['host', 'service'], help='Choose between host and service notification.')
group_shinken.add_option('-c', '--commonmacros', dest='commonmacros',
help='Double comma separated shinken macros in this order : "NOTIFICATIONTYPE$,,$HOSTNAME$,,$HOSTADDRESS$,,$LONGDATETIME$".')
group_shinken.add_option('-o', '--objectmacros', dest='objectmacros',
help='Double comma separated object shinken macros in this order : "$SERVICEDESC$,,$SERVICESTATE$,,$SERVICEOUTPUT$,,$SERVICEDURATION$" for a service object and "$HOSTSTATE$,,$HOSTDURATION$" for a host object')
group_shinken_details.add_option('-d', '--detailleddesc', dest='detailleddesc',
help='Specify $_SERVICEDETAILLEDDESC$ custom macros')
group_shinken_details.add_option('-i', '--impact', dest='impact',
help='Specify the $_SERVICEIMPACT$ custom macros')
group_shinken_details.add_option('-a', '--action', dest='fixaction',
help='Specify the $_SERVICEFIXACTIONS$ custom macros')
# Shinken WebUI options
group_shinken_webui.add_option('-w', '--webui', dest='webui', default=False,
action='store_true', help='Include link to the problem in Shinken WebUI.')
group_shinken_webui.add_option('-u', '--url', dest='webui_url',
help='WebUI URL as http://my_webui:port/url')
parser.add_option_group(group_debug)
parser.add_option_group(group_general)
parser.add_option_group(group_shinken)
parser.add_option_group(group_shinken_details)
parser.add_option_group(group_shinken_webui)
(opts, args) = parser.parse_args()
setup_logging()
# Check and process arguments
#
# Retrieve and setup shinken macros that make the mail content
if opts.commonmacros == None:
shinken_var = {
'Notification type': os.getenv('NAGIOS_NOTIFICATIONTYPE'),
'Hostname': os.getenv('NAGIOS_HOSTNAME'),
'Host address': os.getenv('NAGIOS_HOSTADDRESS'),
'Date' : os.getenv('NAGIOS_LONGDATETIME')
}
else:
macros = opts.commonmacros.split(',,')
shinken_var = {
'Notification type': macros[0],
'Hostname': macros[1],
'Host address': macros[2],
'Date' : macros[3]
}
if opts.objectmacros == None:
shinken_notification_object_var = {
'service': {
'Service description': os.getenv('NAGIOS_SERVICEDESC'),
'Service state': os.getenv('NAGIOS_SERVICESTATE'),
'Service output': os.getenv('NAGIOS_SERVICEOUTPUT'),
'Service state duration': os.getenv('NAGIOS_SERVICEDURATION')
},
'host': {
'Host state': os.getenv('NAGIOS_HOSTSTATE'),
'Host state duration': os.getenv('NAGIOS_HOSTDURATION')
}
}
else:
macros = opts.objectmacros.split(',,')
if opts.notification_object == 'service':
shinken_notification_object_var = {
'service': {
'Service description': macros[0],
'Service state': macros[1],
'Service output': macros[2],
'Service state duration': macros[3]
},
'host': {
'Host state': '',
'Host state duration': ''
}
}
else:
shinken_notification_object_var = {
'service': {
'Service description': '',
'Service state': '',
'Service output': '',
'Service state duration': ''
},'host': {
'Host state': macros[0],
'Host state duration': macros[1]
}
}
# Load test values
if opts.test:
shinken_notification_object_var, shinken_var = overload_test_variable()
# check required arguments
if opts.receivers == None:
logging.error('You must define at least one mail recipient using -r')
sys.exit(5)
else:
contactemail = opts.receivers
if opts.detailleddesc:
shinken_var['Detailled description'] = opts.detailleddesc.decode(sys.stdin.encoding)
if opts.impact:
shinken_var['Impact'] = opts.impact.decode(sys.stdin.encoding)
if opts.fixaction:
shinken_var['Fix actions'] = opts.fixaction.decode(sys.stdin.encoding)
receivers = make_receivers_list(opts.receivers)
logging.debug('Create mail skeleton')
mail = create_mail(opts.format)
logging.debug('Create %s mail content' % (opts.format))
if opts.format == 'html':
mail = create_html_message(mail)
elif opts.format == 'txt':
mail = create_txt_message(mail)
# Use SMTP or sendmail to send the mail ...
if opts.smtp != 'None':
logging.debug('Connect to %s smtp server' % (opts.smtp))
smtp = smtplib.SMTP(opts.smtp)
logging.debug('Send the mail')
if opts.smtp_starttls:
smtp.starttls()
if opts.smtp_user and opts.smtp_password:
smtp.login(opts.smtp_user, opts.smtp_password)
smtp.sendmail(get_user(), receivers, mail.as_string())
logging.info("Mail sent successfuly")
else:
sendmail = '/usr/sbin/sendmail'
logging.debug('Send the mail')
p = os.popen('%s -t' % sendmail, 'w')
logging.debug('Final mail : ' + mail.as_string())
p.write(mail.as_string())
status = p.close()
if status is not None:
logging.error("Sendmail returned %s" % status)
else:
logging.info("Mail sent successfuly")
| agpl-3.0 |
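A hedged example of wiring this script into a Shinken notification command: the macro strings come straight from the -c/-o help texts above, while the recipient and SMTP host are placeholders:

./notify_by_email.py -n service -r ops@example.com -S localhost -f html \
    -c '$NOTIFICATIONTYPE$,,$HOSTNAME$,,$HOSTADDRESS$,,$LONGDATETIME$' \
    -o '$SERVICEDESC$,,$SERVICESTATE$,,$SERVICEOUTPUT$,,$SERVICEDURATION$'

For a quick standalone check without real macros, the -t flag substitutes the values from overload_test_variable:

./notify_by_email.py -t -r me@example.com -S localhost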
siggame/Joueur.py | games/spiders/game_object.py | 1 | 2541 | # GameObject: An object in the game. The most basic class that all game classes should inherit from automatically.
# DO NOT MODIFY THIS FILE
# Never try to directly create an instance of this class, or modify its member variables.
# Instead, you should only be reading its variables and calling its functions.
from typing import List
from joueur.base_game_object import BaseGameObject
# <<-- Creer-Merge: imports -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# you can add additional import(s) here
# <<-- /Creer-Merge: imports -->>
class GameObject(BaseGameObject):
"""The class representing the GameObject in the Spiders game.
An object in the game. The most basic class that all game classes should inherit from automatically.
"""
def __init__(self):
"""Initializes a GameObject with basic logic as provided by the Creer code generator.
"""
BaseGameObject.__init__(self)
# private attributes to hold the properties so they appear read only
self._game_object_name = ""
self._id = ""
self._logs = []
@property
def game_object_name(self) -> str:
"""str: String representing the top level Class that this game object is an instance of. Used for reflection to create new instances on clients, but exposed for convenience should AIs want this data.
"""
return self._game_object_name
@property
def id(self) -> str:
"""str: A unique id for each instance of a GameObject or a sub class. Used for client and server communication. Should never change value after being set.
"""
return self._id
@property
def logs(self) -> List[str]:
"""list[str]: Any strings logged will be stored here. Intended for debugging.
"""
return self._logs
def log(self, message: str) -> None:
"""Adds a message to this GameObject's logs. Intended for your own debugging purposes, as strings stored here are saved in the gamelog.
Args:
message (str): A string to add to this GameObject's log. Intended for debugging.
"""
return self._run_on_server('log', {
'message': message
})
# <<-- Creer-Merge: functions -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# if you want to add any client side logic (such as state checking functions) this is where you can add them
# <<-- /Creer-Merge: functions -->>
| mit |
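The log/logs pair is the only mutable surface of this base class; a hypothetical AI turn might use it for post-game debugging, since logged strings are stored in the saved gamelog per the docstring above:

# some_spider is any GameObject subclass instance handed to the AI
some_spider.log('weaving toward the nest at (3, 4)')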
jaeilepp/mne-python | examples/realtime/plot_compute_rt_decoder.py | 4 | 3942 | """
=======================
Decoding real-time data
=======================
Supervised machine learning applied to MEG data in sensor space.
Here the classifier is updated every 5 trials and the decoding
accuracy is plotted
"""
# Authors: Mainak Jas <mainak@neuro.hut.fi>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.realtime import MockRtClient, RtEpochs
from mne.datasets import sample
print(__doc__)
# Fiff file to simulate the realtime client
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
tr_percent = 60 # Training percentage
min_trials = 10 # minimum trials after which decoding should start
# select gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=True, exclude=raw.info['bads'])
# create the mock-client object
rt_client = MockRtClient(raw)
# create the real-time epochs object
rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks, decim=1,
reject=dict(grad=4000e-13, eog=150e-6), baseline=None,
isi_max=4.)
# start the acquisition
rt_epochs.start()
# send raw buffers
rt_client.send_data(rt_epochs, picks, tmin=0, tmax=90, buffer_size=1000)
# Decoding in sensor space using a linear SVM
n_times = len(rt_epochs.times)
from sklearn import preprocessing # noqa
from sklearn.svm import SVC # noqa
from sklearn.pipeline import Pipeline # noqa
from sklearn.cross_validation import cross_val_score, ShuffleSplit # noqa
from mne.decoding import Vectorizer, FilterEstimator # noqa
scores_x, scores, std_scores = [], [], []
# don't highpass filter because it's epoched data and the signal length
# is small
filt = FilterEstimator(rt_epochs.info, None, 40)
scaler = preprocessing.StandardScaler()
vectorizer = Vectorizer()
clf = SVC(C=1, kernel='linear')
concat_classifier = Pipeline([('filter', filt), ('vector', vectorizer),
('scaler', scaler), ('svm', clf)])
data_picks = mne.pick_types(rt_epochs.info, meg='grad', eeg=False, eog=True,
stim=False, exclude=raw.info['bads'])
ax = plt.subplot(111)
ax.set_xlabel('Trials')
ax.set_ylabel('Classification score (% correct)')
ax.set_title('Real-time decoding')
ax.set_xlim([min_trials, 50])
ax.set_ylim([30, 105])
plt.axhline(50, color='k', linestyle='--', label="Chance level")
plt.show(block=False)
for ev_num, ev in enumerate(rt_epochs.iter_evoked()):
print("Just got epoch %d" % (ev_num + 1))
if ev_num == 0:
X = ev.data[None, data_picks, :]
y = int(ev.comment) # the comment attribute contains the event_id
else:
X = np.concatenate((X, ev.data[None, data_picks, :]), axis=0)
y = np.append(y, int(ev.comment))
if ev_num >= min_trials:
cv = ShuffleSplit(len(y), 5, test_size=0.2, random_state=42)
scores_t = cross_val_score(concat_classifier, X, y, cv=cv,
n_jobs=1) * 100
std_scores.append(scores_t.std())
scores.append(scores_t.mean())
scores_x.append(ev_num)
# Plot accuracy
plt.plot(scores_x[-2:], scores[-2:], '-x', color='b',
label="Classif. score")
ax.hold(True)
ax.plot(scores_x[-1], scores[-1])
hyp_limits = (np.asarray(scores) - np.asarray(std_scores),
np.asarray(scores) + np.asarray(std_scores))
fill = plt.fill_between(scores_x, hyp_limits[0], y2=hyp_limits[1],
color='b', alpha=0.5)
plt.pause(0.01)
plt.draw()
ax.collections.remove(fill) # Remove old fill area
plt.fill_between(scores_x, hyp_limits[0], y2=hyp_limits[1], color='b',
alpha=0.5)
plt.draw() # Final figure
| bsd-3-clause |
mumrah/kafka-python | kafka/consumer/group.py | 2 | 58768 | from __future__ import absolute_import, division
import copy
import logging
import socket
import time
from kafka.errors import KafkaConfigurationError, UnsupportedVersionError
from kafka.vendor import six
from kafka.client_async import KafkaClient, selectors
from kafka.consumer.fetcher import Fetcher
from kafka.consumer.subscription_state import SubscriptionState
from kafka.coordinator.consumer import ConsumerCoordinator
from kafka.coordinator.assignors.range import RangePartitionAssignor
from kafka.coordinator.assignors.roundrobin import RoundRobinPartitionAssignor
from kafka.metrics import MetricConfig, Metrics
from kafka.protocol.offset import OffsetResetStrategy
from kafka.structs import TopicPartition
from kafka.version import __version__
log = logging.getLogger(__name__)
class KafkaConsumer(six.Iterator):
"""Consume records from a Kafka cluster.
The consumer will transparently handle the failure of servers in the Kafka
cluster, and adapt as topic-partitions are created or migrate between
brokers. It also interacts with the assigned kafka Group Coordinator node
to allow multiple consumers to load balance consumption of topics (requires
kafka >= 0.9.0.0).
The consumer is not thread safe and should not be shared across threads.
Arguments:
*topics (str): optional list of topics to subscribe to. If not set,
call :meth:`~kafka.KafkaConsumer.subscribe` or
:meth:`~kafka.KafkaConsumer.assign` before consuming records.
Keyword Arguments:
bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
strings) that the consumer should contact to bootstrap initial
cluster metadata. This does not have to be the full node list.
It just needs to have at least one broker that will respond to a
Metadata API Request. Default port is 9092. If no servers are
specified, will default to localhost:9092.
client_id (str): A name for this client. This string is passed in
each request to servers and can be used to identify specific
server-side log entries that correspond to this client. Also
submitted to GroupCoordinator for logging with respect to
consumer group administration. Default: 'kafka-python-{version}'
group_id (str or None): The name of the consumer group to join for dynamic
partition assignment (if enabled), and to use for fetching and
committing offsets. If None, auto-partition assignment (via
group coordinator) and offset commits are disabled.
Default: None
key_deserializer (callable): Any callable that takes a
raw message key and returns a deserialized key.
value_deserializer (callable): Any callable that takes a
raw message value and returns a deserialized value.
fetch_min_bytes (int): Minimum amount of data the server should
return for a fetch request, otherwise wait up to
fetch_max_wait_ms for more data to accumulate. Default: 1.
fetch_max_wait_ms (int): The maximum amount of time in milliseconds
the server will block before answering the fetch request if
there isn't sufficient data to immediately satisfy the
requirement given by fetch_min_bytes. Default: 500.
fetch_max_bytes (int): The maximum amount of data the server should
return for a fetch request. This is not an absolute maximum, if the
first message in the first non-empty partition of the fetch is
larger than this value, the message will still be returned to
ensure that the consumer can make progress. NOTE: consumer performs
fetches to multiple brokers in parallel so memory usage will depend
on the number of brokers containing partitions for the topic.
Supported Kafka version >= 0.10.1.0. Default: 52428800 (50 MB).
max_partition_fetch_bytes (int): The maximum amount of data
per-partition the server will return. The maximum total memory
used for a request = #partitions * max_partition_fetch_bytes.
This size must be at least as large as the maximum message size
the server allows or else it is possible for the producer to
send messages larger than the consumer can fetch. If that
happens, the consumer can get stuck trying to fetch a large
message on a certain partition. Default: 1048576.
request_timeout_ms (int): Client request timeout in milliseconds.
Default: 305000.
retry_backoff_ms (int): Milliseconds to backoff when retrying on
errors. Default: 100.
reconnect_backoff_ms (int): The amount of time in milliseconds to
wait before attempting to reconnect to a given host.
Default: 50.
reconnect_backoff_max_ms (int): The maximum amount of time in
milliseconds to backoff/wait when reconnecting to a broker that has
repeatedly failed to connect. If provided, the backoff per host
will increase exponentially for each consecutive connection
failure, up to this maximum. Once the maximum is reached,
reconnection attempts will continue periodically with this fixed
rate. To avoid connection storms, a randomization factor of 0.2
will be applied to the backoff resulting in a random range between
20% below and 20% above the computed value. Default: 1000.
max_in_flight_requests_per_connection (int): Requests are pipelined
to kafka brokers up to this number of maximum requests per
broker connection. Default: 5.
auto_offset_reset (str): A policy for resetting offsets on
OffsetOutOfRange errors: 'earliest' will move to the oldest
available message, 'latest' will move to the most recent. Any
other value will raise the exception. Default: 'latest'.
enable_auto_commit (bool): If True, the consumer's offset will be
periodically committed in the background. Default: True.
auto_commit_interval_ms (int): Number of milliseconds between automatic
offset commits, if enable_auto_commit is True. Default: 5000.
default_offset_commit_callback (callable): Called as
callback(offsets, response) response will be either an Exception
or an OffsetCommitResponse struct. This callback can be used to
trigger custom actions when a commit request completes.
check_crcs (bool): Automatically check the CRC32 of the records
consumed. This ensures no on-the-wire or on-disk corruption to
the messages occurred. This check adds some overhead, so it may
be disabled in cases seeking extreme performance. Default: True
metadata_max_age_ms (int): The period of time in milliseconds after
which we force a refresh of metadata, even if we haven't seen any
partition leadership changes to proactively discover any new
brokers or partitions. Default: 300000
partition_assignment_strategy (list): List of objects to use to
distribute partition ownership amongst consumer instances when
group management is used.
Default: [RangePartitionAssignor, RoundRobinPartitionAssignor]
max_poll_records (int): The maximum number of records returned in a
single call to :meth:`~kafka.KafkaConsumer.poll`. Default: 500
max_poll_interval_ms (int): The maximum delay between invocations of
:meth:`~kafka.KafkaConsumer.poll` when using consumer group
management. This places an upper bound on the amount of time that
the consumer can be idle before fetching more records. If
:meth:`~kafka.KafkaConsumer.poll` is not called before expiration
of this timeout, then the consumer is considered failed and the
group will rebalance in order to reassign the partitions to another
member. Default 300000
session_timeout_ms (int): The timeout used to detect failures when
using Kafka's group management facilities. The consumer sends
periodic heartbeats to indicate its liveness to the broker. If
no heartbeats are received by the broker before the expiration of
this session timeout, then the broker will remove this consumer
from the group and initiate a rebalance. Note that the value must
be in the allowable range as configured in the broker configuration
by group.min.session.timeout.ms and group.max.session.timeout.ms.
Default: 10000
heartbeat_interval_ms (int): The expected time in milliseconds
between heartbeats to the consumer coordinator when using
Kafka's group management facilities. Heartbeats are used to ensure
that the consumer's session stays active and to facilitate
rebalancing when new consumers join or leave the group. The
value must be set lower than session_timeout_ms, but typically
should be set no higher than 1/3 of that value. It can be
adjusted even lower to control the expected time for normal
rebalances. Default: 3000
receive_buffer_bytes (int): The size of the TCP receive buffer
(SO_RCVBUF) to use when reading data. Default: None (relies on
system defaults). The java client defaults to 32768.
send_buffer_bytes (int): The size of the TCP send buffer
(SO_SNDBUF) to use when sending data. Default: None (relies on
system defaults). The java client defaults to 131072.
socket_options (list): List of tuple-arguments to socket.setsockopt
to apply to broker connection sockets. Default:
[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
consumer_timeout_ms (int): number of milliseconds to block during
message iteration before raising StopIteration (i.e., ending the
iterator). Default block forever [float('inf')].
security_protocol (str): Protocol used to communicate with brokers.
Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.
Default: PLAINTEXT.
ssl_context (ssl.SSLContext): Pre-configured SSLContext for wrapping
socket connections. If provided, all other ssl_* configurations
will be ignored. Default: None.
ssl_check_hostname (bool): Flag to configure whether ssl handshake
should verify that the certificate matches the brokers hostname.
Default: True.
ssl_cafile (str): Optional filename of ca file to use in certificate
verification. Default: None.
ssl_certfile (str): Optional filename of file in pem format containing
the client certificate, as well as any ca certificates needed to
establish the certificate's authenticity. Default: None.
ssl_keyfile (str): Optional filename containing the client private key.
Default: None.
ssl_password (str): Optional password to be used when loading the
certificate chain. Default: None.
ssl_crlfile (str): Optional filename containing the CRL to check for
certificate expiration. By default, no CRL check is done. When
providing a file, only the leaf certificate will be checked against
this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
Default: None.
ssl_ciphers (str): optionally set the available ciphers for ssl
connections. It should be a string in the OpenSSL cipher list
format. If no cipher can be selected (because compile-time options
or other configuration forbids use of all the specified ciphers),
an ssl.SSLError will be raised. See ssl.SSLContext.set_ciphers
api_version (tuple): Specify which Kafka API version to use. If set to
None, the client will attempt to infer the broker version by probing
various APIs. Different versions enable different functionality.
Examples:
(0, 9) enables full group coordination features with automatic
partition assignment and rebalancing,
(0, 8, 2) enables kafka-storage offset commits with manual
partition assignment only,
(0, 8, 1) enables zookeeper-storage offset commits with manual
partition assignment only,
(0, 8, 0) enables basic functionality but requires manual
partition assignment and offset management.
Default: None
api_version_auto_timeout_ms (int): number of milliseconds to throw a
timeout exception from the constructor when checking the broker
api version. Only applies if api_version set to None.
connections_max_idle_ms: Close idle connections after the number of
milliseconds specified by this config. The broker closes idle
connections after connections.max.idle.ms, so this avoids hitting
unexpected socket disconnected errors on the client.
Default: 540000
metric_reporters (list): A list of classes to use as metrics reporters.
Implementing the AbstractMetricsReporter interface allows plugging
in classes that will be notified of new metric creation. Default: []
metrics_num_samples (int): The number of samples maintained to compute
metrics. Default: 2
metrics_sample_window_ms (int): The maximum age in milliseconds of
samples used to compute metrics. Default: 30000
selector (selectors.BaseSelector): Provide a specific selector
implementation to use for I/O multiplexing.
Default: selectors.DefaultSelector
exclude_internal_topics (bool): Whether records from internal topics
(such as offsets) should be exposed to the consumer. If set to True
the only way to receive records from an internal topic is
subscribing to it. Requires 0.10+ Default: True
sasl_mechanism (str): Authentication mechanism when security_protocol
is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are:
PLAIN, GSSAPI, OAUTHBEARER, SCRAM-SHA-256, SCRAM-SHA-512.
sasl_plain_username (str): username for sasl PLAIN and SCRAM authentication.
Required if sasl_mechanism is PLAIN or one of the SCRAM mechanisms.
sasl_plain_password (str): password for sasl PLAIN and SCRAM authentication.
Required if sasl_mechanism is PLAIN or one of the SCRAM mechanisms.
sasl_kerberos_service_name (str): Service name to include in GSSAPI
sasl mechanism handshake. Default: 'kafka'
sasl_kerberos_domain_name (str): kerberos domain name to use in GSSAPI
sasl mechanism handshake. Default: one of bootstrap servers
sasl_oauth_token_provider (AbstractTokenProvider): OAuthBearer token provider
instance. (See kafka.oauth.abstract). Default: None
Note:
Configuration parameters are described in more detail at
https://kafka.apache.org/documentation/#consumerconfigs
"""
DEFAULT_CONFIG = {
'bootstrap_servers': 'localhost',
'client_id': 'kafka-python-' + __version__,
'group_id': None,
'key_deserializer': None,
'value_deserializer': None,
'fetch_max_wait_ms': 500,
'fetch_min_bytes': 1,
'fetch_max_bytes': 52428800,
'max_partition_fetch_bytes': 1 * 1024 * 1024,
'request_timeout_ms': 305000, # chosen to be higher than the default of max_poll_interval_ms
'retry_backoff_ms': 100,
'reconnect_backoff_ms': 50,
'reconnect_backoff_max_ms': 1000,
'max_in_flight_requests_per_connection': 5,
'auto_offset_reset': 'latest',
'enable_auto_commit': True,
'auto_commit_interval_ms': 5000,
'default_offset_commit_callback': lambda offsets, response: True,
'check_crcs': True,
'metadata_max_age_ms': 5 * 60 * 1000,
'partition_assignment_strategy': (RangePartitionAssignor, RoundRobinPartitionAssignor),
'max_poll_records': 500,
'max_poll_interval_ms': 300000,
'session_timeout_ms': 10000,
'heartbeat_interval_ms': 3000,
'receive_buffer_bytes': None,
'send_buffer_bytes': None,
'socket_options': [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
'sock_chunk_bytes': 4096, # undocumented experimental option
'sock_chunk_buffer_count': 1000, # undocumented experimental option
'consumer_timeout_ms': float('inf'),
'security_protocol': 'PLAINTEXT',
'ssl_context': None,
'ssl_check_hostname': True,
'ssl_cafile': None,
'ssl_certfile': None,
'ssl_keyfile': None,
'ssl_crlfile': None,
'ssl_password': None,
'ssl_ciphers': None,
'api_version': None,
'api_version_auto_timeout_ms': 2000,
'connections_max_idle_ms': 9 * 60 * 1000,
'metric_reporters': [],
'metrics_num_samples': 2,
'metrics_sample_window_ms': 30000,
'metric_group_prefix': 'consumer',
'selector': selectors.DefaultSelector,
'exclude_internal_topics': True,
'sasl_mechanism': None,
'sasl_plain_username': None,
'sasl_plain_password': None,
'sasl_kerberos_service_name': 'kafka',
'sasl_kerberos_domain_name': None,
'sasl_oauth_token_provider': None,
'legacy_iterator': False, # enable to revert to < 1.4.7 iterator
}
DEFAULT_SESSION_TIMEOUT_MS_0_9 = 30000
def __init__(self, *topics, **configs):
# Only check for extra config keys in top-level class
extra_configs = set(configs).difference(self.DEFAULT_CONFIG)
if extra_configs:
raise KafkaConfigurationError("Unrecognized configs: %s" % (extra_configs,))
self.config = copy.copy(self.DEFAULT_CONFIG)
self.config.update(configs)
deprecated = {'smallest': 'earliest', 'largest': 'latest'}
if self.config['auto_offset_reset'] in deprecated:
new_config = deprecated[self.config['auto_offset_reset']]
log.warning('use auto_offset_reset=%s (%s is deprecated)',
new_config, self.config['auto_offset_reset'])
self.config['auto_offset_reset'] = new_config
connections_max_idle_ms = self.config['connections_max_idle_ms']
request_timeout_ms = self.config['request_timeout_ms']
fetch_max_wait_ms = self.config['fetch_max_wait_ms']
if not (fetch_max_wait_ms < request_timeout_ms < connections_max_idle_ms):
raise KafkaConfigurationError(
"connections_max_idle_ms ({}) must be larger than "
"request_timeout_ms ({}) which must be larger than "
"fetch_max_wait_ms ({})."
.format(connections_max_idle_ms, request_timeout_ms, fetch_max_wait_ms))
metrics_tags = {'client-id': self.config['client_id']}
metric_config = MetricConfig(samples=self.config['metrics_num_samples'],
time_window_ms=self.config['metrics_sample_window_ms'],
tags=metrics_tags)
reporters = [reporter() for reporter in self.config['metric_reporters']]
self._metrics = Metrics(metric_config, reporters)
# TODO _metrics likely needs to be passed to KafkaClient, etc.
# api_version was previously a str. Accept old format for now
if isinstance(self.config['api_version'], str):
str_version = self.config['api_version']
if str_version == 'auto':
self.config['api_version'] = None
else:
self.config['api_version'] = tuple(map(int, str_version.split('.')))
log.warning('use api_version=%s [tuple] -- "%s" as str is deprecated',
str(self.config['api_version']), str_version)
self._client = KafkaClient(metrics=self._metrics, **self.config)
# Get auto-discovered version from client if necessary
if self.config['api_version'] is None:
self.config['api_version'] = self._client.config['api_version']
# Coordinator configurations are different for older brokers
# max_poll_interval_ms is not supported directly -- it must the be
# the same as session_timeout_ms. If the user provides one of them,
# use it for both. Otherwise use the old default of 30secs
if self.config['api_version'] < (0, 10, 1):
if 'session_timeout_ms' not in configs:
if 'max_poll_interval_ms' in configs:
self.config['session_timeout_ms'] = configs['max_poll_interval_ms']
else:
self.config['session_timeout_ms'] = self.DEFAULT_SESSION_TIMEOUT_MS_0_9
if 'max_poll_interval_ms' not in configs:
self.config['max_poll_interval_ms'] = self.config['session_timeout_ms']
if self.config['group_id'] is not None:
if self.config['request_timeout_ms'] <= self.config['session_timeout_ms']:
raise KafkaConfigurationError(
"Request timeout (%s) must be larger than session timeout (%s)" %
(self.config['request_timeout_ms'], self.config['session_timeout_ms']))
self._subscription = SubscriptionState(self.config['auto_offset_reset'])
self._fetcher = Fetcher(
self._client, self._subscription, self._metrics, **self.config)
self._coordinator = ConsumerCoordinator(
self._client, self._subscription, self._metrics,
assignors=self.config['partition_assignment_strategy'],
**self.config)
self._closed = False
self._iterator = None
self._consumer_timeout = float('inf')
if topics:
self._subscription.subscribe(topics=topics)
self._client.set_topics(topics)
def bootstrap_connected(self):
"""Return True if the bootstrap is connected."""
return self._client.bootstrap_connected()
def assign(self, partitions):
"""Manually assign a list of TopicPartitions to this consumer.
Arguments:
partitions (list of TopicPartition): Assignment for this instance.
Raises:
IllegalStateError: If consumer has already called
:meth:`~kafka.KafkaConsumer.subscribe`.
Warning:
It is not possible to use both manual partition assignment with
:meth:`~kafka.KafkaConsumer.assign` and group assignment with
:meth:`~kafka.KafkaConsumer.subscribe`.
Note:
This interface does not support incremental assignment and will
replace the previous assignment (if there was one).
Note:
Manual topic assignment through this method does not use the
consumer's group management functionality. As such, there will be
no rebalance operation triggered when group membership or cluster
and topic metadata change.
"""
self._subscription.assign_from_user(partitions)
self._client.set_topics([tp.topic for tp in partitions])
def assignment(self):
"""Get the TopicPartitions currently assigned to this consumer.
If partitions were directly assigned using
:meth:`~kafka.KafkaConsumer.assign`, then this will simply return the
same partitions that were previously assigned. If topics were
subscribed using :meth:`~kafka.KafkaConsumer.subscribe`, then this will
give the set of topic partitions currently assigned to the consumer
(which may be None if the assignment hasn't happened yet, or if the
partitions are in the process of being reassigned).
Returns:
set: {TopicPartition, ...}
"""
return self._subscription.assigned_partitions()
def close(self, autocommit=True):
"""Close the consumer, waiting indefinitely for any needed cleanup.
Keyword Arguments:
autocommit (bool): If auto-commit is configured for this consumer,
this optional flag causes the consumer to attempt to commit any
pending consumed offsets prior to close. Default: True
"""
if self._closed:
return
log.debug("Closing the KafkaConsumer.")
self._closed = True
self._coordinator.close(autocommit=autocommit)
self._metrics.close()
self._client.close()
try:
self.config['key_deserializer'].close()
except AttributeError:
pass
try:
self.config['value_deserializer'].close()
except AttributeError:
pass
log.debug("The KafkaConsumer has closed.")
def commit_async(self, offsets=None, callback=None):
"""Commit offsets to kafka asynchronously, optionally firing callback.
This commits offsets only to Kafka. The offsets committed using this API
will be used on the first fetch after every rebalance and also on
startup. As such, if you need to store offsets in anything other than
Kafka, this API should not be used. To avoid re-processing the last
message read if a consumer is restarted, the committed offset should be
the next message your application should consume, i.e.: last_offset + 1.
This is an asynchronous call and will not block. Any errors encountered
are either passed to the callback (if provided) or discarded.
Arguments:
offsets (dict, optional): {TopicPartition: OffsetAndMetadata} dict
to commit with the configured group_id. Defaults to currently
consumed offsets for all subscribed partitions.
callback (callable, optional): Called as callback(offsets, response)
with response as either an Exception or an OffsetCommitResponse
struct. This callback can be used to trigger custom actions when
a commit request completes.
Returns:
kafka.future.Future
"""
assert self.config['api_version'] >= (0, 8, 1), 'Requires >= Kafka 0.8.1'
assert self.config['group_id'] is not None, 'Requires group_id'
if offsets is None:
offsets = self._subscription.all_consumed_offsets()
log.debug("Committing offsets: %s", offsets)
future = self._coordinator.commit_offsets_async(
offsets, callback=callback)
return future
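# Illustrative async-commit sketch (not part of the library; the handler
# name is hypothetical):
#
# def on_commit(offsets, response):
#     if isinstance(response, Exception):
#         log.error('Commit failed: %s', response)
#     else:
#         log.debug('Committed: %s', offsets)
#
# consumer.commit_async(callback=on_commit)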
def commit(self, offsets=None):
"""Commit offsets to kafka, blocking until success or error.
This commits offsets only to Kafka. The offsets committed using this API
will be used on the first fetch after every rebalance and also on
startup. As such, if you need to store offsets in anything other than
Kafka, this API should not be used. To avoid re-processing the last
message read if a consumer is restarted, the committed offset should be
the next message your application should consume, i.e.: last_offset + 1.
Blocks until either the commit succeeds or an unrecoverable error is
encountered (in which case it is thrown to the caller).
Currently only supports kafka-topic offset storage (not zookeeper).
Arguments:
offsets (dict, optional): {TopicPartition: OffsetAndMetadata} dict
to commit with the configured group_id. Defaults to currently
consumed offsets for all subscribed partitions.
"""
assert self.config['api_version'] >= (0, 8, 1), 'Requires >= Kafka 0.8.1'
assert self.config['group_id'] is not None, 'Requires group_id'
if offsets is None:
offsets = self._subscription.all_consumed_offsets()
self._coordinator.commit_offsets_sync(offsets)
def committed(self, partition, metadata=False):
"""Get the last committed offset for the given partition.
This offset will be used as the position for the consumer
in the event of a failure.
This call may block to do a remote call if the partition in question
isn't assigned to this consumer or if the consumer hasn't yet
initialized its cache of committed offsets.
Arguments:
partition (TopicPartition): The partition to check.
metadata (bool, optional): If True, return OffsetAndMetadata struct
instead of offset int. Default: False.
Returns:
The last committed offset (int or OffsetAndMetadata), or None if there was no prior commit.
"""
assert self.config['api_version'] >= (0, 8, 1), 'Requires >= Kafka 0.8.1'
assert self.config['group_id'] is not None, 'Requires group_id'
if not isinstance(partition, TopicPartition):
raise TypeError('partition must be a TopicPartition namedtuple')
if self._subscription.is_assigned(partition):
committed = self._subscription.assignment[partition].committed
if committed is None:
self._coordinator.refresh_committed_offsets_if_needed()
committed = self._subscription.assignment[partition].committed
else:
commit_map = self._coordinator.fetch_committed_offsets([partition])
if partition in commit_map:
committed = commit_map[partition]
else:
committed = None
if committed is not None:
if metadata:
return committed
else:
return committed.offset
def _fetch_all_topic_metadata(self):
"""A blocking call that fetches topic metadata for all topics in the
cluster that the user is authorized to view.
"""
cluster = self._client.cluster
if self._client._metadata_refresh_in_progress and self._client._topics:
future = cluster.request_update()
self._client.poll(future=future)
stash = cluster.need_all_topic_metadata
cluster.need_all_topic_metadata = True
future = cluster.request_update()
self._client.poll(future=future)
cluster.need_all_topic_metadata = stash
def topics(self):
"""Get all topics the user is authorized to view.
This will always issue a remote call to the cluster to fetch the latest
information.
Returns:
set: topics
"""
self._fetch_all_topic_metadata()
return self._client.cluster.topics()
def partitions_for_topic(self, topic):
"""This method first checks the local metadata cache for information
about the topic. If the topic is not found (either because the topic
does not exist, the user is not authorized to view the topic, or the
metadata cache is not populated), then it will issue a metadata update
call to the cluster.
Arguments:
topic (str): Topic to check.
Returns:
set: Partition ids
"""
cluster = self._client.cluster
partitions = cluster.partitions_for_topic(topic)
if partitions is None:
self._fetch_all_topic_metadata()
partitions = cluster.partitions_for_topic(topic)
return partitions
def poll(self, timeout_ms=0, max_records=None, update_offsets=True):
"""Fetch data from assigned topics / partitions.
Records are fetched and returned in batches by topic-partition.
On each poll, consumer will try to use the last consumed offset as the
starting offset and fetch sequentially. The last consumed offset can be
manually set through :meth:`~kafka.KafkaConsumer.seek` or automatically
set as the last committed offset for the subscribed list of partitions.
Incompatible with iterator interface -- use one or the other, not both.
Arguments:
timeout_ms (int, optional): Milliseconds spent waiting in poll if
data is not available in the buffer. If 0, returns immediately
with any records that are available currently in the buffer,
else returns empty. Must not be negative. Default: 0
max_records (int, optional): The maximum number of records returned
in a single call to :meth:`~kafka.KafkaConsumer.poll`.
Default: Inherit value from max_poll_records.
Returns:
dict: Topic to list of records since the last fetch for the
subscribed list of topics and partitions.
"""
# Note: update_offsets is an internal-use only argument. It is used to
# support the python iterator interface, which wraps consumer.poll()
# and requires that the partition offsets tracked by the fetcher are not
# updated until the iterator returns each record to the user. As such,
# the argument is not documented and should not be relied on by library
# users to not break in the future.
assert timeout_ms >= 0, 'Timeout must not be negative'
if max_records is None:
max_records = self.config['max_poll_records']
assert isinstance(max_records, int), 'max_records must be an integer'
assert max_records > 0, 'max_records must be positive'
assert not self._closed, 'KafkaConsumer is closed'
# Poll for new data until the timeout expires
start = time.time()
remaining = timeout_ms
while True:
records = self._poll_once(remaining, max_records, update_offsets=update_offsets)
if records:
return records
elapsed_ms = (time.time() - start) * 1000
remaining = timeout_ms - elapsed_ms
if remaining <= 0:
return {}
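# Illustrative poll loop (a sketch, not part of the library; assumes a
# subscribed or assigned consumer, and 'process' is a hypothetical handler):
#
# while True:
#     record_map = consumer.poll(timeout_ms=1000)
#     for tp, records in record_map.items():
#         for record in records:
#             process(record.offset, record.value)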
def _poll_once(self, timeout_ms, max_records, update_offsets=True):
"""Do one round of polling. In addition to checking for new data, this does
any needed heart-beating, auto-commits, and offset updates.
Arguments:
timeout_ms (int): The maximum time in milliseconds to block.
Returns:
dict: Map of topic to list of records (may be empty).
"""
self._coordinator.poll()
# Fetch positions if we have partitions we're subscribed to that we
# don't know the offset for
if not self._subscription.has_all_fetch_positions():
self._update_fetch_positions(self._subscription.missing_fetch_positions())
# If data is available already, e.g. from a previous network client
# poll() call to commit, then just return it immediately
records, partial = self._fetcher.fetched_records(max_records, update_offsets=update_offsets)
if records:
# Before returning the fetched records, we can send off the
# next round of fetches and avoid blocking while waiting for
# their responses, enabling pipelining while the user handles
# the fetched records.
if not partial:
futures = self._fetcher.send_fetches()
if len(futures):
self._client.poll(timeout_ms=0)
return records
# Send any new fetches (won't resend pending fetches)
futures = self._fetcher.send_fetches()
if len(futures):
self._client.poll(timeout_ms=0)
timeout_ms = min(timeout_ms, self._coordinator.time_to_next_poll() * 1000)
self._client.poll(timeout_ms=timeout_ms)
# after the long poll, we should check whether the group needs to rebalance
# prior to returning data so that the group can stabilize faster
if self._coordinator.need_rejoin():
return {}
records, _ = self._fetcher.fetched_records(max_records, update_offsets=update_offsets)
return records
def position(self, partition):
"""Get the offset of the next record that will be fetched
Arguments:
partition (TopicPartition): Partition to check
Returns:
int: Offset
"""
if not isinstance(partition, TopicPartition):
raise TypeError('partition must be a TopicPartition namedtuple')
assert self._subscription.is_assigned(partition), 'Partition is not assigned'
offset = self._subscription.assignment[partition].position
if offset is None:
self._update_fetch_positions([partition])
offset = self._subscription.assignment[partition].position
return offset
def highwater(self, partition):
"""Last known highwater offset for a partition.
A highwater offset is the offset that will be assigned to the next
message that is produced. It may be useful for calculating lag, by
comparing with the reported position. Note that both position and
highwater refer to the *next* offset -- i.e., highwater offset is
one greater than the newest available message.
Highwater offsets are returned in FetchResponse messages, so will
not be available if no FetchRequests have been sent for this partition
yet.
Arguments:
partition (TopicPartition): Partition to check
Returns:
int or None: Offset if available
"""
if not isinstance(partition, TopicPartition):
raise TypeError('partition must be a TopicPartition namedtuple')
assert self._subscription.is_assigned(partition), 'Partition is not assigned'
return self._subscription.assignment[partition].highwater
def pause(self, *partitions):
"""Suspend fetching from the requested partitions.
Future calls to :meth:`~kafka.KafkaConsumer.poll` will not return any
records from these partitions until they have been resumed using
:meth:`~kafka.KafkaConsumer.resume`.
Note: This method does not affect partition subscription. In particular,
it does not cause a group rebalance when automatic assignment is used.
Arguments:
*partitions (TopicPartition): Partitions to pause.
"""
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
for partition in partitions:
log.debug("Pausing partition %s", partition)
self._subscription.pause(partition)
# Because the iterator checks is_fetchable() on each iteration
# we expect pauses to get handled automatically and therefore
# we do not need to reset the full iterator (forcing a full refetch)
def paused(self):
"""Get the partitions that were previously paused using
:meth:`~kafka.KafkaConsumer.pause`.
Returns:
set: {partition (TopicPartition), ...}
"""
return self._subscription.paused_partitions()
def resume(self, *partitions):
"""Resume fetching from the specified (paused) partitions.
Arguments:
*partitions (TopicPartition): Partitions to resume.
"""
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
for partition in partitions:
log.debug("Resuming partition %s", partition)
self._subscription.resume(partition)
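# Illustrative pause/resume sketch (not part of the library; the topic
# name and drain_backlog() are hypothetical):
#
# tp = TopicPartition('my-topic', 0)
# consumer.pause(tp)   # subsequent poll() calls skip this partition
# drain_backlog()      # hypothetical work while fetching is suspended
# consumer.resume(tp)  # fetching continues from the paused position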
def seek(self, partition, offset):
"""Manually specify the fetch offset for a TopicPartition.
Overrides the fetch offsets that the consumer will use on the next
:meth:`~kafka.KafkaConsumer.poll`. If this API is invoked for the same
partition more than once, the latest offset will be used on the next
:meth:`~kafka.KafkaConsumer.poll`.
Note: You may lose data if this API is arbitrarily used in the middle of
consumption to reset the fetch offsets.
Arguments:
partition (TopicPartition): Partition for seek operation
offset (int): Message offset in partition
Raises:
AssertionError: If offset is not an int >= 0; or if partition is not
currently assigned.
"""
if not isinstance(partition, TopicPartition):
raise TypeError('partition must be a TopicPartition namedtuple')
assert isinstance(offset, int) and offset >= 0, 'Offset must be >= 0'
assert partition in self._subscription.assigned_partitions(), 'Unassigned partition'
log.debug("Seeking to offset %s for partition %s", offset, partition)
self._subscription.assignment[partition].seek(offset)
if not self.config['legacy_iterator']:
self._iterator = None
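# Illustrative seek sketch (not part of the library; topic and offset are
# hypothetical):
#
# tp = TopicPartition('my-topic', 0)
# consumer.seek(tp, 42)  # the next poll() fetches partition 0 from offset 42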
def seek_to_beginning(self, *partitions):
"""Seek to the oldest available offset for partitions.
Arguments:
*partitions: Optionally provide specific TopicPartitions, otherwise
default to all assigned partitions.
Raises:
AssertionError: If any partition is not currently assigned, or if
no partitions are assigned.
"""
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
if not partitions:
partitions = self._subscription.assigned_partitions()
assert partitions, 'No partitions are currently assigned'
else:
for p in partitions:
assert p in self._subscription.assigned_partitions(), 'Unassigned partition'
for tp in partitions:
log.debug("Seeking to beginning of partition %s", tp)
self._subscription.need_offset_reset(tp, OffsetResetStrategy.EARLIEST)
if not self.config['legacy_iterator']:
self._iterator = None
def seek_to_end(self, *partitions):
"""Seek to the most recent available offset for partitions.
Arguments:
*partitions: Optionally provide specific TopicPartitions, otherwise
default to all assigned partitions.
Raises:
AssertionError: If any partition is not currently assigned, or if
no partitions are assigned.
"""
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
if not partitions:
partitions = self._subscription.assigned_partitions()
assert partitions, 'No partitions are currently assigned'
else:
for p in partitions:
assert p in self._subscription.assigned_partitions(), 'Unassigned partition'
for tp in partitions:
log.debug("Seeking to end of partition %s", tp)
self._subscription.need_offset_reset(tp, OffsetResetStrategy.LATEST)
if not self.config['legacy_iterator']:
self._iterator = None
def subscribe(self, topics=(), pattern=None, listener=None):
"""Subscribe to a list of topics, or a topic regex pattern.
Partitions will be dynamically assigned via a group coordinator.
Topic subscriptions are not incremental: this list will replace the
current assignment (if there is one).
This method is incompatible with :meth:`~kafka.KafkaConsumer.assign`.
Arguments:
topics (list): List of topics for subscription.
pattern (str): Pattern to match available topics. You must provide
either topics or pattern, but not both.
listener (ConsumerRebalanceListener): Optionally include listener
callback, which will be called before and after each rebalance
operation.
As part of group management, the consumer will keep track of the
list of consumers that belong to a particular group and will
trigger a rebalance operation if any of the following events
occurs:
* Number of partitions change for any of the subscribed topics
* Topic is created or deleted
* An existing member of the consumer group dies
* A new member is added to the consumer group
When any of these events are triggered, the provided listener
will be invoked first to indicate that the consumer's assignment
has been revoked, and then again when the new assignment has
been received. Note that this listener will immediately override
any listener set in a previous call to subscribe. It is
guaranteed, however, that the partitions revoked/assigned
through this interface are from topics subscribed in this call.
Raises:
IllegalStateError: If called after previously calling
:meth:`~kafka.KafkaConsumer.assign`.
AssertionError: If neither topics nor pattern is provided.
TypeError: If listener is not a ConsumerRebalanceListener.
"""
# SubscriptionState handles error checking
self._subscription.subscribe(topics=topics,
pattern=pattern,
listener=listener)
# Regex will need all topic metadata
if pattern is not None:
self._client.cluster.need_all_topic_metadata = True
self._client.set_topics([])
self._client.cluster.request_update()
log.debug("Subscribed to topic pattern: %s", pattern)
else:
self._client.cluster.need_all_topic_metadata = False
self._client.set_topics(self._subscription.group_subscription())
log.debug("Subscribed to topic(s): %s", topics)
def subscription(self):
"""Get the current topic subscription.
Returns:
set: {topic, ...}
"""
if self._subscription.subscription is None:
return None
return self._subscription.subscription.copy()
def unsubscribe(self):
"""Unsubscribe from all topics and clear all assigned partitions."""
self._subscription.unsubscribe()
self._coordinator.close()
self._client.cluster.need_all_topic_metadata = False
self._client.set_topics([])
log.debug("Unsubscribed all topics or patterns and assigned partitions")
if not self.config['legacy_iterator']:
self._iterator = None
def metrics(self, raw=False):
"""Get metrics on consumer performance.
This is ported from the Java Consumer, for details see:
https://kafka.apache.org/documentation/#consumer_monitoring
Warning:
This is an unstable interface. It may change in future
releases without warning.
"""
if raw:
return self._metrics.metrics.copy()
metrics = {}
for k, v in six.iteritems(self._metrics.metrics.copy()):
if k.group not in metrics:
metrics[k.group] = {}
if k.name not in metrics[k.group]:
metrics[k.group][k.name] = {}
metrics[k.group][k.name] = v.value()
return metrics
def offsets_for_times(self, timestamps):
"""Look up the offsets for the given partitions by timestamp. The
returned offset for each partition is the earliest offset whose
timestamp is greater than or equal to the given timestamp in the
corresponding partition.
This is a blocking call. The consumer does not have to be assigned the
partitions.
If the message format version in a partition is before 0.10.0, i.e.
the messages do not have timestamps, ``None`` will be returned for that
partition. ``None`` will also be returned for the partition if there
are no messages in it.
Note:
This method may block indefinitely if the partition does not exist.
Arguments:
timestamps (dict): ``{TopicPartition: int}`` mapping from partition
to the timestamp to look up. Unit should be milliseconds since
beginning of the epoch (midnight Jan 1, 1970 (UTC))
Returns:
``{TopicPartition: OffsetAndTimestamp}``: mapping from partition
to the timestamp and offset of the first message with timestamp
greater than or equal to the target timestamp.
Raises:
ValueError: If the target timestamp is negative
UnsupportedVersionError: If the broker does not support looking
up the offsets by timestamp.
KafkaTimeoutError: If fetch failed in request_timeout_ms
"""
if self.config['api_version'] <= (0, 10, 0):
raise UnsupportedVersionError(
"offsets_for_times API not supported for cluster version {}"
.format(self.config['api_version']))
for tp, ts in six.iteritems(timestamps):
timestamps[tp] = int(ts)
if ts < 0:
raise ValueError(
"The target time for partition {} is {}. The target time "
"cannot be negative.".format(tp, ts))
return self._fetcher.get_offsets_by_times(
timestamps, self.config['request_timeout_ms'])
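# Illustrative timestamp lookup (not part of the library; assumes a broker
# newer than 0.10.0 and a hypothetical topic):
#
# one_hour_ago = int(time.time() * 1000) - 3600 * 1000
# tp = TopicPartition('my-topic', 0)
# result = consumer.offsets_for_times({tp: one_hour_ago})
# if result[tp] is not None:
#     consumer.seek(tp, result[tp].offset)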
def beginning_offsets(self, partitions):
"""Get the first offset for the given partitions.
This method does not change the current consumer position of the
partitions.
Note:
This method may block indefinitely if the partition does not exist.
Arguments:
partitions (list): List of TopicPartition instances to fetch
offsets for.
Returns:
``{TopicPartition: int}``: The earliest available offsets for the
given partitions.
Raises:
UnsupportedVersionError: If the broker does not support looking
up the offsets by timestamp.
KafkaTimeoutError: If fetch failed in request_timeout_ms.
"""
offsets = self._fetcher.beginning_offsets(
partitions, self.config['request_timeout_ms'])
return offsets
def end_offsets(self, partitions):
"""Get the last offset for the given partitions. The last offset of a
partition is the offset of the upcoming message, i.e. the offset of the
last available message + 1.
This method does not change the current consumer position of the
partitions.
Note:
This method may block indefinitely if the partition does not exist.
Arguments:
partitions (list): List of TopicPartition instances to fetch
offsets for.
Returns:
``{TopicPartition: int}``: The end offsets for the given partitions.
Raises:
UnsupportedVersionError: If the broker does not support looking
up the offsets by timestamp.
KafkaTimeoutError: If fetch failed in request_timeout_ms
"""
offsets = self._fetcher.end_offsets(
partitions, self.config['request_timeout_ms'])
return offsets
def _use_consumer_group(self):
"""Return True iff this consumer can/should join a broker-coordinated group."""
if self.config['api_version'] < (0, 9):
return False
elif self.config['group_id'] is None:
return False
elif not self._subscription.partitions_auto_assigned():
return False
return True
def _update_fetch_positions(self, partitions):
"""Set the fetch position to the committed position (if there is one)
or reset it using the offset reset policy the user has configured.
Arguments:
partitions (List[TopicPartition]): The partitions that need
updating fetch positions.
Raises:
NoOffsetForPartitionError: If no offset is stored for a given
partition and no offset reset policy is defined.
"""
# Lookup any positions for partitions which are awaiting reset (which may be the
# case if the user called :meth:`seek_to_beginning` or :meth:`seek_to_end`). We do
# this check first to avoid an unnecessary lookup of committed offsets (which
# typically occurs when the user is manually assigning partitions and managing
# their own offsets).
self._fetcher.reset_offsets_if_needed(partitions)
if not self._subscription.has_all_fetch_positions():
# if we still don't have offsets for all partitions, then we should either seek
# to the last committed position or reset using the auto reset policy
if (self.config['api_version'] >= (0, 8, 1) and
self.config['group_id'] is not None):
# first refresh commits for all assigned partitions
self._coordinator.refresh_committed_offsets_if_needed()
# Then, do any offset lookups in case some positions are not known
self._fetcher.update_fetch_positions(partitions)
def _message_generator_v2(self):
timeout_ms = 1000 * (self._consumer_timeout - time.time())
record_map = self.poll(timeout_ms=timeout_ms, update_offsets=False)
for tp, records in six.iteritems(record_map):
# Generators are stateful, and it is possible that the tp / records
# here may become stale during iteration -- i.e., we seek to a
# different offset, pause consumption, or lose assignment.
for record in records:
# is_fetchable(tp) should handle assignment changes and offset
# resets; for all other changes (e.g., seeks) we'll rely on the
# outer function destroying the existing iterator/generator
# via self._iterator = None
if not self._subscription.is_fetchable(tp):
log.debug("Not returning fetched records for partition %s"
" since it is no longer fetchable", tp)
break
self._subscription.assignment[tp].position = record.offset + 1
yield record
def _message_generator(self):
assert self.assignment() or self.subscription() is not None, 'No topic subscription or manual partition assignment'
while time.time() < self._consumer_timeout:
self._coordinator.poll()
# Fetch offsets for any subscribed partitions that we aren't tracking yet
if not self._subscription.has_all_fetch_positions():
partitions = self._subscription.missing_fetch_positions()
self._update_fetch_positions(partitions)
poll_ms = min((1000 * (self._consumer_timeout - time.time())), self.config['retry_backoff_ms'])
self._client.poll(timeout_ms=poll_ms)
# after the long poll, we should check whether the group needs to rebalance
# prior to returning data so that the group can stabilize faster
if self._coordinator.need_rejoin():
continue
# We need to make sure we at least keep up with scheduled tasks,
# like heartbeats, auto-commits, and metadata refreshes
timeout_at = self._next_timeout()
# Short-circuit the fetch iterator if we are already timed out
# to avoid any unintentional interaction with fetcher setup
if time.time() > timeout_at:
continue
for msg in self._fetcher:
yield msg
if time.time() > timeout_at:
log.debug("internal iterator timeout - breaking for poll")
break
self._client.poll(timeout_ms=0)
# An else block on a for loop only executes if there was no break
# so this should only be called on a StopIteration from the fetcher
# We assume that it is safe to init_fetches when fetcher is done
# i.e., there are no more records stored internally
else:
self._fetcher.send_fetches()
def _next_timeout(self):
timeout = min(self._consumer_timeout,
self._client.cluster.ttl() / 1000.0 + time.time(),
self._coordinator.time_to_next_poll() + time.time())
return timeout
def __iter__(self): # pylint: disable=non-iterator-returned
return self
def __next__(self):
if self._closed:
raise StopIteration('KafkaConsumer closed')
# Now that the heartbeat thread runs in the background
# there should be no reason to maintain a separate iterator
# but we'll keep it available for a few releases just in case
if self.config['legacy_iterator']:
return self.next_v1()
else:
return self.next_v2()
def next_v2(self):
self._set_consumer_timeout()
while time.time() < self._consumer_timeout:
if not self._iterator:
self._iterator = self._message_generator_v2()
try:
return next(self._iterator)
except StopIteration:
self._iterator = None
raise StopIteration()
def next_v1(self):
if not self._iterator:
self._iterator = self._message_generator()
self._set_consumer_timeout()
try:
return next(self._iterator)
except StopIteration:
self._iterator = None
raise
def _set_consumer_timeout(self):
# consumer_timeout_ms can be used to stop iteration early
if self.config['consumer_timeout_ms'] >= 0:
self._consumer_timeout = time.time() + (
self.config['consumer_timeout_ms'] / 1000.0)
| apache-2.0 |
MaxPowerWasTaken/MaxPowerWasTaken.github.io | jupyter_notebooks/mnist code scratch.py | 1 | 2227 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 22 00:52:20 2017
@author: max
"""
import math
import random
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_mldata
from sklearn.cross_validation import train_test_split
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import os
os.chdir('/home/max/model_idiot/content/jupyter_notebooks')
# load mnist data
mnist = fetch_mldata('MNIST original', data_home='datasets/')
# Convert sklearn 'datasets bunch' object to Pandas
y = pd.Series(mnist.target).astype('int').astype('category')
X = pd.DataFrame(mnist.data)
# Change column-names in X to reflect that they are pixel values
X.columns = ['pixel_'+str(x) for x in range(X.shape[1])]
# Prepare to plot 9 random images
images_to_plot = 9
random_indices = random.sample(range(70000), images_to_plot)
sample_images = X.loc[random_indices, :]
sample_labels = y.loc[random_indices]
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = .4)
from sklearn.pipeline import make_pipeline
from sklearn.decomposition import PCA
tsne = TSNE()
tsne
# It is highly recommended to use another dimensionality reduction method (e.g. PCA for dense data or
# TruncatedSVD for sparse data) to reduce the number of dimensions to a reasonable amount (e.g. 50)
# if the number of features is very high.
rows=1000
sample_indices = random.sample(range(X_train.shape[0]), rows)
X_train_sample = X_train.iloc[sample_indices,:]
y_train_sample = y_train.iloc[sample_indices]
# https://www.reddit.com/r/MachineLearning/comments/47kf7w/scikitlearn_tsne_implementation/ (suggests lr=200)
pca_preprocessed_tsne = make_pipeline(PCA(n_components=50), TSNE(n_components=2, learning_rate=200, perplexity=50))
embedded_data = pca_preprocessed_tsne.fit_transform(X_train_sample)
plt.figure()
ax = plt.subplot(111)
X = embedded_data
for i in range(X.shape[0]):
print(i)
print(X[i, 0], X[i, 1])
print(str(y_train_sample.iloc[i]))
print(y_train_sample.iloc[i])
plt.text(X[i, 0], X[i, 1], str(y_train_sample.iloc[i]),
color=plt.cm.Set1(y_train_sample.iloc[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.show()
| gpl-3.0 |
dgoedkoop/QGIS | python/plugins/processing/algs/gdal/ClipVectorByExtent.py | 7 | 4458 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ClipVectorByExtent.py
---------------------
Date : November 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'November 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (QgsVectorLayer,
QgsProcessing,
QgsProcessingException,
QgsProcessingParameterDefinition,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterExtent,
QgsProcessingParameterString,
QgsProcessingParameterVectorDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
class ClipVectorByExtent(GdalAlgorithm):
INPUT = 'INPUT'
EXTENT = 'EXTENT'
OPTIONS = 'OPTIONS'
OUTPUT = 'OUTPUT'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
self.tr('Input layer')))
self.addParameter(QgsProcessingParameterExtent(self.EXTENT,
self.tr('Clipping extent')))
options_param = QgsProcessingParameterString(self.OPTIONS,
self.tr('Additional creation options'),
defaultValue='',
optional=True)
options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(options_param)
self.addParameter(QgsProcessingParameterVectorDestination(self.OUTPUT,
self.tr('Clipped (extent)')))
def name(self):
return 'clipvectorbyextent'
def displayName(self):
return self.tr('Clip vector by extent')
def group(self):
return self.tr('Vector geoprocessing')
def groupId(self):
return 'vectorgeoprocessing'
def commandName(self):
return 'ogr2ogr'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
ogrLayer, layerName = self.getOgrCompatibleSource(self.INPUT, parameters, context, feedback, executing)
source = self.parameterAsSource(parameters, self.INPUT, context)
if source is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
extent = self.parameterAsExtent(parameters, self.EXTENT, context, source.sourceCrs())
options = self.parameterAsString(parameters, self.OPTIONS, context)
outFile = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
output, outputFormat = GdalUtils.ogrConnectionStringAndFormat(outFile, context)
arguments = []
arguments.append('-spat')
arguments.append(str(extent.xMinimum()))
arguments.append(str(extent.yMinimum()))
arguments.append(str(extent.xMaximum()))
arguments.append(str(extent.yMaximum()))
arguments.append('-clipsrc spat_extent')
arguments.append(output)
arguments.append(ogrLayer)
arguments.append(layerName)
if options:
arguments.append(options)
if outputFormat:
arguments.append('-f {}'.format(outputFormat))
return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]
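# For reference, the assembled command resembles the following (paths and
# extent values are illustrative; -spat takes xmin ymin xmax ymax):
# ogr2ogr -spat <xmin> <ymin> <xmax> <ymax> -clipsrc spat_extent <output> <input> <layer>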
| gpl-2.0 |
JCA-Developpement/Odoo | addons/multi_company/__init__.py | 886 | 1054 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Abraca/debian | waflib/Tools/c.py | 329 | 1066 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib import TaskGen,Task,Utils
from waflib.Tools import c_preproc
from waflib.Tools.ccroot import link_task,stlink_task
@TaskGen.extension('.c')
def c_hook(self,node):
return self.create_compiled_task('c',node)
class c(Task.Task):
run_str='${CC} ${ARCH_ST:ARCH} ${CFLAGS} ${CPPFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${CC_SRC_F}${SRC} ${CC_TGT_F}${TGT}'
vars=['CCDEPS']
ext_in=['.h']
scan=c_preproc.scan
class cprogram(link_task):
run_str='${LINK_CC} ${LINKFLAGS} ${CCLNK_SRC_F}${SRC} ${CCLNK_TGT_F}${TGT[0].abspath()} ${RPATH_ST:RPATH} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${FRAMEWORK_ST:FRAMEWORK} ${ARCH_ST:ARCH} ${STLIB_MARKER} ${STLIBPATH_ST:STLIBPATH} ${STLIB_ST:STLIB} ${SHLIB_MARKER} ${LIBPATH_ST:LIBPATH} ${LIB_ST:LIB}'
ext_out=['.bin']
vars=['LINKDEPS']
inst_to='${BINDIR}'
class cshlib(cprogram):
inst_to='${LIBDIR}'
class cstlib(stlink_task):
pass
| gpl-2.0 |
ryanralph/DIY-Piper | Adafruit_Thermal.py | 2 | 16561 | #*************************************************************************
# This is a Python library for the Adafruit Thermal Printer.
# Pick one up at --> http://www.adafruit.com/products/597
# These printers use TTL serial to communicate, 2 pins are required.
# IMPORTANT: On 3.3V systems (e.g. Raspberry Pi), use a 10K resistor on
# the RX pin (TX on the printer, green wire), or simply leave unconnected.
#
# Adafruit invests time and resources providing this open source code.
# Please support Adafruit and open-source hardware by purchasing products
# from Adafruit!
#
# Written by Limor Fried/Ladyada for Adafruit Industries.
# Python port by Phil Burgess for Adafruit Industries.
# MIT license, all text above must be included in any redistribution.
#*************************************************************************
# This is pretty much a 1:1 direct Python port of the Adafruit_Thermal
# library for Arduino. All methods use the same naming conventions as the
# Arduino library, with only slight changes in parameter behavior where
# needed. This should simplify porting existing Adafruit_Thermal-based
# printer projects to Raspberry Pi, BeagleBone, etc. See printertest.py
# for an example.
#
# One significant change is the addition of the printImage() function,
# which ties this to the Python Imaging Library and opens the door to a
# lot of cool graphical stuff!
#
# TO DO:
# - Might use standard ConfigParser library to put thermal calibration
# settings in a global configuration file (rather than in the library).
# - Make this use proper Python library installation procedure.
# - Trap errors properly. Some stuff just falls through right now.
# - Add docstrings throughout!
# Python 2.X code using the library usually needs to include the next line:
from __future__ import print_function
from serial import Serial
import time
class Adafruit_Thermal(Serial):
resumeTime = 0.0
byteTime = 0.0
dotPrintTime = 0.033
dotFeedTime = 0.0025
prevByte = '\n'
column = 0
maxColumn = 32
charHeight = 24
lineSpacing = 8
barcodeHeight = 50
printMode = 0
defaultHeatTime = 60
def __init__(self, *args, **kwargs):
# If no parameters given, use default port & baud rate.
# If only port is passed, use default baud rate.
# If both passed, use those values.
baudrate = 19200
if len(args) == 0:
args = [ "/dev/ttyAMA0", baudrate ]
elif len(args) == 1:
args = [ args[0], baudrate ]
else:
baudrate = args[1]
# Calculate time to issue one byte to the printer.
# 11 bits (not 8) to accommodate idle, start and stop bits.
# Idle time might be unnecessary, but erring on side of
# caution here.
self.byteTime = 11.0 / float(baudrate)
Serial.__init__(self, *args, **kwargs)
# Remainder of this method was previously in begin()
# The printer can't start receiving data immediately upon
# power up -- it needs a moment to cold boot and initialize.
# Allow at least 1/2 sec of uptime before printer can
# receive data.
self.timeoutSet(0.5)
self.wake()
self.reset()
# Description of print settings from page 23 of the manual:
# ESC 7 n1 n2 n3 Setting Control Parameter Command
# Decimal: 27 55 n1 n2 n3
# Set "max heating dots", "heating time", "heating interval"
# n1 = 0-255 Max heat dots, Unit (8dots), Default: 7 (64 dots)
# n2 = 3-255 Heating time, Unit (10us), Default: 80 (800us)
# n3 = 0-255 Heating interval, Unit (10us), Default: 2 (20us)
# The more max heating dots, the more peak current will cost
# when printing, the faster printing speed. The max heating
# dots is 8*(n1+1). The more heating time, the more density,
# but the slower printing speed. If heating time is too short,
# blank page may occur. The more heating interval, the more
# clear, but the slower printing speed.
heatTime = kwargs.get('heattime', self.defaultHeatTime)
self.writeBytes(
27, # Esc
55, # 7 (print settings)
20, # Heat dots (20 = balance darkness w/no jams)
heatTime, # Lib default = 45
250) # Heat interval (500 uS = slower but darker)
# Description of print density from page 23 of the manual:
# DC2 # n Set printing density
# Decimal: 18 35 n
# D4..D0 of n is used to set the printing density.
# Density is 50% + 5% * n(D4-D0) printing density.
# D7..D5 of n is used to set the printing break time.
# Break time is n(D7-D5)*250us.
# (Unsure of the default value for either -- not documented)
printDensity = 14 # 120% (can go higher, but text gets fuzzy)
printBreakTime = 4 # 500 uS
self.writeBytes(
18, # DC2
35, # Print density
(printBreakTime << 5) | printDensity)
self.dotPrintTime = 0.03
self.dotFeedTime = 0.0021
# Because there's no flow control between the printer and computer,
# special care must be taken to avoid overrunning the printer's
# buffer. Serial output is throttled based on serial speed as well
# as an estimate of the device's print and feed rates (relatively
# slow, being bound to moving parts and physical reality). After
# an operation is issued to the printer (e.g. bitmap print), a
# timeout is set before which any other printer operations will be
# suspended. This is generally more efficient than using a delay
# in that it allows the calling code to continue with other duties
# (e.g. receiving or decoding an image) while the printer
# physically completes the task.
# Sets estimated completion time for a just-issued task.
def timeoutSet(self, x):
self.resumeTime = time.time() + x
# Waits (if necessary) for the prior task to complete.
def timeoutWait(self):
while (time.time() - self.resumeTime) < 0: pass
# Printer performance may vary based on the power supply voltage,
# thickness of paper, phase of the moon and other seemingly random
# variables. This method sets the times (in microseconds) for the
# paper to advance one vertical 'dot' when printing and feeding.
# For example, in the default initialized state, normal-sized text
# is 24 dots tall and the line spacing is 32 dots, so the time for
# one line to be issued is approximately 24 * print time + 8 * feed
# time. The default print and feed times are based on a random
# test unit, but as stated above your reality may be influenced by
# many factors. This lets you tweak the timing to avoid excessive
# delays and/or overrunning the printer buffer.
def setTimes(self, p, f):
# Units are in microseconds for
# compatibility with Arduino library
self.dotPrintTime = p / 1000000.0
self.dotFeedTime = f / 1000000.0
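# Illustrative sketch (not part of the library): with the values set in
# __init__ (0.03 s per printed dot row, 0.0021 s per fed dot row), one
# normal text line costs roughly 24 * 0.03 + 8 * 0.0021 ~= 0.74 seconds.
# A slower, darker profile might be dialled in like this:
#
# printer = Adafruit_Thermal('/dev/ttyAMA0', 19200)
# printer.setTimes(40000, 3000)  # microseconds per print/feed dot row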
# 'Raw' byte-writing method
def writeBytes(self, *args):
self.timeoutWait()
self.timeoutSet(len(args) * self.byteTime)
for arg in args:
super(Adafruit_Thermal, self).write(chr(arg))
# Override write() method to keep track of paper feed.
def write(self, *data):
for i in range(len(data)):
c = data[i]
if c != '\x13':  # don't throttle on ASCII DC3 (XOFF)
self.timeoutWait()
super(Adafruit_Thermal, self).write(c)
d = self.byteTime
if ((c == '\n') or
(self.column == self.maxColumn)):
# Newline or wrap
if self.prevByte == '\n':
# Feed line (blank)
d += ((self.charHeight +
self.lineSpacing) *
self.dotFeedTime)
else:
# Text line
d += ((self.charHeight *
self.dotPrintTime) +
(self.lineSpacing *
self.dotFeedTime))
self.column = 0
# Treat wrap as newline
# on next pass
c = '\n'
else:
self.column += 1
self.timeoutSet(d)
self.prevByte = c
# The bulk of this method was moved into __init__,
# but this is left here for compatibility with older
# code that might get ported directly from Arduino.
def begin(self, heatTime=defaultHeatTime):
self.writeBytes(
27, # Esc
55, # 7 (print settings)
20, # Heat dots (20 = balance darkness w/no jams)
heatTime, # Lib default = 45
250) # Heat interval (500 uS = slower but darker)
def reset(self):
self.prevByte = '\n' # Treat as if prior line is blank
self.column = 0
self.maxColumn = 32
self.charHeight = 24
self.lineSpacing = 8
self.barcodeHeight = 50
self.writeBytes(27, 64)
# Reset text formatting parameters.
def setDefault(self):
self.online()
self.justify('L')
self.inverseOff()
self.doubleHeightOff()
self.setLineHeight(32)
self.boldOff()
self.underlineOff()
self.setBarcodeHeight(50)
self.setSize('s')
def test(self):
self.writeBytes(18, 84)
self.timeoutSet(
self.dotPrintTime * 24 * 26 +
self.dotFeedTime * (8 * 26 + 32))
UPC_A = 0
UPC_E = 1
EAN13 = 2
EAN8 = 3
CODE39 = 4
I25 = 5
CODEBAR = 6
CODE93 = 7
CODE128 = 8
CODE11 = 9
MSI = 10
def printBarcode(self, text, type):
self.writeBytes(
29, 72, 2, # Print label below barcode
29, 119, 3, # Barcode width
29, 107, type) # Barcode type
# Print string
self.timeoutWait()
self.timeoutSet((self.barcodeHeight + 40) * self.dotPrintTime)
super(Adafruit_Thermal, self).write(text)
self.prevByte = '\n'
self.feed(2)
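# Illustrative barcode sketch (not part of the library; the digits are
# made up):
#
# printer.printBarcode('123456789012', printer.UPC_A)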
def setBarcodeHeight(self, val=50):
if val < 1:
val = 1
self.barcodeHeight = val
self.writeBytes(29, 104, val)
# === Character commands ===
INVERSE_MASK = (1 << 1)
UPDOWN_MASK = (1 << 2)
BOLD_MASK = (1 << 3)
DOUBLE_HEIGHT_MASK = (1 << 4)
DOUBLE_WIDTH_MASK = (1 << 5)
STRIKE_MASK = (1 << 6)
def setPrintMode(self, mask):
self.printMode |= mask
self.writePrintMode()
if self.printMode & self.DOUBLE_HEIGHT_MASK:
self.charHeight = 48
else:
self.charHeight = 24
if self.printMode & self.DOUBLE_WIDTH_MASK:
self.maxColumn = 16
else:
self.maxColumn = 32
def unsetPrintMode(self, mask):
self.printMode &= ~mask
self.writePrintMode()
if self.printMode & self.DOUBLE_HEIGHT_MASK:
self.charHeight = 48
else:
self.charHeight = 24
if self.printMode & self.DOUBLE_WIDTH_MASK:
self.maxColumn = 16
else:
self.maxColumn = 32
def writePrintMode(self):
self.writeBytes(27, 33, self.printMode)
def normal(self):
self.printMode = 0
self.writePrintMode()
def inverseOn(self):
self.setPrintMode(self.INVERSE_MASK)
def inverseOff(self):
self.unsetPrintMode(self.INVERSE_MASK)
def upsideDownOn(self):
self.setPrintMode(self.UPDOWN_MASK)
def upsideDownOff(self):
self.unsetPrintMode(self.UPDOWN_MASK)
def doubleHeightOn(self):
self.setPrintMode(self.DOUBLE_HEIGHT_MASK)
def doubleHeightOff(self):
self.unsetPrintMode(self.DOUBLE_HEIGHT_MASK)
def doubleWidthOn(self):
self.setPrintMode(self.DOUBLE_WIDTH_MASK)
def doubleWidthOff(self):
self.unsetPrintMode(self.DOUBLE_WIDTH_MASK)
def strikeOn(self):
self.setPrintMode(self.STRIKE_MASK)
def strikeOff(self):
self.unsetPrintMode(self.STRIKE_MASK)
def boldOn(self):
self.setPrintMode(self.BOLD_MASK)
def boldOff(self):
self.unsetPrintMode(self.BOLD_MASK)
def justify(self, value):
c = value.upper()
if c == 'C':
pos = 1
elif c == 'R':
pos = 2
else:
pos = 0
self.writeBytes(0x1B, 0x61, pos)
# Feeds by the specified number of lines
def feed(self, x=1):
# The datasheet claims sending bytes 27, 100, <x> will work,
# but it feeds much more than that. So it's done manually:
while x > 0:
self.write('\n')
x -= 1
# Feeds by the specified number of individual pixel rows
def feedRows(self, rows):
self.writeBytes(27, 74, rows)
self.timeoutSet(rows * self.dotFeedTime)
def flush(self):
self.writeBytes(12)
def setSize(self, value):
c = value.upper()
if c == 'L': # Large: double width and height
size = 0x11
self.charHeight = 48
self.maxColumn = 16
elif c == 'M': # Medium: double height
size = 0x01
self.charHeight = 48
self.maxColumn = 32
else: # Small: standard width and height
size = 0x00
self.charHeight = 24
self.maxColumn = 32
self.writeBytes(29, 33, size, 10)
self.prevByte = '\n' # Setting the size adds a linefeed
# Underlines of different weights can be produced:
# 0 - no underline
# 1 - normal underline
# 2 - thick underline
def underlineOn(self, weight=1):
self.writeBytes(27, 45, weight)
def underlineOff(self):
self.underlineOn(0)
def printBitmap(self, w, h, bitmap, LaaT=False):
rowBytes = (w + 7) / 8 # Round up to next byte boundary
if rowBytes >= 48:
rowBytesClipped = 48 # 384 pixels max width
else:
rowBytesClipped = rowBytes
# if LaaT (line-at-a-time) is True, print bitmaps
# scanline-at-a-time (rather than in chunks).
# This tends to make for much cleaner printing
# (no feed gaps) on large images...but has the
# opposite effect on small images that would fit
# in a single 'chunk', so use carefully!
if LaaT: maxChunkHeight = 1
else: maxChunkHeight = 255
i = 0
for rowStart in range(0, h, maxChunkHeight):
chunkHeight = h - rowStart
if chunkHeight > maxChunkHeight:
chunkHeight = maxChunkHeight
# Timeout wait happens here
self.writeBytes(18, 42, chunkHeight, rowBytesClipped)
for y in range(chunkHeight):
for x in range(rowBytesClipped):
super(Adafruit_Thermal, self).write(
chr(bitmap[i]))
i += 1
i += rowBytes - rowBytesClipped
self.timeoutSet(chunkHeight * self.dotPrintTime)
self.prevByte = '\n'
# Print Image. Requires Python Imaging Library. This is
# specific to the Python port and not present in the Arduino
# library. Image will be cropped to 384 pixels width if
# necessary, and converted to 1-bit w/diffusion dithering.
# For any other behavior (scale, B&W threshold, etc.), use
# the Imaging Library to perform such operations before
# passing the result to this function.
def printImage(self, image, LaaT=False):
from PIL import Image
if image.mode != '1':
image = image.convert('1')
width = image.size[0]
height = image.size[1]
if width > 384:
width = 384
rowBytes = (width + 7) / 8
bitmap = bytearray(rowBytes * height)
pixels = image.load()
for y in range(height):
n = y * rowBytes
x = 0
for b in range(rowBytes):
sum = 0
bit = 128
while bit > 0:
if x >= width: break
if pixels[x, y] == 0:
sum |= bit
x += 1
bit >>= 1
bitmap[n + b] = sum
self.printBitmap(width, height, bitmap, LaaT)
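# Illustrative image-printing sketch (not part of the library; 'logo.png'
# is a hypothetical file):
#
# from PIL import Image
# img = Image.open('logo.png')
# printer.printImage(img, LaaT=True)  # scanline mode suits large images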
# Take the printer offline. Print commands sent after this
# will be ignored until 'online' is called.
def offline(self):
self.writeBytes(27, 61, 0)
# Take the printer online. Subsequent print commands will be obeyed.
def online(self):
self.writeBytes(27, 61, 1)
# Put the printer into a low-energy state immediately.
def sleep(self):
self.sleepAfter(1)
# Put the printer into a low-energy state after
# the given number of seconds.
def sleepAfter(self, seconds):
self.writeBytes(27, 56, seconds)
def wake(self):
self.timeoutSet(0)
self.writeBytes(255)
for i in range(10):
self.writeBytes(27)
self.timeoutSet(0.1)
# Empty method, included for compatibility
# with existing code ported from Arduino.
def listen(self):
pass
# Check the status of the paper using the printers self reporting
# ability. Doesn't match the datasheet...
# Returns True for paper, False for no paper.
def hasPaper(self):
self.writeBytes(27, 118, 0)
# Bit 2 of response seems to be paper status
stat = ord(self.read(1)) & 0b00000100
# If set, we have paper; if clear, no paper
return stat == 0
def setLineHeight(self, val=32):
if val < 24:
val = 24
self.lineSpacing = val - 24
# The printer doesn't take into account the current text
# height when setting line height, making this more akin
# to inter-line spacing. Default line spacing is 32
# (char height of 24, line spacing of 8).
self.writeBytes(27, 51, val)
# Copied from Arduino lib for parity; is marked 'not working' there
def tab(self):
self.writeBytes(9)
# Copied from Arduino lib for parity; is marked 'not working' there
def setCharSpacing(self, spacing):
self.writeBytes(27, 32, 0, 10)
# Overloading print() in Python pre-3.0 is dirty pool,
# but these are here to provide more direct compatibility
# with existing code written for the Arduino library.
def print(self, *args, **kwargs):
for arg in args:
self.write(str(arg))
# For Arduino code compatibility again
def println(self, *args, **kwargs):
for arg in args:
self.write(str(arg))
self.write('\n')
| mit |
fenglu-g/incubator-airflow | airflow/contrib/operators/vertica_operator.py | 9 | 1925 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.hooks.vertica_hook import VerticaHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class VerticaOperator(BaseOperator):
"""
Executes sql code in a specific Vertica database
:param vertica_conn_id: reference to a specific Vertica database
:type vertica_conn_id: str
:param sql: the sql code to be executed. (templated)
:type sql: Can receive a str representing a sql statement,
a list of str (sql statements), or reference to a template file.
Template references are recognized by str ending in '.sql'
"""
template_fields = ('sql',)
template_ext = ('.sql',)
ui_color = '#b4e0ff'
@apply_defaults
def __init__(self, sql, vertica_conn_id='vertica_default', *args, **kwargs):
super(VerticaOperator, self).__init__(*args, **kwargs)
self.vertica_conn_id = vertica_conn_id
self.sql = sql
def execute(self, context):
self.log.info('Executing: %s', self.sql)
hook = VerticaHook(vertica_conn_id=self.vertica_conn_id)
hook.run(self.sql)
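# Illustrative DAG usage (a sketch, not part of this module; task_id, sql
# and dag are hypothetical):
#
# create_table = VerticaOperator(
#     task_id='create_table',
#     vertica_conn_id='vertica_default',
#     sql='CREATE TABLE IF NOT EXISTS demo (id INT)',
#     dag=dag)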
| apache-2.0 |
nicjhan/tango | grids/square2scrip.py | 1 | 4754 | #!/usr/bin/env python
from __future__ import print_function
import sys
import argparse
import numpy as np
import netCDF4 as nc
"""
Create a NetCDF grid definition file in SCRIP format. The grid created is a
uniform lat-lon grid.
"""
class LatLonGrid:
"""
"""
def __init__(self, num_lon_points, num_lat_points, mask=None):
self.num_lon_points = num_lon_points
self.num_lat_points = num_lat_points
self.corners = 4
self.mask = mask
if mask is None:
# Match the (lat, lon) layout of self.x / self.y so the flattened
# mask ordering is consistent with the grid centres.
self.mask = np.ones((num_lat_points, num_lon_points))
dx = 360.0 / num_lon_points
dy = 180.0 / num_lat_points
dx_half = dx / 2
dy_half = dy / 2
# Set lats and lons.
self.lon = np.linspace(0, 360, num_lon_points, endpoint=False)
# lat points exclude the poles.
self.lat = np.linspace(-90 + dy_half, 90 - dy_half, num_lat_points)
# Similar to lon, lat but specify the coordinate at every grid
# point. Also it wraps along longitude.
self.x = np.tile(self.lon, (num_lat_points, 1))
self.y = np.tile(self.lat, (num_lon_points, 1))
self.y = self.y.transpose()
def make_corners(x, y, dx, dy):
# Set grid corners, we do these one corner at a time. Start at the
# bottom left and go anti-clockwise. This is the SCRIP convention.
clon = np.empty((x.shape[0], x.shape[1], self.corners))
clon[:] = np.NAN
clon[:,:,0] = x - dx
clon[:,:,1] = x + dx
clon[:,:,2] = x + dx
clon[:,:,3] = x - dx
assert(not np.isnan(np.sum(clon)))
clat = np.empty((x.shape[0], x.shape[1], self.corners))
clat[:] = np.NAN
clat[:,:,0] = y - dy
clat[:,:,1] = y - dy
clat[:,:,2] = y + dy
clat[:,:,3] = y + dy
assert(not np.isnan(np.sum(clat)))
# The bottom latitude band should always be Southern extent.
assert(np.all(clat[0, :, 0] == -90))
assert(np.all(clat[0, :, 1] == -90))
# The top latitude band should always be Northern extent.
assert(np.all(clat[-1, :, 2] == 90))
assert(np.all(clat[-1, :, 3] == 90))
return clon, clat
self.clon, self.clat = make_corners(self.x, self.y, dx_half, dy_half)
def write(self, command, output):
"""
Create the netcdf file and write the output.
"""
f = nc.Dataset(output, 'w')
f.createDimension('grid_size',
self.num_lon_points * self.num_lat_points)
f.createDimension('grid_corners', 4)
f.createDimension('grid_rank', 2)
grid_dims = f.createVariable('grid_dims', 'i4', ('grid_rank'))
grid_dims[:] = [self.num_lon_points, self.num_lat_points]
center_lat = f.createVariable('grid_center_lat', 'f8', ('grid_size'))
center_lat.units = 'degrees'
center_lat[:] = self.y[:].flatten()
center_lon = f.createVariable('grid_center_lon', 'f8', ('grid_size'))
center_lon.units = 'degrees'
center_lon[:] = self.x[:].flatten()
imask = f.createVariable('grid_imask', 'i4', ('grid_size'))
imask.units = 'unitless'
imask[:] = self.mask[:].flatten()
corner_lat = f.createVariable('grid_corner_lat', 'f8',
('grid_size', 'grid_corners'))
corner_lat.units = 'degrees'
corner_lat[:] = self.clat[:].flatten()
corner_lon = f.createVariable('grid_corner_lon', 'f8',
('grid_size', 'grid_corners'))
corner_lon.units = 'degrees'
corner_lon[:] = self.clon[:].flatten()
f.title = '{}x{} rectangular grid'.format(self.num_lon_points,
self.num_lat_points)
f.history = command
f.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("output", help="The output file name.")
parser.add_argument("--lon_points", default=1, type=int, help="""
The number of longitude points.
""")
parser.add_argument("--lat_points", default=1, type=int, help="""
The number of latitude points.
""")
parser.add_argument("--mask", default=None, help="The mask file name.")
args = parser.parse_args()
mask = None
if args.mask is not None:
# Read in the mask.
pass
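# Hedged sketch (not in the original): one plausible implementation, assuming
# the mask file is a netCDF file holding a variable literally named 'mask';
# that variable name is an assumption, not something this script defines.
# mask_file = nc.Dataset(args.mask)
# mask = mask_file.variables['mask'][:]
# mask_file.close()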
grid = LatLonGrid(args.lon_points, args.lat_points, mask)
grid.write(' '.join(sys.argv), args.output)
return 0
if __name__ == "__main__":
sys.exit(main())
| gpl-2.0 |
JioCloud/ironic | ironic/conductor/utils.py | 2 | 5688 | # coding=utf-8
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.openstack.common import excutils
from ironic.openstack.common import log
LOG = log.getLogger(__name__)
@task_manager.require_exclusive_lock
def node_set_boot_device(task, node, device, persistent=False):
"""Set the boot device for a node.
:param task: a TaskManager instance.
:param node: The Node.
:param device: Boot device. Values are vendor-specific.
:param persistent: Whether to set next-boot, or make the change
permanent. Default: False.
"""
try:
task.driver.vendor.vendor_passthru(task, node,
device=device,
persistent=persistent,
method='set_boot_device')
except exception.UnsupportedDriverExtension:
# NOTE(deva): Some drivers, like SSH, do not support set_boot_device.
# This is not a fatal exception.
pass
@task_manager.require_exclusive_lock
def node_power_action(task, node, state):
"""Change power state or reset for a node.
Validate whether the given power transition is possible and perform
power action.
:param task: a TaskManager instance.
:param node: the Node object to act upon.
:param state: Any power state from ironic.common.states. If the
state is 'REBOOT' then a reboot will be attempted, otherwise
the node power state is directly set to 'state'.
:raises: InvalidParameterValue when the wrong state is specified
or the wrong driver info is specified.
:raises: other exceptions by the node's power driver if something
wrong occurred during the power action.
"""
context = task.context
new_state = states.POWER_ON if state == states.REBOOT else state
try:
task.driver.power.validate(task, node)
if state != states.REBOOT:
curr_state = task.driver.power.get_power_state(task, node)
except Exception as e:
with excutils.save_and_reraise_exception():
node['last_error'] = \
_("Failed to change power state to '%(target)s'. "
"Error: %(error)s") % {
'target': new_state, 'error': e}
node.save(context)
if state != states.REBOOT and curr_state == new_state:
# Neither the ironic service nor the hardware has erred. The
# node is, for some reason, already in the requested state,
# though we don't know why. eg, perhaps the user previously
# requested the node POWER_ON, the network delayed those IPMI
# packets, and they are trying again -- but the node finally
# responds to the first request, and so the second request
# gets to this check and stops.
# This isn't an error, so we'll clear last_error field
# (from previous operation), log a warning, and return.
node['last_error'] = None
node.save(context)
LOG.warn(_("Not going to change_node_power_state because "
"current state = requested state = '%(state)s'.")
% {'state': curr_state})
return
# Set the target_power_state and clear any last_error, since we're
# starting a new operation. This will expose to other processes
# and clients that work is in progress.
node['target_power_state'] = new_state
node['last_error'] = None
node.save(context)
# take power action
try:
if state != states.REBOOT:
task.driver.power.set_power_state(task, node, new_state)
else:
task.driver.power.reboot(task, node)
except Exception as e:
with excutils.save_and_reraise_exception():
node['last_error'] = \
_("Failed to change power state to '%(target)s'. "
"Error: %(error)s") % {
'target': new_state, 'error': e}
else:
# success!
node['power_state'] = new_state
finally:
node['target_power_state'] = states.NOSTATE
node.save(context)
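# Hedged usage sketch (illustrative, not from this module): the decorator
# above means callers must already hold an exclusive lock, typically via:
# with task_manager.acquire(context, node_id, shared=False) as task:
#     node_power_action(task, task.node, states.POWER_ON)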
@task_manager.require_exclusive_lock
def cleanup_after_timeout(task):
"""Cleanup deploy task after timeout.
:param task: a TaskManager instance.
"""
node = task.node
context = task.context
error_msg = _('Cleanup failed for node %(node)s after deploy timeout: '
' %(error)s')
try:
task.driver.deploy.clean_up(task, node)
except exception.IronicException as e:
msg = error_msg % {'node': node.uuid, 'error': e}
LOG.error(msg)
node.last_error = msg
node.save(context)
except Exception as e:
msg = error_msg % {'node': node.uuid, 'error': e}
LOG.error(msg)
node.last_error = _('Deploy timed out, but an unhandled exception was '
'encountered while aborting. More info may be '
'found in the log file.')
node.save(context)
| apache-2.0 |
sillydan1/WhatEverEngine | openglcsharp/Lib/lib2to3/fixes/fix_throw.py | 327 | 1586 | """Fixer for generator.throw(E, V, T).
g.throw(E) -> g.throw(E)
g.throw(E, V) -> g.throw(E(V))
g.throw(E, V, T) -> g.throw(E(V).with_traceback(T))
g.throw("foo"[, V[, T]]) will warn about string exceptions."""
# Author: Collin Winter
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ArgList, Attr, is_tuple
class FixThrow(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< any trailer< '.' 'throw' >
trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' >
>
|
power< any trailer< '.' 'throw' > trailer< '(' exc=any ')' > >
"""
def transform(self, node, results):
syms = self.syms
exc = results["exc"].clone()
if exc.type is token.STRING:
self.cannot_convert(node, "Python 3 does not support string exceptions")
return
# Leave "g.throw(E)" alone
val = results.get(u"val")
if val is None:
return
val = val.clone()
if is_tuple(val):
args = [c.clone() for c in val.children[1:-1]]
else:
val.prefix = u""
args = [val]
throw_args = results["args"]
if "tb" in results:
tb = results["tb"].clone()
tb.prefix = u""
e = Call(exc, args)
with_tb = Attr(e, Name(u'with_traceback')) + [ArgList([tb])]
throw_args.replace(pytree.Node(syms.power, with_tb))
else:
throw_args.replace(Call(exc, args))
| apache-2.0 |
40023154/final0627 | static/Brython3.1.1-20150328-091302/Lib/_strptime.py | 518 | 21683 | """Strptime-related classes and functions.
CLASSES:
LocaleTime -- Discovers and stores locale-specific time information
TimeRE -- Creates regexes for pattern matching a string of text containing
time information
FUNCTIONS:
_getlang -- Figure out what language is being used for the locale
strptime -- Calculates the time struct represented by the passed-in string
"""
import time
import locale
import calendar
from re import compile as re_compile
from re import IGNORECASE
from re import escape as re_escape
from datetime import (date as datetime_date,
timedelta as datetime_timedelta,
timezone as datetime_timezone)
try:
from _thread import allocate_lock as _thread_allocate_lock
except ImportError:
from _dummy_thread import allocate_lock as _thread_allocate_lock
__all__ = []
def _getlang():
# Figure out what the current language is set to.
return locale.getlocale(locale.LC_TIME)
class LocaleTime(object):
"""Stores and handles locale-specific information related to time.
ATTRIBUTES:
f_weekday -- full weekday names (7-item list)
a_weekday -- abbreviated weekday names (7-item list)
f_month -- full month names (13-item list; dummy value in [0], which
is added by code)
a_month -- abbreviated month names (13-item list, dummy value in
[0], which is added by code)
am_pm -- AM/PM representation (2-item list)
LC_date_time -- format string for date/time representation (string)
LC_date -- format string for date representation (string)
LC_time -- format string for time representation (string)
timezone -- daylight- and non-daylight-savings timezone representation
(2-item list of sets)
lang -- Language used by instance (2-item tuple)
"""
def __init__(self):
"""Set all attributes.
Order of methods called matters for dependency reasons.
The locale language is set at the offset and then checked again before
exiting. This is to make sure that the attributes were not set with a
mix of information from more than one locale. This would most likely
happen when using threads where one thread calls a locale-dependent
function while another thread changes the locale while the function in
the other thread is still running. Proper coding would call for
locks to prevent changing the locale while locale-dependent code is
running. The check here is done in case someone does not think about
doing this.
Only other possible issue is if someone changed the timezone and did
not call time.tzset(). That is an issue for the programmer, though,
since changing the timezone is worthless without that call.
"""
self.lang = _getlang()
self.__calc_weekday()
self.__calc_month()
self.__calc_am_pm()
self.__calc_timezone()
self.__calc_date_time()
if _getlang() != self.lang:
raise ValueError("locale changed during initialization")
def __pad(self, seq, front):
# Add '' to the front of seq if front is True, else to the back.
seq = list(seq)
if front:
seq.insert(0, '')
else:
seq.append('')
return seq
def __calc_weekday(self):
# Set self.a_weekday and self.f_weekday using the calendar
# module.
a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
f_weekday = [calendar.day_name[i].lower() for i in range(7)]
self.a_weekday = a_weekday
self.f_weekday = f_weekday
def __calc_month(self):
# Set self.f_month and self.a_month using the calendar module.
a_month = [calendar.month_abbr[i].lower() for i in range(13)]
f_month = [calendar.month_name[i].lower() for i in range(13)]
self.a_month = a_month
self.f_month = f_month
def __calc_am_pm(self):
# Set self.am_pm by using time.strftime().
# The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
# magical; just happened to have used it everywhere else where a
# static date was needed.
am_pm = []
for hour in (1, 22):
time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
am_pm.append(time.strftime("%p", time_tuple).lower())
self.am_pm = am_pm
def __calc_date_time(self):
# Set self.date_time, self.date, & self.time by using
# time.strftime().
# Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
# overloaded numbers is minimized. The order in which we search for
# values within the format string is very important; it eliminates
# possible ambiguity for what something represents.
time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
date_time = [None, None, None]
date_time[0] = time.strftime("%c", time_tuple).lower()
date_time[1] = time.strftime("%x", time_tuple).lower()
date_time[2] = time.strftime("%X", time_tuple).lower()
replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
(self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
(self.a_month[3], '%b'), (self.am_pm[1], '%p'),
('1999', '%Y'), ('99', '%y'), ('22', '%H'),
('44', '%M'), ('55', '%S'), ('76', '%j'),
('17', '%d'), ('03', '%m'), ('3', '%m'),
# '3' needed for when no leading zero.
('2', '%w'), ('10', '%I')]
replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
for tz in tz_values])
for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
current_format = date_time[offset]
for old, new in replacement_pairs:
# Must deal with possible lack of locale info
# manifesting itself as the empty string (e.g., Swedish's
# lack of AM/PM info) or a platform returning a tuple of empty
# strings (e.g., MacOS 9 having timezone as ('','')).
if old:
current_format = current_format.replace(old, new)
# If %W is used, then Sunday, 2005-01-03 will fall on week 0 since
# 2005-01-03 occurs before the first Monday of the year. Otherwise
# %U is used.
time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
if '00' in time.strftime(directive, time_tuple):
U_W = '%W'
else:
U_W = '%U'
date_time[offset] = current_format.replace('11', U_W)
self.LC_date_time = date_time[0]
self.LC_date = date_time[1]
self.LC_time = date_time[2]
def __calc_timezone(self):
# Set self.timezone by using time.tzname.
# Do not worry about possibility of time.tzname[0] == time.tzname[1]
# and time.daylight; handle that in strptime .
#try:
#time.tzset()
#except AttributeError:
#pass
no_saving = frozenset(["utc", "gmt", time.tzname[0].lower()])
if time.daylight:
has_saving = frozenset([time.tzname[1].lower()])
else:
has_saving = frozenset()
self.timezone = (no_saving, has_saving)
class TimeRE(dict):
"""Handle conversion from format directives to regexes."""
def __init__(self, locale_time=None):
"""Create keys/values.
Order of execution is important for dependency reasons.
"""
if locale_time:
self.locale_time = locale_time
else:
self.locale_time = LocaleTime()
base = super()
base.__init__({
# The " \d" part of the regex is to make %c from ANSI C work
'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
'f': r"(?P<f>[0-9]{1,6})",
'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
'M': r"(?P<M>[0-5]\d|\d)",
'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
'w': r"(?P<w>[0-6])",
# W is set below by using 'U'
'y': r"(?P<y>\d\d)",
#XXX: Does 'Y' need to worry about having less or more than
# 4 digits?
'Y': r"(?P<Y>\d\d\d\d)",
'z': r"(?P<z>[+-]\d\d[0-5]\d)",
'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
for tz in tz_names),
'Z'),
'%': '%'})
base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
base.__setitem__('x', self.pattern(self.locale_time.LC_date))
base.__setitem__('X', self.pattern(self.locale_time.LC_time))
def __seqToRE(self, to_convert, directive):
"""Convert a list to a regex string for matching a directive.
Want possible matching values to be from longest to shortest. This
prevents the possibility of a match occurring for a value that also
a substring of a larger value that should have matched (e.g., 'abc'
matching when 'abcdef' should have been the match).
"""
to_convert = sorted(to_convert, key=len, reverse=True)
for value in to_convert:
if value != '':
break
else:
return ''
regex = '|'.join(re_escape(stuff) for stuff in to_convert)
regex = '(?P<%s>%s' % (directive, regex)
return '%s)' % regex
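# For example, __seqToRE(['mon', 'monday'], 'a') yields the pattern
# '(?P<a>monday|mon)': the longest alternative comes first, so 'monday'
# cannot be half-matched by the shorter 'mon'.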
def pattern(self, format):
"""Return regex pattern for the format string.
Need to make sure that any characters that might be interpreted as
regex syntax are escaped.
"""
processed_format = ''
# The sub() call escapes all characters that might be misconstrued
# as regex syntax. Cannot use re.escape since we have to deal with
# format directives (%m, etc.).
regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
format = regex_chars.sub(r"\\\1", format)
whitespace_replacement = re_compile(r'\s+')
format = whitespace_replacement.sub(r'\\s+', format)
while '%' in format:
directive_index = format.index('%')+1
processed_format = "%s%s%s" % (processed_format,
format[:directive_index-1],
self[format[directive_index]])
format = format[directive_index+1:]
return "%s%s" % (processed_format, format)
def compile(self, format):
"""Return a compiled re object for the format string."""
return re_compile(self.pattern(format), IGNORECASE)
_cache_lock = _thread_allocate_lock()
# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
# first!
_TimeRE_cache = TimeRE()
_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
_regex_cache = {}
def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon):
"""Calculate the Julian day based on the year, week of the year, and day of
the week, with week_start_day representing whether the week of the year
assumes the week starts on Sunday or Monday (6 or 0)."""
first_weekday = datetime_date(year, 1, 1).weekday()
# If we are dealing with the %U directive (week starts on Sunday), it's
# easier to just shift the view to Sunday being the first day of the
# week.
if not week_starts_Mon:
first_weekday = (first_weekday + 1) % 7
day_of_week = (day_of_week + 1) % 7
# Need to watch out for a week 0 (when the first day of the year is not
# the same as that specified by %U or %W).
week_0_length = (7 - first_weekday) % 7
if week_of_year == 0:
return 1 + day_of_week - first_weekday
else:
days_to_week = week_0_length + (7 * (week_of_year - 1))
return 1 + days_to_week + day_of_week
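# Worked example: for %W in 1999 (Jan 1 fell on a Friday, so first_weekday=4),
# week 1, day 0 (Monday) gives week_0_length=3, days_to_week=3, and a Julian
# day of 1+3+0=4, i.e. Monday 1999-01-04.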
def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
"""Return a 2-tuple consisting of a time struct and an int containing
the number of microseconds based on the input string and the
format string."""
for index, arg in enumerate([data_string, format]):
if not isinstance(arg, str):
msg = "strptime() argument {} must be str, not {}"
raise TypeError(msg.format(index, type(arg)))
global _TimeRE_cache, _regex_cache
with _cache_lock:
if _getlang() != _TimeRE_cache.locale_time.lang:
_TimeRE_cache = TimeRE()
_regex_cache.clear()
if len(_regex_cache) > _CACHE_MAX_SIZE:
_regex_cache.clear()
locale_time = _TimeRE_cache.locale_time
format_regex = _regex_cache.get(format)
if not format_regex:
try:
format_regex = _TimeRE_cache.compile(format)
# KeyError raised when a bad format is found; can be specified as
# \\, in which case it was a stray % but with a space after it
except KeyError as err:
bad_directive = err.args[0]
if bad_directive == "\\":
bad_directive = "%"
del err
raise ValueError("'%s' is a bad directive in format '%s'" %
(bad_directive, format)) from None
# IndexError only occurs when the format string is "%"
except IndexError:
raise ValueError("stray %% in format '%s'" % format) from None
_regex_cache[format] = format_regex
found = format_regex.match(data_string)
if not found:
raise ValueError("time data %r does not match format %r" %
(data_string, format))
if len(data_string) != found.end():
raise ValueError("unconverted data remains: %s" %
data_string[found.end():])
year = None
month = day = 1
hour = minute = second = fraction = 0
tz = -1
tzoffset = None
# Default to -1 to signify that values not known; not critical to have,
# though
week_of_year = -1
week_of_year_start = -1
# weekday and julian defaulted to -1 so as to signal need to calculate
# values
weekday = julian = -1
found_dict = found.groupdict()
for group_key in found_dict.keys():
# Directives not explicitly handled below:
# c, x, X
# handled by making out of other directives
# U, W
# worthless without day of the week
if group_key == 'y':
year = int(found_dict['y'])
# Open Group specification for strptime() states that a %y
# value in the range of [00, 68] is in the century 2000, while
# [69, 99] is in the century 1900
if year <= 68:
year += 2000
else:
year += 1900
elif group_key == 'Y':
year = int(found_dict['Y'])
elif group_key == 'm':
month = int(found_dict['m'])
elif group_key == 'B':
month = locale_time.f_month.index(found_dict['B'].lower())
elif group_key == 'b':
month = locale_time.a_month.index(found_dict['b'].lower())
elif group_key == 'd':
day = int(found_dict['d'])
elif group_key == 'H':
hour = int(found_dict['H'])
elif group_key == 'I':
hour = int(found_dict['I'])
ampm = found_dict.get('p', '').lower()
# If there was no AM/PM indicator, we'll treat this like AM
if ampm in ('', locale_time.am_pm[0]):
# We're in AM so the hour is correct unless we're
# looking at 12 midnight.
# 12 midnight == 12 AM == hour 0
if hour == 12:
hour = 0
elif ampm == locale_time.am_pm[1]:
# We're in PM so we need to add 12 to the hour unless
# we're looking at 12 noon.
# 12 noon == 12 PM == hour 12
if hour != 12:
hour += 12
elif group_key == 'M':
minute = int(found_dict['M'])
elif group_key == 'S':
second = int(found_dict['S'])
elif group_key == 'f':
s = found_dict['f']
# Pad to always return microseconds.
s += "0" * (6 - len(s))
fraction = int(s)
elif group_key == 'A':
weekday = locale_time.f_weekday.index(found_dict['A'].lower())
elif group_key == 'a':
weekday = locale_time.a_weekday.index(found_dict['a'].lower())
elif group_key == 'w':
weekday = int(found_dict['w'])
if weekday == 0:
weekday = 6
else:
weekday -= 1
elif group_key == 'j':
julian = int(found_dict['j'])
elif group_key in ('U', 'W'):
week_of_year = int(found_dict[group_key])
if group_key == 'U':
# U starts week on Sunday.
week_of_year_start = 6
else:
# W starts week on Monday.
week_of_year_start = 0
elif group_key == 'z':
z = found_dict['z']
tzoffset = int(z[1:3]) * 60 + int(z[3:5])
if z.startswith("-"):
tzoffset = -tzoffset
elif group_key == 'Z':
# Since -1 is default value only need to worry about setting tz if
# it can be something other than -1.
found_zone = found_dict['Z'].lower()
for value, tz_values in enumerate(locale_time.timezone):
if found_zone in tz_values:
# Deal with bad locale setup where timezone names are the
# same and yet time.daylight is true; too ambiguous to
# be able to tell what timezone has daylight savings
if (time.tzname[0] == time.tzname[1] and
time.daylight and found_zone not in ("utc", "gmt")):
break
else:
tz = value
break
leap_year_fix = False
if year is None and month == 2 and day == 29:
year = 1904 # 1904 is first leap year of 20th century
leap_year_fix = True
elif year is None:
year = 1900
# If we know the week of the year and what day of that week, we can figure
# out the Julian day of the year.
if julian == -1 and week_of_year != -1 and weekday != -1:
week_starts_Mon = True if week_of_year_start == 0 else False
julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
week_starts_Mon)
# Cannot pre-calculate datetime_date() since can change in Julian
# calculation and thus could have different value for the day of the week
# calculation.
if julian == -1:
# Need to add 1 to result since first day of the year is 1, not 0.
julian = datetime_date(year, month, day).toordinal() - \
datetime_date(year, 1, 1).toordinal() + 1
else: # Assume that if they bothered to include Julian day it will
# be accurate.
datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal())
year = datetime_result.year
month = datetime_result.month
day = datetime_result.day
if weekday == -1:
weekday = datetime_date(year, month, day).weekday()
# Add timezone info
tzname = found_dict.get("Z")
if tzoffset is not None:
gmtoff = tzoffset * 60
else:
gmtoff = None
if leap_year_fix:
# the caller didn't supply a year but asked for Feb 29th. We couldn't
# use the default of 1900 for computations. We set it back to ensure
# that February 29th is smaller than March 1st.
year = 1900
return (year, month, day,
hour, minute, second,
weekday, julian, tz, tzname, gmtoff), fraction
def _strptime_time(data_string, format="%a %b %d %H:%M:%S %Y"):
"""Return a time struct based on the input string and the
format string."""
tt = _strptime(data_string, format)[0]
return time.struct_time(tt[:time._STRUCT_TM_ITEMS])
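# For example, _strptime_time('Wed Mar 17 22:44:55 1999') returns a
# struct_time with tm_year=1999, tm_mon=3, tm_mday=17 and tm_wday=2.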
def _strptime_datetime(cls, data_string, format="%a %b %d %H:%M:%S %Y"):
"""Return a class cls instance based on the input string and the
format string."""
tt, fraction = _strptime(data_string, format)
tzname, gmtoff = tt[-2:]
args = tt[:6] + (fraction,)
if gmtoff is not None:
tzdelta = datetime_timedelta(seconds=gmtoff)
if tzname:
tz = datetime_timezone(tzdelta, tzname)
else:
tz = datetime_timezone(tzdelta)
args += (tz,)
return cls(*args)
| gpl-3.0 |
mjbrewer/testindex | magnum/common/pythonk8sclient/client/models/V1beta3_PersistentVolumeSpec.py | 15 | 2529 | #!/usr/bin/env python
"""
Copyright 2015 Reverb Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class V1beta3_PersistentVolumeSpec(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
'accessModes': 'list[V1beta3_AccessModeType]',
'capacity': 'dict',
'claimRef': 'V1beta3_ObjectReference',
'gcePersistentDisk': 'V1beta3_GCEPersistentDiskVolumeSource',
'hostPath': 'V1beta3_HostPathVolumeSource'
}
self.attributeMap = {
'accessModes': 'accessModes',
'capacity': 'capacity',
'claimRef': 'claimRef',
'gcePersistentDisk': 'gcePersistentDisk',
'hostPath': 'hostPath'
}
#all ways the volume can be mounted
self.accessModes = None # list[V1beta3_AccessModeType]
#a description of the persistent volume's resources and capacity
self.capacity = None # any
#the binding reference to a persistent volume claim
self.claimRef = None # V1beta3_ObjectReference
#GCE disk resource provisioned by an admin
self.gcePersistentDisk = None # V1beta3_GCEPersistentDiskVolumeSource
#a HostPath provisioned by a developer or tester; for development use only
self.hostPath = None # V1beta3_HostPathVolumeSource
| apache-2.0 |
pratapvardhan/scikit-learn | sklearn/externals/joblib/testing.py | 45 | 2720 | """
Helper for testing.
"""
import sys
import warnings
import os.path
import re
import subprocess
import threading
from sklearn.externals.joblib._compat import PY3_OR_LATER
def warnings_to_stdout():
""" Redirect all warnings to stdout.
"""
showwarning_orig = warnings.showwarning
def showwarning(msg, cat, fname, lno, file=None, line=0):
showwarning_orig(msg, cat, os.path.basename(fname), line, sys.stdout)
warnings.showwarning = showwarning
#warnings.simplefilter('always')
try:
from nose.tools import assert_raises_regex
except ImportError:
# For Python 2.7
try:
from nose.tools import assert_raises_regexp as assert_raises_regex
except ImportError:
# for Python 2.6
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except Exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("Should have raised %r" %
expected_exception(expected_regexp))
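# Hedged usage sketch: all three variants share this calling convention, e.g.
# assert_raises_regex(ValueError, 'invalid literal', int, 'not-a-number')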
def check_subprocess_call(cmd, timeout=1, stdout_regex=None):
"""Runs a command in a subprocess with timeout in seconds.
Also checks returncode is zero and stdout if stdout_regex is set.
"""
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def kill_process():
proc.kill()
timer = threading.Timer(timeout, kill_process)
try:
timer.start()
stdout, stderr = proc.communicate()
if PY3_OR_LATER:
stdout, stderr = stdout.decode(), stderr.decode()
if proc.returncode != 0:
message = (
'Non-zero return code: {0}.\nStdout:\n{1}\n'
'Stderr:\n{2}').format(
proc.returncode, stdout, stderr)
raise ValueError(message)
if (stdout_regex is not None and
not re.search(stdout_regex, stdout)):
raise ValueError(
"Unexpected output: '{0!r}' does not match:\n{1!r}".format(
stdout_regex, stdout))
finally:
timer.cancel()
| bsd-3-clause |
wavelets/silk | django_silky/silk/tests/test_dynamic_profiling.py | 5 | 3232 | from django.test import TestCase
from mock import patch, Mock
import six
import silk
from silk.profiling.dynamic import _get_module, _get_parent_module, profile_function_or_method
class TestGetModule(TestCase):
"""test for _get_module"""
def test_singular(self):
module = _get_module('silk')
self.assertEqual(module.__class__.__name__, 'module')
self.assertEqual('silk', module.__name__)
self.assertTrue(hasattr(module, 'models'))
def test_dot(self):
module = _get_module('silk.models')
self.assertEqual(module.__class__.__name__, 'module')
self.assertEqual('silk.models', module.__name__)
self.assertTrue(hasattr(module, 'SQLQuery'))
class TestGetParentModule(TestCase):
"""test for silk.tools._get_parent_module"""
def test_singular(self):
parent = _get_parent_module(silk)
self.assertIsInstance(parent, dict)
def test_dot(self):
import silk.utils
parent = _get_parent_module(silk.utils)
self.assertEqual(parent, silk)
class MyClass(object):
def foo(self):
pass
def foo():
pass
def source_file_name():
file_name = __file__
if file_name[-1] == 'c':
file_name = file_name[:-1]
return file_name
class TestProfileFunction(TestCase):
def test_method_as_str(self):
# noinspection PyShadowingNames
def foo(_):
pass
# noinspection PyUnresolvedReferences
with patch.object(MyClass, 'foo', foo):
profile_function_or_method('silk.tests.test_dynamic_profiling', 'MyClass.foo', 'test')
mock_data_collector = Mock()
mock_data_collector.queries = []
with patch('silk.profiling.profiler.DataCollector', return_value=mock_data_collector) as mock_DataCollector:
MyClass().foo()
self.assertEqual(mock_DataCollector.return_value.register_profile.call_count, 1)
call_args = mock_DataCollector.return_value.register_profile.call_args[0][0]
self.assertDictContainsSubset({
'func_name': foo.__name__,
'dynamic': True,
'file_path': source_file_name(),
'name': 'test',
'line_num': six.get_function_code(foo).co_firstlineno
}, call_args)
def test_func_as_str(self):
name = foo.__name__
line_num = six.get_function_code(foo).co_firstlineno
profile_function_or_method('silk.tests.test_dynamic_profiling', 'foo', 'test')
mock_data_collector = Mock()
mock_data_collector.queries = []
with patch('silk.profiling.profiler.DataCollector', return_value=mock_data_collector) as mock_DataCollector:
foo()
self.assertEqual(mock_DataCollector.return_value.register_profile.call_count, 1)
call_args = mock_DataCollector.return_value.register_profile.call_args[0][0]
self.assertDictContainsSubset({
'func_name': name,
'dynamic': True,
'file_path': source_file_name(),
'name': 'test',
'line_num': line_num
}, call_args)
| mit |
ct-23/home-assistant | homeassistant/components/sensor/mqtt_room.py | 9 | 5270 | """
Support for MQTT room presence detection.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.mqtt_room/
"""
import asyncio
import logging
import json
from datetime import timedelta
import voluptuous as vol
from homeassistant.core import callback
import homeassistant.components.mqtt as mqtt
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, CONF_TIMEOUT)
from homeassistant.components.mqtt import CONF_STATE_TOPIC
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import dt, slugify
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['mqtt']
ATTR_DEVICE_ID = 'device_id'
ATTR_DISTANCE = 'distance'
ATTR_ID = 'id'
ATTR_ROOM = 'room'
CONF_DEVICE_ID = 'device_id'
CONF_ROOM = 'room'
CONF_AWAY_TIMEOUT = 'away_timeout'
DEFAULT_NAME = 'Room Sensor'
DEFAULT_TIMEOUT = 5
DEFAULT_AWAY_TIMEOUT = 0
DEFAULT_TOPIC = 'room_presence'
STATE_AWAY = 'away'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_DEVICE_ID): cv.string,
vol.Required(CONF_STATE_TOPIC, default=DEFAULT_TOPIC): cv.string,
vol.Required(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_AWAY_TIMEOUT,
default=DEFAULT_AWAY_TIMEOUT): cv.positive_int,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string
})
MQTT_PAYLOAD = vol.Schema(vol.All(json.loads, vol.Schema({
vol.Required(ATTR_ID): cv.string,
vol.Required(ATTR_DISTANCE): vol.Coerce(float),
}, extra=vol.ALLOW_EXTRA)))
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up MQTT room Sensor."""
async_add_devices([MQTTRoomSensor(
config.get(CONF_NAME),
config.get(CONF_STATE_TOPIC),
config.get(CONF_DEVICE_ID),
config.get(CONF_TIMEOUT),
config.get(CONF_AWAY_TIMEOUT)
)])
class MQTTRoomSensor(Entity):
"""Representation of a room sensor that is updated via MQTT."""
def __init__(self, name, state_topic, device_id, timeout, consider_home):
"""Initialize the sensor."""
self._state = STATE_AWAY
self._name = name
self._state_topic = '{}{}'.format(state_topic, '/+')
self._device_id = slugify(device_id).upper()
self._timeout = timeout
self._consider_home = \
timedelta(seconds=consider_home) if consider_home \
else None
self._distance = None
self._updated = None
def async_added_to_hass(self):
"""Subscribe to MQTT events.
This method must be run in the event loop and returns a coroutine.
"""
@callback
def update_state(device_id, room, distance):
"""Update the sensor state."""
self._state = room
self._distance = distance
self._updated = dt.utcnow()
self.hass.async_add_job(self.async_update_ha_state())
@callback
def message_received(topic, payload, qos):
"""Handle new MQTT messages."""
try:
data = MQTT_PAYLOAD(payload)
except vol.MultipleInvalid as error:
_LOGGER.debug(
"Skipping update because of malformatted data: %s", error)
return
device = _parse_update_data(topic, data)
if device.get(CONF_DEVICE_ID) == self._device_id:
if self._distance is None or self._updated is None:
update_state(**device)
else:
# update if:
# device is in the same room OR
# device is closer to another room OR
# last update from other room was too long ago
timediff = dt.utcnow() - self._updated
if device.get(ATTR_ROOM) == self._state \
or device.get(ATTR_DISTANCE) < self._distance \
or timediff.seconds >= self._timeout:
update_state(**device)
return mqtt.async_subscribe(
self.hass, self._state_topic, message_received, 1)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_DISTANCE: self._distance
}
@property
def state(self):
"""Return the current room of the entity."""
return self._state
def update(self):
"""Update the state for absent devices."""
if self._updated \
and self._consider_home \
and dt.utcnow() - self._updated > self._consider_home:
self._state = STATE_AWAY
def _parse_update_data(topic, data):
"""Parse the room presence update."""
parts = topic.split('/')
room = parts[-1]
device_id = slugify(data.get(ATTR_ID)).upper()
distance = data.get('distance')
parsed_data = {
ATTR_DEVICE_ID: device_id,
ATTR_ROOM: room,
ATTR_DISTANCE: distance
}
return parsed_data
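# Worked example: topic 'room_presence/living_room' with payload
# {"id": "my phone", "distance": 1.2} parses to
# {'device_id': 'MY_PHONE', 'room': 'living_room', 'distance': 1.2}.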
| apache-2.0 |
michaelhush/M-LOOP | setup.py | 1 | 3091 | '''
Setup script for M-LOOP using setuptools. See the documentation of setuptools for further details.
'''
from __future__ import absolute_import, division, print_function
import multiprocessing as mp
import mloop as ml
from setuptools import setup, find_packages
from os import path
def main():
long_description = ''
here = path.abspath(path.dirname(__file__))
description_path = path.join(here, 'DESCRIPTION.rst')
if path.exists(description_path):
with open(description_path, 'rb') as stream:
long_description = stream.read().decode('utf8')
setup(
name = 'M-LOOP',
version = ml.__version__,
packages = find_packages(),
entry_points={
'console_scripts': [
'M-LOOP = mloop.cmd:run_mloop'
],
},
setup_requires=['pytest-runner'],
install_requires = ['pip>=7.0',
'docutils>=0.3',
'numpy>=1.11',
'scipy>=0.17',
'matplotlib>=1.5',
'pytest>=2.9',
'scikit-learn>=0.18',
'tensorflow>=2.0.0'],
tests_require=['pytest','setuptools>=26'],
package_data = {
# If any package contains *.txt or *.rst files, include them:
'': ['*.txt','*.md'],
},
author = 'Michael R Hush',
author_email = 'MichaelRHush@gmail.com',
description = 'M-LOOP: Machine-learning online optimization package. A python package of automated optimization tools - enhanced with machine-learning - for quantum scientific experiments, computer controlled systems or other optimization tasks.',
long_description = long_description,
license = 'MIT',
keywords = 'automated machine learning optimization optimisation science experiment quantum',
url = 'https://github.com/michaelhush/M-LOOP/',
download_url = 'https://github.com/michaelhush/M-LOOP/tarball/3.2.1',
classifiers = ['Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Science/Research',
'Intended Audience :: Manufacturing',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Physics']
)
if __name__=='__main__':
mp.freeze_support()
main()
| mit |
FlorianLudwig/odoo | addons/claim_from_delivery/__openerp__.py | 261 | 1517 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Claim on Deliveries',
'version' : '1.0',
'author' : 'OpenERP SA',
'category' : 'Warehouse Management',
'depends' : ['base', 'crm_claim', 'stock'],
'demo' : [],
'description': """
Create a claim from a delivery order.
=====================================
Adds a Claim link to the delivery order.
""",
'data' : [
'claim_delivery_view.xml',
'claim_delivery_data.xml',],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
odoousers2014/LibrERP | purchase_requisition_extended/__init__.py | 2 | 1378 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2013 Andrei Levin (andrei.levin at didotech.com)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
#
import wizard
import purchase_requisition
| agpl-3.0 |
LokiCoder/Sick-Beard | lib/enzyme/mp4.py | 70 | 16088 | # -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
# Copyright 2003-2007 Thomas Schueppel <stain@acm.org>
# Copyright 2003-2007 Dirk Meyer <dischi@freevo.org>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['Parser']
import zlib
import logging
import StringIO
import struct
from exceptions import ParseError
import core
# get logging object
log = logging.getLogger(__name__)
# http://developer.apple.com/documentation/QuickTime/QTFF/index.html
# http://developer.apple.com/documentation/QuickTime/QTFF/QTFFChap4/\
# chapter_5_section_2.html#//apple_ref/doc/uid/TP40000939-CH206-BBCBIICE
# Note: May need to define custom log level to work like ATOM_DEBUG did here
QTUDTA = {
'nam': 'title',
'aut': 'artist',
'cpy': 'copyright'
}
QTLANGUAGES = {
0: "en",
1: "fr",
2: "de",
3: "it",
4: "nl",
5: "sv",
6: "es",
7: "da",
8: "pt",
9: "no",
10: "he",
11: "ja",
12: "ar",
13: "fi",
14: "el",
15: "is",
16: "mt",
17: "tr",
18: "hr",
19: "Traditional Chinese",
20: "ur",
21: "hi",
22: "th",
23: "ko",
24: "lt",
25: "pl",
26: "hu",
27: "et",
28: "lv",
29: "Lappish",
30: "fo",
31: "Farsi",
32: "ru",
33: "Simplified Chinese",
34: "Flemish",
35: "ga",
36: "sq",
37: "ro",
38: "cs",
39: "sk",
40: "sl",
41: "yi",
42: "sr",
43: "mk",
44: "bg",
45: "uk",
46: "be",
47: "uz",
48: "kk",
49: "az",
50: "AzerbaijanAr",
51: "hy",
52: "ka",
53: "mo",
54: "ky",
55: "tg",
56: "tk",
57: "mn",
58: "MongolianCyr",
59: "ps",
60: "ku",
61: "ks",
62: "sd",
63: "bo",
64: "ne",
65: "sa",
66: "mr",
67: "bn",
68: "as",
69: "gu",
70: "pa",
71: "or",
72: "ml",
73: "kn",
74: "ta",
75: "te",
76: "si",
77: "my",
78: "Khmer",
79: "lo",
80: "vi",
81: "id",
82: "tl",
83: "MalayRoman",
84: "MalayArabic",
85: "am",
86: "ti",
87: "om",
88: "so",
89: "sw",
90: "Ruanda",
91: "Rundi",
92: "Chewa",
93: "mg",
94: "eo",
128: "cy",
129: "eu",
130: "ca",
131: "la",
132: "qu",
133: "gn",
134: "ay",
135: "tt",
136: "ug",
137: "Dzongkha",
138: "JavaneseRom",
}
class MPEG4(core.AVContainer):
"""
Parser for the MP4 container format. This format is mostly
identical to Apple Quicktime and 3GP files. It maps to mp4, mov,
qt and some other extensions.
"""
table_mapping = {'QTUDTA': QTUDTA}
def __init__(self, file):
core.AVContainer.__init__(self)
self._references = []
self.mime = 'video/quicktime'
self.type = 'Quicktime Video'
h = file.read(8)
try:
(size, type) = struct.unpack('>I4s', h)
except struct.error:
# EOF.
raise ParseError()
if type == 'ftyp':
# file type information
if size >= 12:
# this should always happen
if file.read(4) != 'qt ':
# not a quicktime movie, it is a mpeg4 container
self.mime = 'video/mp4'
self.type = 'MPEG-4 Video'
size -= 4
file.seek(size - 8, 1)
h = file.read(8)
(size, type) = struct.unpack('>I4s', h)
while type in ['mdat', 'skip']:
# movie data at the beginning, skip
file.seek(size - 8, 1)
h = file.read(8)
(size, type) = struct.unpack('>I4s', h)
if not type in ['moov', 'wide', 'free']:
log.debug(u'invalid header: %r' % type)
raise ParseError()
# Extended size
if size == 1:
size = struct.unpack('>Q', file.read(8))
# Back over the atom header we just read, since _readatom expects the
# file position to be at the start of an atom.
file.seek(-8, 1)
while self._readatom(file):
pass
if self._references:
self._set('references', self._references)
def _readatom(self, file):
s = file.read(8)
if len(s) < 8:
return 0
atomsize, atomtype = struct.unpack('>I4s', s)
if not str(atomtype).decode('latin1').isalnum():
# stop at nonsense data
return 0
log.debug(u'%r [%X]' % (atomtype, atomsize))
if atomtype == 'udta':
# Userdata (Metadata)
pos = 0
tabl = {}
i18ntabl = {}
atomdata = file.read(atomsize - 8)
while pos < atomsize - 12:
(datasize, datatype) = struct.unpack('>I4s', atomdata[pos:pos + 8])
if ord(datatype[0]) == 169:
# i18n Metadata...
mypos = 8 + pos
while mypos + 4 < datasize + pos:
# first 4 Bytes are i18n header
(tlen, lang) = struct.unpack('>HH', atomdata[mypos:mypos + 4])
i18ntabl[lang] = i18ntabl.get(lang, {})
l = atomdata[mypos + 4:mypos + tlen + 4]
i18ntabl[lang][datatype[1:]] = l
mypos += tlen + 4
elif datatype == 'WLOC':
# Drop Window Location
pass
else:
if ord(atomdata[pos + 8:pos + datasize][0]) > 1:
tabl[datatype] = atomdata[pos + 8:pos + datasize]
pos += datasize
if len(i18ntabl.keys()) > 0:
for k in i18ntabl.keys():
if QTLANGUAGES.has_key(k) and QTLANGUAGES[k] == 'en':
self._appendtable('QTUDTA', i18ntabl[k])
self._appendtable('QTUDTA', tabl)
else:
log.debug(u'NO i18')
self._appendtable('QTUDTA', tabl)
elif atomtype == 'trak':
atomdata = file.read(atomsize - 8)
pos = 0
trackinfo = {}
tracktype = None
while pos < atomsize - 8:
(datasize, datatype) = struct.unpack('>I4s', atomdata[pos:pos + 8])
if datatype == 'tkhd':
tkhd = struct.unpack('>6I8x4H36xII', atomdata[pos + 8:pos + datasize])
trackinfo['width'] = tkhd[10] >> 16
trackinfo['height'] = tkhd[11] >> 16
trackinfo['id'] = tkhd[3]
try:
# XXX Timestamp of Seconds is since January 1st 1904!
# XXX 2082844800 is the difference between Unix and
# XXX Apple time. FIXME to work on Apple, too
self.timestamp = int(tkhd[1]) - 2082844800
except Exception, e:
log.exception(u'There was trouble extracting timestamp')
elif datatype == 'mdia':
pos += 8
datasize -= 8
log.debug(u'--> mdia information')
while datasize:
mdia = struct.unpack('>I4s', atomdata[pos:pos + 8])
if mdia[1] == 'mdhd':
# Parse based on version of mdhd header. See
# http://wiki.multimedia.cx/index.php?title=QuickTime_container#mdhd
ver = ord(atomdata[pos + 8])
if ver == 0:
mdhd = struct.unpack('>IIIIIhh', atomdata[pos + 8:pos + 8 + 24])
elif ver == 1:
mdhd = struct.unpack('>IQQIQhh', atomdata[pos + 8:pos + 8 + 36])
else:
mdhd = None
if mdhd:
# duration / time scale
trackinfo['length'] = mdhd[4] / mdhd[3]
if mdhd[5] in QTLANGUAGES:
trackinfo['language'] = QTLANGUAGES[mdhd[5]]
# mdhd[6] == quality
self.length = max(self.length, mdhd[4] / mdhd[3])
elif mdia[1] == 'minf':
# minf has only atoms inside
pos -= (mdia[0] - 8)
datasize += (mdia[0] - 8)
elif mdia[1] == 'stbl':
# stbl has only atoms inside
pos -= (mdia[0] - 8)
datasize += (mdia[0] - 8)
elif mdia[1] == 'hdlr':
hdlr = struct.unpack('>I4s4s', atomdata[pos + 8:pos + 8 + 12])
if hdlr[1] == 'mhlr':
if hdlr[2] == 'vide':
tracktype = 'video'
if hdlr[2] == 'soun':
tracktype = 'audio'
elif mdia[1] == 'stsd':
stsd = struct.unpack('>2I', atomdata[pos + 8:pos + 8 + 8])
if stsd[1] > 0:
codec = atomdata[pos + 16:pos + 16 + 8]
codec = struct.unpack('>I4s', codec)
trackinfo['codec'] = codec[1]
if codec[1] == 'jpeg':
tracktype = 'image'
elif mdia[1] == 'dinf':
dref = struct.unpack('>I4s', atomdata[pos + 8:pos + 8 + 8])
log.debug(u' --> %r, %r (useless)' % mdia)
if dref[1] == 'dref':
num = struct.unpack('>I', atomdata[pos + 20:pos + 20 + 4])[0]
rpos = pos + 20 + 4
for ref in range(num):
# FIXME: do somthing if this references
ref = struct.unpack('>I3s', atomdata[rpos:rpos + 7])
data = atomdata[rpos + 7:rpos + ref[0]]
rpos += ref[0]
else:
if mdia[1].startswith('st'):
log.debug(u' --> %r, %r (sample)' % mdia)
elif mdia[1] == 'vmhd' and not tracktype:
# indicates that this track is video
tracktype = 'video'
elif mdia[1] in ['vmhd', 'smhd'] and not tracktype:
# indicates that this track is audio
tracktype = 'audio'
else:
log.debug(u' --> %r, %r (unknown)' % mdia)
pos += mdia[0]
datasize -= mdia[0]
elif datatype == 'udta':
log.debug(u'udta: %r' % struct.unpack('>I4s', atomdata[:8]))
else:
if datatype == 'edts':
log.debug(u'--> %r [%d] (edit list)' % \
(datatype, datasize))
else:
log.debug(u'--> %r [%d] (unknown)' % \
(datatype, datasize))
pos += datasize
info = None
if tracktype == 'video':
info = core.VideoStream()
self.video.append(info)
if tracktype == 'audio':
info = core.AudioStream()
self.audio.append(info)
if info:
for key, value in trackinfo.items():
setattr(info, key, value)
elif atomtype == 'mvhd':
# movie header
mvhd = struct.unpack('>6I2h', file.read(28))
self.length = max(self.length, mvhd[4] / mvhd[3])
self.volume = mvhd[6]
file.seek(atomsize - 8 - 28, 1)
elif atomtype == 'cmov':
# compressed movie
datasize, atomtype = struct.unpack('>I4s', file.read(8))
if not atomtype == 'dcom':
return atomsize
method = struct.unpack('>4s', file.read(datasize - 8))[0]
datasize, atomtype = struct.unpack('>I4s', file.read(8))
if not atomtype == 'cmvd':
return atomsize
if method == 'zlib':
data = file.read(datasize - 8)
try:
decompressed = zlib.decompress(data)
except Exception, e:
try:
decompressed = zlib.decompress(data[4:])
except Exception, e:
log.exception(u'There was a problem decompressing the atom')
return atomsize
decompressedIO = StringIO.StringIO(decompressed)
while self._readatom(decompressedIO):
pass
else:
log.info(u'unknown compression %r' % method)
# unknown compression method
file.seek(datasize - 8, 1)
elif atomtype == 'moov':
# decompressed movie info
while self._readatom(file):
pass
elif atomtype == 'mdat':
pos = file.tell() + atomsize - 8
# maybe there is data inside the mdat
log.info(u'parsing mdat')
while self._readatom(file):
pass
log.info(u'end of mdat')
file.seek(pos, 0)
elif atomtype == 'rmra':
# reference list
while self._readatom(file):
pass
elif atomtype == 'rmda':
# reference
atomdata = file.read(atomsize - 8)
pos = 0
url = ''
quality = 0
datarate = 0
while pos < atomsize - 8:
(datasize, datatype) = struct.unpack('>I4s', atomdata[pos:pos + 8])
if datatype == 'rdrf':
rflags, rtype, rlen = struct.unpack('>I4sI', atomdata[pos + 8:pos + 20])
if rtype == 'url ':
url = atomdata[pos + 20:pos + 20 + rlen]
if url.find('\0') > 0:
url = url[:url.find('\0')]
elif datatype == 'rmqu':
quality = struct.unpack('>I', atomdata[pos + 8:pos + 12])[0]
elif datatype == 'rmdr':
datarate = struct.unpack('>I', atomdata[pos + 12:pos + 16])[0]
pos += datasize
if url:
self._references.append((url, quality, datarate))
else:
if not atomtype in ['wide', 'free']:
log.info(u'unhandled base atom %r' % atomtype)
# Skip unknown atoms
try:
file.seek(atomsize - 8, 1)
except IOError:
return 0
return atomsize
Parser = MPEG4
| gpl-3.0 |
Cashiuus/metagoofil | hachoir_parser/misc/ole2.py | 74 | 14203 | """
Microsoft Office documents parser.
OLE2 files are also used by many other programs to store data.
Information:
* wordole.c of AntiWord program (v0.35)
Copyright (C) 1998-2003 A.J. van Os
Released under GNU GPL
http://www.winfield.demon.nl/
* File gsf-infile-msole.c of libgsf library (v1.14.0)
Copyright (C) 2002-2004 Jody Goldberg (jody@gnome.org)
Released under GNU LGPL 2.1
http://freshmeat.net/projects/libgsf/
* PDF from AAF Association
Copyright (C) 2004 AAF Association
Copyright (C) 1991-2003 Microsoft Corporation
http://www.aafassociation.org/html/specs/aafcontainerspec-v1.0.1.pdf
Author: Victor Stinner
Creation: 2006-04-23
"""
from hachoir_parser import HachoirParser
from hachoir_core.field import (
FieldSet, ParserError, SeekableFieldSet, RootSeekableFieldSet,
UInt8, UInt16, UInt32, UInt64, TimestampWin64, Enum,
Bytes, NullBytes, String)
from hachoir_core.text_handler import filesizeHandler
from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from hachoir_parser.common.win32 import GUID
from hachoir_parser.misc.msoffice import PROPERTY_NAME, RootEntry, RawParser, CustomFragment
MIN_BIG_BLOCK_LOG2 = 6 # 512 bytes
MAX_BIG_BLOCK_LOG2 = 14 # 64 kB
# Number of items in DIFAT
NB_DIFAT = 109
class SECT(UInt32):
UNUSED = 0xFFFFFFFF # -1
END_OF_CHAIN = 0xFFFFFFFE # -2
BFAT_SECTOR = 0xFFFFFFFD # -3
DIFAT_SECTOR = 0xFFFFFFFC # -4
SPECIALS = set((END_OF_CHAIN, UNUSED, BFAT_SECTOR, DIFAT_SECTOR))
special_value_name = {
UNUSED: "unused",
END_OF_CHAIN: "end of a chain",
BFAT_SECTOR: "BFAT sector (in a FAT)",
DIFAT_SECTOR: "DIFAT sector (in a FAT)",
}
def __init__(self, parent, name, description=None):
UInt32.__init__(self, parent, name, description)
def createDisplay(self):
val = self.value
return SECT.special_value_name.get(val, str(val))
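# e.g. the raw value 0xFFFFFFFE displays as 'end of a chain', while ordinary
# sector numbers fall through to str(val).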
class Property(FieldSet):
TYPE_ROOT = 5
TYPE_NAME = {
1: "storage",
2: "stream",
3: "ILockBytes",
4: "IPropertyStorage",
5: "root"
}
DECORATOR_NAME = {
0: "red",
1: "black",
}
static_size = 128 * 8
def createFields(self):
bytes = self.stream.readBytes(self.absolute_address, 4)
if bytes == "\0R\0\0":
charset = "UTF-16-BE"
else:
charset = "UTF-16-LE"
yield String(self, "name", 64, charset=charset, truncate="\0")
yield UInt16(self, "namelen", "Length of the name")
yield Enum(UInt8(self, "type", "Property type"), self.TYPE_NAME)
yield Enum(UInt8(self, "decorator", "Decorator"), self.DECORATOR_NAME)
yield SECT(self, "left")
yield SECT(self, "right")
yield SECT(self, "child", "Child node (valid for storage and root types)")
yield GUID(self, "clsid", "CLSID of this storage (valid for storage and root types)")
yield NullBytes(self, "flags", 4, "User flags")
yield TimestampWin64(self, "creation", "Creation timestamp(valid for storage and root types)")
yield TimestampWin64(self, "lastmod", "Modify timestamp (valid for storage and root types)")
yield SECT(self, "start", "Starting SECT of the stream (valid for stream and root types)")
if self["/header/bb_shift"].value == 9:
yield filesizeHandler(UInt32(self, "size", "Size in bytes (valid for stream and root types)"))
yield NullBytes(self, "padding", 4)
else:
yield filesizeHandler(UInt64(self, "size", "Size in bytes (valid for stream and root types)"))
def createDescription(self):
name = self["name"].display
size = self["size"].display
return "Property: %s (%s)" % (name, size)
class DIFat(SeekableFieldSet):
def __init__(self, parent, name, db_start, db_count, description=None):
SeekableFieldSet.__init__(self, parent, name, description)
self.start=db_start
self.count=db_count
def createFields(self):
for index in xrange(NB_DIFAT):
yield SECT(self, "index[%u]" % index)
difat_sect = self.start
index = NB_DIFAT
entries_per_sect = self.parent.sector_size / 32 - 1
for ctr in xrange(self.count):
# this is relative to real DIFAT start
self.seekBit(NB_DIFAT*SECT.static_size + self.parent.sector_size*difat_sect)
for sect_index in xrange(entries_per_sect):
yield SECT(self, "index[%u]" % (index+sect_index))
index += entries_per_sect
next = SECT(self, "difat[%u]" % ctr)
yield next
difat_sect = next.value
class Header(FieldSet):
static_size = 68 * 8
def createFields(self):
yield GUID(self, "clsid", "16 bytes GUID used by some apps")
yield UInt16(self, "ver_min", "Minor version")
yield UInt16(self, "ver_maj", "Major version")
yield Bytes(self, "endian", 2, "Endian (\\xfe\\xff for little endian)")
yield UInt16(self, "bb_shift", "Log, base 2, of the big block size")
yield UInt16(self, "sb_shift", "Log, base 2, of the small block size")
yield NullBytes(self, "reserved[]", 6, "(reserved)")
yield UInt32(self, "csectdir", "Number of SECTs in directory chain for 4 KB sectors (version 4)")
yield UInt32(self, "bb_count", "Number of Big Block Depot blocks")
yield SECT(self, "bb_start", "Root start block")
yield NullBytes(self, "transaction", 4, "Signature used for transactions (must be zero)")
yield UInt32(self, "threshold", "Maximum size for a mini stream (typically 4096 bytes)")
yield SECT(self, "sb_start", "Small Block Depot start block")
yield UInt32(self, "sb_count")
yield SECT(self, "db_start", "First block of DIFAT")
yield UInt32(self, "db_count", "Number of SECTs in DIFAT")
# Header (ole_id, header, difat) size in bytes
HEADER_SIZE = 64 + Header.static_size + NB_DIFAT * SECT.static_size
class SectFat(FieldSet):
def __init__(self, parent, name, start, count, description=None):
FieldSet.__init__(self, parent, name, description, size=count*32)
self.count = count
self.start = start
def createFields(self):
for i in xrange(self.start, self.start + self.count):
yield SECT(self, "index[%u]" % i)
class OLE2_File(HachoirParser, RootSeekableFieldSet):
PARSER_TAGS = {
"id": "ole2",
"category": "misc",
"file_ext": (
"db", # Thumbs.db
"doc", "dot", # Microsoft Word
"ppt", "ppz", "pps", "pot", # Microsoft Powerpoint
"xls", "xla", # Microsoft Excel
"msi", # Windows installer
),
"mime": (
u"application/msword",
u"application/msexcel",
u"application/mspowerpoint",
),
"min_size": 512*8,
"description": "Microsoft Office document",
"magic": (("\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1", 0),),
}
endian = LITTLE_ENDIAN
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
HachoirParser.__init__(self, stream, **args)
def validate(self):
if self["ole_id"].value != "\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1":
return "Invalid magic"
if self["header/ver_maj"].value not in (3, 4):
return "Unknown major version (%s)" % self["header/ver_maj"].value
if self["header/endian"].value not in ("\xFF\xFE", "\xFE\xFF"):
return "Unknown endian (%s)" % self["header/endian"].raw_display
if not(MIN_BIG_BLOCK_LOG2 <= self["header/bb_shift"].value <= MAX_BIG_BLOCK_LOG2):
return "Invalid (log 2 of) big block size (%s)" % self["header/bb_shift"].value
if self["header/bb_shift"].value < self["header/sb_shift"].value:
return "Small block size (log2=%s) is bigger than big block size (log2=%s)!" \
% (self["header/sb_shift"].value, self["header/bb_shift"].value)
return True
def createFields(self):
# Signature
yield Bytes(self, "ole_id", 8, "OLE object signature")
header = Header(self, "header")
yield header
# Configure values
self.sector_size = (8 << header["bb_shift"].value)
self.fat_count = header["bb_count"].value
self.items_per_bbfat = self.sector_size // SECT.static_size
self.ss_size = (8 << header["sb_shift"].value)
self.items_per_ssfat = self.items_per_bbfat
# Read DIFAT (one level of indirection)
yield DIFat(self, "difat", header["db_start"].value, header["db_count"].value, "Double Indirection FAT")
# Read FAT (one level of indirection)
for field in self.readBFAT():
yield field
# Read SFAT
for field in self.readSFAT():
yield field
# Read properties
chain = self.getChain(self["header/bb_start"].value)
prop_per_sector = self.sector_size // Property.static_size
self.properties = []
for block in chain:
self.seekBlock(block)
for index in xrange(prop_per_sector):
property = Property(self, "property[]")
yield property
self.properties.append(property)
# Parse first property
for index, property in enumerate(self.properties):
if index == 0:
name, parser = 'root', RootEntry
else:
try:
name, parser = PROPERTY_NAME[property["name"].value]
except LookupError:
name = property.name+"content"
parser = RawParser
for field in self.parseProperty(property, name, parser):
yield field
def parseProperty(self, property, name_prefix, parser=RawParser):
if not property["size"].value:
return
if property["size"].value < self["header/threshold"].value and name_prefix!='root':
return
name = "%s[]" % name_prefix
first = None
previous = None
size = 0
fragment_group = None
chain = self.getChain(property["start"].value)
while True:
try:
block = chain.next()
contiguous = False
if first is None:
first = block
contiguous = True
if previous is not None and block == (previous+1):
contiguous = True
if contiguous:
previous = block
size += self.sector_size
continue
except StopIteration:
block = None
if first is None:
break
self.seekBlock(first)
desc = "Big blocks %s..%s (%s)" % (first, previous, previous-first+1)
desc += " of %s bytes" % (self.sector_size // 8)
field = CustomFragment(self, name, size, parser, desc, fragment_group)
if not fragment_group:
fragment_group = field.group
fragment_group.args["datasize"] = property["size"].value
fragment_group.args["ole2name"] = property["name"].value
yield field
if block is None:
break
first = block
previous = block
size = self.sector_size
def getChain(self, start, use_sfat=False):
if use_sfat:
fat = self.ss_fat
items_per_fat = self.items_per_ssfat
err_prefix = "SFAT chain"
else:
fat = self.bb_fat
items_per_fat = self.items_per_bbfat
err_prefix = "BFAT chain"
block = start
block_set = set()
previous = block
while block != SECT.END_OF_CHAIN:
if block in SECT.SPECIALS:
raise ParserError("%s: Invalid block index (0x%08x), previous=%s" % (err_prefix, block, previous))
if block in block_set:
raise ParserError("%s: Found a loop (%s=>%s)" % (err_prefix, previous, block))
block_set.add(block)
yield block
previous = block
index = block // items_per_fat
try:
block = fat[index]["index[%u]" % block].value
except LookupError:
break
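# Illustrative note (not part of the original module): getChain() is a
# generator, so a whole chain can be materialized at once, e.g.:
#   blocks = list(self.getChain(self["header/bb_start"].value))
# It stops at END_OF_CHAIN and, as implemented above, raises ParserError
# on special sector values or loops.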
def readBFAT(self):
self.bb_fat = []
start = 0
count = self.items_per_bbfat
for index, block in enumerate(self.array("difat/index")):
block = block.value
if block == SECT.UNUSED:
break
desc = "FAT %u/%u at block %u" % \
(1+index, self["header/bb_count"].value, block)
self.seekBlock(block)
field = SectFat(self, "bbfat[]", start, count, desc)
yield field
self.bb_fat.append(field)
start += count
def readSFAT(self):
chain = self.getChain(self["header/sb_start"].value)
start = 0
self.ss_fat = []
count = self.items_per_ssfat
for index, block in enumerate(chain):
self.seekBlock(block)
field = SectFat(self, "sfat[]", \
start, count, \
"SFAT %u/%u at block %u" % \
(1+index, self["header/sb_count"].value, block))
yield field
self.ss_fat.append(field)
start += count
def createContentSize(self):
max_block = 0
for fat in self.array("bbfat"):
for entry in fat:
block = entry.value
if block not in SECT.SPECIALS:
max_block = max(block, max_block)
if max_block in SECT.SPECIALS:
return None
else:
return HEADER_SIZE + (max_block+1) * self.sector_size
def seekBlock(self, block):
self.seekBit(HEADER_SIZE + block * self.sector_size)
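# Minimal usage sketch (an assumption based on the hachoir 1.x API, not
# part of the original module); "sample.doc" is a placeholder path:
#   from hachoir_parser import createParser
#   parser = createParser(u"sample.doc")  # yields an OLE2_File for OLE2 input
#   if parser and parser.validate() is True:
#       for prop in parser.array("property"):
#           print prop.description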
| gpl-2.0 |
sergeneren/anima | anima/env/mayaEnv/pivot_switcher.py | 1 | 9055 | """
oyPivotSwitcher.py by Erkan Ozgur Yilmaz (c) 2009
v10.5.17
Description :
-------------
A tool for easily animating pivot switches
Version History :
-----------------
v10.5.17
- modifications for Maya 2011 and PyMel 1.0.2
v9.12.25
- removed oyAxialCorrectionGroup script import
- moved to new versioning scheme
v1.0.1
- setup check: now the objects pivot attributes are checked for safe setup
v1.0.0
- initial working version
v1.0.0.preAlpha
- development version
TODO List :
-----------
----------------------------------------------------------------------------
"""
__version__ = "10.5.17"
import pymel.core as pm
from anima.env.mayaEnv import auxiliary
class PivotSwitcher(object):
"""A utility class to help dynamically switch pivot positions in maya
"""
def __init__(self, _object):
# the object
self._object = auxiliary.get_valid_dag_node(_object)
assert (isinstance(self._object, pm.nodetypes.Transform))
# the data
self._futurePivot = pm.nodetypes.Transform
self._isSetup = False
# read the settings
self._read_settings()
def _read_settings(self):
"""reads the settings from the objects pivotData attribute
"""
# check if the object has pivotData attribute
if self._object.hasAttr("pivotData"):
# get the future pivot object
self._futurePivot = auxiliary.get_valid_dag_node(
pm.listConnections(
self._object.attr("pivotData.futurePivot")
)[0]
)
# set isSetup flag to True
self._isSetup = True
return True
return False
def _save_settings(self):
"""save settings inside objects pivotData attribute
"""
# data to be saved:
# -----------------
# futurePivot node
# create attributes
self._create_data_attribute()
# connect futurePivot node
pm.connectAttr(
'%s%s' % (self._futurePivot.name(), ".message"),
self._object.attr("pivotData.futurePivot"),
f=True
)
def _create_data_attribute(self):
"""creates attribute in self._object to hold the data
"""
if not self._object.hasAttr("pivotData"):
pm.addAttr(self._object, ln="pivotData", at="compound", nc=1)
if not self._object.hasAttr("futurePivot"):
pm.addAttr(
self._object,
ln="futurePivot",
at="message",
p="pivotData"
)
def _create_future_pivot(self):
"""creates the futurePivot locator
"""
if self._isSetup:
return
# create a locator and move it to the current pivot
# parent the locator under the object
locator_name = self._object.name() + "_futurePivotLocator#"
self._futurePivot = \
auxiliary.get_valid_dag_node(pm.spaceLocator(n=locator_name))
pm.parent(self._futurePivot, self._object)
current_pivot_pos = pm.xform(self._object, q=True, ws=True, piv=True)
pm.xform(self._futurePivot, ws=True, t=current_pivot_pos[0:3])
# change the color
self._futurePivot.setAttr("overrideEnabled", 1)
self._futurePivot.setAttr("overrideColor", 13)
# set translate and visibility to non-keyable
self._futurePivot.setAttr("tx", k=False, channelBox=True)
self._futurePivot.setAttr("ty", k=False, channelBox=True)
self._futurePivot.setAttr("tz", k=False, channelBox=True)
self._futurePivot.setAttr("v", k=False, channelBox=True)
# lock scale and rotate
self._futurePivot.setAttr("rx", lock=True, k=False, channelBox=False)
self._futurePivot.setAttr("ry", lock=True, k=False, channelBox=False)
self._futurePivot.setAttr("rz", lock=True, k=False, channelBox=False)
self._futurePivot.setAttr("sx", lock=True, k=False, channelBox=False)
self._futurePivot.setAttr("sy", lock=True, k=False, channelBox=False)
self._futurePivot.setAttr("sz", lock=True, k=False, channelBox=False)
# hide it
self._futurePivot.setAttr("v", 0)
def setup(self):
"""setups specified object for pivot switching
"""
# if it is setup before, don't do anything
if self._isSetup:
return
if not self.is_good_for_setup():
pm.PopupError(
"the objects pivots are connected to something\n"
"THE OBJECT CANNOT BE SETUP!!!"
)
return
# create the parent constraint
self._create_future_pivot()
# create attributes for data holding
self._create_data_attribute()
# save the settings
self._save_settings()
self._isSetup = True
def toggle(self):
"""toggles pivot visibility
"""
if not self._isSetup:
return
# toggle the pivot visibility
current_vis = self._futurePivot.getAttr("v")
current_vis = (current_vis + 1) % 2
self._futurePivot.setAttr("v", current_vis)
def switch(self):
"""switches the pivot to the futurePivot
"""
if not self._isSetup:
return
# get the current frame
frame = pm.currentTime(q=True)
# get the current position of the object
current_object_pos = pm.xform(self._object, q=True, ws=True, t=True)
current_pivot_pos = pm.xform(self._object, q=True, ws=True, piv=True)
future_pivot_pos = pm.xform(self._futurePivot, q=True, ws=True, t=True)
displacement = (future_pivot_pos[0] - current_pivot_pos[0],
future_pivot_pos[1] - current_pivot_pos[1],
future_pivot_pos[2] - current_pivot_pos[2])
# move the pivot to the future_pivot
pm.xform(self._object, ws=True, piv=future_pivot_pos[0:3])
# set keyframes
pm.setKeyframe(self._object, at="rotatePivotX", t=frame, ott="step")
pm.setKeyframe(self._object, at="rotatePivotY", t=frame, ott="step")
pm.setKeyframe(self._object, at="rotatePivotZ", t=frame, ott="step")
pm.setKeyframe(self._object, at="scalePivotX", t=frame, ott="step")
pm.setKeyframe(self._object, at="scalePivotY", t=frame, ott="step")
pm.setKeyframe(self._object, at="scalePivotZ", t=frame, ott="step")
# set pivot translations; negate the displacement component-wise
# (multiplying a Python tuple by -1 would yield an empty tuple)
neg_displacement = (-displacement[0], -displacement[1], -displacement[2])
self._object.setAttr("rotatePivotTranslate", neg_displacement)
self._object.setAttr("scalePivotTranslate", neg_displacement)
# set keyframes
pm.setKeyframe(self._object, at="rotatePivotTranslateX", t=frame,
ott="step")
pm.setKeyframe(self._object, at="rotatePivotTranslateY", t=frame,
ott="step")
pm.setKeyframe(self._object, at="rotatePivotTranslateZ", t=frame,
ott="step")
pm.setKeyframe(self._object, at="scalePivotTranslateX", t=frame,
ott="step")
pm.setKeyframe(self._object, at="scalePivotTranslateY", t=frame,
ott="step")
pm.setKeyframe(self._object, at="scalePivotTranslateZ", t=frame,
ott="step")
def _set_dg_dirty(self):
"""sets the DG to dirty for _object, currentPivot and futurePivot
"""
pm.dgdirty(self._object, self._futurePivot)
def fix_jump(self):
"""fixes the jumps after editing the keyframes
"""
pass
def is_good_for_setup(self):
"""checks if the objects rotatePivot, scalePivot, rotatePivotTranslate
and scalePivotTranslate is not connected to anything
"""
attributes = [
"rotatePivot",
"scalePivot",
"rotatePivotTranslate",
"scalePivotTranslate"
]
for attrStr in attributes:
connections = self._object.attr(attrStr).connections()
if len(connections) > 0:
return False
return True
def get_one_switcher():
"""returns a generator that generates a PivotSwitcher object for every
transform node in the selection
"""
for node in pm.ls(sl=True):
try:
node = auxiliary.get_valid_dag_node(node)
if node.type() == "transform":
my_pivot_switcher = PivotSwitcher(node)
yield my_pivot_switcher
except TypeError:
pass
def setup_pivot():
"""setups pivot switching for selected objects
"""
for piv_switcher in get_one_switcher():
piv_switcher.setup()
def switch_pivot():
"""switches pivot for selected objects
"""
for piv_switcher in get_one_switcher():
piv_switcher.switch()
def toggle_pivot():
"""toggles pivot visibilities for selected objects
"""
for piv_switcher in get_one_switcher():
piv_switcher.toggle()
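# Typical usage from the Maya script editor (an illustrative sketch, not
# part of the original module): select one or more transform nodes, then:
#   setup_pivot()   # creates and connects the futurePivot locator
#   toggle_pivot()  # unhides the locator so it can be repositioned
#   switch_pivot()  # keys the pivot over to the locator's position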
| bsd-2-clause |
SoVictor/Lerna | dbtrash/models.py | 3 | 1580 | from django.db import models as md
class ActiveAdminComments(md.Model):
namespace = md.CharField(max_length=255)
body = md.TextField()
resource_id = md.CharField(max_length=255)
resource_type = md.CharField(max_length=255)
author_id = md.IntegerField()
author_type = md.CharField(max_length=255)
created_at = md.DateTimeField(auto_now_add=True)
updated_at = md.DateTimeField(auto_now=True)
class Meta:
db_table = 'active_admin_comments'
class CkeditorAssets(md.Model):
data_file_name = md.CharField(max_length=255)
data_content_type = md.CharField(max_length=255, blank=True, null=True)
data_file_size = md.IntegerField(blank=True, null=True)
assetable_id = md.IntegerField(blank=True, null=True)
assetable_type = md.CharField(max_length=30, blank=True, null=True)
type = md.CharField(max_length=30, blank=True, null=True)
width = md.IntegerField(blank=True, null=True)
height = md.IntegerField(blank=True, null=True)
created_at = md.DateTimeField(auto_now_add=True)
updated_at = md.DateTimeField(auto_now=True)
class Meta:
db_table = 'ckeditor_assets'
class SchemaMigrations(md.Model):
version = md.CharField(unique=True, max_length=255)
class Meta:
db_table = 'schema_migrations'
class UserSessions(md.Model):
created_at = md.DateTimeField(auto_now_add=True)
updated_at = md.DateTimeField(auto_now=True)
class Meta:
db_table = 'user_sessions'
| gpl-2.0 |
lnielsen/invenio | invenio/legacy/batchuploader/webinterface.py | 3 | 15955 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebUpload web interface"""
__revision__ = "$Id$"
__lastupdated__ = """$Date$"""
from invenio.legacy.wsgi.utils import Field
from invenio.config import CFG_SITE_SECURE_URL
from invenio.utils.url import redirect_to_url
from invenio.base.i18n import gettext_set_language
from invenio.ext.legacy.handler import wash_urlargd, WebInterfaceDirectory
from invenio.utils.apache import SERVER_RETURN, HTTP_NOT_FOUND
from invenio.legacy.wsgi.utils import handle_file_post
from invenio.legacy.webuser import getUid, page_not_authorized, get_email
from invenio.legacy.webpage import page
from invenio.legacy.batchuploader.engine import metadata_upload, cli_upload, \
get_user_metadata_uploads, get_user_document_uploads, document_upload, \
get_daemon_doc_files, get_daemon_meta_files, cli_allocate_record, \
user_authorization, perform_upload_check, _transform_input_to_marcxml
try:
import invenio.legacy.template
batchuploader_templates = invenio.legacy.template.load('batchuploader')
except:
pass
class WebInterfaceBatchUploaderPages(WebInterfaceDirectory):
"""Defines the set of /batchuploader pages."""
_exports = ['', 'metadata', 'metasubmit', 'history', 'documents',
'docsubmit', 'daemon', 'allocaterecord', 'confirm']
def _lookup(self, component, path):
def restupload(req, form):
"""Interface for robots used like this:
$ curl --data-binary '@localfile.xml' http://cds.cern.ch/batchuploader/robotupload/[insert|replace|correct|append]?[callback_url=http://...]&nonce=1234 -A invenio_webupload
"""
filepath, mimetype = handle_file_post(req)
argd = wash_urlargd(form, {'callback_url': (str, None), 'nonce': (str, None), 'special_treatment': (str, None)})
return cli_upload(req, open(filepath), '--' + path[0], argd['callback_url'], argd['nonce'], argd['special_treatment'])
def legacyrobotupload(req, form):
"""Interface for robots used like this:
$ curl -F 'file=@localfile.xml' -F 'mode=-i' [-F 'callback_url=http://...'] [-F 'nonce=1234'] http://cds.cern.ch/batchuploader/robotupload -A invenio_webupload
"""
argd = wash_urlargd(form, {'mode': (str, None), 'callback_url': (str, None), 'nonce': (str, None), 'special_treatment': (str, None)})
return cli_upload(req, form.get('file', None), argd['mode'], argd['callback_url'], argd['nonce'], argd['special_treatment'])
if component == 'robotupload':
if path and path[0] in ('insert', 'replace', 'correct', 'append', 'insertorreplace'):
return restupload, None
else:
return legacyrobotupload, None
else:
return None, path
def index(self, req, form):
""" The function called by default
"""
redirect_to_url(req, "%s/batchuploader/metadata" % (CFG_SITE_SECURE_URL))
def metadata(self, req, form):
""" Display Metadata file upload form """
argd = wash_urlargd(form, { 'filetype': (str, ""),
'mode': (str, ""),
'submit_date': (str, "yyyy-mm-dd"),
'submit_time': (str, "hh:mm:ss"),
'email_logs_to': (str, None)})
_ = gettext_set_language(argd['ln'])
not_authorized = user_authorization(req, argd['ln'])
if not_authorized:
return not_authorized
uid = getUid(req)
if argd['email_logs_to'] is None:
argd['email_logs_to'] = get_email(uid)
body = batchuploader_templates.tmpl_display_menu(argd['ln'], ref="metadata")
body += batchuploader_templates.tmpl_display_web_metaupload_form(argd['ln'],
argd['filetype'], argd['mode'], argd['submit_date'],
argd['submit_time'], argd['email_logs_to'])
title = _("Metadata batch upload")
return page(title = title,
body = body,
metaheaderadd = batchuploader_templates.tmpl_styles(),
uid = uid,
lastupdated = __lastupdated__,
req = req,
language = argd['ln'],
navmenuid = "batchuploader")
def documents(self, req, form):
""" Display document upload form """
argd = wash_urlargd(form, {
})
_ = gettext_set_language(argd['ln'])
not_authorized = user_authorization(req, argd['ln'])
if not_authorized:
return not_authorized
uid = getUid(req)
email_logs_to = get_email(uid)
body = batchuploader_templates.tmpl_display_menu(argd['ln'], ref="documents")
body += batchuploader_templates.tmpl_display_web_docupload_form(argd['ln'], email_logs_to=email_logs_to)
title = _("Document batch upload")
return page(title = title,
body = body,
metaheaderadd = batchuploader_templates.tmpl_styles(),
uid = uid,
lastupdated = __lastupdated__,
req = req,
language = argd['ln'],
navmenuid = "batchuploader")
def docsubmit(self, req, form):
""" Function called after submitting the document upload form.
Performs the appropriate action depending on the input parameters
"""
argd = wash_urlargd(form, {'docfolder': (str, ""),
'matching': (str, ""),
'mode': (str, ""),
'submit_date': (str, ""),
'submit_time': (str, ""),
'priority': (str, ""),
'email_logs_to': (str, "")})
_ = gettext_set_language(argd['ln'])
not_authorized = user_authorization(req, argd['ln'])
if not_authorized:
return not_authorized
date = argd['submit_date'] not in ['yyyy-mm-dd', ''] \
and argd['submit_date'] or ''
time = argd['submit_time'] not in ['hh:mm:ss', ''] \
and argd['submit_time'] or ''
errors, info = document_upload(req, argd['docfolder'], argd['matching'],
argd['mode'], date, time, argd['ln'], argd['priority'], argd['email_logs_to'])
body = batchuploader_templates.tmpl_display_menu(argd['ln'])
uid = getUid(req)
navtrail = '''<a class="navtrail" href="%s/batchuploader/documents">%s</a>''' % \
(CFG_SITE_SECURE_URL, _("Document batch upload"))
body += batchuploader_templates.tmpl_display_web_docupload_result(argd['ln'], errors, info)
title = _("Document batch upload result")
return page(title = title,
body = body,
metaheaderadd = batchuploader_templates.tmpl_styles(),
uid = uid,
navtrail = navtrail,
lastupdated = __lastupdated__,
req = req,
language = argd['ln'],
navmenuid = "batchuploader")
def allocaterecord(self, req, form):
"""
Interface for robots to allocate a record and obtain a record identifier
"""
return cli_allocate_record(req)
def metasubmit(self, req, form):
""" Function called after submitting the metadata upload form.
Checks if input fields are correct before uploading.
"""
argd = wash_urlargd(form, {'metafile': (str, None),
'filetype': (str, None),
'mode': (str, None),
'submit_date': (str, None),
'submit_time': (str, None),
'filename': (str, None),
'priority': (str, None),
'email_logs_to': (str, None)})
_ = gettext_set_language(argd['ln'])
# Check if the page is directly accessed
if argd['metafile'] is None:
redirect_to_url(req, "%s/batchuploader/metadata"
% (CFG_SITE_SECURE_URL))
not_authorized = user_authorization(req, argd['ln'])
if not_authorized:
return not_authorized
date = argd['submit_date'] not in ['yyyy-mm-dd', ''] \
and argd['submit_date'] or ''
time = argd['submit_time'] not in ['hh:mm:ss', ''] \
and argd['submit_time'] or ''
auth_code, auth_message = metadata_upload(req,
argd['metafile'], argd['filetype'],
argd['mode'].split()[0],
date, time, argd['filename'], argd['ln'],
argd['priority'], argd['email_logs_to'])
if auth_code == 1: # not authorized
referer = '/batchuploader/'
return page_not_authorized(req=req, referer=referer,
text=auth_message, navmenuid="batchuploader")
else:
uid = getUid(req)
body = batchuploader_templates.tmpl_display_menu(argd['ln'])
body += batchuploader_templates.tmpl_upload_successful(argd['ln'])
title = _("Upload successful")
navtrail = '''<a class="navtrail" href="%s/batchuploader/metadata">%s</a>''' % \
(CFG_SITE_SECURE_URL, _("Metadata batch upload"))
return page(title = title,
body = body,
uid = uid,
navtrail = navtrail,
lastupdated = __lastupdated__,
req = req,
language = argd['ln'],
navmenuid = "batchuploader")
def confirm(self, req, form):
""" Function called after submitting the metadata upload form.
Shows a summary of actions to be performed and possible errors
"""
argd = wash_urlargd(form, {'metafile': (Field, None),
'filetype': (str, None),
'mode': (str, None),
'submit_date': (str, None),
'submit_time': (str, None),
'filename': (str, None),
'priority': (str, None),
'skip_simulation': (str, None),
'email_logs_to': (str, None)})
_ = gettext_set_language(argd['ln'])
# Check if the page is directly accessed or no file selected
if not argd['metafile']:
redirect_to_url(req, "%s/batchuploader/metadata"
% (CFG_SITE_SECURE_URL))
metafile = argd['metafile'].value
if argd['filetype'] != 'marcxml':
metafile = _transform_input_to_marcxml(file_input=metafile)
date = argd['submit_date'] not in ['yyyy-mm-dd', ''] \
and argd['submit_date'] or ''
time = argd['submit_time'] not in ['hh:mm:ss', ''] \
and argd['submit_time'] or ''
errors_upload = ''
skip_simulation = argd['skip_simulation'] == "skip"
if not skip_simulation:
errors_upload = perform_upload_check(metafile, argd['mode'])
body = batchuploader_templates.tmpl_display_confirm_page(argd['ln'],
metafile, argd['filetype'], argd['mode'], date,
time, argd['filename'], argd['priority'], errors_upload,
skip_simulation, argd['email_logs_to'])
uid = getUid(req)
navtrail = '''<a class="navtrail" href="%s/batchuploader/metadata">%s</a>''' % \
(CFG_SITE_SECURE_URL, _("Metadata batch upload"))
title = 'Confirm your actions'
return page(title = title,
body = body,
metaheaderadd = batchuploader_templates.tmpl_styles(),
uid = uid,
navtrail = navtrail,
lastupdated = __lastupdated__,
req = req,
language = argd['ln'],
navmenuid = "batchuploader")
def history(self, req, form):
"""Display upload history of the current user"""
argd = wash_urlargd(form, {})
_ = gettext_set_language(argd['ln'])
not_authorized = user_authorization(req, argd['ln'])
if not_authorized:
return not_authorized
uploaded_meta_files = get_user_metadata_uploads(req)
uploaded_doc_files = get_user_document_uploads(req)
uid = getUid(req)
body = batchuploader_templates.tmpl_display_menu(argd['ln'],
ref="history")
body += batchuploader_templates.tmpl_upload_history(argd['ln'],
uploaded_meta_files,
uploaded_doc_files)
title = _("Upload history")
return page(title = title,
body = body,
metaheaderadd = batchuploader_templates.tmpl_styles(),
uid = uid,
lastupdated = __lastupdated__,
req = req,
language = argd['ln'],
navmenuid = "batchuploader")
def daemon(self, req, form):
""" Display content of folders where the daemon will look into """
argd = wash_urlargd(form, {})
_ = gettext_set_language(argd['ln'])
not_authorized = user_authorization(req, argd['ln'])
if not_authorized:
return not_authorized
docs = get_daemon_doc_files()
metadata = get_daemon_meta_files()
uid = getUid(req)
body = batchuploader_templates.tmpl_display_menu(argd['ln'],
ref="daemon")
body += batchuploader_templates.tmpl_daemon_content(argd['ln'], docs,
metadata)
title = _("Batch Uploader: Daemon monitor")
return page(title = title,
body = body,
metaheaderadd = batchuploader_templates.tmpl_styles(),
uid = uid,
lastupdated = __lastupdated__,
req = req,
language = argd['ln'],
navmenuid = "batchuploader")
def __call__(self, req, form):
"""Redirect calls without final slash."""
redirect_to_url(req, '%s/batchuploader/metadata' % CFG_SITE_SECURE_URL)
| gpl-2.0 |
power12317/weblate | weblate/accounts/tests.py | 1 | 2955 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2013 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for user handling.
"""
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, Group
from django.core import mail
from django.core.management import call_command
class RegistrationTest(TestCase):
def test_register(self):
response = self.client.post(
reverse('weblate_register'),
{
'username': 'username',
'email': 'noreply@weblate.org',
'password1': 'password',
'password2': 'password',
'first_name': 'First',
'last_name': 'Last',
}
)
# Check we did succeed
self.assertRedirects(response, reverse('registration_complete'))
# Check registration mail
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'Your registration on Weblate'
)
# Get confirmation URL from mail
line = ''
for line in mail.outbox[0].body.splitlines():
if line.startswith('http://example.com'):
break
# Confirm account
response = self.client.get(line[18:])
self.assertRedirects(
response,
reverse('registration_activation_complete')
)
user = User.objects.get(username='username')
# Verify user is active
self.assertTrue(user.is_active)
# Verify stored first/last name
self.assertEqual(user.first_name, 'First')
self.assertEqual(user.last_name, 'Last')
class CommandTest(TestCase):
'''
Tests for management commands.
'''
def test_createadmin(self):
call_command('createadmin')
user = User.objects.get(username='admin')
self.assertEqual(user.first_name, 'Weblate')
self.assertEqual(user.last_name, 'Admin')
def test_setupgroups(self):
call_command('setupgroups')
group = Group.objects.get(name='Users')
self.assertTrue(
group.permissions.filter(
codename='save_translation'
).exists()
)
| gpl-3.0 |
wemanuel/smry | server-auth/ls/google-cloud-sdk/lib/protorpc/messages.py | 11 | 59408 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stand-alone implementation of in memory protocol messages.
Public Classes:
Enum: Represents an enumerated type.
Variant: Hint for wire format to determine how to serialize.
Message: Base class for user defined messages.
IntegerField: Field for integer values.
FloatField: Field for float values.
BooleanField: Field for boolean values.
BytesField: Field for binary string values.
StringField: Field for UTF-8 string values.
MessageField: Field for other message type values.
EnumField: Field for enumerated type values.
Public Exceptions (indentation indicates class hierarchy):
EnumDefinitionError: Raised when enumeration is incorrectly defined.
FieldDefinitionError: Raised when field is incorrectly defined.
InvalidVariantError: Raised when variant is not compatible with field type.
InvalidDefaultError: Raised when default is not compatible with field.
InvalidNumberError: Raised when field number is out of range or reserved.
MessageDefinitionError: Raised when message is incorrectly defined.
DuplicateNumberError: Raised when field has duplicate number with another.
ValidationError: Raised when a message or field is not valid.
DefinitionNotFoundError: Raised when definition not found.
"""
import inspect
import os
import sys
import traceback
import types
import weakref
from . import util
__all__ = ['MAX_ENUM_VALUE',
'MAX_FIELD_NUMBER',
'FIRST_RESERVED_FIELD_NUMBER',
'LAST_RESERVED_FIELD_NUMBER',
'Enum',
'Field',
'FieldList',
'Variant',
'Message',
'IntegerField',
'FloatField',
'BooleanField',
'BytesField',
'StringField',
'MessageField',
'EnumField',
'find_definition',
'Error',
'DecodeError',
'EncodeError',
'EnumDefinitionError',
'FieldDefinitionError',
'InvalidVariantError',
'InvalidDefaultError',
'InvalidNumberError',
'MessageDefinitionError',
'DuplicateNumberError',
'ValidationError',
'DefinitionNotFoundError',
]
# TODO(user): Add extended module test to ensure all exceptions
# in services extend Error.
Error = util.Error
class EnumDefinitionError(Error):
"""Enumeration definition error."""
class FieldDefinitionError(Error):
"""Field definition error."""
class InvalidVariantError(FieldDefinitionError):
"""Invalid variant provided to field."""
class InvalidDefaultError(FieldDefinitionError):
"""Invalid default provided to field."""
class InvalidNumberError(FieldDefinitionError):
"""Invalid number provided to field."""
class MessageDefinitionError(Error):
"""Message definition error."""
class DuplicateNumberError(Error):
"""Duplicate number assigned to field."""
class DefinitionNotFoundError(Error):
"""Raised when definition is not found."""
class DecodeError(Error):
"""Error found decoding message from encoded form."""
class EncodeError(Error):
"""Error found when encoding message."""
class ValidationError(Error):
"""Invalid value for message error."""
def __str__(self):
"""Prints string with field name if present on exception."""
message = Error.__str__(self)
try:
field_name = self.field_name
except AttributeError:
return message
else:
return message
# Attributes that are reserved by a class definition and
# may not be used by either Enum or Message class definitions.
_RESERVED_ATTRIBUTE_NAMES = frozenset(
['__module__', '__doc__'])
_POST_INIT_FIELD_ATTRIBUTE_NAMES = frozenset(
['name',
'_message_definition',
'_MessageField__type',
'_EnumField__type',
'_EnumField__resolved_default'])
_POST_INIT_ATTRIBUTE_NAMES = frozenset(
['_message_definition'])
# Maximum enumeration value as defined by the protocol buffers standard.
# All enum values must be less than or equal to this value.
MAX_ENUM_VALUE = (2 ** 29) - 1
# Maximum field number as defined by the protocol buffers standard.
# All field numbers must be less than or equal to this value.
MAX_FIELD_NUMBER = (2 ** 29) - 1
# Field numbers between 19000 and 19999 inclusive are reserved by the
# protobuf protocol and may not be used by fields.
FIRST_RESERVED_FIELD_NUMBER = 19000
LAST_RESERVED_FIELD_NUMBER = 19999
class _DefinitionClass(type):
"""Base meta-class used for definition meta-classes.
The Enum and Message definition classes share some basic functionality.
Both of these classes may be contained by a Message definition. After
initialization, neither class may have attributes changed
except for the protected _message_definition attribute, and that attribute
may change only once.
"""
__initialized = False
def __init__(cls, name, bases, dct):
"""Constructor."""
type.__init__(cls, name, bases, dct)
# Base classes may never be initialized.
if cls.__bases__ != (object,):
cls.__initialized = True
def message_definition(cls):
"""Get outer Message definition that contains this definition.
Returns:
Containing Message definition if definition is contained within one,
else None.
"""
try:
return cls._message_definition()
except AttributeError:
return None
def __setattr__(cls, name, value):
"""Overridden so that cannot set variables on definition classes after init.
Setting attributes on a class must work during the period of initialization
to set the enumation value class variables and build the name/number maps.
Once __init__ has set the __initialized flag to True prohibits setting any
more values on the class. The class is in effect frozen.
Args:
name: Name of value to set.
value: Value to set.
"""
if cls.__initialized and name not in _POST_INIT_ATTRIBUTE_NAMES:
raise AttributeError('May not change values: %s' % name)
else:
type.__setattr__(cls, name, value)
def __delattr__(cls, name):
"""Overridden so that cannot delete varaibles on definition classes."""
raise TypeError('May not delete attributes on definition class')
def definition_name(cls):
"""Helper method for creating definition name.
Names will be generated to include the class's package name, scope (if the
class is nested in another definition) and class name.
By default, the package name for a definition is derived from its module
name. However, this value can be overridden by placing a 'package' attribute
in the module that contains the definition class. For example:
package = 'some.alternate.package'
class MyMessage(Message):
...
>>> MyMessage.definition_name()
some.alternate.package.MyMessage
Returns:
Dot-separated fully qualified name of definition.
"""
outer_definition_name = cls.outer_definition_name()
if outer_definition_name is None:
return unicode(cls.__name__)
else:
return u'%s.%s' % (outer_definition_name, cls.__name__)
def outer_definition_name(cls):
"""Helper method for creating outer definition name.
Returns:
If definition is nested, will return the outer definitions name, else the
package name.
"""
outer_definition = cls.message_definition()
if not outer_definition:
return util.get_package_for_module(cls.__module__)
else:
return outer_definition.definition_name()
def definition_package(cls):
"""Helper method for creating creating the package of a definition.
Returns:
Name of package that definition belongs to.
"""
outer_definition = cls.message_definition()
if not outer_definition:
return util.get_package_for_module(cls.__module__)
else:
return outer_definition.definition_package()
class _EnumClass(_DefinitionClass):
"""Meta-class used for defining the Enum base class.
Meta-class enables very specific behavior for any defined Enum
class. All attributes defined on an Enum sub-class must be integers.
Each attribute defined on an Enum sub-class is translated
into an instance of that sub-class, with the name of the attribute
as its name, and the number provided as its value. It also ensures
that only one level of Enum class hierarchy is possible. In other
words it is not possible to declare sub-classes of sub-classes of
Enum.
This class also defines some functions in order to restrict the
behavior of the Enum class and its sub-classes. It is not possible
to change the behavior of the Enum class in later classes since
any new classes may be defined with only integer values, and no methods.
"""
def __init__(cls, name, bases, dct):
# Can only define one level of sub-classes below Enum.
if not (bases == (object,) or bases == (Enum,)):
raise EnumDefinitionError('Enum type %s may only inherit from Enum' %
(name,))
cls.__by_number = {}
cls.__by_name = {}
# Enum base class does not need to be initialized or locked.
if bases != (object,):
# Replace each plain integer value with an Enum instance.
for attribute, value in dct.iteritems():
# Module will be in every enum class.
if attribute in _RESERVED_ATTRIBUTE_NAMES:
continue
# Reject anything that is not an int.
if not isinstance(value, (int, long)):
raise EnumDefinitionError(
'May only use integers in Enum definitions. Found: %s = %s' %
(attribute, value))
# Protocol buffer standard recommends non-negative values.
# Reject negative values.
if value < 0:
raise EnumDefinitionError(
'Must use non-negative enum values. Found: %s = %d' %
(attribute, value))
if value > MAX_ENUM_VALUE:
raise EnumDefinitionError(
'Must use enum values less than or equal %d. Found: %s = %d' %
(MAX_ENUM_VALUE, attribute, value))
if value in cls.__by_number:
raise EnumDefinitionError(
'Value for %s = %d is already defined: %s' %
(attribute, value, cls.__by_number[value].name))
# Create enum instance and list in new Enum type.
instance = object.__new__(cls)
cls.__init__(instance, attribute, value)
cls.__by_name[instance.name] = instance
cls.__by_number[instance.number] = instance
setattr(cls, attribute, instance)
_DefinitionClass.__init__(cls, name, bases, dct)
def __iter__(cls):
"""Iterate over all values of enum.
Yields:
Enumeration instances of the Enum class in arbitrary order.
"""
return cls.__by_number.itervalues()
def names(cls):
"""Get all names for Enum.
Returns:
An iterator for names of the enumeration in arbitrary order.
"""
return cls.__by_name.iterkeys()
def numbers(cls):
"""Get all numbers for Enum.
Returns:
An iterator for all numbers of the enumeration in arbitrary order.
"""
return cls.__by_number.iterkeys()
def lookup_by_name(cls, name):
"""Look up Enum by name.
Args:
name: Name of enum to find.
Returns:
Enum sub-class instance of that value.
"""
return cls.__by_name[name]
def lookup_by_number(cls, number):
"""Look up Enum by number.
Args:
number: Number of enum to find.
Returns:
Enum sub-class instance of that value.
"""
return cls.__by_number[number]
def __len__(cls):
return len(cls.__by_name)
class Enum(object):
"""Base class for all enumerated types."""
__metaclass__ = _EnumClass
__slots__ = set(('name', 'number'))
def __new__(cls, index):
"""Acts as look-up routine after class is initialized.
The purpose of overriding __new__ is to provide a way to treat
Enum subclasses as casting types, similar to how the int type
functions. A program can pass a string or an integer and this
method with "convert" that value in to an appropriate Enum instance.
Args:
index: Name or number to look up. During initialization
this is always the name of the new enum value.
Raises:
TypeError: When an inappropriate index value is provided.
"""
# If is enum type of this class, return it.
if isinstance(index, cls):
return index
# If number, look up by number.
if isinstance(index, (int, long)):
try:
return cls.lookup_by_number(index)
except KeyError:
pass
# If name, look up by name.
if isinstance(index, basestring):
try:
return cls.lookup_by_name(index)
except KeyError:
pass
raise TypeError('No such value for %s in Enum %s' %
(index, cls.__name__))
def __init__(self, name, number=None):
"""Initialize new Enum instance.
Since this should only be called during class initialization, any
call that happens after the class is frozen returns immediately
(the casting constructor in __new__ handles those calls).
"""
# Immediately return if __init__ was called after _EnumClass.__init__().
# It means that the casting-operator version of the class constructor
# is being used.
if getattr(type(self), '_DefinitionClass__initialized'):
return
object.__setattr__(self, 'name', name)
object.__setattr__(self, 'number', number)
def __setattr__(self, name, value):
raise TypeError('May not change enum values')
def __str__(self):
return self.name
def __int__(self):
return self.number
def __repr__(self):
return '%s(%s, %d)' % (type(self).__name__, self.name, self.number)
def __reduce__(self):
"""Enable pickling.
Returns:
A 2-tuple containing the class and __new__ args to be used for restoring
a pickled instance.
"""
return self.__class__, (self.number,)
def __cmp__(self, other):
"""Order is by number."""
if isinstance(other, type(self)):
return cmp(self.number, other.number)
return NotImplemented
@classmethod
def to_dict(cls):
"""Make dictionary version of enumerated class.
Dictionary created this way can be used with def_num.
Returns:
A dict (name) -> number
"""
return dict((item.name, item.number) for item in iter(cls))
@staticmethod
def def_enum(dct, name):
"""Define enum class from dictionary.
Args:
dct: Dictionary of enumerated values for type.
name: Name of enum.
"""
return type(name, (Enum,), dct)
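# Illustrative sketch (not part of the original module): def_enum and the
# casting constructor in action, with a hypothetical Color enum:
#   Color = Enum.def_enum({'RED': 1, 'GREEN': 2, 'BLUE': 3}, 'Color')
#   Color(1) is Color.RED          # look-up by number
#   Color('GREEN') is Color.GREEN  # look-up by name
#   Color.to_dict()                # {'RED': 1, 'GREEN': 2, 'BLUE': 3}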
# TODO(user): Determine to what degree this enumeration should be compatible
# with FieldDescriptor.Type in:
#
# http://code.google.com/p/protobuf/source/browse/trunk/src/google/protobuf/descriptor.proto
class Variant(Enum):
"""Wire format variant.
Used by the 'protobuf' wire format to determine how to transmit
a single piece of data. May be used by other formats.
See: http://code.google.com/apis/protocolbuffers/docs/encoding.html
Values:
DOUBLE: 64-bit floating point number.
FLOAT: 32-bit floating point number.
INT64: 64-bit signed integer.
UINT64: 64-bit unsigned integer.
INT32: 32-bit signed integer.
BOOL: Boolean value (True or False).
STRING: String of UTF-8 encoded text.
MESSAGE: Embedded message as byte string.
BYTES: String of 8-bit bytes.
UINT32: 32-bit unsigned integer.
ENUM: Enum value as integer.
SINT32: 32-bit signed integer. Uses "zig-zag" encoding.
SINT64: 64-bit signed integer. Uses "zig-zag" encoding.
"""
DOUBLE = 1
FLOAT = 2
INT64 = 3
UINT64 = 4
INT32 = 5
BOOL = 8
STRING = 9
MESSAGE = 11
BYTES = 12
UINT32 = 13
ENUM = 14
SINT32 = 17
SINT64 = 18
class _MessageClass(_DefinitionClass):
"""Meta-class used for defining the Message base class.
For more details about Message classes, see the Message class docstring.
Information contained there may help understanding this class.
Meta-class enables very specific behavior for any defined Message
class. All attributes defined on a Message sub-class must be field
instances, Enum class definitions or other Message class definitions. Each
field attribute defined on a Message sub-class is added to the set of
field definitions and the attribute is translated into a slot. It also
ensures that only one level of Message class hierarchy is possible. In other
words it is not possible to declare sub-classes of sub-classes of
Message.
This class also defines some functions in order to restrict the
behavior of the Message class and its sub-classes. It is not possible
to change the behavior of the Message class in later classes since
any new classes may be defined with only field, Enums and Messages, and
no methods.
"""
def __new__(cls, name, bases, dct):
"""Create new Message class instance.
The __new__ method of the _MessageClass type is overridden so as to
allow the translation of Field instances to slots.
"""
by_number = {}
by_name = {}
variant_map = {}
if bases != (object,):
# Can only define one level of sub-classes below Message.
if bases != (Message,):
raise MessageDefinitionError(
'Message types may only inherit from Message')
enums = []
messages = []
# Must not use iteritems because this loop will change the state of dct.
for key, field in dct.items():
if key in _RESERVED_ATTRIBUTE_NAMES:
continue
if isinstance(field, type) and issubclass(field, Enum):
enums.append(key)
continue
if (isinstance(field, type) and
issubclass(field, Message) and
field is not Message):
messages.append(key)
continue
# Reject anything that is not a field.
if type(field) is Field or not isinstance(field, Field):
raise MessageDefinitionError(
'May only use fields in message definitions. Found: %s = %s' %
(key, field))
if field.number in by_number:
raise DuplicateNumberError(
'Field with number %d declared more than once in %s' %
(field.number, name))
field.name = key
# Place in name and number maps.
by_name[key] = field
by_number[field.number] = field
# Add enums if any exist.
if enums:
dct['__enums__'] = sorted(enums)
# Add messages if any exist.
if messages:
dct['__messages__'] = sorted(messages)
dct['_Message__by_number'] = by_number
dct['_Message__by_name'] = by_name
return _DefinitionClass.__new__(cls, name, bases, dct)
def __init__(cls, name, bases, dct):
"""Initializer required to assign references to new class."""
if bases != (object,):
for value in dct.itervalues():
if isinstance(value, _DefinitionClass) and not value is Message:
value._message_definition = weakref.ref(cls)
for field in cls.all_fields():
field._message_definition = weakref.ref(cls)
_DefinitionClass.__init__(cls, name, bases, dct)
class Message(object):
"""Base class for user defined message objects.
Used to define messages for efficient transmission across network or
process space. Messages are defined using the field classes (IntegerField,
FloatField, EnumField, etc.).
Messages are more restricted than normal classes in that they may only
contain field attributes and other Message and Enum definitions. These
restrictions are in place because the structure of the Message class is
intentended to itself be transmitted across network or process space and
used directly by clients or even other servers. As such methods and
non-field attributes could not be transmitted with the structural information
causing discrepancies between different languages and implementations.
Initialization and validation:
A Message object is considered to be initialized if it has all required
fields and any nested messages are also initialized.
Calling 'check_initialized' will raise a ValidationError if it is not
initialized; 'is_initialized' returns a boolean value indicating if it is
valid.
Validation automatically occurs when Message objects are created
and populated. Validation that a given value will be compatible with
a field that it is assigned to can be done through the Field instance's
validate() method. The validate method used on a message will check that
all values of a message and its sub-messages are valid. Assigning an
invalid value to a field will raise a ValidationError.
Example:
# Trade type.
class TradeType(Enum):
BUY = 1
SELL = 2
SHORT = 3
CALL = 4
class Lot(Message):
price = IntegerField(1, required=True)
quantity = IntegerField(2, required=True)
class Order(Message):
symbol = StringField(1, required=True)
total_quantity = IntegerField(2, required=True)
trade_type = EnumField(TradeType, 3, required=True)
lots = MessageField(Lot, 4, repeated=True)
limit = IntegerField(5)
order = Order(symbol='GOOG',
total_quantity=10,
trade_type=TradeType.BUY)
lot1 = Lot(price=304,
quantity=7)
lot2 = Lot(price = 305,
quantity=3)
order.lots = [lot1, lot2]
# Now object is initialized!
order.check_initialized()
"""
__metaclass__ = _MessageClass
def __init__(self, **kwargs):
"""Initialize internal messages state.
Args:
A message can be initialized via the constructor by passing in keyword
arguments corresponding to fields. For example:
class Date(Message):
day = IntegerField(1)
month = IntegerField(2)
year = IntegerField(3)
Invoking:
date = Date(day=6, month=6, year=1911)
is the same as doing:
date = Date()
date.day = 6
date.month = 6
date.year = 1911
"""
# Tag being an essential implementation detail must be private.
self.__tags = {}
self.__unrecognized_fields = {}
assigned = set()
for name, value in kwargs.iteritems():
setattr(self, name, value)
assigned.add(name)
# initialize repeated fields.
for field in self.all_fields():
if field.repeated and field.name not in assigned:
setattr(self, field.name, [])
def check_initialized(self):
"""Check class for initialization status.
Check that all required fields are initialized
Raises:
ValidationError: If message is not initialized.
"""
for name, field in self.__by_name.iteritems():
value = getattr(self, name)
if value is None:
if field.required:
raise ValidationError("Message %s is missing required field %s" %
(type(self).__name__, name))
else:
try:
if (isinstance(field, MessageField) and
issubclass(field.message_type, Message)):
if field.repeated:
for item in value:
item_message_value = field.value_to_message(item)
item_message_value.check_initialized()
else:
message_value = field.value_to_message(value)
message_value.check_initialized()
except ValidationError, err:
if not hasattr(err, 'message_name'):
err.message_name = type(self).__name__
raise
def is_initialized(self):
"""Get initialization status.
Returns:
True if message is valid, else False.
"""
try:
self.check_initialized()
except ValidationError:
return False
else:
return True
@classmethod
def all_fields(cls):
"""Get all field definition objects.
Ordering is arbitrary.
Returns:
Iterator over all values in arbitrary order.
"""
return cls.__by_name.itervalues()
@classmethod
def field_by_name(cls, name):
"""Get field by name.
Returns:
Field object associated with name.
Raises:
KeyError if no field found by that name.
"""
return cls.__by_name[name]
@classmethod
def field_by_number(cls, number):
"""Get field by number.
Returns:
Field object associated with number.
Raises:
KeyError if no field found by that number.
"""
return cls.__by_number[number]
def get_assigned_value(self, name):
"""Get the assigned value of an attribute.
Get the underlying value of an attribute. If value has not been set, will
not return the default for the field.
Args:
name: Name of attribute to get.
Returns:
Value of attribute, None if it has not been set.
"""
message_type = type(self)
try:
field = message_type.field_by_name(name)
except KeyError:
raise AttributeError('Message %s has no field %s' % (
message_type.__name__, name))
return self.__tags.get(field.number)
def reset(self, name):
"""Reset assigned value for field.
Resetting a field will return it to its default value or None.
Args:
name: Name of field to reset.
"""
message_type = type(self)
try:
field = message_type.field_by_name(name)
except KeyError:
if name not in message_type.__by_name:
raise AttributeError('Message %s has no field %s' % (
message_type.__name__, name))
if field.repeated:
self.__tags[field.number] = FieldList(field, [])
else:
self.__tags.pop(field.number, None)
def all_unrecognized_fields(self):
"""Get the names of all unrecognized fields in this message."""
return self.__unrecognized_fields.keys()
def get_unrecognized_field_info(self, key, value_default=None,
variant_default=None):
"""Get the value and variant of an unknown field in this message.
Args:
key: The name or number of the field to retrieve.
value_default: Value to be returned if the key isn't found.
variant_default: Value to be returned as variant if the key isn't
found.
Returns:
(value, variant), where value and variant are whatever was passed
to set_unrecognized_field.
"""
value, variant = self.__unrecognized_fields.get(key, (value_default,
variant_default))
return value, variant
def set_unrecognized_field(self, key, value, variant):
"""Set an unrecognized field, used when decoding a message.
Args:
key: The name or number used to refer to this unknown value.
value: The value of the field.
variant: Type information needed to interpret the value or re-encode it.
Raises:
TypeError: If the variant is not an instance of messages.Variant.
"""
if not isinstance(variant, Variant):
raise TypeError('Variant type %s is not valid.' % variant)
self.__unrecognized_fields[key] = value, variant
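# Illustrative sketch (not part of the original module): decoders can
# stash unknown wire data so it survives a round-trip, e.g.:
#   msg.set_unrecognized_field('foo', 42, Variant.INT64)
#   msg.get_unrecognized_field_info('foo')  # -> (42, Variant.INT64)
#   msg.all_unrecognized_fields()           # -> ['foo']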
def __setattr__(self, name, value):
"""Change set behavior for messages.
Messages may only be assigned values that are fields.
Does not try to validate field when set.
Args:
name: Name of field to assign to.
value: Value to assign to field.
Raises:
AttributeError when trying to assign value that is not a field.
"""
if name in self.__by_name or name.startswith('_Message__'):
object.__setattr__(self, name, value)
else:
raise AttributeError("May not assign arbitrary value %s "
"to message %s" % (name, type(self).__name__))
def __repr__(self):
"""Make string representation of message.
Example:
class MyMessage(messages.Message):
integer_value = messages.IntegerField(1)
string_value = messages.StringField(2)
my_message = MyMessage()
my_message.integer_value = 42
my_message.string_value = u'A string'
print my_message
>>> <MyMessage
... integer_value: 42
... string_value: u'A string'>
Returns:
String representation of message, including the values
of all fields and repr of all sub-messages.
"""
body = ['<', type(self).__name__]
for field in sorted(self.all_fields(),
key=lambda f: f.number):
attribute = field.name
value = self.get_assigned_value(field.name)
if value is not None:
body.append('\n %s: %s' % (attribute, repr(value)))
body.append('>')
return ''.join(body)
def __eq__(self, other):
"""Equality operator.
Does field by field comparison with other message. For
equality, must be same type and values of all fields must be
equal.
Messages not required to be initialized for comparison.
Does not attempt to determine equality for values that have
default values that are not set. In other words:
class HasDefault(Message):
attr1 = StringField(1, default='default value')
message1 = HasDefault()
message2 = HasDefault()
message2.attr1 = 'default value'
message1 != message2
Does not compare unknown values.
Args:
other: Other message to compare with.
"""
# TODO(user): Implement "equivalent" which does comparisons
# taking default values into consideration.
if self is other:
return True
if type(self) is not type(other):
return False
return self.__tags == other.__tags
def __ne__(self, other):
"""Not equals operator.
Does field by field comparison with other message. For
non-equality, must be different type or any value of a field must be
non-equal to the same field in the other instance.
Messages not required to be initialized for comparison.
Args:
other: Other message to compare with.
"""
return not self.__eq__(other)
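# Illustrative sketch (not part of the original module), reusing the
# HasDefault example from the __eq__ docstring above:
#   message1 = HasDefault()
#   message1.get_assigned_value('attr1')  # -> None, nothing was assigned
#   message2 = HasDefault()
#   message2.attr1 = 'default value'
#   message1 != message2  # True: only explicitly assigned tags are compared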
class FieldList(list):
"""List implementation that validates field values.
This list implementation overrides all methods that add values in to a list
in order to validate those new elements. Attempting to add or set list
values that are not of the correct type will raise ValidationError.
"""
def __init__(self, field_instance, sequence):
"""Constructor.
Args:
field_instance: Instance of field that validates the list.
sequence: List or tuple to construct list from.
"""
if not field_instance.repeated:
raise FieldDefinitionError('FieldList may only accept repeated fields')
self.__field = field_instance
self.__field.validate(sequence)
list.__init__(self, sequence)
def __getstate__(self):
"""Enable pickling.
The assigned field instance can't be pickled if it belongs to a Message
definition (message_definition uses a weakref), so the Message class and
field number are returned in that case.
Returns:
A 3-tuple containing:
- The field instance, or None if it belongs to a Message class.
- The Message class that the field instance belongs to, or None.
- The field instance number of the Message class it belongs to, or None.
"""
message_class = self.__field.message_definition()
if message_class is None:
return self.__field, None, None
else:
return None, message_class, self.__field.number
def __setstate__(self, state):
"""Enable unpickling.
Args:
state: A 3-tuple containing:
- The field instance, or None if it belongs to a Message class.
- The Message class that the field instance belongs to, or None.
- The field instance number of the Message class it belongs to, or None.
"""
field_instance, message_class, number = state
if field_instance is None:
self.__field = message_class.field_by_number(number)
else:
self.__field = field_instance
@property
def field(self):
"""Field that validates list."""
return self.__field
def __setslice__(self, i, j, sequence):
"""Validate slice assignment to list."""
self.__field.validate(sequence)
list.__setslice__(self, i, j, sequence)
def __setitem__(self, index, value):
"""Validate item assignment to list."""
self.__field.validate_element(value)
list.__setitem__(self, index, value)
def append(self, value):
"""Validate item appending to list."""
self.__field.validate_element(value)
return list.append(self, value)
def extend(self, sequence):
"""Validate extension of list."""
self.__field.validate(sequence)
return list.extend(self, sequence)
def insert(self, index, value):
"""Validate item insertion to list."""
self.__field.validate_element(value)
return list.insert(self, index, value)
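# Illustrative sketch (not part of the original module): a FieldList is
# created automatically when a repeated field is assigned, and every
# element added afterwards is validated. The message class is hypothetical.
#
#   class Numbers(Message):
#     values = IntegerField(1, repeated=True)
#
#   numbers = Numbers()
#   numbers.values = [1, 2, 3]     # Wrapped in a FieldList on assignment.
#   numbers.values.append(4)       # Element is validated: OK.
#   numbers.values.append('oops')  # Raises ValidationError: wrong type.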
# TODO(user): Prevent additional field subclasses.
class Field(object):
__variant_to_type = {}
class __metaclass__(type):
def __init__(cls, name, bases, dct):
getattr(cls, '_Field__variant_to_type').update(
(variant, cls) for variant in dct.get('VARIANTS', []))
type.__init__(cls, name, bases, dct)
__initialized = False
@util.positional(2)
def __init__(self,
number,
required=False,
repeated=False,
variant=None,
default=None):
"""Constructor.
The required and repeated parameters are mutually exclusive. Setting both
to True will raise a FieldDefinitionError.
Sub-class Attributes:
Each sub-class of Field must define the following:
VARIANTS: Set of variant types accepted by that field.
DEFAULT_VARIANT: Default variant type if not specified in constructor.
Args:
number: Number of field. Must be unique per message class.
required: Whether or not field is required. Mutually exclusive with
'repeated'.
repeated: Whether or not field is repeated. Mutually exclusive with
'required'.
variant: Wire-format variant hint.
default: Default value for field if not found in stream.
Raises:
InvalidVariantError when invalid variant for field is provided.
InvalidDefaultError when invalid default for field is provided.
FieldDefinitionError when invalid number provided or mutually exclusive
fields are used.
InvalidNumberError when the field number is out of range or reserved.
"""
if not isinstance(number, int) or not 1 <= number <= MAX_FIELD_NUMBER:
raise InvalidNumberError('Invalid number for field: %s\n'
'Number must be 1 or greater and %d or less' %
(number, MAX_FIELD_NUMBER))
if FIRST_RESERVED_FIELD_NUMBER <= number <= LAST_RESERVED_FIELD_NUMBER:
raise InvalidNumberError('Tag number %d is a reserved number.\n'
'Numbers %d to %d are reserved' %
(number, FIRST_RESERVED_FIELD_NUMBER,
LAST_RESERVED_FIELD_NUMBER))
if repeated and required:
raise FieldDefinitionError('Cannot set both repeated and required')
if variant is None:
variant = self.DEFAULT_VARIANT
if repeated and default is not None:
raise FieldDefinitionError('Repeated fields may not have defaults')
if variant not in self.VARIANTS:
raise InvalidVariantError(
'Invalid variant: %s\nValid variants for %s are %r' %
(variant, type(self).__name__, sorted(self.VARIANTS)))
self.number = number
self.required = required
self.repeated = repeated
self.variant = variant
if default is not None:
try:
self.validate_default(default)
except ValidationError, err:
try:
name = self.name
except AttributeError:
# For when raising error before name initialization.
raise InvalidDefaultError('Invalid default value for %s: %s: %s' %
(self.__class__.__name__, default, err))
else:
raise InvalidDefaultError('Invalid default value for field %s: '
'%s: %s' % (name, default, err))
self.__default = default
self.__initialized = True
def __setattr__(self, name, value):
"""Setter overidden to prevent assignment to fields after creation.
Args:
name: Name of attribute to set.
value: Value to assign.
"""
# Special case post-init names. They need to be set after constructor.
if name in _POST_INIT_FIELD_ATTRIBUTE_NAMES:
object.__setattr__(self, name, value)
return
# All other attributes must be set before __initialized.
if not self.__initialized:
# Not initialized yet, allow assignment.
object.__setattr__(self, name, value)
else:
raise AttributeError('Field objects are read-only')
def __set__(self, message_instance, value):
"""Set value on message.
Args:
message_instance: Message instance to set value on.
value: Value to set on message.
"""
# Reaches into message instance directly to assign to private tags.
if value is None:
if self.repeated:
raise ValidationError(
'May not assign None to repeated field %s' % self.name)
else:
message_instance._Message__tags.pop(self.number, None)
else:
if self.repeated:
value = FieldList(self, value)
else:
self.validate(value)
message_instance._Message__tags[self.number] = value
def __get__(self, message_instance, message_class):
if message_instance is None:
return self
result = message_instance._Message__tags.get(self.number)
if result is None:
return self.default
else:
return result
def validate_element(self, value):
"""Validate single element of field.
This is different from validate in that it is used on individual
values of repeated fields.
Args:
value: Value to validate.
Raises:
ValidationError if value is not expected type.
"""
if not isinstance(value, self.type):
if value is None:
if self.required:
raise ValidationError('Required field is missing')
else:
try:
name = self.name
except AttributeError:
raise ValidationError('Expected type %s for %s, '
'found %s (type %s)' %
(self.type, self.__class__.__name__,
value, type(value)))
else:
raise ValidationError('Expected type %s for field %s, '
'found %s (type %s)' %
(self.type, name, value, type(value)))
def __validate(self, value, validate_element):
"""Internal validation function.
Validate an internal value using a function to validate individual elements.
Args:
value: Value to validate.
validate_element: Function to use to validate individual elements.
Raises:
ValidationError if value is not expected type.
"""
if not self.repeated:
validate_element(value)
else:
# Must be a list or tuple, may not be a string.
if isinstance(value, (list, tuple)):
for element in value:
if element is None:
try:
name = self.name
except AttributeError:
raise ValidationError('Repeated values for %s '
'may not be None' % self.__class__.__name__)
else:
raise ValidationError('Repeated values for field %s '
'may not be None' % name)
validate_element(element)
elif value is not None:
try:
name = self.name
except AttributeError:
raise ValidationError('%s is repeated. Found: %s' % (
self.__class__.__name__, value))
else:
raise ValidationError('Field %s is repeated. Found: %s' % (name,
value))
def validate(self, value):
"""Validate value assigned to field.
Args:
value: Value to validate.
Raises:
ValidationError if value is not expected type.
"""
self.__validate(value, self.validate_element)
def validate_default_element(self, value):
"""Validate value as assigned to field default field.
Some fields may allow for delayed resolution of default types necessary
in the case of circular definition references. In this case, the default
value might be a place holder that is resolved when needed after all the
message classes are defined.
Args:
value: Default value to validate.
Raises:
ValidationError if value is not expected type.
"""
self.validate_element(value)
def validate_default(self, value):
"""Validate default value assigned to field.
Args:
value: Value to validate.
Raises:
ValidationError if value is not expected type.
"""
self.__validate(value, self.validate_default_element)
def message_definition(self):
"""Get Message definition that contains this Field definition.
Returns:
Containing Message definition for Field. Will return None if for
some reason Field is defined outside of a Message class.
"""
try:
return self._message_definition()
except AttributeError:
return None
@property
def default(self):
"""Get default value for field."""
return self.__default
@classmethod
def lookup_field_type_by_variant(cls, variant):
return cls.__variant_to_type[variant]
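# Illustrative sketch (not part of the original module): the constructor
# checks above reject invalid field definitions. These calls assume the
# concrete field classes defined below.
#
#   IntegerField(1, required=True, repeated=True)  # FieldDefinitionError.
#   IntegerField(0)                                # InvalidNumberError.
#   IntegerField(1, repeated=True, default=[1])    # FieldDefinitionError.
#   IntegerField(1, variant=Variant.STRING)        # InvalidVariantError.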
class IntegerField(Field):
"""Field definition for integer values."""
VARIANTS = frozenset([Variant.INT32,
Variant.INT64,
Variant.UINT32,
Variant.UINT64,
Variant.SINT32,
Variant.SINT64,
])
DEFAULT_VARIANT = Variant.INT64
type = (int, long)
class FloatField(Field):
"""Field definition for float values."""
VARIANTS = frozenset([Variant.FLOAT,
Variant.DOUBLE,
])
DEFAULT_VARIANT = Variant.DOUBLE
type = float
class BooleanField(Field):
"""Field definition for boolean values."""
VARIANTS = frozenset([Variant.BOOL])
DEFAULT_VARIANT = Variant.BOOL
type = bool
class BytesField(Field):
"""Field definition for byte string values."""
VARIANTS = frozenset([Variant.BYTES])
DEFAULT_VARIANT = Variant.BYTES
type = str
class StringField(Field):
"""Field definition for unicode string values."""
VARIANTS = frozenset([Variant.STRING])
DEFAULT_VARIANT = Variant.STRING
type = unicode
def validate_element(self, value):
"""Validate StringField allowing for str and unicode.
Raises:
ValidationError if a str value is not 7-bit ascii.
"""
# If value is str, it is considered valid. Satisfies "required=True".
if isinstance(value, str):
try:
unicode(value)
except UnicodeDecodeError, err:
try:
name = self.name
except AttributeError:
validation_error = ValidationError(
'Field encountered non-ASCII string %s: %s' % (value,
err))
else:
validation_error = ValidationError(
'Field %s encountered non-ASCII string %s: %s' % (self.name,
value,
err))
validation_error.field_name = self.name
raise validation_error
else:
super(StringField, self).validate_element(value)
class MessageField(Field):
"""Field definition for sub-message values.
Message fields contain instances of other messages. Instances stored
on message fields are considered to be owned by the containing message
instance and should not be shared between owning instances.
Message fields must be defined to reference a single type of message.
Normally message fields are defined by passing the referenced message
class in to the constructor.
It is possible to define a message field for a type that does not yet
exist by passing the name of the message into the constructor instead
of a message class. Resolution of the actual type of the message is
deferred until it is needed, for example, during message verification.
Names provided to the constructor must refer to a class within the same
python module as the class that is using it. Names refer to messages
relative to the containing message's scope. For example, the two fields
of Outer refer to the same message type:
class Outer(Message):
inner_relative = MessageField('Inner', 1)
inner_absolute = MessageField('Outer.Inner', 2)
class Inner(Message):
...
When resolving an actual type, MessageField will traverse the entire
scope of nested messages to match a message name. This makes it easy
for siblings to reference siblings:
class Outer(Message):
class Inner(Message):
sibling = MessageField('Sibling', 1)
class Sibling(Message):
...
"""
VARIANTS = frozenset([Variant.MESSAGE])
DEFAULT_VARIANT = Variant.MESSAGE
@util.positional(3)
def __init__(self,
message_type,
number,
required=False,
repeated=False,
variant=None):
"""Constructor.
Args:
message_type: Message type for field. Must be subclass of Message.
number: Number of field. Must be unique per message class.
required: Whether or not field is required. Mutually exclusive to
'repeated'.
repeated: Whether or not field is repeated. Mutually exclusive to
'required'.
variant: Wire-format variant hint.
Raises:
FieldDefinitionError when invalid message_type is provided.
"""
valid_type = (isinstance(message_type, basestring) or
(message_type is not Message and
isinstance(message_type, type) and
issubclass(message_type, Message)))
if not valid_type:
raise FieldDefinitionError('Invalid message class: %s' % message_type)
if isinstance(message_type, basestring):
self.__type_name = message_type
self.__type = None
else:
self.__type = message_type
super(MessageField, self).__init__(number,
required=required,
repeated=repeated,
variant=variant)
def __set__(self, message_instance, value):
"""Set value on message.
Args:
message_instance: Message instance to set value on.
value: Value to set on message.
"""
message_type = self.type
if isinstance(message_type, type) and issubclass(message_type, Message):
if self.repeated:
if value and isinstance(value, (list, tuple)):
value = [(message_type(**v) if isinstance(v, dict) else v)
for v in value]
elif isinstance(value, dict):
value = message_type(**value)
super(MessageField, self).__set__(message_instance, value)
@property
def type(self):
"""Message type used for field."""
if self.__type is None:
message_type = find_definition(self.__type_name, self.message_definition())
if not (message_type is not Message and
isinstance(message_type, type) and
issubclass(message_type, Message)):
raise FieldDefinitionError('Invalid message class: %s' % message_type)
self.__type = message_type
return self.__type
@property
def message_type(self):
"""Underlying message type used for serialization.
Will always be a sub-class of Message. This is different from type
which represents the python value that message_type is mapped to for
use by the user.
"""
return self.type
def value_from_message(self, message):
"""Convert a message to a value instance.
Used by deserializers to convert from underlying messages to
value of expected user type.
Args:
message: A message instance of type self.message_type.
Returns:
Value of self.message_type.
"""
if not isinstance(message, self.message_type):
raise DecodeError('Expected type %s, got %s: %r' %
(self.message_type.__name__,
type(message).__name__,
message))
return message
def value_to_message(self, value):
"""Convert a value instance to a message.
Used by serializers to convert Python user types to underlying
messages for transmission.
Args:
value: A value of type self.type.
Returns:
An instance of type self.message_type.
"""
if not isinstance(value, self.type):
raise EncodeError('Expected type %s, got %s: %r' %
(self.type.__name__,
type(value).__name__,
value))
return value
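# Illustrative sketch (not part of the original module): MessageField.__set__
# above coerces plain dicts into instances of the referenced message type,
# element-wise for repeated fields. The message classes are hypothetical.
#
#   class Inner(Message):
#     value = IntegerField(1)
#
#   class Outer(Message):
#     inner = MessageField(Inner, 1)
#     items = MessageField(Inner, 2, repeated=True)
#
#   outer = Outer()
#   outer.inner = {'value': 1}                    # Coerced to Inner(value=1).
#   outer.items = [{'value': 2}, Inner(value=3)]  # Dicts coerced per element.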
class EnumField(Field):
"""Field definition for enum values.
Enum fields may have default values that are delayed until the associated enum
type is resolved. This is necessary to support certain circular references.
For example:
class Message1(Message):
class Color(Enum):
RED = 1
GREEN = 2
BLUE = 3
# This field's default value will be validated when default is accessed.
animal = EnumField('Message2.Animal', 1, default='HORSE')
class Message2(Message):
class Animal(Enum):
DOG = 1
CAT = 2
HORSE = 3
# This field's default value will be validated right away since Color is
# already fully resolved.
color = EnumField(Message1.Color, 1, default='RED')
"""
VARIANTS = frozenset([Variant.ENUM])
DEFAULT_VARIANT = Variant.ENUM
def __init__(self, enum_type, number, **kwargs):
"""Constructor.
Args:
enum_type: Enum type for field. Must be subclass of Enum.
number: Number of field. Must be unique per message class.
required: Whether or not field is required. Mutually exclusive to
'repeated'.
repeated: Whether or not field is repeated. Mutually exclusive to
'required'.
variant: Wire-format variant hint.
default: Default value for field if not found in stream.
Raises:
FieldDefinitionError when invalid enum_type is provided.
"""
valid_type = (isinstance(enum_type, basestring) or
(enum_type is not Enum and
isinstance(enum_type, type) and
issubclass(enum_type, Enum)))
if not valid_type:
raise FieldDefinitionError('Invalid enum type: %s' % enum_type)
if isinstance(enum_type, basestring):
self.__type_name = enum_type
self.__type = None
else:
self.__type = enum_type
super(EnumField, self).__init__(number, **kwargs)
def validate_default_element(self, value):
"""Validate default element of Enum field.
Enum fields allow for delayed resolution of default values when the type
of the field has not been resolved. The default value of a field may be
a string or an integer. If the Enum type of the field has been resolved,
the default value is validated against that type.
Args:
value: Value to validate.
Raises:
ValidationError if value is not expected message type.
"""
if isinstance(value, (basestring, int, long)):
# Validation of the value does not happen for delayed resolution
# enumerated types. Ignore if type is not yet resolved.
if self.__type:
self.__type(value)
return
super(EnumField, self).validate_default_element(value)
@property
def type(self):
"""Enum type used for field."""
if self.__type is None:
found_type = find_definition(self.__type_name, self.message_definition())
if not (found_type is not Enum and
isinstance(found_type, type) and
issubclass(found_type, Enum)):
raise FieldDefinitionError('Invalid enum type: %s' % found_type)
self.__type = found_type
return self.__type
@property
def default(self):
"""Default for enum field.
Will cause resolution of Enum type and unresolved default value.
"""
try:
return self.__resolved_default
except AttributeError:
resolved_default = super(EnumField, self).default
if isinstance(resolved_default, (basestring, int, long)):
resolved_default = self.type(resolved_default)
self.__resolved_default = resolved_default
return self.__resolved_default
@util.positional(2)
def find_definition(name, relative_to=None, importer=__import__):
"""Find definition by name in module-space.
The find algorithm will look for definitions by name relative to a message
definition or by fully qualified name. If no definition is found relative
to the relative_to parameter it will do the same search against the container
of relative_to. If relative_to is a nested Message, it will search its
message_definition(). If that message has no message_definition() it will
search its module. If relative_to is a module, it will attempt to look for
the containing module and search relative to it. If the module is a top-level
module, it will look for a message using a fully qualified name. If
no message is found then the search fails and DefinitionNotFoundError is
raised.
For example, when looking for any definition 'foo.bar.ADefinition' relative to
an actual message definition abc.xyz.SomeMessage:
find_definition('foo.bar.ADefinition', SomeMessage)
It is like looking for the following fully qualified names:
abc.xyz.SomeMessage. foo.bar.ADefinition
abc.xyz. foo.bar.ADefinition
abc. foo.bar.ADefinition
foo.bar.ADefinition
When resolving the name relative to Message definitions and modules, the
algorithm searches any Messages or sub-modules found in its path.
Non-Message values are not searched.
A name that begins with '.' is considered to be a fully qualified name. The
name is always searched for from the topmost package. For example, assume
two message types:
abc.xyz.SomeMessage
xyz.SomeMessage
Searching for '.xyz.SomeMessage' relative to 'abc' will resolve to
'xyz.SomeMessage' and not 'abc.xyz.SomeMessage'. For this kind of name,
the relative_to parameter is effectively ignored and always set to None.
For more information about package name resolution, please see:
http://code.google.com/apis/protocolbuffers/docs/proto.html#packages
Args:
name: Name of definition to find. May be fully qualified or relative name.
relative_to: Search for definition relative to message definition or module.
None will cause a fully qualified name search.
importer: Import function to use for resolving modules.
Returns:
Enum or Message class definition associated with name.
Raises:
DefinitionNotFoundError if no definition is found in any search path.
"""
# Check parameters.
if not (relative_to is None or
isinstance(relative_to, types.ModuleType) or
isinstance(relative_to, type) and issubclass(relative_to, Message)):
raise TypeError('relative_to must be None, Message definition or module. '
'Found: %s' % relative_to)
name_path = name.split('.')
# Handle absolute path reference.
if not name_path[0]:
relative_to = None
name_path = name_path[1:]
def search_path():
"""Performs a single iteration searching the path from relative_to.
This is the function that searches up the path from a relative object.
fully.qualified.object . relative.or.nested.Definition
---------------------------->
^
|
this part of search --+
Returns:
Message or Enum at the end of name_path, else None.
"""
next = relative_to
for node in name_path:
# Look for attribute first.
attribute = getattr(next, node, None)
if attribute is not None:
next = attribute
else:
# If module, look for sub-module.
if next is None or isinstance(next, types.ModuleType):
if next is None:
module_name = node
else:
module_name = '%s.%s' % (next.__name__, node)
try:
fromitem = module_name.split('.')[-1]
next = importer(module_name, '', '', [str(fromitem)])
except ImportError:
return None
else:
return None
if (not isinstance(next, types.ModuleType) and
not (isinstance(next, type) and
issubclass(next, (Message, Enum)))):
return None
return next
while True:
found = search_path()
if isinstance(found, type) and issubclass(found, (Enum, Message)):
return found
else:
# Find next relative_to to search against.
#
# fully.qualified.object . relative.or.nested.Definition
# <---------------------
# ^
# |
# does this part of search
if relative_to is None:
# Fully qualified search was done. Nothing found. Fail.
raise DefinitionNotFoundError('Could not find definition for %s'
% (name,))
else:
if isinstance(relative_to, types.ModuleType):
# Find parent module.
module_path = relative_to.__name__.split('.')[:-1]
if not module_path:
relative_to = None
else:
# Should not raise ImportError. If it does... weird and
# unexpected. Propagate.
relative_to = importer(
'.'.join(module_path), '', '', [module_path[-1]])
elif (isinstance(relative_to, type) and
issubclass(relative_to, Message)):
parent = relative_to.message_definition()
if parent is None:
last_module_name = relative_to.__module__.split('.')[-1]
relative_to = importer(
relative_to.__module__, '', '', [last_module_name])
else:
relative_to = parent
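# Illustrative sketch (not part of the original module): typical calls to
# find_definition. The module path and class names are hypothetical.
#
#   # A leading '.' forces a fully qualified search from the topmost package.
#   color_enum = find_definition('.myapp.messages.Color')
#
#   # A relative search walks outward from the given message definition.
#   inner_class = find_definition('Inner', relative_to=Outer)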
| apache-2.0 |
allenp/odoo | addons/l10n_fr_hr_payroll/__openerp__.py | 24 | 1219 | # -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'French Payroll',
'category': 'Localization/Payroll',
'author': 'Yannick Buron (SYNERPGY)',
'depends': ['hr_payroll', 'l10n_fr'],
'version': '1.0',
'description': """
French Payroll Rules.
=====================
- Configuration of hr_payroll for French localization
- All main contribution rules for French payslips, for 'cadre' and 'non-cadre'
- New payslip report
TODO:
-----
- Integration with holidays module for deduction and allowance
- Integration with hr_payroll_account for the automatic account_move_line
creation from the payslip
- Continue to integrate the contributions. Only the main contributions are
currently implemented
- Remake the report under webkit
- The payslip.line with appears_in_payslip = False should appear in the
payslip interface, but not in the payslip report
""",
'active': False,
'data': [
'l10n_fr_hr_payroll_view.xml',
'l10n_fr_hr_payroll_data.xml',
'views/report_l10nfrfichepaye.xml',
'l10n_fr_hr_payroll_reports.xml',
],
'installable': True
}
| gpl-3.0 |
JoshRosen/spark | python/pyspark/mllib/stat/__init__.py | 131 | 1197 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Python package for statistical functions in MLlib.
"""
from pyspark.mllib.stat._statistics import *
from pyspark.mllib.stat.distribution import MultivariateGaussian
from pyspark.mllib.stat.test import ChiSqTestResult
from pyspark.mllib.stat.KernelDensity import KernelDensity
__all__ = ["Statistics", "MultivariateStatisticalSummary", "ChiSqTestResult",
"MultivariateGaussian", "KernelDensity"]
| apache-2.0 |
MehtapIsik/bayesian-itc | data/auto-iTC-200/053014/automation.py | 3 | 5653 | #==============================================================================
# GLOBAL IMPORTS
#==============================================================================
import simtk.unit as units
#==============================================================================
# SOLVENT
#==============================================================================
class Solvent(object):
"""
A Solvent object represents a liquid that may be pipetted, and in which compounds may be dissolved.
"""
def __init__(self, name, density=None):
"""
Parameters
----------
name : str
The name of the solvent to create.
density : simtk.unit.Quantity with units compatible with grams/milliliter, optional, default=None
The density of the solvent.
Examples
--------
Register a solvent.
>>> water = Solvent('water', density=0.9970479*units.grams/units.centimeter**3)
Register another solvent.
>>> dmso = Solvent('dmso', density=1.1004*units.grams/units.centimeter**3)
"""
self.name = name
self.density = density
#==============================================================================
# COMPOUND
#==============================================================================
class Compound(object):
"""
A Compound object represents a compound that can be dissolved in a solvent.
"""
def __init__(self, name, molecular_weight=None, purity=1.0):
"""
Parameters
----------
name : str
The name of the compound to create.
molecular_weight : simtk.unit.Quantity with units compatible with grams/mole, optional, default=None
The molecular weight of the compound.
purity : float, optional, default=1.0
The mass purity used for computing actual quantity of compound.
Examples
--------
Register a compound.
>>> nacl = Compound('sodium chloride')
Register a compound with molecular weight.
>>> imatinib = Compound('imatinib mesylate', molecular_weight=589.7*units.grams/units.mole)
Use a non-unit purity.
>>> compound1 = Compound('compound1', molecular_weight=209.12*units.grams/units.mole, purity=0.975)
"""
self.name = name
self.molecular_weight = molecular_weight
self.purity = purity
#==============================================================================
# PIPETTING LOCATION
#==============================================================================
class PipettingLocation(object):
def __init__(self, RackLabel, RackType, Position):
# Information for Tecan LiHa.
self.RackLabel = RackLabel
self.RackType = RackType
self.Position = Position
#==============================================================================
# SOLUTION
#==============================================================================
class SimpleSolution(Solvent):
"""
A SimpleSolution object represents a solution containing one compound and one solvent.
The solution is assumed to be ideal, with the same volume as that of the solvent.
"""
def __init__(self, compound, compound_mass, solvent, solvent_mass, location):
"""
compound : Compound
The compound added to the solution.
compound_mass : simtk.unit.Quantity compatible with grams
The mass of compound added to the solution.
solvent : Solvent
The solvent used for the solution.
solvent_mass : simtk.unit.Quantity compatible with grams
The mass of solvent used for the solution.
location : PipettingLocation
The pipetting location holding the solution.
Examples
--------
Create a simple salt solution.
>>> salt = Compound('sodium chloride', molecular_weight=58.44277*units.grams/units.mole)
>>> water = Solvent('water', density=0.9970479*units.grams/units.centimeter**3)
>>> location = PipettingLocation('BufferTrough', 'Trough 100ml', 1)
>>> solution = SimpleSolution(compound=salt, compound_mass=1.0*units.milligrams, solvent=water, solvent_mass=10.0*units.grams, location=location)
TODO
----
* Allow specification of quantity of compound and solvent in various ways (mass, moles, volume) with automated conversions.
"""
self.compound = compound
self.compound_mass = compound_mass
self.solvent = solvent
self.solvent_mass = solvent_mass
self.name = compound.name
# Compute total solution mass.
self.solution_mass = self.compound_mass + self.solvent_mass
# Assume solution is ideal; that density and volume is same as solvent.
self.density = solvent.density
self.volume = solvent_mass / solvent.density
# Compute number of moles of compound.
self.compound_moles = compound_mass / compound.molecular_weight * compound.purity # number of moles of compound
# Compute molarity.
self.concentration = self.compound_moles / self.volume
# Store location.
self.location = location
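# Illustrative worked example (not part of the original module): 1.0 mg of
# NaCl (molecular weight 58.44 g/mol, purity 1.0) in 10.0 g of water
# (density ~0.997 g/cm^3) gives moles = 0.001 g / 58.44 g/mol ~= 1.71e-5 mol
# and volume ~= 10.03 cm^3, so the concentration is roughly 1.71e-3 mol/L.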
#==============================================================================
# MAIN AND TESTS
#==============================================================================
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 |
stone5495/NewsBlur | vendor/feedvalidator/content.py | 16 | 6021 | """$Id: content.py 699 2006-09-25 02:01:18Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 699 $"
__date__ = "$Date: 2006-09-25 02:01:18 +0000 (Mon, 25 Sep 2006) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import *
from logging import *
#
# item element.
#
class textConstruct(validatorBase,rfc2396,nonhtml):
from validators import mime_re
import re
def getExpectedAttrNames(self):
return [(None, u'type'),(None, u'src')]
def normalizeWhitespace(self):
pass
def maptype(self):
if self.type.find('/') > -1:
self.log(InvalidTextType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
def prevalidate(self):
if self.attrs.has_key((None,"src")):
self.type=''
else:
self.type='text'
if self.getFeedType() == TYPE_RSS2 and self.name != 'atom_summary':
self.log(DuplicateDescriptionSemantics({"element":self.name}))
if self.attrs.has_key((None,"type")):
self.type=self.attrs.getValue((None,"type"))
if not self.type:
self.log(AttrNotBlank({"parent":self.parent.name, "element":self.name, "attr":"type"}))
self.maptype()
if self.attrs.has_key((None,"src")):
self.children.append(True) # force warnings about "mixed" content
self.value=self.attrs.getValue((None,"src"))
rfc2396.validate(self, errorClass=InvalidURIAttribute, extraParams={"attr": "src"})
self.value=""
if not self.attrs.has_key((None,"type")):
self.log(MissingTypeAttr({"parent":self.parent.name, "element":self.name, "attr":"type"}))
if self.type in ['text','html','xhtml'] and not self.attrs.has_key((None,"src")):
pass
elif self.type and not self.mime_re.match(self.type):
self.log(InvalidMIMEType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
else:
self.log(ValidMIMEAttribute({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
if not self.xmlLang:
self.log(MissingDCLanguage({"parent":self.name, "element":"xml:lang"}))
def validate(self):
if self.type in ['text','xhtml']:
if self.type=='xhtml':
nonhtml.validate(self, NotInline)
else:
nonhtml.validate(self, ContainsUndeclaredHTML)
else:
if self.type.find('/') > -1 and not (
self.type.endswith('+xml') or self.type.endswith('/xml') or
self.type.startswith('text/')):
import base64
try:
self.value=base64.decodestring(self.value)
if self.type.endswith('/html'): self.type='html'
except:
self.log(NotBase64({"parent":self.parent.name, "element":self.name,"value":self.value}))
if self.type=='html' or self.type.endswith("/html"):
self.validateSafe(self.value)
if self.type.endswith("/html"):
if self.value.find("<html")<0 and not self.attrs.has_key((None,"src")):
self.log(HtmlFragment({"parent":self.parent.name, "element":self.name,"value":self.value, "type":self.type}))
else:
nonhtml.validate(self, ContainsUndeclaredHTML)
if not self.value and len(self.children)==0 and not self.attrs.has_key((None,"src")):
self.log(NotBlank({"parent":self.parent.name, "element":self.name}))
def textOK(self):
if self.children: validatorBase.textOK(self)
def characters(self, string):
for c in string:
if 0x80 <= ord(c) <= 0x9F or c == u'\ufffd':
from validators import BadCharacters
self.log(BadCharacters({"parent":self.parent.name, "element":self.name}))
if (self.type=='xhtml') and string.strip() and not self.value.strip():
self.log(MissingXhtmlDiv({"parent":self.parent.name, "element":self.name}))
validatorBase.characters(self,string)
def startElementNS(self, name, qname, attrs):
if (self.type<>'xhtml') and not (
self.type.endswith('+xml') or self.type.endswith('/xml')):
self.log(UndefinedElement({"parent":self.name, "element":name}))
if self.type=="xhtml":
if name<>'div' and not self.value.strip():
self.log(MissingXhtmlDiv({"parent":self.parent.name, "element":self.name}))
elif qname not in ["http://www.w3.org/1999/xhtml"]:
self.log(NotHtml({"parent":self.parent.name, "element":self.name, "message":"unexpected namespace: %s" % qname}))
if self.type=="application/xhtml+xml":
if name<>'html':
self.log(HtmlFragment({"parent":self.parent.name, "element":self.name,"value":self.value, "type":self.type}))
elif qname not in ["http://www.w3.org/1999/xhtml"]:
self.log(NotHtml({"parent":self.parent.name, "element":self.name, "message":"unexpected namespace: %s" % qname}))
if self.attrs.has_key((None,"mode")):
if self.attrs.getValue((None,"mode")) == 'escaped':
self.log(NotEscaped({"parent":self.parent.name, "element":self.name}))
if name=="div" and qname=="http://www.w3.org/1999/xhtml":
handler=diveater()
else:
handler=eater()
self.children.append(handler)
self.push(handler, name, attrs)
# treat xhtml:div as part of the content for purposes of detecting escaped html
class diveater(eater):
def __init__(self):
eater.__init__(self)
self.mixed = False
def textOK(self):
pass
def characters(self, string):
validatorBase.characters(self, string)
def startElementNS(self, name, qname, attrs):
if not qname:
self.log(MissingNamespace({"parent":"xhtml:div", "element":name}))
self.mixed = True
eater.startElementNS(self, name, qname, attrs)
def validate(self):
if not self.mixed: self.parent.value += self.value
class content(textConstruct):
def maptype(self):
if self.type == 'multipart/alternative':
self.log(InvalidMIMEType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
| mit |
paiser/component-management | storm/sqlobject.py | 1 | 17342 | #
# Copyright (c) 2006, 2007 Canonical
#
# Written by Gustavo Niemeyer <gustavo@niemeyer.net>
#
# This file is part of Storm Object Relational Mapper.
#
# Storm is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# Storm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""A SQLObject emulation layer for Storm.
L{SQLObjectBase} is the central point of compatibility.
"""
import re
from storm.properties import (
RawStr, Int, Bool, Float, DateTime, Date, TimeDelta)
from storm.references import Reference, ReferenceSet
from storm.properties import SimpleProperty, PropertyPublisherMeta
from storm.variables import Variable
from storm.exceptions import StormError
from storm.info import get_cls_info
from storm.store import Store
from storm.base import Storm
from storm.expr import SQL, SQLRaw, Desc, And, Or, Not, In, Like
from storm.tz import tzutc
from storm import Undef
__all__ = ["SQLObjectBase", "StringCol", "IntCol", "BoolCol", "FloatCol",
"DateCol", "UtcDateTimeCol", "IntervalCol", "ForeignKey",
"SQLMultipleJoin", "SQLRelatedJoin", "DESC", "AND", "OR",
"NOT", "IN", "LIKE", "SQLConstant", "SQLObjectNotFound",
"CONTAINSSTRING"]
DESC, AND, OR, NOT, IN, LIKE, SQLConstant = Desc, And, Or, Not, In, Like, SQL
_IGNORED = object()
class SQLObjectNotFound(StormError):
pass
class SQLObjectStyle(object):
longID = False
def idForTable(self, table_name):
if self.longID:
return self.tableReference(table_name)
else:
return 'id'
def pythonClassToAttr(self, class_name):
return self._lowerword(class_name)
def instanceAttrToIDAttr(self, attr_name):
return attr_name + "ID"
def pythonAttrToDBColumn(self, attr_name):
return self._mixed_to_under(attr_name)
def dbColumnToPythonAttr(self, column_name):
return self._under_to_mixed(column_name)
def pythonClassToDBTable(self, class_name):
return class_name[0].lower()+self._mixed_to_under(class_name[1:])
def dbTableToPythonClass(self, table_name):
return table_name[0].upper()+self._under_to_mixed(table_name[1:])
def pythonClassToDBTableReference(self, class_name):
return self.tableReference(self.pythonClassToDBTable(class_name))
def tableReference(self, table_name):
return table_name+"_id"
def _mixed_to_under(self, name, _re=re.compile(r'[A-Z]+')):
if name.endswith('ID'):
return self._mixed_to_under(name[:-2]+"_id")
name = _re.sub(self._mixed_to_under_sub, name)
if name.startswith('_'):
return name[1:]
return name
def _mixed_to_under_sub(self, match):
m = match.group(0).lower()
if len(m) > 1:
return '_%s_%s' % (m[:-1], m[-1])
else:
return '_%s' % m
def _under_to_mixed(self, name, _re=re.compile('_.')):
if name.endswith('_id'):
return self._under_to_mixed(name[:-3] + "ID")
return _re.sub(self._under_to_mixed_sub, name)
def _under_to_mixed_sub(self, match):
return match.group(0)[1].upper()
@staticmethod
def _capword(s):
return s[0].upper() + s[1:]
@staticmethod
def _lowerword(s):
return s[0].lower() + s[1:]
class SQLObjectMeta(PropertyPublisherMeta):
@staticmethod
def _get_attr(attr, bases, dict):
value = dict.get(attr)
if value is None:
for base in bases:
value = getattr(base, attr, None)
if value is not None:
break
return value
def __new__(cls, name, bases, dict):
if Storm in bases or SQLObjectBase in bases:
# Do not parse abstract base classes.
return type.__new__(cls, name, bases, dict)
style = cls._get_attr("_style", bases, dict)
if style is None:
dict["_style"] = style = SQLObjectStyle()
table_name = cls._get_attr("_table", bases, dict)
if table_name is None:
table_name = style.pythonClassToDBTable(name)
id_name = cls._get_attr("_idName", bases, dict)
if id_name is None:
id_name = style.idForTable(table_name)
# Handle this later to call _parse_orderBy() on the created class.
default_order = cls._get_attr("_defaultOrder", bases, dict)
dict["__storm_table__"] = table_name
attr_to_prop = {}
for attr, prop in dict.items():
attr_to_prop[attr] = attr
if isinstance(prop, ForeignKey):
db_name = prop.kwargs.get("dbName", attr)
local_prop_name = style.instanceAttrToIDAttr(attr)
dict[local_prop_name] = local_prop = Int(db_name)
dict[attr] = Reference(local_prop,
"%s.<primary key>" % prop.foreignKey)
attr_to_prop[attr] = local_prop_name
elif isinstance(prop, PropertyAdapter):
db_name = prop.dbName or attr
method_name = prop.alternateMethodName
if method_name is None and prop.alternateID:
method_name = "by" + db_name[0].upper() + db_name[1:]
if method_name is not None:
def func(cls, key, attr=attr):
store = cls._get_store()
obj = store.find(cls, getattr(cls, attr) == key).one()
if obj is None:
raise SQLObjectNotFound
return obj
func.func_name = method_name
dict[method_name] = classmethod(func)
id_type = dict.get("_idType", int)
id_cls = {int: Int, str: RawStr, unicode: AutoUnicode}[id_type]
dict[id_name] = id_cls(primary=True)
# Notice that obj is the class since this is the metaclass.
obj = super(SQLObjectMeta, cls).__new__(cls, name, bases, dict)
property_registry = obj._storm_property_registry
property_registry.add_property(obj, getattr(obj, id_name),
"<primary key>")
for fake_name, real_name in attr_to_prop.items():
prop = getattr(obj, real_name)
if fake_name != real_name:
property_registry.add_property(obj, prop, fake_name)
attr_to_prop[fake_name] = prop
obj._attr_to_prop = attr_to_prop
if default_order is not None:
cls_info = get_cls_info(obj)
cls_info.default_order = obj._parse_orderBy(default_order)
return obj
class DotQ(object):
"""A descriptor that mimics the SQLObject 'Table.q' syntax"""
def __get__(self, obj, cls=None):
return BoundDotQ(cls)
class BoundDotQ(object):
def __init__(self, cls):
self._cls = cls
def __getattr__(self, attr):
if attr.startswith('__'):
raise AttributeError(attr)
elif attr == 'id':
cls_info = get_cls_info(self._cls)
return cls_info.primary_key[0]
else:
return getattr(self._cls, attr)
class SQLObjectBase(Storm):
"""The root class of all SQLObject-emulating classes in your application.
The general strategy for using Storm's SQLObject emulation layer
is to create an application-specific subclass of SQLObjectBase
(probably named "SQLObject") that provides an implementation of
_get_store to return an instance of L{storm.store.Store}. It may
even be implemented as returning a global L{Store} instance. Then
all database classes should subclass that class.
"""
__metaclass__ = SQLObjectMeta
q = DotQ()
def __init__(self, *args, **kwargs):
self._get_store().add(self)
self._create(None, **kwargs)
def __storm_loaded__(self):
self._init(None)
def _init(self, id, *args, **kwargs):
pass
def _create(self, _id_, **kwargs):
self.set(**kwargs)
self._init(None)
def set(self, **kwargs):
for attr, value in kwargs.iteritems():
setattr(self, attr, value)
def destroySelf(self):
Store.of(self).remove(self)
@staticmethod
def _get_store():
raise NotImplementedError("SQLObjectBase._get_store() "
"must be implemented")
@classmethod
def delete(cls, id):
# destroySelf() should be extended to support cascading, so
# we'll mimic what SQLObject does here, even if more expensive.
obj = cls.get(id)
obj.destroySelf()
@classmethod
def get(cls, id):
store = cls._get_store()
obj = store.get(cls, id)
if obj is None:
raise SQLObjectNotFound("Object not found")
return obj
@classmethod
def _parse_orderBy(cls, orderBy):
result = []
if not isinstance(orderBy, (tuple, list)):
orderBy = (orderBy,)
for item in orderBy:
if isinstance(item, basestring):
desc = item.startswith("-")
if desc:
item = item[1:]
item = cls._attr_to_prop.get(item, item)
if desc:
item = Desc(item)
result.append(item)
return tuple(result)
@classmethod
def _find(cls, clause=None, clauseTables=None, orderBy=None,
limit=None, distinct=None, prejoins=_IGNORED,
prejoinClauseTables=_IGNORED, _by={}):
store = cls._get_store()
if clause is None:
args = ()
else:
args = (clause,)
if clauseTables is not None:
clauseTables = set(table.lower() for table in clauseTables)
clauseTables.add(cls.__storm_table__.lower())
store = store.using(*clauseTables)
result = store.find(cls, *args, **_by)
if orderBy is not None:
result.order_by(*cls._parse_orderBy(orderBy))
result.config(limit=limit, distinct=distinct)
return result
@classmethod
def select(cls, *args, **kwargs):
result = cls._find(*args, **kwargs)
return SQLObjectResultSet(result, cls)
@classmethod
def selectBy(cls, orderBy=None, **kwargs):
result = cls._find(orderBy=orderBy, _by=kwargs)
return SQLObjectResultSet(result, cls)
@classmethod
def selectOne(cls, *args, **kwargs):
return cls._find(*args, **kwargs).one()
@classmethod
def selectOneBy(cls, **kwargs):
return cls._find(_by=kwargs).one()
@classmethod
def selectFirst(cls, *args, **kwargs):
return cls._find(*args, **kwargs).first()
@classmethod
def selectFirstBy(cls, orderBy=None, **kwargs):
return cls._find(orderBy=orderBy, _by=kwargs).first()
# Dummy methods.
def sync(self): pass
def syncUpdate(self): pass
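# Illustrative sketch (not part of the original module): the strategy the
# SQLObjectBase docstring describes -- an application base class that returns
# a global Store. The database URI and table class are hypothetical.
#
#   from storm.locals import create_database
#
#   database = create_database('sqlite:app.db')
#   store = Store(database)
#
#   class SQLObject(SQLObjectBase):
#       @staticmethod
#       def _get_store():
#           return store
#
#   class Person(SQLObject):
#       name = StringCol()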
class SQLObjectResultSet(object):
def __init__(self, result_set, cls):
self._result_set = result_set
self._cls = cls
def count(self):
return self._result_set.count()
def __iter__(self):
return self._result_set.__iter__()
def __getitem__(self, index):
result_set = self._result_set[index]
if isinstance(index, slice):
return self.__class__(result_set, self._cls)
return result_set
def __nonzero__(self):
return self._result_set.any() is not None
def orderBy(self, orderBy):
result_set = self._result_set.copy()
result_set.order_by(*self._cls._parse_orderBy(orderBy))
return self.__class__(result_set, self._cls)
def limit(self, limit):
result_set = self._result_set.copy().config(limit=limit)
return self.__class__(result_set, self._cls)
def distinct(self):
result_set = self._result_set.copy().config(distinct=True)
result_set.order_by() # Remove default order.
return self.__class__(result_set, self._cls)
def union(self, otherSelect, unionAll=False, orderBy=None):
result_set = self._result_set.union(otherSelect._result_set,
all=unionAll)
result_set.order_by() # Remove default order.
new = self.__class__(result_set, self._cls)
if orderBy is not None:
return new.orderBy(orderBy)
return new
def except_(self, otherSelect, exceptAll=False, orderBy=None):
result_set = self._result_set.difference(otherSelect._result_set,
all=exceptAll)
result_set.order_by() # Remove default order.
new = self.__class__(result_set, self._cls)
if orderBy is not None:
return new.orderBy(orderBy)
return new
def intersect(self, otherSelect, intersectAll=False, orderBy=None):
result_set = self._result_set.intersection(otherSelect._result_set,
all=intersectAll)
new = self.__class__(result_set, self._cls)
if orderBy is not None:
return new.orderBy(orderBy)
return new
def prejoin(self, prejoins):
return self
def prejoinClauseTables(self, prejoinClauseTables):
return self
class PropertyAdapter(object):
_kwargs = {}
def __init__(self, dbName=None, notNull=False, default=Undef,
alternateID=None, unique=_IGNORED, name=_IGNORED,
alternateMethodName=None, length=_IGNORED, immutable=None,
prejoins=_IGNORED):
if default is None and notNull:
raise RuntimeError("Can't use default=None and notNull=True")
self.dbName = dbName
self.alternateID = alternateID
self.alternateMethodName = alternateMethodName
# XXX Implement handler for:
#
# - immutable (causes setting the attribute to fail)
#
# XXX Implement tests for ignored parameters:
#
# - unique (for tablebuilder)
# - length (for tablebuilder for StringCol)
# - name (for _columns stuff)
# - prejoins
if callable(default):
default_factory = default
default = Undef
else:
default_factory = Undef
super(PropertyAdapter, self).__init__(dbName, allow_none=not notNull,
default_factory=default_factory,
default=default, **self._kwargs)
class AutoUnicodeVariable(Variable):
"""Unlike UnicodeVariable, this will try to convert str to unicode."""
def parse_set(self, value, from_db):
if not isinstance(value, basestring):
raise TypeError("Expected basestring, found %s" % repr(type(value)))
return unicode(value)
class AutoUnicode(SimpleProperty):
variable_class = AutoUnicodeVariable
class StringCol(PropertyAdapter, AutoUnicode):
pass
class IntCol(PropertyAdapter, Int):
pass
class BoolCol(PropertyAdapter, Bool):
pass
class FloatCol(PropertyAdapter, Float):
pass
class UtcDateTimeCol(PropertyAdapter, DateTime):
_kwargs = {"tzinfo": tzutc()}
class DateCol(PropertyAdapter, Date):
pass
class IntervalCol(PropertyAdapter, TimeDelta):
pass
class ForeignKey(object):
def __init__(self, foreignKey, **kwargs):
self.foreignKey = foreignKey
self.kwargs = kwargs
class SQLMultipleJoin(ReferenceSet):
def __init__(self, otherClass=None, joinColumn=None,
intermediateTable=None, otherColumn=None, orderBy=None,
prejoins=_IGNORED):
if intermediateTable:
args = ("<primary key>",
"%s.%s" % (intermediateTable, joinColumn),
"%s.%s" % (intermediateTable, otherColumn),
"%s.<primary key>" % otherClass)
else:
args = ("<primary key>", "%s.%s" % (otherClass, joinColumn))
ReferenceSet.__init__(self, *args)
self._orderBy = orderBy
def __get__(self, obj, cls=None):
if obj is None:
return self
bound_reference_set = ReferenceSet.__get__(self, obj)
target_cls = bound_reference_set._target_cls
result_set = bound_reference_set.find()
if self._orderBy:
result_set.order_by(*target_cls._parse_orderBy(self._orderBy))
return SQLObjectResultSet(result_set, target_cls)
SQLRelatedJoin = SQLMultipleJoin
class CONTAINSSTRING(Like):
def __init__(self, expr, string):
string = string.replace("!", "!!") \
.replace("_", "!_") \
.replace("%", "!%")
Like.__init__(self, expr, "%"+string+"%", SQLRaw("'!'"))
| gpl-2.0 |
KaranToor/MA450 | google-cloud-sdk/.install/.backup/lib/third_party/prompt_toolkit/contrib/completers/base.py | 23 | 2272 | from __future__ import unicode_literals
from six import string_types
from prompt_toolkit.completion import Completer, Completion
__all__ = (
'WordCompleter',
)
class WordCompleter(Completer):
"""
Simple autocompletion on a list of words.
:param words: List of words.
:param ignore_case: If True, case-insensitive completion.
:param meta_dict: Optional dict mapping words to their meta-information.
:param WORD: When True, use WORD characters.
:param sentence: When True, don't complete by comparing the word before the
cursor, but by comparing all the text before the cursor. In this case,
the list of words is just a list of strings, where each string can
contain spaces. (Can not be used together with the WORD option.)
:param match_middle: When True, match not only the start, but also in the
middle of the word.
"""
def __init__(self, words, ignore_case=False, meta_dict=None, WORD=False,
sentence=False, match_middle=False):
assert not (WORD and sentence)
self.words = list(words)
self.ignore_case = ignore_case
self.meta_dict = meta_dict or {}
self.WORD = WORD
self.sentence = sentence
self.match_middle = match_middle
assert all(isinstance(w, string_types) for w in self.words)
def get_completions(self, document, complete_event):
# Get word/text before cursor.
if self.sentence:
word_before_cursor = document.text_before_cursor
else:
word_before_cursor = document.get_word_before_cursor(WORD=self.WORD)
if self.ignore_case:
word_before_cursor = word_before_cursor.lower()
def word_matches(word):
""" True when the word before the cursor matches. """
if self.ignore_case:
word = word.lower()
if self.match_middle:
return word_before_cursor in word
else:
return word.startswith(word_before_cursor)
for a in self.words:
if word_matches(a):
display_meta = self.meta_dict.get(a, '')
yield Completion(a, -len(word_before_cursor), display_meta=display_meta)
| apache-2.0 |
seanr/nnwo | profiles/ableorganizer/libraries/openlayers/tools/jsmin.py | 513 | 7471 | #!/usr/bin/python
# This code is original from jsmin by Douglas Crockford, it was translated to
# Python by Baruch Even. The original code had the following copyright and
# license.
#
# /* jsmin.c
# 2007-01-08
#
# Copyright (c) 2002 Douglas Crockford (www.crockford.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# The Software shall be used for Good, not Evil.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# */
from StringIO import StringIO
def jsmin(js):
ins = StringIO(js)
outs = StringIO()
JavascriptMinify().minify(ins, outs)
str = outs.getvalue()
if len(str) > 0 and str[0] == '\n':
str = str[1:]
return str
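# Illustrative usage sketch (not part of the original module):
#
#   minified = jsmin("var x = 1; // trailing comment\nvar y = 2;\n")
#   # Comments and insignificant whitespace are removed, yielding
#   # roughly 'var x=1;var y=2;'.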
def isAlphanum(c):
"""return true if the character is a letter, digit, underscore,
dollar sign, or non-ASCII character.
"""
return ((c >= 'a' and c <= 'z') or (c >= '0' and c <= '9') or
(c >= 'A' and c <= 'Z') or c == '_' or c == '$' or c == '\\' or (c is not None and ord(c) > 126));
class UnterminatedComment(Exception):
pass
class UnterminatedStringLiteral(Exception):
pass
class UnterminatedRegularExpression(Exception):
pass
class JavascriptMinify(object):
def _outA(self):
self.outstream.write(self.theA)
def _outB(self):
self.outstream.write(self.theB)
def _get(self):
"""return the next character from stdin. Watch out for lookahead. If
the character is a control character, translate it to a space or
linefeed.
"""
c = self.theLookahead
self.theLookahead = None
if c == None:
c = self.instream.read(1)
if c >= ' ' or c == '\n':
return c
if c == '': # EOF
return '\000'
if c == '\r':
return '\n'
return ' '
def _peek(self):
self.theLookahead = self._get()
return self.theLookahead
def _next(self):
"""get the next character, excluding comments. peek() is used to see
if a '/' is followed by a '/' or '*'.
"""
c = self._get()
if c == '/':
p = self._peek()
if p == '/':
c = self._get()
while c > '\n':
c = self._get()
return c
if p == '*':
c = self._get()
while 1:
c = self._get()
if c == '*':
if self._peek() == '/':
self._get()
return ' '
if c == '\000':
raise UnterminatedComment()
return c
def _action(self, action):
"""do something! What you do is determined by the argument:
1 Output A. Copy B to A. Get the next B.
2 Copy B to A. Get the next B. (Delete A).
3 Get the next B. (Delete B).
action treats a string as a single character. Wow!
action recognizes a regular expression if it is preceded by ( or , or =.
"""
if action <= 1:
self._outA()
if action <= 2:
self.theA = self.theB
if self.theA == "'" or self.theA == '"':
while 1:
self._outA()
self.theA = self._get()
if self.theA == self.theB:
break
if self.theA <= '\n':
raise UnterminatedStringLiteral()
if self.theA == '\\':
self._outA()
self.theA = self._get()
if action <= 3:
self.theB = self._next()
if self.theB == '/' and (self.theA == '(' or self.theA == ',' or
self.theA == '=' or self.theA == ':' or
self.theA == '[' or self.theA == '?' or
self.theA == '!' or self.theA == '&' or
self.theA == '|'):
self._outA()
self._outB()
while 1:
self.theA = self._get()
if self.theA == '/':
break
elif self.theA == '\\':
self._outA()
self.theA = self._get()
elif self.theA <= '\n':
raise UnterminatedRegularExpression()
self._outA()
self.theB = self._next()
def _jsmin(self):
"""Copy the input to the output, deleting the characters which are
insignificant to JavaScript. Comments will be removed. Tabs will be
replaced with spaces. Carriage returns will be replaced with linefeeds.
Most spaces and linefeeds will be removed.
"""
self.theA = '\n'
self._action(3)
while self.theA != '\000':
if self.theA == ' ':
if isAlphanum(self.theB):
self._action(1)
else:
self._action(2)
elif self.theA == '\n':
if self.theB in ['{', '[', '(', '+', '-']:
self._action(1)
elif self.theB == ' ':
self._action(3)
else:
if isAlphanum(self.theB):
self._action(1)
else:
self._action(2)
else:
if self.theB == ' ':
if isAlphanum(self.theA):
self._action(1)
else:
self._action(3)
elif self.theB == '\n':
if self.theA in ['}', ']', ')', '+', '-', '"', '\'']:
self._action(1)
else:
if isAlphanum(self.theA):
self._action(1)
else:
self._action(3)
else:
self._action(1)
def minify(self, instream, outstream):
self.instream = instream
self.outstream = outstream
self.theA = None
self.theB = None
self.theLookahead = None
self._jsmin()
self.instream.close()
if __name__ == '__main__':
import sys
jsm = JavascriptMinify()
jsm.minify(sys.stdin, sys.stdout)
| gpl-2.0 |
miaoski/stripTLD | notld.py | 1 | 1279 | # -*- coding: utf8 -*-
import sys
try:
import cPickle as pickle
except ImportError:
import pickle
with open('tld.pkl', 'rb') as f:
TLD_LIST = pickle.loads(f.read())
def isIP(s):
import re
IP_RE = re.compile(r'^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$')
return IP_RE.match(s) is not None
def trimTLD(domain):
"www.some-domain.com -> www.some-domain"
global TLD_LIST
if isIP(domain):
return domain
if domain.find('.') == -1:
return domain
xs = domain.split('.')
cc = TLD_LIST
while True:
if xs[-1] not in cc:
return '.'.join(xs)
ld = xs.pop()
cc = cc[ld]
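# Sketch of the tld.pkl structure this walk assumes (a nested dict keyed by
# domain labels from the right; illustrative, not the real pickle):
#   {'tw': {'com': {}}, 'uk': {'co': {}}, 'com': {}}
# With that trie, 'class.ruten.com.tw' pops 'tw' then 'com' and the function
# returns 'class.ruten'.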
def midDomain(domain):
"www.some-domain.com -> some-domain"
n = trimTLD(domain)
if n.find('.') == -1:
return n
else:
return n.split('.')[-1]
def runSelfTest():
testDomains = [
'a.c.appier.net',
'view.atdmt.com',
'api.facebook.com',
'class.ruten.com.tw',
'ajax.googleapis.com',
'test.co.uk',
'dic.yahoo.jp',
'www.test.bungaku.ac.jp',
'test.1.bg',
'secondlevel.bg',
'myhotel.gonohe.aomori.jp',
]
for i in testDomains:
print i + '\t' + trimTLD(i) + '\t' + midDomain(i)
if __name__ == '__main__':
import sys
for x in sys.stdin:
i = x.strip()
print i + '\t' + trimTLD(i) + '\t' + midDomain(i)
| mit |
cluckmaster/MissionPlanner | Lib/profile.py | 50 | 24085 | #! /usr/bin/env python
#
# Class for profiling python code. rev 1.0 6/2/94
#
# Based on prior profile module by Sjoerd Mullender...
# which was hacked somewhat by: Guido van Rossum
"""Class for profiling Python code."""
# Copyright 1994, by InfoSeek Corporation, all rights reserved.
# Written by James Roskind
#
# Permission to use, copy, modify, and distribute this Python software
# and its associated documentation for any purpose (subject to the
# restriction in the following sentence) without fee is hereby granted,
# provided that the above copyright notice appears in all copies, and
# that both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of InfoSeek not be used in
# advertising or publicity pertaining to distribution of the software
# without specific, written prior permission. This permission is
# explicitly restricted to the copying and modification of the software
# to remain in Python, compiled Python, or other languages (such as C)
# wherein the modified or derived code is exclusively imported into a
# Python module.
#
# INFOSEEK CORPORATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL INFOSEEK CORPORATION BE LIABLE FOR ANY
# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import sys
import os
import time
import marshal
from optparse import OptionParser
__all__ = ["run", "runctx", "help", "Profile"]
# Sample integer timer for use with the trace_dispatch_i dispatcher:
#i_count = 0
#def integer_timer():
# global i_count
# i_count = i_count + 1
# return i_count
#itimes = integer_timer # replace with C coded timer returning integers
#**************************************************************************
# The following are the static member functions for the profiler class
# Note that an instance of Profile() is *not* needed to call them.
#**************************************************************************
def run(statement, filename=None, sort=-1):
"""Run statement under profiler optionally saving results in filename
This function takes a single argument that can be passed to the
"exec" statement, and an optional file name. In all cases this
routine attempts to "exec" its first argument and gather profiling
statistics from the execution. If no file name is present, then this
function automatically prints a simple profiling report, sorted by the
standard name string (file/line/function-name) that is presented in
each line.
"""
prof = Profile()
try:
prof = prof.run(statement)
except SystemExit:
pass
if filename is not None:
prof.dump_stats(filename)
else:
return prof.print_stats(sort)
def runctx(statement, globals, locals, filename=None, sort=-1):
"""Run statement under profiler, supplying your own globals and locals,
optionally saving results in filename.
statement and filename have the same semantics as profile.run
"""
prof = Profile()
try:
prof = prof.runctx(statement, globals, locals)
except SystemExit:
pass
if filename is not None:
prof.dump_stats(filename)
else:
return prof.print_stats(sort)
# Backwards compatibility.
def help():
print "Documentation for the profile module can be found "
print "in the Python Library Reference, section 'The Python Profiler'."
if hasattr(os, "times"):
def _get_time_times(timer=os.times):
t = timer()
return t[0] + t[1]
# Using getrusage(3) is better than clock(3) if available:
# on some systems (e.g. FreeBSD), getrusage has a higher resolution
# Furthermore, on a POSIX system, clock(3) returns microseconds, which
# wrap around after 36min.
_has_res = 0
try:
import resource
resgetrusage = lambda: resource.getrusage(resource.RUSAGE_SELF)
def _get_time_resource(timer=resgetrusage):
t = timer()
return t[0] + t[1]
_has_res = 1
except ImportError:
pass
class Profile:
"""Profiler class.
self.cur is always a tuple. Each such tuple corresponds to a stack
frame that is currently active (self.cur[-2]). The following are the
definitions of its members. We use this external "parallel stack" to
avoid contaminating the program that we are profiling. (old profiler
used to write into the frames local dictionary!!) Derived classes
can change the definition of some entries, as long as they leave
[-2:] intact (frame and previous tuple). In case an internal error is
detected, the -3 element is used as the function name.
[ 0] = Time that needs to be charged to the parent frame's function.
It is used so that a function call will not have to access the
timing data for the parent frame.
[ 1] = Total time spent in this frame's function, excluding time in
subfunctions (this latter is tallied in cur[2]).
[ 2] = Total time spent in subfunctions, excluding time executing the
frame's function (this latter is tallied in cur[1]).
[-3] = Name of the function that corresponds to this frame.
[-2] = Actual frame that we correspond to (used to sync exception handling).
[-1] = Our parent 6-tuple (corresponds to frame.f_back).
Timing data for each function is stored as a 5-tuple in the dictionary
self.timings[]. The index is always the name stored in self.cur[-3].
The following are the definitions of the members:
[0] = The number of times this function was called, not counting direct
or indirect recursion,
[1] = Number of times this function appears on the stack, minus one
[2] = Total time spent internal to this function
[3] = Cumulative time that this function was present on the stack. In
non-recursive functions, this is the total execution time from start
to finish of each invocation of a function, including time spent in
all subfunctions.
[4] = A dictionary indicating for each function name, the number of times
it was called by us.
"""
bias = 0 # calibration constant
def __init__(self, timer=None, bias=None):
self.timings = {}
self.cur = None
self.cmd = ""
self.c_func_name = ""
if bias is None:
bias = self.bias
self.bias = bias # Materialize in local dict for lookup speed.
if not timer:
if _has_res:
self.timer = resgetrusage
self.dispatcher = self.trace_dispatch
self.get_time = _get_time_resource
elif hasattr(time, 'clock'):
self.timer = self.get_time = time.clock
self.dispatcher = self.trace_dispatch_i
elif hasattr(os, 'times'):
self.timer = os.times
self.dispatcher = self.trace_dispatch
self.get_time = _get_time_times
else:
self.timer = self.get_time = time.time
self.dispatcher = self.trace_dispatch_i
else:
self.timer = timer
t = self.timer() # test out timer function
try:
length = len(t)
except TypeError:
self.get_time = timer
self.dispatcher = self.trace_dispatch_i
else:
if length == 2:
self.dispatcher = self.trace_dispatch
else:
self.dispatcher = self.trace_dispatch_l
# This get_time() implementation needs to be defined
# here to capture the passed-in timer in the parameter
# list (for performance). Note that we can't assume
# the timer() result contains two values in all
# cases.
def get_time_timer(timer=timer, sum=sum):
return sum(timer())
self.get_time = get_time_timer
self.t = self.get_time()
self.simulate_call('profiler')
# Heavily optimized dispatch routine for os.times() timer
def trace_dispatch(self, frame, event, arg):
timer = self.timer
t = timer()
t = t[0] + t[1] - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame,t):
t = timer()
self.t = t[0] + t[1]
else:
r = timer()
self.t = r[0] + r[1] - t # put back unrecorded delta
# Dispatch routine for best timer program (return = scalar, fastest if
# an integer but float works too -- and time.clock() relies on that).
def trace_dispatch_i(self, frame, event, arg):
timer = self.timer
t = timer() - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame, t):
self.t = timer()
else:
self.t = timer() - t # put back unrecorded delta
# Dispatch routine for macintosh (timer returns time in ticks of
# 1/60th second)
def trace_dispatch_mac(self, frame, event, arg):
timer = self.timer
t = timer()/60.0 - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame, t):
self.t = timer()/60.0
else:
self.t = timer()/60.0 - t # put back unrecorded delta
# SLOW generic dispatch routine for timer returning lists of numbers
def trace_dispatch_l(self, frame, event, arg):
get_time = self.get_time
t = get_time() - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame, t):
self.t = get_time()
else:
self.t = get_time() - t # put back unrecorded delta
# In the event handlers, the first 3 elements of self.cur are unpacked
# into vrbls w/ 3-letter names. The last two characters are meant to be
# mnemonic:
# _pt self.cur[0] "parent time" time to be charged to parent frame
# _it self.cur[1] "internal time" time spent directly in the function
# _et self.cur[2] "external time" time spent in subfunctions
def trace_dispatch_exception(self, frame, t):
rpt, rit, ret, rfn, rframe, rcur = self.cur
if (rframe is not frame) and rcur:
return self.trace_dispatch_return(rframe, t)
self.cur = rpt, rit+t, ret, rfn, rframe, rcur
return 1
def trace_dispatch_call(self, frame, t):
if self.cur and frame.f_back is not self.cur[-2]:
rpt, rit, ret, rfn, rframe, rcur = self.cur
if not isinstance(rframe, Profile.fake_frame):
assert rframe.f_back is frame.f_back, ("Bad call", rfn,
rframe, rframe.f_back,
frame, frame.f_back)
self.trace_dispatch_return(rframe, 0)
assert (self.cur is None or \
frame.f_back is self.cur[-2]), ("Bad call",
self.cur[-3])
fcode = frame.f_code
fn = (fcode.co_filename, fcode.co_firstlineno, fcode.co_name)
self.cur = (t, 0, 0, fn, frame, self.cur)
timings = self.timings
if fn in timings:
cc, ns, tt, ct, callers = timings[fn]
timings[fn] = cc, ns + 1, tt, ct, callers
else:
timings[fn] = 0, 0, 0, 0, {}
return 1
def trace_dispatch_c_call (self, frame, t):
fn = ("", 0, self.c_func_name)
self.cur = (t, 0, 0, fn, frame, self.cur)
timings = self.timings
if fn in timings:
cc, ns, tt, ct, callers = timings[fn]
timings[fn] = cc, ns+1, tt, ct, callers
else:
timings[fn] = 0, 0, 0, 0, {}
return 1
def trace_dispatch_return(self, frame, t):
if frame is not self.cur[-2]:
assert frame is self.cur[-2].f_back, ("Bad return", self.cur[-3])
self.trace_dispatch_return(self.cur[-2], 0)
# Prefix "r" means part of the Returning or exiting frame.
# Prefix "p" means part of the Previous or Parent or older frame.
rpt, rit, ret, rfn, frame, rcur = self.cur
rit = rit + t
frame_total = rit + ret
ppt, pit, pet, pfn, pframe, pcur = rcur
self.cur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur
timings = self.timings
cc, ns, tt, ct, callers = timings[rfn]
if not ns:
# This is the only occurrence of the function on the stack.
# Else this is a (directly or indirectly) recursive call, and
# its cumulative time will get updated when the topmost call to
# it returns.
ct = ct + frame_total
cc = cc + 1
if pfn in callers:
callers[pfn] = callers[pfn] + 1 # hack: gather more
# stats such as the amount of time added to ct courtesy
# of this specific call, and the contribution to cc
# courtesy of this call.
else:
callers[pfn] = 1
timings[rfn] = cc, ns - 1, tt + rit, ct, callers
return 1
dispatch = {
"call": trace_dispatch_call,
"exception": trace_dispatch_exception,
"return": trace_dispatch_return,
"c_call": trace_dispatch_c_call,
"c_exception": trace_dispatch_return, # the C function returned
"c_return": trace_dispatch_return,
}
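# Sketch of the event flow (commentary, not part of the original module):
# once sys.setprofile(self.dispatcher) is active, each interpreter event is
# timed and routed through this table, e.g. a 'call' event ends up roughly as
# dispatch['call'](self, frame, elapsed_time - self.bias).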
# The next few functions play with self.cmd. By carefully preloading
# our parallel stack, we can force the profiled result to include
# an arbitrary string as the name of the calling function.
# We use self.cmd as that string, and the resulting stats look
# very nice :-).
def set_cmd(self, cmd):
if self.cur[-1]: return # already set
self.cmd = cmd
self.simulate_call(cmd)
class fake_code:
def __init__(self, filename, line, name):
self.co_filename = filename
self.co_line = line
self.co_name = name
self.co_firstlineno = 0
def __repr__(self):
return repr((self.co_filename, self.co_line, self.co_name))
class fake_frame:
def __init__(self, code, prior):
self.f_code = code
self.f_back = prior
def simulate_call(self, name):
code = self.fake_code('profile', 0, name)
if self.cur:
pframe = self.cur[-2]
else:
pframe = None
frame = self.fake_frame(code, pframe)
self.dispatch['call'](self, frame, 0)
# collect stats from pending stack, including getting final
# timings for self.cmd frame.
def simulate_cmd_complete(self):
get_time = self.get_time
t = get_time() - self.t
while self.cur[-1]:
# We *can* cause assertion errors here if
# dispatch_trace_return checks for a frame match!
self.dispatch['return'](self, self.cur[-2], t)
t = 0
self.t = get_time() - t
def print_stats(self, sort=-1):
import pstats
pstats.Stats(self).strip_dirs().sort_stats(sort). \
print_stats()
def dump_stats(self, file):
f = open(file, 'wb')
self.create_stats()
marshal.dump(self.stats, f)
f.close()
def create_stats(self):
self.simulate_cmd_complete()
self.snapshot_stats()
def snapshot_stats(self):
self.stats = {}
for func, (cc, ns, tt, ct, callers) in self.timings.iteritems():
callers = callers.copy()
nc = 0
for callcnt in callers.itervalues():
nc += callcnt
self.stats[func] = cc, nc, tt, ct, callers
# The following two methods can be called by clients to use
# a profiler to profile a statement, given as a string.
def run(self, cmd):
import __main__
dict = __main__.__dict__
return self.runctx(cmd, dict, dict)
def runctx(self, cmd, globals, locals):
self.set_cmd(cmd)
sys.setprofile(self.dispatcher)
try:
exec cmd in globals, locals
finally:
sys.setprofile(None)
return self
# This method is more useful to profile a single function call.
def runcall(self, func, *args, **kw):
self.set_cmd(repr(func))
sys.setprofile(self.dispatcher)
try:
return func(*args, **kw)
finally:
sys.setprofile(None)
#******************************************************************
# The following calculates the overhead for using a profiler. The
# problem is that it takes a fair amount of time for the profiler
# to stop the stopwatch (from the time it receives an event).
# Similarly, there is a delay from the time that the profiler
# re-starts the stopwatch before the user's code really gets to
# continue. The following code tries to measure the difference on
# a per-event basis.
#
# Note that this difference is only significant if there are a lot of
# events, and relatively little user code per event. For example,
# code with small functions will typically benefit from having the
# profiler calibrated for the current platform. This *could* be
# done on the fly during init() time, but it is not worth the
# effort. Also note that if too large a value specified, then
# execution time on some functions will actually appear as a
# negative number. It is *normal* for some functions (with very
# low call counts) to have such negative stats, even if the
# calibration figure is "correct."
#
# One alternative to profile-time calibration adjustments (i.e.,
# adding in the magic little delta during each event) is to track
# more carefully the number of events (and cumulatively, the number
# of events during sub functions) that are seen. If this were
# done, then the arithmetic could be done after the fact (i.e., at
# display time). Currently, we track only call/return events.
# These values can be deduced by examining the callees and callers
# vectors for each functions. Hence we *can* almost correct the
# internal time figure at print time (note that we currently don't
# track exception event processing counts). Unfortunately, there
# is currently no similar information for cumulative sub-function
# time. It would not be hard to "get all this info" at profiler
# time. Specifically, we would have to extend the tuples to keep
# counts of this in each frame, and then extend the defs of timing
# tuples to include the significant two figures. I'm a bit fearful
# that this additional feature will slow the heavily optimized
# event/time ratio (i.e., the profiler would run slower, fur a very
# low "value added" feature.)
#**************************************************************
def calibrate(self, m, verbose=0):
if self.__class__ is not Profile:
raise TypeError("Subclasses must override .calibrate().")
saved_bias = self.bias
self.bias = 0
try:
return self._calibrate_inner(m, verbose)
finally:
self.bias = saved_bias
def _calibrate_inner(self, m, verbose):
get_time = self.get_time
# Set up a test case to be run with and without profiling. Include
# lots of calls, because we're trying to quantify stopwatch overhead.
# Do not raise any exceptions, though, because we want to know
# exactly how many profile events are generated (one call event, +
# one return event, per Python-level call).
def f1(n):
for i in range(n):
x = 1
def f(m, f1=f1):
for i in range(m):
f1(100)
f(m) # warm up the cache
# elapsed_noprofile <- time f(m) takes without profiling.
t0 = get_time()
f(m)
t1 = get_time()
elapsed_noprofile = t1 - t0
if verbose:
print "elapsed time without profiling =", elapsed_noprofile
# elapsed_profile <- time f(m) takes with profiling. The difference
# is profiling overhead, only some of which the profiler subtracts
# out on its own.
p = Profile()
t0 = get_time()
p.runctx('f(m)', globals(), locals())
t1 = get_time()
elapsed_profile = t1 - t0
if verbose:
print "elapsed time with profiling =", elapsed_profile
# reported_time <- "CPU seconds" the profiler charged to f and f1.
total_calls = 0.0
reported_time = 0.0
for (filename, line, funcname), (cc, ns, tt, ct, callers) in \
p.timings.items():
if funcname in ("f", "f1"):
total_calls += cc
reported_time += tt
if verbose:
print "'CPU seconds' profiler reported =", reported_time
print "total # calls =", total_calls
if total_calls != m + 1:
raise ValueError("internal error: total calls = %d" % total_calls)
# reported_time - elapsed_noprofile = overhead the profiler wasn't
# able to measure. Divide by twice the number of calls (since there
# are two profiler events per call in this test) to get the hidden
# overhead per event.
mean = (reported_time - elapsed_noprofile) / 2.0 / total_calls
if verbose:
print "mean stopwatch overhead per profile event =", mean
return mean
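# Typical calibration workflow (a sketch; the constant varies per machine):
#   pr = Profile()
#   print pr.calibrate(10000)    # repeat a few times, pick a stable value
#   Profile.bias = 1.2e-05       # hypothetical measured value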
#****************************************************************************
def Stats(*args):
print 'Report generating functions are in the "pstats" module\a'
def main():
usage = "profile.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('-o', '--outfile', dest="outfile",
help="Save stats to <outfile>", default=None)
parser.add_option('-s', '--sort', dest="sort",
help="Sort order when printing to stdout, based on pstats.Stats class",
default=-1)
if not sys.argv[1:]:
parser.print_usage()
sys.exit(2)
(options, args) = parser.parse_args()
sys.argv[:] = args
if len(args) > 0:
progname = args[0]
sys.path.insert(0, os.path.dirname(progname))
with open(progname, 'rb') as fp:
code = compile(fp.read(), progname, 'exec')
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
}
runctx(code, globs, None, options.outfile, options.sort)
else:
parser.print_usage()
return parser
# When invoked as main program, invoke the profiler on a script
if __name__ == '__main__':
main()
| gpl-3.0 |
mverrilli/python_snippets | sql_functions.py | 1 | 6181 | import re
def regexp_flags(regexp_modifier=''):
flags = 0
set_unicode = True
for c in regexp_modifier:
if c == 'b':
# Treat strings as binary octets rather than UTF-8 characters.
set_unicode = False
elif c == 'c':
# Forces the match to be case sensitive.
# This is the default, no flag setting needed
pass
elif c == 'i':
# Forces the match to be case insensitive.
flags |= re.IGNORECASE
elif c == 'm':
# Treats the string being matched as multiple lines. With this
# modifier, the start of line (^) and end of line ($) regular
# expression operators match line breaks (\n) within the string.
# Ordinarily, these operators only match the start and end of the
# string.
flags |= re.MULTILINE
elif c == 'n':
# Allows the single character regular expression operator (.) to
# match a newline (\n). Normally, the . operator will match any
# character except a newline.
flags |= re.DOTALL
elif c == 'x':
# Allows you to document your regular expressions. It causes all
# unescaped space characters and comments in the regular expression
# to be ignored. Comments start with a hash character (#) and end
# with a newline. All spaces in the regular expression that you
# want to be matched in strings must be escaped with a backslash
# (\) character.
flags |= re.VERBOSE
if set_unicode:
flags |= re.UNICODE
return flags
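# Example (illustrative): regexp_flags('in') evaluates to
# re.IGNORECASE | re.DOTALL | re.UNICODE -- unicode stays on unless the 'b'
# modifier is supplied.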
def sql_regexp_substr(txt, pattern, position=1, occurrence=1, regexp_modifier='c', captured_subexp=0):
occurrence = 1 if occurrence < 1 else occurrence
flags = regexp_flags(regexp_modifier)
rx = re.compile(pattern, flags)
matches = rx.finditer(txt, position-1)
cnt = 0
for match in matches:
cnt += 1
if occurrence == cnt:
try:
return match.group(captured_subexp)
except IndexError:
return ''
return ''
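# Example: sql_regexp_substr('one two three', r'\w+', occurrence=2)
# returns 'two'.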
def sql_regexp_count(txt, pattern, position=1, regexp_modifier='c'):
flags = regexp_flags(regexp_modifier)
rx = re.compile(pattern, flags)
matches = rx.findall(txt, position-1)
return len(matches)
def sql_regexp_like(txt, pattern, regexp_modifier='c'):
flags = regexp_flags(regexp_modifier)
rx = re.compile(pattern, flags)
matches = rx.finditer(txt)
for match in matches:
return True
return False
def sql_regexp_instr( txt, pattern, position=1, occurrence=1, return_position=0, regexp_modifier='c', captured_subexp=0):
occurrence = 1 if occurrence < 1 else occurrence
flags = regexp_flags(regexp_modifier)
rx = re.compile(pattern, flags)
matches = rx.finditer(txt, position-1)
cnt = 0
for match in matches:
cnt += 1
if occurrence == cnt:
try:
if return_position > 0:
return match.end(captured_subexp) + return_position
else:
return match.start(captured_subexp) + 1
except IndexError:
return 0
return 0
def sql_regexp_replace(txt, pattern, replacement='', position=1, occurrence=1, regexp_modifier='c'):
# This earlier draft referenced undefined names (pattern, matches); fixed
# minimally here so it runs. It is shadowed by the fuller definition below,
# which replaces a specific occurrence rather than the first N matches.
occurrence = 1 if occurrence < 1 else occurrence
flags = regexp_flags(regexp_modifier)
rx = re.compile(pattern, flags)
return txt[0:position-1] + rx.sub(replacement, txt[position-1:], occurrence)
def sql_regexp_replace( txt, pattern, replacement='', position=1, occurrence=0, regexp_modifier='c'):
class ReplWrapper(object):
def __init__(self, replacement, occurrence):
self.count = 0
self.replacement = replacement
self.occurrence = occurrence
def repl(self, match):
self.count += 1
if self.occurrence == 0 or self.occurrence == self.count:
return match.expand(self.replacement)
else:
return match.group(0)
occurrence = 0 if occurrence < 0 else occurrence
flags = regexp_flags(regexp_modifier)
rx = re.compile(pattern, flags)
replw = ReplWrapper(replacement, occurrence)
return txt[0:position-1] + rx.sub(replw.repl, txt[position-1:])
def sql_left(txt, i):
return txt[:i]
def sql_right(txt, i):
return txt[i-1:]
def sql_split_part(txt, delim, field):
try:
return txt.split(delim)[field-1]
except IndexError:
return ''
def sql_substr(txt, pos, extent=None):
if extent == 0:
return ''
elif extent is not None:
return txt[pos-1:pos-1+extent]
else:
return txt[pos-1:]
def sql_instr(txt, subtxt, pos=1, occurrence=1):
cnt = 0
while cnt < occurrence:
idx = txt.find(subtxt, pos - 1)
if idx == -1:
return 0
cnt += 1
pos = idx + 2 # pos is used starting at 1, plus need the next char
return idx + 1
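# Example: sql_instr('abcabc', 'b', occurrence=2) returns 5
# (positions are 1-based, as in the SQL function being emulated).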
def sql_concat(txt1, txt2):
return ('' if txt1 is None else txt1) + ('' if txt2 is None else txt2)
def sql_nvl(expr1, expr2):
if expr1 is None or expr1 == '':
return expr2
return expr1
def sql_nvl2(expr1, expr2, expr3):
if expr1 is None or expr1 == '':
return expr3
return expr2
def sql_null_if_zero(expr):
if expr is None or expr == 0:
return ''
return expr
def sql_zero_if_null(expr):
if expr is None or expr == '':
return 0
return expr
def sql_coalesce(*argv):
for arg in argv:
if arg is not None and arg != '':
return arg
return ''
def sql_decode(expr, *argv):
for test_expr, retval in zip(argv[::2], argv[1::2]):
if expr == test_expr:
return retval
if len(argv) % 2 == 1:
return argv[-1]
return ''
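# Example: sql_decode('b', 'a', 1, 'b', 2, 0) returns 2; with no match the
# odd trailing argument is the default, e.g. sql_decode('z', 'a', 1, 0)
# returns 0.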
| mit |
taaviteska/django | tests/annotations/models.py | 90 | 2591 | from django.db import models
class Author(models.Model):
name = models.CharField(max_length=100)
age = models.IntegerField()
friends = models.ManyToManyField('self', blank=True)
def __str__(self):
return self.name
class Publisher(models.Model):
name = models.CharField(max_length=255)
num_awards = models.IntegerField()
def __str__(self):
return self.name
class Book(models.Model):
isbn = models.CharField(max_length=9)
name = models.CharField(max_length=255)
pages = models.IntegerField()
rating = models.FloatField()
price = models.DecimalField(decimal_places=2, max_digits=6)
authors = models.ManyToManyField(Author)
contact = models.ForeignKey(Author, models.CASCADE, related_name='book_contact_set')
publisher = models.ForeignKey(Publisher, models.CASCADE)
pubdate = models.DateField()
def __str__(self):
return self.name
class Store(models.Model):
name = models.CharField(max_length=255)
books = models.ManyToManyField(Book)
original_opening = models.DateTimeField()
friday_night_closing = models.TimeField()
def __str__(self):
return self.name
class DepartmentStore(Store):
chain = models.CharField(max_length=255)
def __str__(self):
return '%s - %s ' % (self.chain, self.name)
class Employee(models.Model):
# The order of these fields matter, do not change. Certain backends
# rely on field ordering to perform database conversions, and this
# model helps to test that.
first_name = models.CharField(max_length=20)
manager = models.BooleanField(default=False)
last_name = models.CharField(max_length=20)
store = models.ForeignKey(Store, models.CASCADE)
age = models.IntegerField()
salary = models.DecimalField(max_digits=8, decimal_places=2)
def __str__(self):
return '%s %s' % (self.first_name, self.last_name)
class Company(models.Model):
name = models.CharField(max_length=200)
motto = models.CharField(max_length=200, null=True, blank=True)
ticker_name = models.CharField(max_length=10, null=True, blank=True)
description = models.CharField(max_length=200, null=True, blank=True)
def __str__(self):
return 'Company(name=%s, motto=%s, ticker_name=%s, description=%s)' % (
self.name, self.motto, self.ticker_name, self.description,
)
class Ticket(models.Model):
active_at = models.DateTimeField()
duration = models.DurationField()
def __str__(self):
return '{} - {}'.format(self.active_at, self.duration)
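# Illustrative query these test models support (a sketch, not part of the
# original test fixtures):
#   from django.db.models import Count
#   Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__gt=1)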
| bsd-3-clause |
EmanueleCannizzaro/scons | test/LoadableModule.py | 1 | 3712 | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/LoadableModule.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import os
import sys
import TestCmd
import TestSCons
dll_ = TestSCons.dll_
_dll = TestSCons._dll
test = TestSCons.TestSCons()
# Some systems apparently need -ldl on the link line, others don't.
no_dl_lib = "env.Program(target = 'dlopenprog', source = 'dlopenprog.c')"
use_dl_lib = "env.Program(target = 'dlopenprog', source = 'dlopenprog.c', LIBS=['dl'])"
dlopen_line = {
'darwin' : no_dl_lib,
'freebsd4' : no_dl_lib,
'linux2' : use_dl_lib,
'linux3' : use_dl_lib,
}
platforms_with_dlopen = list(dlopen_line.keys())
test.write('SConstruct', """
env = Environment()
# dlopenprog tries to dynamically load foo1 at runtime using dlopen().
env.LoadableModule(target = 'foo1', source = 'f1.c')
""" + dlopen_line.get(sys.platform, ''))
test.write('f1.c', r"""
#include <stdio.h>
void
f1(void)
{
printf("f1.c\n");
fflush(stdout);
}
""")
dlopenprog = r"""
#include <errno.h>
#include <stdio.h>
#include <dlfcn.h>
extern int errno;
int
main(int argc, char *argv[])
{
argv[argc++] = "--";
void *foo1_shobj = dlopen("__foo1_name__", RTLD_NOW);
if(!foo1_shobj){
printf("Error loading foo1 '__foo1_name__' library at runtime, exiting.\n");
printf("%d\n", errno);
perror("");
return -1;
}
void (*f1)() = dlsym(foo1_shobj, "f1\0");
(*f1)();
printf("dlopenprog.c\n");
dlclose(foo1_shobj);
return 0;
}
"""
# Darwin dlopen()s a bundle named "foo1",
# other systems dlopen() a traditional libfoo1.so file.
foo1_name = {'darwin' : 'foo1'}.get(sys.platform[:6], dll_+'foo1'+_dll)
test.write('dlopenprog.c',
dlopenprog.replace('__foo1_name__', foo1_name))
test.run(arguments = '.',
stderr=TestSCons.noisy_ar,
match=TestSCons.match_re_dotall)
# TODO: Add new Intel-based Macs? Why are we only picking on Macs?
#if sys.platform.find('darwin') != -1:
# test.run(program='/usr/bin/file',
# arguments = "foo1",
# match = TestCmd.match_re,
# stdout="foo1: Mach-O bundle (ppc|i386)\n")
# My laptop prints "foo1: Mach-O 64-bit bundle x86_64"
if sys.platform in platforms_with_dlopen:
os.environ['LD_LIBRARY_PATH'] = test.workpath()
test.run(program = test.workpath('dlopenprog'),
stdout = "f1.c\ndlopenprog.c\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
lmprice/ansible | lib/ansible/plugins/action/service.py | 29 | 3802 | # (c) 2015, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleAction, AnsibleActionFail
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = False
UNUSED_PARAMS = {
'systemd': ['pattern', 'runlevel', 'sleep', 'arguments', 'args'],
}
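# Illustrative: when systemd is detected, task arguments such as 'sleep' or
# 'pattern' are stripped (with a warning) before the module is invoked.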
def run(self, tmp=None, task_vars=None):
''' handler for package operations '''
self._supports_check_mode = True
self._supports_async = True
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
module = self._task.args.get('use', 'auto').lower()
if module == 'auto':
try:
if self._task.delegate_to: # if we delegate, we should use delegated host's facts
module = self._templar.template("{{hostvars['%s']['ansible_facts']['service_mgr']}}" % self._task.delegate_to)
else:
module = self._templar.template('{{ansible_facts.service_mgr}}')
except Exception:
pass # could not get it from template!
try:
if module == 'auto':
facts = self._execute_module(module_name='setup', module_args=dict(gather_subset='!all', filter='ansible_service_mgr'), task_vars=task_vars)
self._display.debug("Facts %s" % facts)
module = facts.get('ansible_facts', {}).get('ansible_service_mgr', 'auto')
if not module or module == 'auto' or module not in self._shared_loader_obj.module_loader:
module = 'service'
if module != 'auto':
# run the 'service' module
new_module_args = self._task.args.copy()
if 'use' in new_module_args:
del new_module_args['use']
# for backwards compatibility
if 'state' in new_module_args and new_module_args['state'] == 'running':
self._display.deprecated(msg="state=running is deprecated. Please use state=started", version="2.7")
new_module_args['state'] = 'started'
if module in self.UNUSED_PARAMS:
for unused in self.UNUSED_PARAMS[module]:
if unused in new_module_args:
del new_module_args[unused]
self._display.warning('Ignoring "%s" as it is not used in "%s"' % (unused, module))
self._display.vvvv("Running %s" % module)
result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
else:
raise AnsibleActionFail('Could not detect which service manager to use. Try gathering facts or setting the "use" option.')
except AnsibleAction as e:
result.update(e.result)
finally:
if not self._task.async_val:
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
| gpl-3.0 |
kennethlove/django | tests/regressiontests/utils/decorators.py | 43 | 3919 | from django.http import HttpResponse
from django.template import Template, Context
from django.template.response import TemplateResponse
from django.test import TestCase, RequestFactory
from django.utils.decorators import decorator_from_middleware
class ProcessViewMiddleware(object):
def process_view(self, request, view_func, view_args, view_kwargs):
pass
process_view_dec = decorator_from_middleware(ProcessViewMiddleware)
@process_view_dec
def process_view(request):
return HttpResponse()
class ClassProcessView(object):
def __call__(self, request):
return HttpResponse()
class_process_view = process_view_dec(ClassProcessView())
class FullMiddleware(object):
def process_request(self, request):
request.process_request_reached = True
def process_view(self, request, view_func, view_args, view_kwargs):
request.process_view_reached = True
def process_template_response(self, request, response):
request.process_template_response_reached = True
return response
def process_response(self, request, response):
# This should never receive unrendered content.
request.process_response_content = response.content
request.process_response_reached = True
return response
full_dec = decorator_from_middleware(FullMiddleware)
class DecoratorFromMiddlewareTests(TestCase):
"""
Tests for view decorators created using
``django.utils.decorators.decorator_from_middleware``.
"""
rf = RequestFactory()
def test_process_view_middleware(self):
"""
Test a middleware that implements process_view.
"""
process_view(self.rf.get('/'))
def test_callable_process_view_middleware(self):
"""
Test a middleware that implements process_view, operating on a callable class.
"""
class_process_view(self.rf.get('/'))
def test_full_dec_normal(self):
"""
Test that all methods of middleware are called for normal HttpResponses
"""
@full_dec
def normal_view(request):
t = Template("Hello world")
return HttpResponse(t.render(Context({})))
request = self.rf.get('/')
response = normal_view(request)
self.assertTrue(getattr(request, 'process_request_reached', False))
self.assertTrue(getattr(request, 'process_view_reached', False))
# process_template_response must not be called for HttpResponse
self.assertFalse(getattr(request, 'process_template_response_reached', False))
self.assertTrue(getattr(request, 'process_response_reached', False))
def test_full_dec_templateresponse(self):
"""
Test that all methods of middleware are called for TemplateResponses in
the right sequence.
"""
@full_dec
def template_response_view(request):
t = Template("Hello world")
return TemplateResponse(request, t, {})
request = self.rf.get('/')
response = template_response_view(request)
self.assertTrue(getattr(request, 'process_request_reached', False))
self.assertTrue(getattr(request, 'process_view_reached', False))
self.assertTrue(getattr(request, 'process_template_response_reached', False))
# response must not be rendered yet.
self.assertFalse(response._is_rendered)
# process_response must not be called until after response is rendered,
# otherwise some decorators like csrf_protect and gzip_page will not
# work correctly. See #16004
self.assertFalse(getattr(request, 'process_response_reached', False))
response.render()
self.assertTrue(getattr(request, 'process_response_reached', False))
# Check that process_response saw the rendered content
self.assertEqual(request.process_response_content, "Hello world")
| bsd-3-clause |
luminusnetworks/flask-restplus | examples/todo_blueprint.py | 4 | 2559 | from flask import Flask, Blueprint
from flask.ext.restplus import Api, Resource, fields
api_v1 = Blueprint('api', __name__, url_prefix='/api/1')
api = Api(api_v1, version='1.0', title='Todo API',
description='A simple TODO API extracted from the original flask-restful example',
)
ns = api.namespace('todos', description='TODO operations')
TODOS = {
'todo1': {'task': 'build an API'},
'todo2': {'task': '?????'},
'todo3': {'task': 'profit!'},
}
todo = api.model('Todo', {
'task': fields.String(required=True, description='The task details')
})
listed_todo = api.model('ListedTodo', {
'id': fields.String(required=True, description='The todo ID'),
'todo': fields.Nested(todo, description='The Todo')
})
def abort_if_todo_doesnt_exist(todo_id):
if todo_id not in TODOS:
api.abort(404, "Todo {} doesn't exist".format(todo_id))
parser = api.parser()
parser.add_argument('task', type=str, required=True, help='The task details', location='form')
@ns.route('/<string:todo_id>')
@api.doc(responses={404: 'Todo not found'}, params={'todo_id': 'The Todo ID'})
class Todo(Resource):
'''Show a single todo item and lets you delete them'''
@api.doc(description='todo_id should be in {0}'.format(', '.join(TODOS.keys())))
@api.marshal_with(todo)
def get(self, todo_id):
'''Fetch a given resource'''
abort_if_todo_doesnt_exist(todo_id)
return TODOS[todo_id]
@api.doc(responses={204: 'Todo deleted'})
def delete(self, todo_id):
'''Delete a given resource'''
abort_if_todo_doesnt_exist(todo_id)
del TODOS[todo_id]
return '', 204
@api.doc(parser=parser)
@api.marshal_with(todo)
def put(self, todo_id):
'''Update a given resource'''
args = parser.parse_args()
task = {'task': args['task']}
TODOS[todo_id] = task
return task
@ns.route('/')
class TodoList(Resource):
'''Shows a list of all todos, and lets you POST to add new tasks'''
@api.marshal_list_with(listed_todo)
def get(self):
'''List all todos'''
return [{'id': id, 'todo': todo} for id, todo in TODOS.items()]
@api.doc(parser=parser)
@api.marshal_with(todo, code=201)
def post(self):
'''Create a todo'''
args = parser.parse_args()
todo_id = 'todo%d' % (len(TODOS) + 1)
TODOS[todo_id] = {'task': args['task']}
return TODOS[todo_id], 201
if __name__ == '__main__':
app = Flask(__name__)
app.register_blueprint(api_v1)
app.run(debug=True)
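# Illustrative request once the server is running (assumes Flask's default
# port 5000):
#   curl http://localhost:5000/api/1/todos/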
| mit |
haveal/googleads-python-lib | examples/adwords/v201502/basic_operations/get_campaigns_with_awql.py | 4 | 1943 | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all campaigns with AWQL.
To add a campaign, run add_campaign.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import time
from googleads import adwords
PAGE_SIZE = 100
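# Paging is done by appending LIMIT <offset>, <page size> to the AWQL, so the
# first request below effectively runs:
#   SELECT Id, Name, Status ORDER BY Name LIMIT 0, 100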
def main(client):
# Initialize appropriate service.
campaign_service = client.GetService('CampaignService', version='v201502')
# Construct query and get all campaigns.
offset = 0
query = 'SELECT Id, Name, Status ORDER BY Name'
more_pages = True
while more_pages:
page = campaign_service.query(query + ' LIMIT %s, %s' % (offset, PAGE_SIZE))
# Display results.
if 'entries' in page:
for campaign in page['entries']:
print ('Campaign with id \'%s\', name \'%s\', and status \'%s\' was '
'found.' % (campaign['id'], campaign['name'],
campaign['status']))
else:
print 'No campaigns were found.'
offset += PAGE_SIZE
more_pages = offset < int(page['totalNumEntries'])
time.sleep(1)
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client)
| apache-2.0 |
ChrisAntaki/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi00.py | 675 | 11294 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides the opening handshake processor for the WebSocket
protocol version HyBi 00.
Specification:
http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00
"""
# Note: request.connection.write/read are used in this module, even though
# mod_python document says that they should be used only in connection
# handlers. Unfortunately, we have no other options. For example,
# request.write/read are not suitable because they don't allow direct raw bytes
# writing/reading.
import logging
import re
import struct
from mod_pywebsocket import common
from mod_pywebsocket.stream import StreamHixie75
from mod_pywebsocket import util
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import check_request_line
from mod_pywebsocket.handshake._base import format_header
from mod_pywebsocket.handshake._base import get_default_port
from mod_pywebsocket.handshake._base import get_mandatory_header
from mod_pywebsocket.handshake._base import parse_host_header
from mod_pywebsocket.handshake._base import validate_mandatory_header
_MANDATORY_HEADERS = [
# key, expected value or None
[common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE_HIXIE75],
[common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE],
]
def _validate_subprotocol(subprotocol):
"""Checks if characters in subprotocol are in range between U+0020 and
U+007E. A value in the Sec-WebSocket-Protocol field need to satisfy this
requirement.
See the Section 4.1. Opening handshake of the spec.
"""
if not subprotocol:
raise HandshakeException('Invalid subprotocol name: empty')
# Parameter should be in the range U+0020 to U+007E.
for c in subprotocol:
if not 0x20 <= ord(c) <= 0x7e:
raise HandshakeException(
'Illegal character in subprotocol name: %r' % c)
def _check_header_lines(request, mandatory_headers):
check_request_line(request)
# The expected field names, and the meaning of their corresponding
# values, are as follows.
# |Upgrade| and |Connection|
for key, expected_value in mandatory_headers:
validate_mandatory_header(request, key, expected_value)
def _build_location(request):
"""Build WebSocket location for request."""
location_parts = []
if request.is_https():
location_parts.append(common.WEB_SOCKET_SECURE_SCHEME)
else:
location_parts.append(common.WEB_SOCKET_SCHEME)
location_parts.append('://')
host, port = parse_host_header(request)
connection_port = request.connection.local_addr[1]
if port != connection_port:
raise HandshakeException('Header/connection port mismatch: %d/%d' %
(port, connection_port))
location_parts.append(host)
if (port != get_default_port(request.is_https())):
location_parts.append(':')
location_parts.append(str(port))
location_parts.append(request.unparsed_uri)
return ''.join(location_parts)
class Handshaker(object):
"""Opening handshake processor for the WebSocket protocol version HyBi 00.
"""
def __init__(self, request, dispatcher):
"""Construct an instance.
Args:
request: mod_python request.
dispatcher: Dispatcher (dispatch.Dispatcher).
Handshaker will add attributes such as ws_resource in performing
handshake.
"""
self._logger = util.get_class_logger(self)
self._request = request
self._dispatcher = dispatcher
def do_handshake(self):
"""Perform WebSocket Handshake.
On _request, we set
ws_resource, ws_protocol, ws_location, ws_origin, ws_challenge,
ws_challenge_md5: WebSocket handshake information.
ws_stream: Frame generation/parsing class.
ws_version: Protocol version.
Raises:
HandshakeException: when any error happened in parsing the opening
handshake request.
"""
# 5.1 Reading the client's opening handshake.
# dispatcher sets it in self._request.
_check_header_lines(self._request, _MANDATORY_HEADERS)
self._set_resource()
self._set_subprotocol()
self._set_location()
self._set_origin()
self._set_challenge_response()
self._set_protocol_version()
self._dispatcher.do_extra_handshake(self._request)
self._send_handshake()
def _set_resource(self):
self._request.ws_resource = self._request.uri
def _set_subprotocol(self):
# |Sec-WebSocket-Protocol|
subprotocol = self._request.headers_in.get(
common.SEC_WEBSOCKET_PROTOCOL_HEADER)
if subprotocol is not None:
_validate_subprotocol(subprotocol)
self._request.ws_protocol = subprotocol
def _set_location(self):
# |Host|
host = self._request.headers_in.get(common.HOST_HEADER)
if host is not None:
self._request.ws_location = _build_location(self._request)
# TODO(ukai): check host is this host.
def _set_origin(self):
# |Origin|
origin = self._request.headers_in.get(common.ORIGIN_HEADER)
if origin is not None:
self._request.ws_origin = origin
def _set_protocol_version(self):
# |Sec-WebSocket-Draft|
draft = self._request.headers_in.get(common.SEC_WEBSOCKET_DRAFT_HEADER)
if draft is not None and draft != '0':
raise HandshakeException('Illegal value for %s: %s' %
(common.SEC_WEBSOCKET_DRAFT_HEADER,
draft))
self._logger.debug('Protocol version is HyBi 00')
self._request.ws_version = common.VERSION_HYBI00
self._request.ws_stream = StreamHixie75(self._request, True)
def _set_challenge_response(self):
# 5.2 4-8.
self._request.ws_challenge = self._get_challenge()
# 5.2 9. let /response/ be the MD5 finterprint of /challenge/
self._request.ws_challenge_md5 = util.md5_hash(
self._request.ws_challenge).digest()
self._logger.debug(
'Challenge: %r (%s)',
self._request.ws_challenge,
util.hexify(self._request.ws_challenge))
self._logger.debug(
'Challenge response: %r (%s)',
self._request.ws_challenge_md5,
util.hexify(self._request.ws_challenge_md5))
def _get_key_value(self, key_field):
key_value = get_mandatory_header(self._request, key_field)
self._logger.debug('%s: %r', key_field, key_value)
# 5.2 4. let /key-number_n/ be the digits (characters in the range
# U+0030 DIGIT ZERO (0) to U+0039 DIGIT NINE (9)) in /key_n/,
# interpreted as a base ten integer, ignoring all other characters
# in /key_n/.
try:
key_number = int(re.sub("\\D", "", key_value))
except:
raise HandshakeException('%s field contains no digit' % key_field)
# 5.2 5. let /spaces_n/ be the number of U+0020 SPACE characters
# in /key_n/.
spaces = re.subn(" ", "", key_value)[1]
if spaces == 0:
raise HandshakeException('%s field contains no space' % key_field)
self._logger.debug(
'%s: Key-number is %d and number of spaces is %d',
key_field, key_number, spaces)
# 5.2 6. if /key-number_n/ is not an integral multiple of /spaces_n/
# then abort the WebSocket connection.
if key_number % spaces != 0:
raise HandshakeException(
'%s: Key-number (%d) is not an integral multiple of spaces '
'(%d)' % (key_field, key_number, spaces))
# 5.2 7. let /part_n/ be /key-number_n/ divided by /spaces_n/.
part = key_number / spaces
self._logger.debug('%s: Part is %d', key_field, part)
return part
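# Worked example from the hixie-76/HyBi 00 draft: for the key
# '18x 6]8vM;54 *(5:  {   U1]8  z [  8' the digits form 1868545188 and there
# are 12 spaces, so part == 1868545188 / 12 == 155712099.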
def _get_challenge(self):
# 5.2 4-7.
key1 = self._get_key_value(common.SEC_WEBSOCKET_KEY1_HEADER)
key2 = self._get_key_value(common.SEC_WEBSOCKET_KEY2_HEADER)
# 5.2 8. let /challenge/ be the concatenation of /part_1/,
challenge = ''
challenge += struct.pack('!I', key1) # network byteorder int
challenge += struct.pack('!I', key2) # network byteorder int
challenge += self._request.connection.read(8)
return challenge
def _send_handshake(self):
response = []
# 5.2 10. send the following line.
response.append('HTTP/1.1 101 WebSocket Protocol Handshake\r\n')
# 5.2 11. send the following fields to the client.
response.append(format_header(
common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE_HIXIE75))
response.append(format_header(
common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
response.append(format_header(
common.SEC_WEBSOCKET_LOCATION_HEADER, self._request.ws_location))
response.append(format_header(
common.SEC_WEBSOCKET_ORIGIN_HEADER, self._request.ws_origin))
if self._request.ws_protocol:
response.append(format_header(
common.SEC_WEBSOCKET_PROTOCOL_HEADER,
self._request.ws_protocol))
# 5.2 12. send two bytes 0x0D 0x0A.
response.append('\r\n')
# 5.2 13. send /response/
response.append(self._request.ws_challenge_md5)
raw_response = ''.join(response)
self._request.connection.write(raw_response)
self._logger.debug('Sent server\'s opening handshake: %r',
raw_response)
# vi:sts=4 sw=4 et
| bsd-3-clause |
hamonikr-root/system-config-printer-gnome | optionwidgets.py | 1 | 9067 | ## system-config-printer
## Copyright (C) 2006, 2007, 2008, 2009 Red Hat, Inc.
## Copyright (C) 2006 Florian Festi <ffesti@redhat.com>
## Copyright (C) 2007, 2008, 2009 Tim Waugh <twaugh@redhat.com>
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import config
from gi.repository import Gtk
import cups
import gettext
gettext.install(domain=config.PACKAGE, localedir=config.localedir, unicode=True)
import ppdippstr
def OptionWidget(option, ppd, gui, tab_label=None):
"""Factory function"""
ui = option.ui
if (ui == cups.PPD_UI_BOOLEAN and
len (option.choices) != 2):
# This option is advertised as a Boolean but in fact has more
# than two choices.
print "Treating Boolean option %s as PickOne" % option.keyword
ui = cups.PPD_UI_PICKONE
if ui == cups.PPD_UI_BOOLEAN:
return OptionBool(option, ppd, gui, tab_label=tab_label)
elif ui == cups.PPD_UI_PICKONE:
return OptionPickOne(option, ppd, gui, tab_label=tab_label)
elif ui == cups.PPD_UI_PICKMANY:
return OptionPickMany(option, ppd, gui, tab_label=tab_label)
# ---------------------------------------------------------------------------
class Option:
def __init__(self, option, ppd, gui, tab_label=None):
self.option = option
self.ppd = ppd
self.gui = gui
self.enabled = True
self.tab_label = tab_label
vbox = Gtk.VBox()
self.btnConflict = Gtk.Button()
icon = Gtk.Image.new_from_stock(Gtk.STOCK_DIALOG_WARNING,
Gtk.IconSize.SMALL_TOOLBAR)
self.btnConflict.add(icon)
self.btnConflict.set_no_show_all(True) #avoid the button taking
# over control again
vbox.add(self.btnConflict) # vbox reserves space while button
#vbox.set_size_request(32, 28) # is hidden
self.conflictIcon = vbox
self.btnConflict.connect("clicked", self.on_btnConflict_clicked)
icon.show()
self.constraints = [c for c in ppd.constraints
if (c.option1 == option.keyword or
c.option2 == option.keyword)]
#for c in self.constraints:
# if not c.choice1 or not c.choice2:
# print c.option1, repr(c.choice1), c.option2, repr(c.choice2)
self.conflicts = set()
self.conflict_message = ""
def enable(self, enabled=True):
self.selector.set_sensitive (enabled)
self.enabled = enabled
def disable(self):
self.enable (False)
def is_enabled(self):
return self.enabled
def get_current_value(self):
raise NotImplementedError
def is_changed(self):
return self.get_current_value()!= self.option.defchoice
def writeback(self):
#print repr(self.option.keyword), repr(self.get_current_value())
if self.enabled:
self.ppd.markOption(self.option.keyword, self.get_current_value())
def checkConflicts(self, update_others=True):
value = self.get_current_value()
for constraint in self.constraints:
if constraint.option1 == self.option.keyword:
option2 = self.gui.options.get(constraint.option2, None)
choice1 = constraint.choice1
choice2 = constraint.choice2
else:
option2 = self.gui.options.get(constraint.option1, None)
choice1 = constraint.choice2
choice2 = constraint.choice1
if option2 is None: continue
def matches (constraint_choice, value):
if constraint_choice != '':
return constraint_choice == value
return value not in ['None', 'False', 'Off']
if (matches (choice1, value) and
matches (choice2, option2.get_current_value())):
# conflict
self.conflicts.add(constraint)
if update_others:
option2.checkConflicts(update_others=False)
elif constraint in self.conflicts:
# remove conflict
self.conflicts.remove(constraint)
option2.checkConflicts(update_others=False)
tooltip = [_("Conflicts with:")]
conflicting_options = dict()
for c in self.conflicts:
if c.option1 == self.option.keyword:
option = self.gui.options.get(c.option2)
else:
option = self.gui.options.get(c.option1)
conflicting_options[option.option.keyword] = option
for option in conflicting_options.values ():
opt = option.option.text
val = option.get_current_value ()
for choice in option.option.choices:
if choice['choice'] == val:
val = ppdippstr.ppd.get (choice['text'])
tooltip.append ("%s: %s" % (opt, val))
tooltip = "\n".join(tooltip)
self.conflict_message = tooltip # XXX more verbose
if self.conflicts:
self.btnConflict.set_tooltip_text (tooltip)
self.btnConflict.show()
else:
self.btnConflict.hide()
self.gui.option_changed(self)
return self.conflicts
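    # Worked example of the matching rule above (hypothetical PPD): a line
    #   UIConstraints: *Duplex *MediaType Transparency
    # gives choice1 == '' and choice2 == 'Transparency', so *any* Duplex
    # choice other than 'None'/'False'/'Off' conflicts while MediaType is
    # Transparency, and both affected widgets show their warning button.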
def on_change(self, widget):
self.checkConflicts()
def on_btnConflict_clicked(self, button):
parent = self.btnConflict
        while parent is not None and not isinstance (parent, Gtk.Window):
parent = parent.get_parent ()
dialog = Gtk.MessageDialog (parent,
Gtk.DialogFlags.DESTROY_WITH_PARENT |
Gtk.DialogFlags.MODAL,
Gtk.MessageType.WARNING,
Gtk.ButtonsType.CLOSE,
self.conflict_message)
dialog.run()
dialog.destroy()
# ---------------------------------------------------------------------------
class OptionBool(Option):
def __init__(self, option, ppd, gui, tab_label=None):
self.selector = Gtk.CheckButton(ppdippstr.ppd.get (option.text))
self.label = None
self.false = u"False" # hack to allow "None" instead of "False"
self.true = u"True"
for c in option.choices:
if c["choice"] in ("None", "False", "Off"):
self.false = c["choice"]
if c["choice"] in ("True", "On"):
self.true = c["choice"]
self.selector.set_active(option.defchoice == self.true)
self.selector.set_alignment(0.0, 0.5)
self.selector.connect("toggled", self.on_change)
Option.__init__(self, option, ppd, gui, tab_label=tab_label)
def get_current_value(self):
return (self.false, self.true)[self.selector.get_active()]
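    # Note: get_current_value() indexes the (false, true) pair with the
    # boolean from get_active(): False -> 0 -> self.false, True -> 1 -> self.true.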
# ---------------------------------------------------------------------------
class OptionPickOne(Option):
widget_name = "OptionPickOne"
def __init__(self, option, ppd, gui, tab_label=None):
self.selector = Gtk.ComboBoxText()
#self.selector.set_alignment(0.0, 0.5)
label = ppdippstr.ppd.get (option.text)
if not label.endswith (':'):
label += ':'
self.label = Gtk.Label(label=label)
self.label.set_alignment(0.0, 0.5)
selected = None
for nr, choice in enumerate(option.choices):
self.selector.append_text(ppdippstr.ppd.get (choice['text']))
if option.defchoice == choice['choice']:
selected = nr
if selected is not None:
self.selector.set_active(selected)
else:
            print ("%s unknown value: %s" % (option.text, option.defchoice))
self.selector.connect("changed", self.on_change)
Option.__init__(self, option, ppd, gui, tab_label=tab_label)
def get_current_value(self):
return self.option.choices[self.selector.get_active()]['choice']
# ---------------------------------------------------------------------------
class OptionPickMany(OptionPickOne):
widget_name = "OptionPickMany"
def __init__(self, option, ppd, gui, tab_label=None):
        raise NotImplementedError  # PickMany UI not implemented yet
Option.__init__(self, option, ppd, gui, tab_label=tab_label)
| gpl-2.0 |
titusfortner/selenium | third_party/py/googlestorage/publish_release.py | 6 | 6743 | """Script for publishing new versions of Selenium to cloud storage.
When you run this script, it will use OAuth 2.0 to authenticate with
Google Cloud Storage before attempting to upload any files. This script
will fail if the authenticated account does not have write access to the
indicated bucket.
By default, this script will use the adjacent client_secrets.json for
OAuth authentication; this may be changed with the --client_secrets
flag.
Example usage:
python publish_release.py \\
--client_secrets my_secrets.json \\
--project_id foo:bar \\
--bucket releases \\
--publish_version 1.50 \\
--publish path/to/file/one.txt \\
--publish path/to/file/two.txt \\
--acl "public-read"
This will publish
http://releases.storage.googleapis.com/1.50/one.txt
http://releases.storage.googleapis.com/1.50/two.txt
"""
import logging
import mimetypes
from optparse import OptionParser
import os
import sys
try:
import gflags
except ImportError:
print ('Could not import gflags\n'
'Download available at https://code.google.com/p/'
'python-gflags/downloads/\nor run `easy_install python-gflags`')
sys.exit(1)
try:
import httplib2
except ImportError:
print ('Could not import httplib2\n'
'Download available at https://code.google.com/p/httplib2/'
'downloads/\nor run `easy_install httplib2`')
sys.exit(1)
try:
import oauth2client.client as oauthclient
import oauth2client.file as oauthfile
import oauth2client.tools as oauthtools
except ImportError:
print ('Could not import oauth2client\n'
'Download available at https://code.google.com/p/'
'google-api-python-client/downloads\nor run '
'`easy_install oauth2client`')
sys.exit(1)
FLAGS = gflags.FLAGS
gflags.DEFINE_enum(
'logging_level', 'INFO', ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
gflags.DEFINE_string(
'client_secrets',
os.path.join(os.path.dirname(__file__), 'client_secrets.json'),
'The OAuth 2.0 client secrets file to use')
gflags.DEFINE_string(
'project_id', None, 'The Cloud Storage project id')
gflags.DEFINE_string(
'bucket', None, 'The bucket to upload to')
gflags.DEFINE_string(
'publish_version', None, 'The version being published (e.g. 1.23)')
gflags.DEFINE_multistring(
'publish', [],
'A file to publish to Cloud Storage; this may be specified multiple times')
gflags.DEFINE_enum(
'acl', 'private', ['private', 'public-read', 'authenticated-read'],
'The ACLs to assign to the uploaded files')
API_VERSION = '2'
DEFAULT_SECRETS_FILE = os.path.join(os.path.dirname(__file__),
'client_secrets.json')
OAUTH_CREDENTIALS_FILE = '.credentials.dat'
OAUTH_SCOPE = 'https://www.googleapis.com/auth/devstorage.full_control'
mimetypes.add_type("application/java-archive", ".jar")
class Error(Exception):
def __init__(self, status, message):
self.status = status
self.message = message
def __str__(self):
return '%s: %s' % (repr(self.status), repr(self.message))
def _upload(auth_http, project_id, bucket_name, file_path, object_name, acl):
"""Uploads a file to Google Cloud Storage.
Args:
auth_http: An authorized httplib2.Http instance.
project_id: The project to upload to.
bucket_name: The bucket to upload to.
file_path: Path to the file to upload.
object_name: The name within the bucket to upload to.
acl: The ACL to assign to the uploaded file.
"""
with open(file_path, 'rb') as f:
data = f.read()
content_type, content_encoding = mimetypes.guess_type(file_path)
headers = {
'x-goog-project-id': project_id,
'x-goog-api-version': API_VERSION,
'x-goog-acl': acl,
'Content-Length': '%d' % len(data)
}
if content_type: headers['Content-Type'] = content_type
  if content_encoding: headers['Content-Encoding'] = content_encoding
try:
response, content = auth_http.request(
'http://%s.storage.googleapis.com/%s' % (bucket_name, object_name),
method='PUT',
headers=headers,
body=data)
  except httplib2.ServerNotFoundError:
raise Error(404, 'Server not found.')
if response.status >= 300:
raise Error(response.status, response.reason)
return content
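# Hedged usage sketch; the project id, bucket and file paths below are
# placeholders, not real release artifacts:
#
#   auth_http = _authenticate(DEFAULT_SECRETS_FILE)
#   _upload(auth_http, 'foo:bar', 'releases', 'path/to/one.txt',
#           '1.50/one.txt', 'public-read')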
def _authenticate(secrets_file):
"""Runs the OAuth 2.0 installed application flow.
Returns:
An authorized httplib2.Http instance.
"""
flow = oauthclient.flow_from_clientsecrets(
secrets_file,
scope=OAUTH_SCOPE,
      message=('Failed to initialize OAuth 2.0 flow with secrets '
'file: %s' % secrets_file))
storage = oauthfile.Storage(OAUTH_CREDENTIALS_FILE)
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = oauthtools.run_flow(flow, storage, oauthtools.argparser.parse_args(args=[]))
http = httplib2.Http()
return credentials.authorize(http)
def main(argv):
try:
argv = FLAGS(argv)
except gflags.FlagsError as e:
    logging.error('%s\nUsage: %s ARGS\n%s', e, argv[0], FLAGS)
sys.exit(1)
numeric_level = getattr(logging, FLAGS.logging_level.upper())
if not isinstance(numeric_level, int):
logging.error('Invalid log level: %s' % FLAGS.logging_level)
sys.exit(1)
logging.basicConfig(level=numeric_level)
if FLAGS.logging_level == 'DEBUG':
httplib2.debuglevel = 1
def die(message):
logging.fatal(message)
sys.exit(2)
if FLAGS.client_secrets is None:
die('You must specify a client secrets file via --client_secrets')
if FLAGS.project_id is None:
die('You must specify a project ID via --project_id')
if not FLAGS.bucket:
die('You must specify a bucket via --bucket')
if FLAGS.publish_version is None:
die('You must specify a published version identifier via '
'--publish_version')
auth_http = _authenticate(FLAGS.client_secrets)
published = []
for f in FLAGS.publish:
object_name = '%s/%s' % (FLAGS.publish_version, os.path.basename(f))
logging.info('Publishing %s as %s', f, object_name)
_upload(auth_http, FLAGS.project_id, FLAGS.bucket, f, object_name,
FLAGS.acl)
published.append(object_name)
if published:
base_url = 'http://%s.storage.googleapis.com/' % FLAGS.bucket
logging.info('Published:\n %s' %
'\n '.join([base_url + p for p in published]))
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 |
bikong2/scikit-learn | sklearn/utils/optimize.py | 122 | 5674 | """
Our own implementation of the Newton algorithm
Unlike the scipy.optimize version, this version of the Newton conjugate
gradient solver uses only one function call to retrieve the
func value, the gradient value and a callable for the Hessian matvec
product. If the function call is very expensive (e.g. for logistic
regression with large design matrix), this approach gives very
significant speedups.
"""
# This is a modified file from scipy.optimize
# Original authors: Travis Oliphant, Eric Jones
# Modifications by Gael Varoquaux, Mathieu Blondel and Tom Dupre la Tour
# License: BSD
import numpy as np
import warnings
from scipy.optimize.linesearch import line_search_wolfe2, line_search_wolfe1
class _LineSearchError(RuntimeError):
pass
def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval,
**kwargs):
"""
Same as line_search_wolfe1, but fall back to line_search_wolfe2 if
suitable step length is not found, and raise an exception if a
suitable step length is not found.
Raises
------
_LineSearchError
If no suitable step size is found
"""
ret = line_search_wolfe1(f, fprime, xk, pk, gfk,
old_fval, old_old_fval,
**kwargs)
if ret[0] is None:
# line search failed: try different one.
ret = line_search_wolfe2(f, fprime, xk, pk, gfk,
old_fval, old_old_fval, **kwargs)
if ret[0] is None:
raise _LineSearchError()
return ret
def _cg(fhess_p, fgrad, maxiter, tol):
"""
    Solve the linear system 'fhess_p . xsupi = - fgrad' iteratively
    by conjugate gradient, yielding an approximate Newton search direction.
Parameters
----------
fhess_p : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient
fgrad : ndarray, shape (n_features,) or (n_features + 1,)
Gradient vector
maxiter : int
Number of CG iterations.
tol : float
Stopping criterion.
Returns
-------
xsupi : ndarray, shape (n_features,) or (n_features + 1,)
Estimated solution
"""
xsupi = np.zeros(len(fgrad), dtype=fgrad.dtype)
ri = fgrad
psupi = -ri
i = 0
dri0 = np.dot(ri, ri)
while i <= maxiter:
if np.sum(np.abs(ri)) <= tol:
break
Ap = fhess_p(psupi)
# check curvature
curv = np.dot(psupi, Ap)
if 0 <= curv <= 3 * np.finfo(np.float64).eps:
break
elif curv < 0:
if i > 0:
break
else:
# fall back to steepest descent direction
xsupi += dri0 / curv * psupi
break
alphai = dri0 / curv
xsupi += alphai * psupi
ri = ri + alphai * Ap
dri1 = np.dot(ri, ri)
betai = dri1 / dri0
psupi = -ri + betai * psupi
i = i + 1
dri0 = dri1 # update np.dot(ri,ri) for next time.
return xsupi
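# Sanity sketch (assumption: any symmetric positive-definite matrix can stand
# in for the Hessian matvec). The returned vector approximately solves
# fhess_p . x = - fgrad, i.e. it is a Newton search direction:
#
#   A = np.array([[3., 1.], [1., 2.]])
#   g = np.array([1., -2.])
#   x = _cg(lambda p: A.dot(p), g, maxiter=50, tol=1e-10)
#   # np.allclose(A.dot(x), -g) -> True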
def newton_cg(grad_hess, func, grad, x0, args=(), tol=1e-4,
maxiter=100, maxinner=200, line_search=True, warn=True):
"""
Minimization of scalar function of one or more variables using the
Newton-CG algorithm.
Parameters
----------
grad_hess : callable
Should return the gradient and a callable returning the matvec product
of the Hessian.
func : callable
Should return the value of the function.
    grad : callable
        Should return the gradient. This is used by the line search
        functions.
x0 : array of float
Initial guess.
    args : tuple, optional
        Arguments passed to grad_hess, func and grad.
tol : float
Stopping criterion. The iteration will stop when
``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
maxiter : int
Number of Newton iterations.
maxinner : int
Number of CG iterations.
    line_search : boolean
        Whether to use a line search or not.
    warn : boolean
        Whether to warn when the algorithm did not converge.
Returns
-------
xk : ndarray of float
Estimated minimum.
"""
x0 = np.asarray(x0).flatten()
xk = x0
k = 0
if line_search:
old_fval = func(x0, *args)
old_old_fval = None
# Outer loop: our Newton iteration
while k < maxiter:
# Compute a search direction pk by applying the CG method to
# del2 f(xk) p = - fgrad f(xk) starting from 0.
fgrad, fhess_p = grad_hess(xk, *args)
absgrad = np.abs(fgrad)
if np.max(absgrad) < tol:
break
maggrad = np.sum(absgrad)
eta = min([0.5, np.sqrt(maggrad)])
termcond = eta * maggrad
# Inner loop: solve the Newton update by conjugate gradient, to
# avoid inverting the Hessian
xsupi = _cg(fhess_p, fgrad, maxiter=maxinner, tol=termcond)
alphak = 1.0
if line_search:
try:
alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \
_line_search_wolfe12(func, grad, xk, xsupi, fgrad,
old_fval, old_old_fval, args=args)
except _LineSearchError:
warnings.warn('Line Search failed')
break
xk = xk + alphak * xsupi # upcast if necessary
k += 1
if warn and k >= maxiter:
warnings.warn("newton-cg failed to converge. Increase the "
"number of iterations.")
return xk, k
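if __name__ == '__main__':
    # Hedged demo, not part of scikit-learn's public API: minimize the
    # strictly convex quadratic f(x) = 0.5 * x'Ax - b'x, whose minimizer
    # solves A x = b, so the result can be checked against a direct solve.
    A = np.array([[3., 1.], [1., 2.]])
    b = np.array([1., -2.])

    def func(x):
        return 0.5 * x.dot(A).dot(x) - b.dot(x)

    def grad(x):
        return A.dot(x) - b

    def grad_hess(x):
        return grad(x), lambda p: A.dot(p)

    xk, n_iter = newton_cg(grad_hess, func, grad, x0=np.zeros(2))
    assert np.allclose(xk, np.linalg.solve(A, b), atol=1e-3)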
| bsd-3-clause |