repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringclasses 981 values | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15 values |
|---|---|---|---|---|---|
NLeSC/pyxenon | xenon/compat.py | 1 | 1807 | """
Define cross-platform methods.
"""
from pathlib import Path
import logging
import subprocess
import os
import sys
import site
import signal
from .create_keys import create_self_signed_cert
from .version import xenon_grpc_version
def find_xenon_grpc_jar():
"""Find the Xenon-GRPC jar-file, windows version."""
prefix = Path(sys.prefix)
user_prefix = Path(site.USER_BASE)
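    # Search both the interpreter/virtualenv prefix and the per-user site
    # prefix, since the jar may have been installed under either location.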
locations = [
prefix / 'lib',
prefix / 'local' / 'lib',
user_prefix / 'lib',
user_prefix / 'local' / 'lib',
]
for location in locations:
jar_file = location / 'xenon-grpc-{}.jar'.format(
xenon_grpc_version)
        if jar_file.exists():
            return str(jar_file)
return None
def kill_process(process):
"""Kill the process group associated with the given process. (posix)"""
logger = logging.getLogger('xenon')
logger.info('Terminating Xenon-GRPC server.')
os.kill(process.pid, signal.SIGINT)
process.wait()
def start_xenon_server(port=50051, disable_tls=False):
"""Start the server."""
jar_file = find_xenon_grpc_jar()
if not jar_file:
raise RuntimeError("Could not find 'xenon-grpc' jar file.")
cmd = ['java', '-jar', jar_file, '-p', str(port)]
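    # Unless TLS is disabled, generate a self-signed certificate on the fly and
    # pass the same certificate as both the server chain and the trusted client chain.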
if not disable_tls:
crt_file, key_file = create_self_signed_cert()
cmd.extend([
'--server-cert-chain', str(crt_file),
'--server-private-key', str(key_file),
'--client-cert-chain', str(crt_file)])
else:
crt_file = key_file = None
process = subprocess.Popen(
cmd,
bufsize=1,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return process, crt_file, key_file
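# Typical use (sketch): the returned handles are exactly what kill_process and a
# TLS-enabled client need later on.
#     process, crt_file, key_file = start_xenon_server()
#     ...  # talk to the gRPC server
#     kill_process(process)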
| apache-2.0 |
w1ll1am23/home-assistant | homeassistant/components/person/__init__.py | 3 | 16241 | """Support for tracking people."""
from __future__ import annotations
import logging
from typing import cast
import voluptuous as vol
from homeassistant.auth import EVENT_USER_REMOVED
from homeassistant.components import websocket_api
from homeassistant.components.device_tracker import (
ATTR_SOURCE_TYPE,
DOMAIN as DEVICE_TRACKER_DOMAIN,
SOURCE_TYPE_GPS,
)
from homeassistant.const import (
ATTR_EDITABLE,
ATTR_ENTITY_ID,
ATTR_GPS_ACCURACY,
ATTR_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_NAME,
CONF_ID,
CONF_NAME,
CONF_TYPE,
EVENT_HOMEASSISTANT_START,
SERVICE_RELOAD,
STATE_HOME,
STATE_NOT_HOME,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import (
Event,
HomeAssistant,
ServiceCall,
State,
callback,
split_entity_id,
)
from homeassistant.helpers import (
collection,
config_validation as cv,
entity_registry,
service,
)
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.storage import Store
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.loader import bind_hass
_LOGGER = logging.getLogger(__name__)
ATTR_SOURCE = "source"
ATTR_USER_ID = "user_id"
CONF_DEVICE_TRACKERS = "device_trackers"
CONF_USER_ID = "user_id"
CONF_PICTURE = "picture"
DOMAIN = "person"
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 2
# Device tracker states to ignore
IGNORE_STATES = (STATE_UNKNOWN, STATE_UNAVAILABLE)
PERSON_SCHEMA = vol.Schema(
{
vol.Required(CONF_ID): cv.string,
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_USER_ID): cv.string,
vol.Optional(CONF_DEVICE_TRACKERS, default=[]): vol.All(
cv.ensure_list, cv.entities_domain(DEVICE_TRACKER_DOMAIN)
),
vol.Optional(CONF_PICTURE): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
{
vol.Optional(DOMAIN, default=[]): vol.All(
cv.ensure_list, cv.remove_falsy, [PERSON_SCHEMA]
)
},
extra=vol.ALLOW_EXTRA,
)
@bind_hass
async def async_create_person(hass, name, *, user_id=None, device_trackers=None):
"""Create a new person."""
await hass.data[DOMAIN][1].async_create_item(
{
ATTR_NAME: name,
ATTR_USER_ID: user_id,
CONF_DEVICE_TRACKERS: device_trackers or [],
}
)
@bind_hass
async def async_add_user_device_tracker(
hass: HomeAssistant, user_id: str, device_tracker_entity_id: str
):
"""Add a device tracker to a person linked to a user."""
coll = cast(PersonStorageCollection, hass.data[DOMAIN][1])
for person in coll.async_items():
if person.get(ATTR_USER_ID) != user_id:
continue
device_trackers = person[CONF_DEVICE_TRACKERS]
if device_tracker_entity_id in device_trackers:
return
await coll.async_update_item(
person[collection.CONF_ID],
{CONF_DEVICE_TRACKERS: device_trackers + [device_tracker_entity_id]},
)
break
CREATE_FIELDS = {
vol.Required(CONF_NAME): vol.All(str, vol.Length(min=1)),
vol.Optional(CONF_USER_ID): vol.Any(str, None),
vol.Optional(CONF_DEVICE_TRACKERS, default=list): vol.All(
cv.ensure_list, cv.entities_domain(DEVICE_TRACKER_DOMAIN)
),
vol.Optional(CONF_PICTURE): vol.Any(str, None),
}
UPDATE_FIELDS = {
vol.Optional(CONF_NAME): vol.All(str, vol.Length(min=1)),
vol.Optional(CONF_USER_ID): vol.Any(str, None),
vol.Optional(CONF_DEVICE_TRACKERS, default=list): vol.All(
cv.ensure_list, cv.entities_domain(DEVICE_TRACKER_DOMAIN)
),
vol.Optional(CONF_PICTURE): vol.Any(str, None),
}
class PersonStore(Store):
"""Person storage."""
async def _async_migrate_func(self, old_version, old_data):
"""Migrate to the new version.
Migrate storage to use format of collection helper.
"""
return {"items": old_data["persons"]}
class PersonStorageCollection(collection.StorageCollection):
"""Person collection stored in storage."""
CREATE_SCHEMA = vol.Schema(CREATE_FIELDS)
UPDATE_SCHEMA = vol.Schema(UPDATE_FIELDS)
def __init__(
self,
store: Store,
logger: logging.Logger,
id_manager: collection.IDManager,
yaml_collection: collection.YamlCollection,
):
"""Initialize a person storage collection."""
super().__init__(store, logger, id_manager)
self.yaml_collection = yaml_collection
async def _async_load_data(self) -> dict | None:
"""Load the data.
A past bug caused onboarding to create invalid person objects.
This patches it up.
"""
data = await super()._async_load_data()
if data is None:
return data
for person in data["items"]:
if person[CONF_DEVICE_TRACKERS] is None:
person[CONF_DEVICE_TRACKERS] = []
return data
async def async_load(self) -> None:
"""Load the Storage collection."""
await super().async_load()
self.hass.bus.async_listen(
entity_registry.EVENT_ENTITY_REGISTRY_UPDATED, self._entity_registry_updated
)
async def _entity_registry_updated(self, event) -> None:
"""Handle entity registry updated."""
if event.data["action"] != "remove":
return
entity_id = event.data[ATTR_ENTITY_ID]
if split_entity_id(entity_id)[0] != "device_tracker":
return
for person in list(self.data.values()):
if entity_id not in person[CONF_DEVICE_TRACKERS]:
continue
await self.async_update_item(
person[collection.CONF_ID],
{
CONF_DEVICE_TRACKERS: [
devt
for devt in person[CONF_DEVICE_TRACKERS]
if devt != entity_id
]
},
)
async def _process_create_data(self, data: dict) -> dict:
"""Validate the config is valid."""
data = self.CREATE_SCHEMA(data)
user_id = data.get(CONF_USER_ID)
if user_id is not None:
await self._validate_user_id(user_id)
return data
@callback
def _get_suggested_id(self, info: dict) -> str:
"""Suggest an ID based on the config."""
return info[CONF_NAME]
async def _update_data(self, data: dict, update_data: dict) -> dict:
"""Return a new updated data object."""
update_data = self.UPDATE_SCHEMA(update_data)
user_id = update_data.get(CONF_USER_ID)
if user_id is not None and user_id != data.get(CONF_USER_ID):
await self._validate_user_id(user_id)
return {**data, **update_data}
async def _validate_user_id(self, user_id):
"""Validate the used user_id."""
if await self.hass.auth.async_get_user(user_id) is None:
raise ValueError("User does not exist")
for persons in (self.data.values(), self.yaml_collection.async_items()):
if any(person for person in persons if person.get(CONF_USER_ID) == user_id):
raise ValueError("User already taken")
async def filter_yaml_data(hass: HomeAssistantType, persons: list[dict]) -> list[dict]:
"""Validate YAML data that we can't validate via schema."""
filtered = []
person_invalid_user = []
for person_conf in persons:
user_id = person_conf.get(CONF_USER_ID)
if user_id is not None and await hass.auth.async_get_user(user_id) is None:
_LOGGER.error(
"Invalid user_id detected for person %s",
person_conf[collection.CONF_ID],
)
person_invalid_user.append(
f"- Person {person_conf[CONF_NAME]} (id: {person_conf[collection.CONF_ID]}) points at invalid user {user_id}"
)
continue
filtered.append(person_conf)
if person_invalid_user:
        invalid_user_list = "\n".join(person_invalid_user)
        hass.components.persistent_notification.async_create(
            f"""
The following persons point at invalid users:
{invalid_user_list}
            """,
"Invalid Person Configuration",
DOMAIN,
)
return filtered
async def async_setup(hass: HomeAssistantType, config: ConfigType):
"""Set up the person component."""
entity_component = EntityComponent(_LOGGER, DOMAIN, hass)
id_manager = collection.IDManager()
yaml_collection = collection.YamlCollection(
logging.getLogger(f"{__name__}.yaml_collection"), id_manager
)
storage_collection = PersonStorageCollection(
PersonStore(hass, STORAGE_VERSION, STORAGE_KEY),
logging.getLogger(f"{__name__}.storage_collection"),
id_manager,
yaml_collection,
)
    collection.sync_entity_lifecycle(
        hass, DOMAIN, DOMAIN, entity_component, yaml_collection, Person.from_yaml
    )
    collection.sync_entity_lifecycle(
        hass, DOMAIN, DOMAIN, entity_component, storage_collection, Person
    )
await yaml_collection.async_load(
await filter_yaml_data(hass, config.get(DOMAIN, []))
)
await storage_collection.async_load()
hass.data[DOMAIN] = (yaml_collection, storage_collection)
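    # Note: hass.data[DOMAIN] is the tuple (yaml, storage); the module-level
    # helpers above index [1] to reach the storage collection.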
collection.StorageCollectionWebsocket(
storage_collection, DOMAIN, DOMAIN, CREATE_FIELDS, UPDATE_FIELDS
).async_setup(hass, create_list=False)
websocket_api.async_register_command(hass, ws_list_person)
async def _handle_user_removed(event: Event) -> None:
"""Handle a user being removed."""
user_id = event.data[ATTR_USER_ID]
for person in storage_collection.async_items():
if person[CONF_USER_ID] == user_id:
await storage_collection.async_update_item(
person[CONF_ID], {CONF_USER_ID: None}
)
hass.bus.async_listen(EVENT_USER_REMOVED, _handle_user_removed)
async def async_reload_yaml(call: ServiceCall):
"""Reload YAML."""
conf = await entity_component.async_prepare_reload(skip_reset=True)
if conf is None:
return
await yaml_collection.async_load(
await filter_yaml_data(hass, conf.get(DOMAIN, []))
)
service.async_register_admin_service(
hass, DOMAIN, SERVICE_RELOAD, async_reload_yaml
)
return True
class Person(RestoreEntity):
"""Represent a tracked person."""
def __init__(self, config):
"""Set up person."""
self._config = config
self.editable = True
self._latitude = None
self._longitude = None
self._gps_accuracy = None
self._source = None
self._state = None
self._unsub_track_device = None
@classmethod
def from_yaml(cls, config):
"""Return entity instance initialized from yaml storage."""
person = cls(config)
person.editable = False
return person
@property
def name(self):
"""Return the name of the entity."""
return self._config[CONF_NAME]
@property
def entity_picture(self) -> str | None:
"""Return entity picture."""
return self._config.get(CONF_PICTURE)
@property
def should_poll(self):
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return False
@property
def state(self):
"""Return the state of the person."""
return self._state
@property
def extra_state_attributes(self):
"""Return the state attributes of the person."""
data = {ATTR_EDITABLE: self.editable, ATTR_ID: self.unique_id}
if self._latitude is not None:
data[ATTR_LATITUDE] = self._latitude
if self._longitude is not None:
data[ATTR_LONGITUDE] = self._longitude
if self._gps_accuracy is not None:
data[ATTR_GPS_ACCURACY] = self._gps_accuracy
if self._source is not None:
data[ATTR_SOURCE] = self._source
user_id = self._config.get(CONF_USER_ID)
if user_id is not None:
data[ATTR_USER_ID] = user_id
return data
@property
def unique_id(self):
"""Return a unique ID for the person."""
return self._config[CONF_ID]
async def async_added_to_hass(self):
"""Register device trackers."""
await super().async_added_to_hass()
state = await self.async_get_last_state()
if state:
self._parse_source_state(state)
if self.hass.is_running:
# Update person now if hass is already running.
await self.async_update_config(self._config)
else:
# Wait for hass start to not have race between person
# and device trackers finishing setup.
async def person_start_hass(now):
await self.async_update_config(self._config)
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, person_start_hass
)
async def async_update_config(self, config):
"""Handle when the config is updated."""
self._config = config
if self._unsub_track_device is not None:
self._unsub_track_device()
self._unsub_track_device = None
trackers = self._config[CONF_DEVICE_TRACKERS]
if trackers:
_LOGGER.debug("Subscribe to device trackers for %s", self.entity_id)
self._unsub_track_device = async_track_state_change_event(
self.hass, trackers, self._async_handle_tracker_update
)
self._update_state()
@callback
def _async_handle_tracker_update(self, event):
"""Handle the device tracker state changes."""
self._update_state()
@callback
def _update_state(self):
"""Update the state."""
latest_non_gps_home = latest_not_home = latest_gps = latest = None
for entity_id in self._config[CONF_DEVICE_TRACKERS]:
state = self.hass.states.get(entity_id)
if not state or state.state in IGNORE_STATES:
continue
if state.attributes.get(ATTR_SOURCE_TYPE) == SOURCE_TYPE_GPS:
latest_gps = _get_latest(latest_gps, state)
elif state.state == STATE_HOME:
latest_non_gps_home = _get_latest(latest_non_gps_home, state)
elif state.state == STATE_NOT_HOME:
latest_not_home = _get_latest(latest_not_home, state)
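        # Precedence: a non-GPS tracker reporting "home" wins, then the freshest
        # GPS fix, and only then a "not_home" report.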
if latest_non_gps_home:
latest = latest_non_gps_home
elif latest_gps:
latest = latest_gps
else:
latest = latest_not_home
if latest:
self._parse_source_state(latest)
else:
self._state = None
self._source = None
self._latitude = None
self._longitude = None
self._gps_accuracy = None
self.async_write_ha_state()
@callback
def _parse_source_state(self, state):
"""Parse source state and set person attributes.
This is a device tracker state or the restored person state.
"""
self._state = state.state
self._source = state.entity_id
self._latitude = state.attributes.get(ATTR_LATITUDE)
self._longitude = state.attributes.get(ATTR_LONGITUDE)
self._gps_accuracy = state.attributes.get(ATTR_GPS_ACCURACY)
@websocket_api.websocket_command({vol.Required(CONF_TYPE): "person/list"})
def ws_list_person(
hass: HomeAssistantType, connection: websocket_api.ActiveConnection, msg
):
"""List persons."""
yaml, storage = hass.data[DOMAIN]
connection.send_result(
msg[ATTR_ID], {"storage": storage.async_items(), "config": yaml.async_items()}
)
def _get_latest(prev: State | None, curr: State):
"""Get latest state."""
if prev is None or curr.last_updated > prev.last_updated:
return curr
return prev
| apache-2.0 |
ddzialak/boto | boto/cloudfront/object.py | 170 | 1798 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.s3.key import Key
class Object(Key):
def __init__(self, bucket, name=None):
super(Object, self).__init__(bucket, name=name)
self.distribution = bucket.distribution
def __repr__(self):
return '<Object: %s/%s>' % (self.distribution.config.origin, self.name)
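    # RTMP streaming URLs are served under /cfx/st/ on the distribution domain,
    # while plain HTTP(S) objects hang directly off the domain root.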
def url(self, scheme='http'):
url = '%s://' % scheme
url += self.distribution.domain_name
if scheme.lower().startswith('rtmp'):
url += '/cfx/st/'
else:
url += '/'
url += self.name
return url
class StreamingObject(Object):
def url(self, scheme='rtmp'):
return super(StreamingObject, self).url(scheme)
| mit |
mikewesner-wf/glasshouse | appengine/lib/werkzeug/_internal.py | 89 | 14082 | # -*- coding: utf-8 -*-
"""
werkzeug._internal
~~~~~~~~~~~~~~~~~~
This module provides internally used helpers and constants.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import inspect
from weakref import WeakKeyDictionary
from cStringIO import StringIO
from Cookie import SimpleCookie, Morsel, CookieError
from time import gmtime
from datetime import datetime, date
_logger = None
_empty_stream = StringIO('')
_signature_cache = WeakKeyDictionary()
_epoch_ord = date(1970, 1, 1).toordinal()
HTTP_STATUS_CODES = {
100: 'Continue',
101: 'Switching Protocols',
102: 'Processing',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
207: 'Multi Status',
226: 'IM Used', # see RFC 3229
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required', # unused
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
418: 'I\'m a teapot', # see RFC 2324
422: 'Unprocessable Entity',
423: 'Locked',
424: 'Failed Dependency',
426: 'Upgrade Required',
449: 'Retry With', # proprietary MS extension
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
507: 'Insufficient Storage',
510: 'Not Extended'
}
class _Missing(object):
def __repr__(self):
return 'no value'
def __reduce__(self):
return '_missing'
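# Module-level sentinel: __reduce__ above makes unpickling resolve back to this
# shared instance instead of constructing a new _Missing object.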
_missing = _Missing()
def _proxy_repr(cls):
def proxy_repr(self):
return '%s(%s)' % (self.__class__.__name__, cls.__repr__(self))
return proxy_repr
def _get_environ(obj):
env = getattr(obj, 'environ', obj)
assert isinstance(env, dict), \
'%r is not a WSGI environment (has to be a dict)' % type(obj).__name__
return env
def _log(type, message, *args, **kwargs):
"""Log into the internal werkzeug logger."""
global _logger
if _logger is None:
import logging
_logger = logging.getLogger('werkzeug')
# Only set up a default log handler if the
# end-user application didn't set anything up.
if not logging.root.handlers and _logger.level == logging.NOTSET:
_logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
_logger.addHandler(handler)
getattr(_logger, type)(message.rstrip(), *args, **kwargs)
def _parse_signature(func):
"""Return a signature object for the function."""
if hasattr(func, 'im_func'):
func = func.im_func
# if we have a cached validator for this function, return it
parse = _signature_cache.get(func)
if parse is not None:
return parse
# inspect the function signature and collect all the information
positional, vararg_var, kwarg_var, defaults = inspect.getargspec(func)
defaults = defaults or ()
arg_count = len(positional)
arguments = []
for idx, name in enumerate(positional):
if isinstance(name, list):
raise TypeError('cannot parse functions that unpack tuples '
'in the function signature')
try:
default = defaults[idx - arg_count]
except IndexError:
param = (name, False, None)
else:
param = (name, True, default)
arguments.append(param)
arguments = tuple(arguments)
def parse(args, kwargs):
new_args = []
missing = []
extra = {}
# consume as many arguments as positional as possible
for idx, (name, has_default, default) in enumerate(arguments):
try:
new_args.append(args[idx])
except IndexError:
try:
new_args.append(kwargs.pop(name))
except KeyError:
if has_default:
new_args.append(default)
else:
missing.append(name)
else:
if name in kwargs:
extra[name] = kwargs.pop(name)
# handle extra arguments
extra_positional = args[arg_count:]
if vararg_var is not None:
new_args.extend(extra_positional)
extra_positional = ()
        if kwargs and kwarg_var is None:
extra.update(kwargs)
kwargs = {}
return new_args, kwargs, missing, extra, extra_positional, \
arguments, vararg_var, kwarg_var
_signature_cache[func] = parse
return parse
def _patch_wrapper(old, new):
"""Helper function that forwards all the function details to the
decorated function."""
try:
new.__name__ = old.__name__
new.__module__ = old.__module__
new.__doc__ = old.__doc__
new.__dict__ = old.__dict__
except Exception:
pass
return new
def _decode_unicode(value, charset, errors):
"""Like the regular decode function but this one raises an
`HTTPUnicodeError` if errors is `strict`."""
fallback = None
if errors.startswith('fallback:'):
fallback = errors[9:]
errors = 'strict'
try:
return value.decode(charset, errors)
except UnicodeError, e:
if fallback is not None:
return value.decode(fallback, 'replace')
from werkzeug.exceptions import HTTPUnicodeError
raise HTTPUnicodeError(str(e))
def _iter_modules(path):
"""Iterate over all modules in a package."""
import os
import pkgutil
if hasattr(pkgutil, 'iter_modules'):
for importer, modname, ispkg in pkgutil.iter_modules(path):
yield modname, ispkg
return
from inspect import getmodulename
from pydoc import ispackage
found = set()
for path in path:
for filename in os.listdir(path):
p = os.path.join(path, filename)
modname = getmodulename(filename)
if modname and modname != '__init__':
if modname not in found:
found.add(modname)
                    yield modname, ispackage(p)
def _dump_date(d, delim):
"""Used for `http_date` and `cookie_date`."""
if d is None:
d = gmtime()
elif isinstance(d, datetime):
d = d.utctimetuple()
elif isinstance(d, (int, long, float)):
d = gmtime(d)
return '%s, %02d%s%s%s%s %02d:%02d:%02d GMT' % (
('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[d.tm_wday],
d.tm_mday, delim,
('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec')[d.tm_mon - 1],
delim, str(d.tm_year), d.tm_hour, d.tm_min, d.tm_sec
)
def _date_to_unix(arg):
"""Converts a timetuple, integer or datetime object into the seconds from
epoch in utc.
"""
if isinstance(arg, datetime):
arg = arg.utctimetuple()
elif isinstance(arg, (int, long, float)):
return int(arg)
year, month, day, hour, minute, second = arg[:6]
days = date(year, month, 1).toordinal() - _epoch_ord + day - 1
hours = days * 24 + hour
minutes = hours * 60 + minute
seconds = minutes * 60 + second
return seconds
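# Morsel subclass that adds support for the HttpOnly attribute, which the
# Python 2 stdlib Cookie module does not understand.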
class _ExtendedMorsel(Morsel):
_reserved = {'httponly': 'HttpOnly'}
_reserved.update(Morsel._reserved)
def __init__(self, name=None, value=None):
Morsel.__init__(self)
if name is not None:
self.set(name, value, value)
def OutputString(self, attrs=None):
httponly = self.pop('httponly', False)
result = Morsel.OutputString(self, attrs).rstrip('\t ;')
if httponly:
result += '; HttpOnly'
return result
class _ExtendedCookie(SimpleCookie):
"""Form of the base cookie that doesn't raise a `CookieError` for
malformed keys. This has the advantage that broken cookies submitted
by nonstandard browsers don't cause the cookie to be empty.
"""
def _BaseCookie__set(self, key, real_value, coded_value):
morsel = self.get(key, _ExtendedMorsel())
try:
morsel.set(key, real_value, coded_value)
except CookieError:
pass
dict.__setitem__(self, key, morsel)
class _DictAccessorProperty(object):
"""Baseclass for `environ_property` and `header_property`."""
read_only = False
def __init__(self, name, default=None, load_func=None, dump_func=None,
read_only=None, doc=None):
self.name = name
self.default = default
self.load_func = load_func
self.dump_func = dump_func
if read_only is not None:
self.read_only = read_only
self.__doc__ = doc
def __get__(self, obj, type=None):
if obj is None:
return self
storage = self.lookup(obj)
if self.name not in storage:
return self.default
rv = storage[self.name]
if self.load_func is not None:
try:
rv = self.load_func(rv)
except (ValueError, TypeError):
rv = self.default
return rv
def __set__(self, obj, value):
if self.read_only:
raise AttributeError('read only property')
if self.dump_func is not None:
value = self.dump_func(value)
self.lookup(obj)[self.name] = value
def __delete__(self, obj):
if self.read_only:
raise AttributeError('read only property')
self.lookup(obj).pop(self.name, None)
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
self.name
)
def _easteregg(app):
"""Like the name says. But who knows how it works?"""
gyver = '\n'.join([x + (77 - len(x)) * ' ' for x in '''
eJyFlzuOJDkMRP06xRjymKgDJCDQStBYT8BCgK4gTwfQ2fcFs2a2FzvZk+hvlcRvRJD148efHt9m
9Xz94dRY5hGt1nrYcXx7us9qlcP9HHNh28rz8dZj+q4rynVFFPdlY4zH873NKCexrDM6zxxRymzz
4QIxzK4bth1PV7+uHn6WXZ5C4ka/+prFzx3zWLMHAVZb8RRUxtFXI5DTQ2n3Hi2sNI+HK43AOWSY
jmEzE4naFp58PdzhPMdslLVWHTGUVpSxImw+pS/D+JhzLfdS1j7PzUMxij+mc2U0I9zcbZ/HcZxc
q1QjvvcThMYFnp93agEx392ZdLJWXbi/Ca4Oivl4h/Y1ErEqP+lrg7Xa4qnUKu5UE9UUA4xeqLJ5
jWlPKJvR2yhRI7xFPdzPuc6adXu6ovwXwRPXXnZHxlPtkSkqWHilsOrGrvcVWXgGP3daXomCj317
8P2UOw/NnA0OOikZyFf3zZ76eN9QXNwYdD8f8/LdBRFg0BO3bB+Pe/+G8er8tDJv83XTkj7WeMBJ
v/rnAfdO51d6sFglfi8U7zbnr0u9tyJHhFZNXYfH8Iafv2Oa+DT6l8u9UYlajV/hcEgk1x8E8L/r
XJXl2SK+GJCxtnyhVKv6GFCEB1OO3f9YWAIEbwcRWv/6RPpsEzOkXURMN37J0PoCSYeBnJQd9Giu
LxYQJNlYPSo/iTQwgaihbART7Fcyem2tTSCcwNCs85MOOpJtXhXDe0E7zgZJkcxWTar/zEjdIVCk
iXy87FW6j5aGZhttDBoAZ3vnmlkx4q4mMmCdLtnHkBXFMCReqthSGkQ+MDXLLCpXwBs0t+sIhsDI
tjBB8MwqYQpLygZ56rRHHpw+OAVyGgaGRHWy2QfXez+ZQQTTBkmRXdV/A9LwH6XGZpEAZU8rs4pE
1R4FQ3Uwt8RKEtRc0/CrANUoes3EzM6WYcFyskGZ6UTHJWenBDS7h163Eo2bpzqxNE9aVgEM2CqI
GAJe9Yra4P5qKmta27VjzYdR04Vc7KHeY4vs61C0nbywFmcSXYjzBHdiEjraS7PGG2jHHTpJUMxN
Jlxr3pUuFvlBWLJGE3GcA1/1xxLcHmlO+LAXbhrXah1tD6Ze+uqFGdZa5FM+3eHcKNaEarutAQ0A
QMAZHV+ve6LxAwWnXbbSXEG2DmCX5ijeLCKj5lhVFBrMm+ryOttCAeFpUdZyQLAQkA06RLs56rzG
8MID55vqr/g64Qr/wqwlE0TVxgoiZhHrbY2h1iuuyUVg1nlkpDrQ7Vm1xIkI5XRKLedN9EjzVchu
jQhXcVkjVdgP2O99QShpdvXWoSwkp5uMwyjt3jiWCqWGSiaaPAzohjPanXVLbM3x0dNskJsaCEyz
DTKIs+7WKJD4ZcJGfMhLFBf6hlbnNkLEePF8Cx2o2kwmYF4+MzAxa6i+6xIQkswOqGO+3x9NaZX8
MrZRaFZpLeVTYI9F/djY6DDVVs340nZGmwrDqTCiiqD5luj3OzwpmQCiQhdRYowUYEA3i1WWGwL4
GCtSoO4XbIPFeKGU13XPkDf5IdimLpAvi2kVDVQbzOOa4KAXMFlpi/hV8F6IDe0Y2reg3PuNKT3i
RYhZqtkQZqSB2Qm0SGtjAw7RDwaM1roESC8HWiPxkoOy0lLTRFG39kvbLZbU9gFKFRvixDZBJmpi
Xyq3RE5lW00EJjaqwp/v3EByMSpVZYsEIJ4APaHmVtpGSieV5CALOtNUAzTBiw81GLgC0quyzf6c
NlWknzJeCsJ5fup2R4d8CYGN77mu5vnO1UqbfElZ9E6cR6zbHjgsr9ly18fXjZoPeDjPuzlWbFwS
pdvPkhntFvkc13qb9094LL5NrA3NIq3r9eNnop9DizWOqCEbyRBFJTHn6Tt3CG1o8a4HevYh0XiJ
sR0AVVHuGuMOIfbuQ/OKBkGRC6NJ4u7sbPX8bG/n5sNIOQ6/Y/BX3IwRlTSabtZpYLB85lYtkkgm
p1qXK3Du2mnr5INXmT/78KI12n11EFBkJHHp0wJyLe9MvPNUGYsf+170maayRoy2lURGHAIapSpQ
krEDuNoJCHNlZYhKpvw4mspVWxqo415n8cD62N9+EfHrAvqQnINStetek7RY2Urv8nxsnGaZfRr/
nhXbJ6m/yl1LzYqscDZA9QHLNbdaSTTr+kFg3bC0iYbX/eQy0Bv3h4B50/SGYzKAXkCeOLI3bcAt
mj2Z/FM1vQWgDynsRwNvrWnJHlespkrp8+vO1jNaibm+PhqXPPv30YwDZ6jApe3wUjFQobghvW9p
7f2zLkGNv8b191cD/3vs9Q833z8t'''.decode('base64').decode('zlib').splitlines()])
def easteregged(environ, start_response):
def injecting_start_response(status, headers, exc_info=None):
headers.append(('X-Powered-By', 'Werkzeug'))
return start_response(status, headers, exc_info)
if environ.get('QUERY_STRING') != 'macgybarchakku':
return app(environ, injecting_start_response)
injecting_start_response('200 OK', [('Content-Type', 'text/html')])
return ['''
<!DOCTYPE html>
<html>
<head>
<title>About Werkzeug</title>
<style type="text/css">
body { font: 15px Georgia, serif; text-align: center; }
a { color: #333; text-decoration: none; }
h1 { font-size: 30px; margin: 20px 0 10px 0; }
p { margin: 0 0 30px 0; }
pre { font: 11px 'Consolas', 'Monaco', monospace; line-height: 0.95; }
</style>
</head>
<body>
<h1><a href="http://werkzeug.pocoo.org/">Werkzeug</a></h1>
<p>the Swiss Army knife of Python web development.</p>
<pre>%s\n\n\n</pre>
</body>
</html>''' % gyver]
return easteregged
| apache-2.0 |
craigderington/studentloan5 | studentloan5/Lib/site-packages/django/utils/html.py | 45 | 15098 | """HTML utilities suitable for global use."""
from __future__ import unicode_literals
import re
import sys
import warnings
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_str, force_text
from django.utils.functional import allow_lazy
from django.utils.http import RFC3986_GENDELIMS, RFC3986_SUBDELIMS
from django.utils.safestring import SafeData, SafeText, mark_safe
from django.utils.six.moves.urllib.parse import (
parse_qsl, quote, unquote, urlencode, urlsplit, urlunsplit,
)
from django.utils.text import normalize_newlines
from .html_parser import HTMLParseError, HTMLParser
# Configuration for urlize() function.
TRAILING_PUNCTUATION = ['.', ',', ':', ';', '.)', '"', '\'', '!']
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('&lt;', '&gt;'), ('"', '"'), ('\'', '\'')]
# List of possible strings used for bullets in bulleted lists.
DOTS = ['&middot;', '*', '\u2022', '&#149;', '&bull;', '&#8226;']
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'(\s+)')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)($|/.*)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(
r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|'
'<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(
r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join(re.escape(x)
for x in DOTS), re.DOTALL)
trailing_empty_content_re = re.compile(r'(?:<p>(?:&nbsp;|\s|<br \/>)*?</p>\s*)+\Z')
def escape(text):
"""
Returns the given text with ampersands, quotes and angle brackets encoded
for use in HTML.
This function always escapes its input, even if it's already escaped and
marked as such. This may result in double-escaping. If this is a concern,
use conditional_escape() instead.
"""
    return mark_safe(force_text(text).replace('&', '&amp;').replace('<', '&lt;')
        .replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;'))
escape = allow_lazy(escape, six.text_type, SafeText)
_js_escapes = {
ord('\\'): '\\u005C',
ord('\''): '\\u0027',
ord('"'): '\\u0022',
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
ord('='): '\\u003D',
ord('-'): '\\u002D',
ord(';'): '\\u003B',
ord('\u2028'): '\\u2028',
ord('\u2029'): '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
return mark_safe(force_text(value).translate(_js_escapes))
escapejs = allow_lazy(escapejs, six.text_type, SafeText)
def conditional_escape(text):
"""
Similar to escape(), except that it doesn't operate on pre-escaped strings.
This function relies on the __html__ convention used both by Django's
SafeData class and by third-party libraries like markupsafe.
"""
if hasattr(text, '__html__'):
return text.__html__()
else:
return escape(text)
def format_html(format_string, *args, **kwargs):
"""
Similar to str.format, but passes all arguments through conditional_escape,
and calls 'mark_safe' on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
"""
args_safe = map(conditional_escape, args)
kwargs_safe = {k: conditional_escape(v) for (k, v) in six.iteritems(kwargs)}
return mark_safe(format_string.format(*args_safe, **kwargs_safe))
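# For example, format_html('<b>{}</b>', user_input) escapes user_input while
# leaving the surrounding markup intact.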
def format_html_join(sep, format_string, args_generator):
"""
A wrapper of format_html, for the common case of a group of arguments that
need to be formatted using the same format string, and then joined using
'sep'. 'sep' is also passed through conditional_escape.
'args_generator' should be an iterator that returns the sequence of 'args'
that will be passed to format_html.
Example:
format_html_join('\n', "<li>{} {}</li>", ((u.first_name, u.last_name)
for u in users))
"""
return mark_safe(conditional_escape(sep).join(
format_html(format_string, *tuple(args))
for args in args_generator))
def linebreaks(value, autoescape=False):
"""Converts newlines into <p> and <br />s."""
value = normalize_newlines(value)
paras = re.split('\n{2,}', value)
if autoescape:
paras = ['<p>%s</p>' % escape(p).replace('\n', '<br />') for p in paras]
else:
paras = ['<p>%s</p>' % p.replace('\n', '<br />') for p in paras]
return '\n\n'.join(paras)
linebreaks = allow_lazy(linebreaks, six.text_type)
class MLStripper(HTMLParser):
def __init__(self):
# The strict parameter was added in Python 3.2 with a default of True.
# The default changed to False in Python 3.3 and was deprecated.
if sys.version_info[:2] == (3, 2):
HTMLParser.__init__(self, strict=False)
else:
HTMLParser.__init__(self)
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def handle_entityref(self, name):
self.fed.append('&%s;' % name)
def handle_charref(self, name):
self.fed.append('&#%s;' % name)
def get_data(self):
return ''.join(self.fed)
def _strip_once(value):
"""
Internal tag stripping utility used by strip_tags.
"""
s = MLStripper()
try:
s.feed(value)
except HTMLParseError:
return value
try:
s.close()
except (HTMLParseError, UnboundLocalError):
# UnboundLocalError because of http://bugs.python.org/issue17802
# on Python 3.2, triggered by strict=False mode of HTMLParser
return s.get_data() + s.rawdata
else:
return s.get_data()
def strip_tags(value):
"""Returns the given HTML with all tags stripped."""
# Note: in typical case this loop executes _strip_once once. Loop condition
# is redundant, but helps to reduce number of executions of _strip_once.
while '<' in value and '>' in value:
new_value = _strip_once(value)
if len(new_value) >= len(value):
# _strip_once was not able to detect more tags or length increased
# due to http://bugs.python.org/issue20288
# (affects Python 2 < 2.7.7 and Python 3 < 3.3.5)
break
value = new_value
return value
strip_tags = allow_lazy(strip_tags)
def remove_tags(html, tags):
"""Returns the given HTML with given tags removed."""
warnings.warn(
"django.utils.html.remove_tags() and the removetags template filter "
"are deprecated. Consider using the bleach library instead.",
RemovedInDjango110Warning, stacklevel=3
)
tags = [re.escape(tag) for tag in tags.split()]
tags_re = '(%s)' % '|'.join(tags)
starttag_re = re.compile(r'<%s(/?>|(\s+[^>]*>))' % tags_re, re.U)
endtag_re = re.compile('</%s>' % tags_re)
html = starttag_re.sub('', html)
html = endtag_re.sub('', html)
return html
remove_tags = allow_lazy(remove_tags, six.text_type)
def strip_spaces_between_tags(value):
"""Returns the given HTML with spaces between tags removed."""
return re.sub(r'>\s+<', '><', force_text(value))
strip_spaces_between_tags = allow_lazy(strip_spaces_between_tags, six.text_type)
def strip_entities(value):
"""Returns the given HTML with all entities (&something;) stripped."""
warnings.warn(
"django.utils.html.strip_entities() is deprecated.",
RemovedInDjango110Warning, stacklevel=2
)
return re.sub(r'&(?:\w+|#\d+);', '', force_text(value))
strip_entities = allow_lazy(strip_entities, six.text_type)
def smart_urlquote(url):
"Quotes a URL if it isn't already quoted."
def unquote_quote(segment):
segment = unquote(force_str(segment))
# Tilde is part of RFC3986 Unreserved Characters
# http://tools.ietf.org/html/rfc3986#section-2.3
# See also http://bugs.python.org/issue16285
segment = quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + str('~'))
return force_text(segment)
# Handle IDN before quoting.
try:
scheme, netloc, path, query, fragment = urlsplit(url)
except ValueError:
# invalid IPv6 URL (normally square brackets in hostname part).
return unquote_quote(url)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
return unquote_quote(url)
if query:
# Separately unquoting key/value, so as to not mix querystring separators
# included in query values. See #22267.
query_parts = [(unquote(force_str(q[0])), unquote(force_str(q[1])))
for q in parse_qsl(query, keep_blank_values=True)]
# urlencode will take care of quoting
query = urlencode(query_parts)
path = unquote_quote(path)
fragment = unquote_quote(fragment)
return urlunsplit((scheme, netloc, path, query, fragment))
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
"""
Converts any URLs in text into clickable links.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
If trim_url_limit is not None, the URLs in the link text longer than this
limit will be truncated to trim_url_limit-3 characters and appended with
an ellipsis.
If nofollow is True, the links will get a rel="nofollow" attribute.
If autoescape is True, the link text and URLs will be autoescaped.
"""
safe_input = isinstance(text, SafeData)
def trim_url(x, limit=trim_url_limit):
if limit is None or len(x) <= limit:
return x
return '%s...' % x[:max(0, limit - 3)]
def unescape(text, trail):
"""
If input URL is HTML-escaped, unescape it so as we can safely feed it to
smart_urlquote. For example:
        http://example.com?x=1&amp;y=&lt;2&gt; => http://example.com?x=1&y=<2>
"""
        unescaped = (text + trail).replace(
            '&amp;', '&').replace('&lt;', '<').replace(
            '&gt;', '>').replace('&quot;', '"').replace('&#39;', "'")
if trail and unescaped.endswith(trail):
# Remove trail for unescaped if it was not consumed by unescape
unescaped = unescaped[:-len(trail)]
elif trail == ';':
# Trail was consumed by unescape (as end-of-entity marker), move it to text
text += trail
trail = ''
return text, unescaped, trail
words = word_split_re.split(force_text(text))
for i, word in enumerate(words):
if '.' in word or '@' in word or ':' in word:
# Deal with punctuation.
lead, middle, trail = '', word, ''
for punctuation in TRAILING_PUNCTUATION:
if middle.endswith(punctuation):
middle = middle[:-len(punctuation)]
trail = punctuation + trail
for opening, closing in WRAPPING_PUNCTUATION:
if middle.startswith(opening):
middle = middle[len(opening):]
lead = lead + opening
# Keep parentheses at the end only if they're balanced.
if (middle.endswith(closing)
and middle.count(closing) == middle.count(opening) + 1):
middle = middle[:-len(closing)]
trail = closing + trail
# Make URL we want to point to.
url = None
nofollow_attr = ' rel="nofollow"' if nofollow else ''
if simple_url_re.match(middle):
middle, middle_unescaped, trail = unescape(middle, trail)
url = smart_urlquote(middle_unescaped)
elif simple_url_2_re.match(middle):
middle, middle_unescaped, trail = unescape(middle, trail)
url = smart_urlquote('http://%s' % middle_unescaped)
elif ':' not in middle and simple_email_re.match(middle):
local, domain = middle.rsplit('@', 1)
try:
domain = domain.encode('idna').decode('ascii')
except UnicodeError:
continue
url = 'mailto:%s@%s' % (local, domain)
nofollow_attr = ''
# Make link.
if url:
trimmed = trim_url(middle)
if autoescape and not safe_input:
lead, trail = escape(lead), escape(trail)
trimmed = escape(trimmed)
middle = '<a href="%s"%s>%s</a>' % (escape(url), nofollow_attr, trimmed)
words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
else:
if safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
elif safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
return ''.join(words)
urlize = allow_lazy(urlize, six.text_type)
def avoid_wrapping(value):
"""
Avoid text wrapping in the middle of a phrase by adding non-breaking
spaces where there previously were normal spaces.
"""
return value.replace(" ", "\xa0")
def html_safe(klass):
"""
A decorator that defines the __html__ method. This helps non-Django
templates to detect classes whose __str__ methods return SafeText.
"""
if '__html__' in klass.__dict__:
raise ValueError(
"can't apply @html_safe to %s because it defines "
"__html__()." % klass.__name__
)
if six.PY2:
if '__unicode__' not in klass.__dict__:
raise ValueError(
"can't apply @html_safe to %s because it doesn't "
"define __unicode__()." % klass.__name__
)
klass_unicode = klass.__unicode__
klass.__unicode__ = lambda self: mark_safe(klass_unicode(self))
klass.__html__ = lambda self: unicode(self) # NOQA: unicode undefined on PY3
else:
if '__str__' not in klass.__dict__:
raise ValueError(
"can't apply @html_safe to %s because it doesn't "
"define __str__()." % klass.__name__
)
klass_str = klass.__str__
klass.__str__ = lambda self: mark_safe(klass_str(self))
klass.__html__ = lambda self: str(self)
return klass
| bsd-3-clause |
muyeby/NLP | Seg/Hmmseg/test/test.py | 1 | 6005 | #!/usr/bin/env python
# encoding: utf-8
"""
@version: ??
@author: muyeby
@contact: bxf_hit@163.com
@site: http://muyeby.github.io
@software: PyCharm
@file: test.py
@time: 16-9-4 8:17 PM
"""
import time
import sys
import os
import re
import math
sys.path.append(os.getcwd() + "/../../../")
sys.path.append(os.getcwd() + "/../")
from model.NameNer import decode, CNNAME
from model.NumNer import NumRec
from model.PlaceNer import PlaceRec
from model.hmm import HMM
from utils.dataset import read_dataset
from config.config import Config
TEST_FILE = 'test.txt'
DATA_DIR = os.getcwd() + '/../data/'
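# Viterbi decoding over the B/M/E/S tag set: score[i][j] is the log-probability of
# the best tag sequence ending at character i with tag j, and path[i][j] records
# the previous tag on that best sequence.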
def viterbi(words, hmm):
N, T = len(words), len(hmm.postags)
    score = [[-float('inf') for j in range(T)] for i in range(N)]  # DP table of best log-probabilities
    path = [[-1 for j in range(T)] for i in range(N)]  # backpointers for recovering the best path
for i, word in enumerate(words):
if i == 0:
for j, tag in enumerate(hmm.postags):
tmps = math.log(hmm.start[tag]) if tag not in {'E', 'M'} else -1e20
score[i][j] = hmm.emit(words, i, tag) + tmps
else:
for j, tag in enumerate(hmm.postags):
                # Dynamic programming: take the best score over all previous tags
(best, best_t) = max(
[(score[i - 1][y0] + hmm.trans(tag2, tag) + hmm.emit(words, i, tag), y0) for y0, tag2 in
enumerate(hmm.postags) if score[i - 1][y0] > -1e20])
score[i][j] = best
path[i][j] = best_t
best, best_t = -1e20, -1
for j, tag in enumerate(hmm.postags):
if best < score[len(words) - 1][j]:
best = score[len(words) - 1][j]
best_t = j
result = [best_t]
    for i in range(len(words) - 1, 0, -1):  # backtrack to recover the best path
result.append(path[i][result[-1]])
# convert POStag indexing to POStag str
result = [hmm.postags[t] for t in reversed(result)]
return result
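# Convert a B/M/E/S tag sequence back into words: each B..E span becomes one
# token, an S tag emits a single-character word, and trailing characters that
# never reach an E tag are kept together as one chunk.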
def __cut(sen, hmm, start, lent):
pos = viterbi(sen, hmm)
pos_list = pos[start:start + lent]
sentence = sen[start:start + lent]
# print ' '.join(pos_list)
res = []
begin, next = 0, 0
for i, char in enumerate(sentence):
pos = pos_list[i]
if pos == 'B':
begin = i
elif pos == 'E':
ttmp = ''.join(sentence[begin:i + 1])
res.append(ttmp)
next = i + 1
elif pos == 'S':
res.append(char)
next = i + 1
if next < len(sentence):
ttmp = ''.join(sentence[next:])
res.append(ttmp)
# print ' '.join(res)
return res
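# For each recognized 3-character name, merge its two given-name characters into
# a single token so that the surname is kept as a separate word.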
def Name_Replace(namelist, sen):
for name in namelist.strip().split(' '):
index = 0
if len(name) >= 3:
while index < len(sen):
if index < len(sen) - 2 and sen[index] == name[0] and sen[index + 1] == name[1] and sen[index + 2] == \
name[2]:
tmp = name[1] + name[2]
sen = sen[:index + 1] + [tmp] + sen[index + 3:]
index += 1
index += 1
return sen
if __name__ == '__main__':
print (time.strftime('%Y-%m-%d %H:%M:%S'))
train_dataset = read_dataset()
hmm = HMM()
hmm.fit(train_dataset)
Pner = PlaceRec()
Numner = NumRec()
cname = CNNAME()
cname.fit()
separator = ' '
test_sentence = open(TEST_FILE,'rb')
re_han = re.compile(ur"([\u4E00-\u9FA5\u25cb]+)")
re_skip = re.compile(ur"^[\uff0d\-{0,1}a-zA-Z0-9\uff10-\uff19\u2014\uff21-\uff3a\uff41-\uff5a\u2026\u25cb\\.]$")
print (time.strftime('%Y-%m-%d %H:%M:%S'))
print 'Start seg...'
for line in test_sentence:
res = ''
tmpp = ''
if sys.version < '3.0':
if not (type(line) is unicode):
try:
line = line.decode('utf-8')
except:
line = line.decode('gbk', 'ignore')
        print 'orig:', line.strip()
blocks = re_han.split(line.strip())
for blk in blocks:
if not blk:
continue
if re_han.match(blk):
# print(blk)
# for ll in hmm.raw_seg(blk):
index = 0
wlist = hmm.raw_seg(blk)
while index < len(wlist):
ll = wlist[index]
if ll:
tmpp = tmpp + ll + '\n' # for support file
if ll in hmm.idict or len(ll) == 1:
res += ll
else:
inputs = ''
llen = 0
if index - 1 > 0:
inputs += wlist[index - 1]
llen = len(wlist[index - 1])
inputs += wlist[index]
if index + 1 < len(wlist):
inputs += wlist[index + 1]
tmp = __cut(inputs, hmm, llen, len(ll))
res += separator.join(tmp)
res += separator
index += 1
else:
i = 0
while i < len(blk):
ttmp = ''
if not re_skip.match(blk[i]):
res += (blk[i] + separator)
i += 1
else:
while (i < len(blk)) and re_skip.match(blk[i]):
ttmp += blk[i]
i += 1
res += (ttmp + separator)
if res:
ttmpp = res.strip().split(' ')
res1 = Numner.NumNer(ttmpp)
res2 = Pner.Place_Ner(res1)
namelist = decode(cname, res2)
res3 = Name_Replace(namelist, res2)
ans = '/ '.join(res3)
            print ans
| mit |
rapilabs/django-db-constraints | django_db_constraints/autodetector.py | 1 | 2101 | from django.db.migrations import operations
from django.db.migrations.autodetector import MigrationAutodetector
from .operations import AlterConstraints
class MigrationAutodetectorWithDbConstraints(MigrationAutodetector):
db_constraints_operations = []
def generate_created_models(self, *args, **kwargs):
rv = super().generate_created_models(*args, **kwargs)
for (app_label, migration_operations) in self.generated_operations.items():
for operation in migration_operations:
if isinstance(operation, operations.CreateModel) and 'db_constraints' in operation.options:
db_constraints = operation.options.pop('db_constraints')
self.db_constraints_operations.append((
app_label,
AlterConstraints(name=operation.name, db_constraints=db_constraints),
))
return rv
def generate_altered_unique_together(self, *args, **kwargs):
rv = super().generate_altered_unique_together(*args, **kwargs)
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_value = old_model_state.options.get('db_constraints', {})
new_value = new_model_state.options.get('db_constraints', {})
if old_value != new_value:
self.db_constraints_operations.append((
app_label,
AlterConstraints(
name=model_name,
db_constraints=new_value,
),
))
return rv
def _sort_migrations(self, *args, **kwargs):
rv = super()._sort_migrations()
for app_label, operation in self.db_constraints_operations:
self.generated_operations.setdefault(app_label, []).append(operation)
return rv
| mit |
zhaishaomin/LDS-prefetcher-research | gem5_src/arch/x86/isa/insts/__init__.py | 91 | 2409 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
categories = ["romutil",
"general_purpose",
"simd128",
"simd64",
"system",
"x87"]
microcode = '''
# X86 microcode
'''
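# Aggregate each category's microcode assembly; the py2-style exec keeps the
# import list driven purely by the `categories` data above.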
for category in categories:
exec "import %s as cat" % category
microcode += cat.microcode
| apache-2.0 |
rsm5139/learning-bowtie | build/src/server.py | 1 | 10189 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import traceback
from functools import wraps
from builtins import bytes
import click
import msgpack
import flask
from flask import Flask, render_template, copy_current_request_context
from flask import request, Response
from flask_socketio import SocketIO, emit
import eventlet
class GetterNotDefined(AttributeError):
pass
def check_auth(username, password):
"""This function is called to check if a username /
password combination is valid.
"""
return username == 'username' and password == 'password'
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
# import the user created module
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import step_2
app = Flask(__name__)
app.debug = True
socketio = SocketIO(app, binary=True, path='' + 'socket.io')
# not sure if this is secure or how much it matters
app.secret_key = os.urandom(256)
def context(func):
def foo():
with app.app_context():
func()
return foo
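# Runs `func` roughly every `seconds` on eventlet green threads, printing (but
# otherwise swallowing) exceptions so a failed tick does not stop the schedule.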
class Scheduler(object):
def __init__(self, seconds, func):
self.seconds = seconds
self.func = func
self.thread = None
def start(self):
self.thread = eventlet.spawn(self.run)
def run(self):
ret = eventlet.spawn(context(self.func))
eventlet.sleep(self.seconds)
try:
ret.wait()
except:
traceback.print_exc()
self.thread = eventlet.spawn(self.run)
def stop(self):
if self.thread:
self.thread.cancel()
@app.route('/')
def index():
return render_template('index.html')
@app.route('/static/bundle.js')
def getbundle():
basedir = os.path.dirname(os.path.realpath(__file__))
bundle_path = basedir + '/static/bundle.js'
if os.path.isfile(bundle_path + '.gz'):
bundle = open(bundle_path + '.gz', 'rb').read()
response = flask.make_response(bundle)
response.headers['Content-Encoding'] = 'gzip'
response.headers['Vary'] = 'Accept-Encoding'
response.headers['Content-Length'] = len(response.data)
return response
else:
return open(bundle_path, 'r').read()
@socketio.on('INITIALIZE')
def _():
foo = copy_current_request_context(step_2.initialize)
eventlet.spawn(foo)
@socketio.on('resp#1')
def _():
foo = copy_current_request_context(step_2.timed_event)
eventlet.spawn(foo)
@socketio.on('1#click')
def _(*args):
def wrapuser():
uniq_events = set()
uniq_events.update([('1#click', 'reset_button', None)])
uniq_events.remove(('1#click', 'reset_button', None))
event_data = {}
for ev in uniq_events:
comp = getattr(step_2, ev[1])
if ev[2] is None:
ename = ev[0]
raise GetterNotDefined('{ctype} has no getter associated with event "on_{ename}"'
.format(ctype=type(comp), ename=ename[ename.find('#') + 1:]))
getter = getattr(comp, ev[2])
event_data[ev[0]] = getter()
user_args = []
step_2.initialize(*user_args)
foo = copy_current_request_context(wrapuser)
eventlet.spawn(foo)
@socketio.on('6#change')
def _(*args):
def wrapuser():
uniq_events = set()
uniq_events.update([('6#change', 'r1v1_controller', 'get')])
uniq_events.remove(('6#change', 'r1v1_controller', 'get'))
event_data = {}
for ev in uniq_events:
comp = getattr(step_2, ev[1])
if ev[2] is None:
ename = ev[0]
raise GetterNotDefined('{ctype} has no getter associated with event "on_{ename}"'
.format(ctype=type(comp), ename=ename[ename.find('#') + 1:]))
getter = getattr(comp, ev[2])
event_data[ev[0]] = getter()
event_data['6#change'] = step_2.r1v1_controller._get(
msgpack.unpackb(bytes(args[0]['data']), encoding='utf8')
)
user_args = []
user_args.append(event_data['6#change'])
step_2.r1v1_listener(*user_args)
foo = copy_current_request_context(wrapuser)
eventlet.spawn(foo)
@socketio.on('7#after_change')
def _(*args):
def wrapuser():
uniq_events = set()
uniq_events.update([('7#after_change', 'r1v2_controller', 'get')])
uniq_events.update([('6#after_change', 'r1v1_controller', 'get'), ('7#after_change', 'r1v2_controller', 'get'), ('12#change', 'r2v1_controller', 'get')])
uniq_events.remove(('7#after_change', 'r1v2_controller', 'get'))
event_data = {}
for ev in uniq_events:
comp = getattr(step_2, ev[1])
if ev[2] is None:
ename = ev[0]
raise GetterNotDefined('{ctype} has no getter associated with event "on_{ename}"'
.format(ctype=type(comp), ename=ename[ename.find('#') + 1:]))
getter = getattr(comp, ev[2])
event_data[ev[0]] = getter()
event_data['7#after_change'] = step_2.r1v2_controller._get(
msgpack.unpackb(bytes(args[0]['data']), encoding='utf8')
)
user_args = []
user_args.append(event_data['7#after_change'])
step_2.r1v2_listener(*user_args)
user_args = []
user_args.append(event_data['6#after_change'])
user_args.append(event_data['7#after_change'])
user_args.append(event_data['12#change'])
step_2.r3v1_listener(*user_args)
foo = copy_current_request_context(wrapuser)
eventlet.spawn(foo)
@socketio.on('12#change')
def _(*args):
def wrapuser():
uniq_events = set()
uniq_events.update([('12#change', 'r2v1_controller', 'get')])
uniq_events.update([('6#after_change', 'r1v1_controller', 'get'), ('7#after_change', 'r1v2_controller', 'get'), ('12#change', 'r2v1_controller', 'get')])
uniq_events.remove(('12#change', 'r2v1_controller', 'get'))
event_data = {}
for ev in uniq_events:
comp = getattr(step_2, ev[1])
if ev[2] is None:
ename = ev[0]
raise GetterNotDefined('{ctype} has no getter associated with event "on_{ename}"'
.format(ctype=type(comp), ename=ename[ename.find('#') + 1:]))
getter = getattr(comp, ev[2])
event_data[ev[0]] = getter()
event_data['12#change'] = step_2.r2v1_controller._get(
msgpack.unpackb(bytes(args[0]['data']), encoding='utf8')
)
user_args = []
user_args.append(event_data['12#change'])
step_2.r2v1_listener(*user_args)
user_args = []
user_args.append(event_data['6#after_change'])
user_args.append(event_data['7#after_change'])
user_args.append(event_data['12#change'])
step_2.r3v1_listener(*user_args)
foo = copy_current_request_context(wrapuser)
eventlet.spawn(foo)
@socketio.on('6#after_change')
def _(*args):
def wrapuser():
uniq_events = set()
uniq_events.update([('6#after_change', 'r1v1_controller', 'get'), ('7#after_change', 'r1v2_controller', 'get'), ('12#change', 'r2v1_controller', 'get')])
uniq_events.remove(('6#after_change', 'r1v1_controller', 'get'))
event_data = {}
for ev in uniq_events:
comp = getattr(step_2, ev[1])
if ev[2] is None:
ename = ev[0]
raise GetterNotDefined('{ctype} has no getter associated with event "on_{ename}"'
.format(ctype=type(comp), ename=ename[ename.find('#') + 1:]))
getter = getattr(comp, ev[2])
event_data[ev[0]] = getter()
event_data['6#after_change'] = step_2.r1v1_controller._get(
msgpack.unpackb(bytes(args[0]['data']), encoding='utf8')
)
user_args = []
user_args.append(event_data['6#after_change'])
user_args.append(event_data['7#after_change'])
user_args.append(event_data['12#change'])
step_2.r3v1_listener(*user_args)
foo = copy_current_request_context(wrapuser)
eventlet.spawn(foo)
@socketio.on('9#select')
def _(*args):
def wrapuser():
uniq_events = set()
uniq_events.update([('9#select', 'r2v1', 'get')])
uniq_events.remove(('9#select', 'r2v1', 'get'))
event_data = {}
for ev in uniq_events:
comp = getattr(step_2, ev[1])
if ev[2] is None:
ename = ev[0]
raise GetterNotDefined('{ctype} has no getter associated with event "on_{ename}"'
.format(ctype=type(comp), ename=ename[ename.find('#') + 1:]))
getter = getattr(comp, ev[2])
event_data[ev[0]] = getter()
event_data['9#select'] = step_2.r2v1._get(
msgpack.unpackb(bytes(args[0]['data']), encoding='utf8')
)
user_args = []
user_args.append(event_data['9#select'])
step_2.r3v2_listener(*user_args)
foo = copy_current_request_context(wrapuser)
eventlet.spawn(foo)
@click.command()
@click.option('--host', '-h', default='0.0.0.0', help='Host IP')
@click.option('--port', '-p', default=9991, help='port number')
def main(host, port):
scheds = []
sched = Scheduler(1,
step_2.page_event)
scheds.append(sched)
for sched in scheds:
sched.start()
socketio.run(app, host=host, port=port, use_reloader=False)
for sched in scheds:
sched.stop()
if __name__ == '__main__':
main()
| mit |
woodscn/scipy | scipy/signal/tests/test_max_len_seq.py | 117 | 3262 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (TestCase, assert_raises, run_module_suite,
assert_allclose, assert_array_equal)
from numpy.fft import fft, ifft
from scipy.signal import max_len_seq
class TestMLS(TestCase):
def test_mls_inputs(self):
# can't all be zero state
assert_raises(ValueError, max_len_seq,
10, state=np.zeros(10))
# wrong size state
assert_raises(ValueError, max_len_seq, 10,
state=np.ones(3))
# wrong length
assert_raises(ValueError, max_len_seq, 10, length=-1)
assert_array_equal(max_len_seq(10, length=0)[0], [])
# unknown taps
assert_raises(ValueError, max_len_seq, 64)
# bad taps
assert_raises(ValueError, max_len_seq, 10, taps=[-1, 1])
def test_mls_output(self):
# define some alternate working taps
alt_taps = {2: [1], 3: [2], 4: [3], 5: [4, 3, 2], 6: [5, 4, 1], 7: [4],
8: [7, 5, 3]}
# assume the other bit levels work, too slow to test higher orders...
for nbits in range(2, 8):
for state in [None, np.round(np.random.rand(nbits))]:
for taps in [None, alt_taps[nbits]]:
if state is not None and np.all(state == 0):
state[0] = 1 # they can't all be zero
orig_m = max_len_seq(nbits, state=state,
taps=taps)[0]
m = 2. * orig_m - 1. # convert to +/- 1 representation
                    # First, make sure we got all 1's or -1's
err_msg = "mls had non binary terms"
assert_array_equal(np.abs(m), np.ones_like(m),
err_msg=err_msg)
# Test via circular cross-correlation, which is just mult.
# in the frequency domain with one signal conjugated
tester = np.real(ifft(fft(m) * np.conj(fft(m))))
out_len = 2**nbits - 1
                    # impulse amplitude == out_len
err_msg = "mls impulse has incorrect value"
assert_allclose(tester[0], out_len, err_msg=err_msg)
# steady-state is -1
err_msg = "mls steady-state has incorrect value"
assert_allclose(tester[1:], -1 * np.ones(out_len - 1),
err_msg=err_msg)
# let's do the split thing using a couple options
for n in (1, 2**(nbits - 1)):
m1, s1 = max_len_seq(nbits, state=state, taps=taps,
length=n)
m2, s2 = max_len_seq(nbits, state=s1, taps=taps,
length=1)
m3, s3 = max_len_seq(nbits, state=s2, taps=taps,
length=out_len - n - 1)
new_m = np.concatenate((m1, m2, m3))
assert_array_equal(orig_m, new_m)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
numerigraphe/odoo | addons/hr_timesheet_sheet/__init__.py | 434 | 1127 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet_sheet
import wizard
import report
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rgerkin/neuroConstruct | lib/jython/Lib/_threading_local.py | 241 | 7456 | """Thread-local objects.
(Note that this module provides a Python version of the threading.local
class. Depending on the version of Python you're using, there may be a
faster one available. You should always import the `local` class from
`threading`.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = mydata.__dict__.items()
... items.sort()
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... initialized = False
... def __init__(self, **kw):
... if self.initialized:
... raise SystemError('__init__ called too many times')
... self.initialized = True
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red'), ('initialized', True)], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
__all__ = ["local"]
# We need to use objects from the threading module, but the threading
# module may also want to use our `local` class, if support for locals
# isn't compiled in to the `thread` module. This creates potential problems
# with circular imports. For that reason, we don't import `threading`
# until the bottom of this file (a hack sufficient to worm around the
# potential problems). Note that almost all platforms do have support for
# locals in the `thread` module, and there is no circular import problem
# then, so problems introduced by fiddling the order of imports here won't
# manifest on most boxes.
class _localbase(object):
__slots__ = '_local__key', '_local__args', '_local__lock'
def __new__(cls, *args, **kw):
self = object.__new__(cls)
key = '_local__key', 'thread.local.' + str(id(self))
object.__setattr__(self, '_local__key', key)
object.__setattr__(self, '_local__args', (args, kw))
object.__setattr__(self, '_local__lock', RLock())
if (args or kw) and (cls.__init__ is object.__init__):
raise TypeError("Initialization arguments are not supported")
# We need to create the thread dict in anticipation of
# __init__ being called, to make sure we don't call it
# again ourselves.
dict = object.__getattribute__(self, '__dict__')
current_thread().__dict__[key] = dict
return self
def _patch(self):
key = object.__getattribute__(self, '_local__key')
d = current_thread().__dict__.get(key)
if d is None:
d = {}
current_thread().__dict__[key] = d
object.__setattr__(self, '__dict__', d)
# we have a new instance dict, so call out __init__ if we have
# one
cls = type(self)
if cls.__init__ is not object.__init__:
args, kw = object.__getattribute__(self, '_local__args')
cls.__init__(self, *args, **kw)
else:
object.__setattr__(self, '__dict__', d)
class local(_localbase):
def __getattribute__(self, name):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__getattribute__(self, name)
finally:
lock.release()
def __setattr__(self, name, value):
if name == '__dict__':
raise AttributeError(
"%r object attribute '__dict__' is read-only"
% self.__class__.__name__)
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__setattr__(self, name, value)
finally:
lock.release()
def __delattr__(self, name):
if name == '__dict__':
raise AttributeError(
"%r object attribute '__dict__' is read-only"
% self.__class__.__name__)
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__delattr__(self, name)
finally:
lock.release()
def __del__(self):
import threading
key = object.__getattribute__(self, '_local__key')
try:
# We use the non-locking API since we might already hold the lock
# (__del__ can be called at any point by the cyclic GC).
threads = threading._enumerate()
except:
# If enumerating the current threads fails, as it seems to do
# during shutdown, we'll skip cleanup under the assumption
# that there is nothing to clean up.
return
for thread in threads:
try:
__dict__ = thread.__dict__
except AttributeError:
# Thread is dying, rest in peace.
continue
if key in __dict__:
try:
del __dict__[key]
except KeyError:
pass # didn't have anything in this thread
from threading import current_thread, RLock
| gpl-2.0 |
ravindrapanda/tensorflow | tensorflow/python/platform/tf_logging.py | 9 | 8684 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logging utilities."""
# pylint: disable=unused-import
# pylint: disable=g-bad-import-order
# pylint: disable=invalid-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
import threading
import six
from tensorflow.python.util.all_util import remove_undocumented
from tensorflow.python.util.tf_export import tf_export
# Don't use this directly. Use _get_logger() instead.
_logger = None
_logger_lock = threading.Lock()
def _get_logger():
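  """Return the scoped 'tensorflow' logger, creating and configuring it on
  first use; double-checked locking ensures concurrent first calls attach
  the stream handler only once."""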
global _logger
# Use double-checked locking to avoid taking lock unnecessarily.
if _logger:
return _logger
_logger_lock.acquire()
try:
if _logger:
return _logger
# Scope the TensorFlow logger to not conflict with users' loggers.
logger = _logging.getLogger('tensorflow')
# Don't further configure the TensorFlow logger if the root logger is
# already configured. This prevents double logging in those cases.
if not _logging.getLogger().handlers:
# Determine whether we are in an interactive environment
_interactive = False
try:
# This is only defined in interactive shells.
if _sys.ps1: _interactive = True
except AttributeError:
# Even now, we may be in an interactive shell with `python -i`.
_interactive = _sys.flags.interactive
# If we are in an interactive environment (like Jupyter), set loglevel
# to INFO and pipe the output to stdout.
if _interactive:
logger.setLevel(INFO)
_logging_target = _sys.stdout
else:
_logging_target = _sys.stderr
# Add the output handler.
_handler = _logging.StreamHandler(_logging_target)
_handler.setFormatter(_logging.Formatter(_logging.BASIC_FORMAT, None))
logger.addHandler(_handler)
_logger = logger
return _logger
finally:
_logger_lock.release()
@tf_export('logging.log')
def log(level, msg, *args, **kwargs):
_get_logger().log(level, msg, *args, **kwargs)
@tf_export('logging.debug')
def debug(msg, *args, **kwargs):
_get_logger().debug(msg, *args, **kwargs)
@tf_export('logging.error')
def error(msg, *args, **kwargs):
_get_logger().error(msg, *args, **kwargs)
@tf_export('logging.fatal')
def fatal(msg, *args, **kwargs):
_get_logger().fatal(msg, *args, **kwargs)
@tf_export('logging.info')
def info(msg, *args, **kwargs):
_get_logger().info(msg, *args, **kwargs)
@tf_export('logging.warn')
def warn(msg, *args, **kwargs):
_get_logger().warn(msg, *args, **kwargs)
@tf_export('logging.warning')
def warning(msg, *args, **kwargs):
_get_logger().warning(msg, *args, **kwargs)
_level_names = {
FATAL: 'FATAL',
ERROR: 'ERROR',
WARN: 'WARN',
INFO: 'INFO',
DEBUG: 'DEBUG',
}
# Mask to convert integer thread ids to unsigned quantities for logging
# purposes
_THREAD_ID_MASK = 2 * _sys.maxsize + 1
_log_prefix = None # later set to google2_log_prefix
# Counter to keep track of number of log entries per token.
_log_counter_per_token = {}
@tf_export('logging.TaskLevelStatusMessage')
def TaskLevelStatusMessage(msg):
error(msg)
@tf_export('logging.flush')
def flush():
raise NotImplementedError()
# Code below is taken from pyglib/logging
@tf_export('logging.vlog')
def vlog(level, msg, *args, **kwargs):
_get_logger().log(level, msg, *args, **kwargs)
def _GetNextLogCountPerToken(token):
"""Wrapper for _log_counter_per_token.
Args:
token: The token for which to look up the count.
Returns:
The number of times this function has been called with
*token* as an argument (starting at 0)
"""
global _log_counter_per_token # pylint: disable=global-variable-not-assigned
_log_counter_per_token[token] = 1 + _log_counter_per_token.get(token, -1)
return _log_counter_per_token[token]
@tf_export('logging.log_every_n')
def log_every_n(level, msg, n, *args):
"""Log 'msg % args' at level 'level' once per 'n' times.
Logs the 1st call, (N+1)st call, (2N+1)st call, etc.
Not threadsafe.
Args:
level: The level at which to log.
msg: The message to be logged.
n: The number of times this should be called before it is logged.
*args: The args to be substituted into the msg.
"""
count = _GetNextLogCountPerToken(_GetFileAndLine())
log_if(level, msg, not (count % n), *args)
@tf_export('logging.log_first_n')
def log_first_n(level, msg, n, *args): # pylint: disable=g-bad-name
"""Log 'msg % args' at level 'level' only first 'n' times.
Not threadsafe.
Args:
level: The level at which to log.
msg: The message to be logged.
n: The number of times this should be called before it is logged.
*args: The args to be substituted into the msg.
"""
count = _GetNextLogCountPerToken(_GetFileAndLine())
log_if(level, msg, count < n, *args)
@tf_export('logging.log_if')
def log_if(level, msg, condition, *args):
"""Log 'msg % args' at level 'level' only if condition is fulfilled."""
if condition:
vlog(level, msg, *args)
def _GetFileAndLine():
"""Returns (filename, linenumber) for the stack frame."""
# Use sys._getframe(). This avoids creating a traceback object.
# pylint: disable=protected-access
f = _sys._getframe()
# pylint: enable=protected-access
our_file = f.f_code.co_filename
f = f.f_back
while f:
code = f.f_code
if code.co_filename != our_file:
return (code.co_filename, f.f_lineno)
f = f.f_back
return ('<unknown>', 0)
def google2_log_prefix(level, timestamp=None, file_and_line=None):
"""Assemble a logline prefix using the google2 format."""
# pylint: disable=global-variable-not-assigned
global _level_names
# pylint: enable=global-variable-not-assigned
# Record current time
now = timestamp or _time.time()
now_tuple = _time.localtime(now)
now_microsecond = int(1e6 * (now % 1.0))
(filename, line) = file_and_line or _GetFileAndLine()
basename = _os.path.basename(filename)
# Severity string
severity = 'I'
if level in _level_names:
severity = _level_names[level][0]
s = '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] ' % (
severity,
now_tuple[1], # month
now_tuple[2], # day
now_tuple[3], # hour
now_tuple[4], # min
now_tuple[5], # sec
now_microsecond,
_get_thread_id(),
basename,
line)
return s
@tf_export('logging.get_verbosity')
def get_verbosity():
"""Return how much logging output will be produced."""
return _get_logger().getEffectiveLevel()
@tf_export('logging.set_verbosity')
def set_verbosity(v):
"""Sets the threshold for what messages will be logged."""
_get_logger().setLevel(v)
def _get_thread_id():
"""Get id of current thread, suitable for logging as an unsigned quantity."""
# pylint: disable=protected-access
thread_id = six.moves._thread.get_ident()
# pylint:enable=protected-access
return thread_id & _THREAD_ID_MASK
_log_prefix = google2_log_prefix
# Controls which methods from pyglib.logging are available within the project.
# Do not add methods here without also adding to platform/tf_logging.py.
_allowed_symbols = [
'DEBUG',
'ERROR',
'FATAL',
'INFO',
'TaskLevelStatusMessage',
'WARN',
'debug',
'error',
'fatal',
'flush',
'get_verbosity',
'info',
'log',
'log_if',
'log_every_n',
'log_first_n',
'set_verbosity',
'vlog',
'warn',
'warning',
]
tf_export('logging.DEBUG').export_constant(__name__, 'DEBUG')
tf_export('logging.ERROR').export_constant(__name__, 'ERROR')
tf_export('logging.FATAL').export_constant(__name__, 'FATAL')
tf_export('logging.INFO').export_constant(__name__, 'INFO')
tf_export('logging.WARN').export_constant(__name__, 'WARN')
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
karec/extrapypi | extrapypi/models/package.py | 1 | 1284 | import datetime
from pkg_resources import parse_version
from extrapypi.extensions import db
package_maintainers = db.Table(
'package_maintainers',
db.Column('package_id', db.Integer, db.ForeignKey('package.id', name='_fk_package_maintainers_package')),
db.Column('user_id', db.Integer, db.ForeignKey('user.id', name='_fk_package_maintainers_user')),
)
class Package(db.Model):
"""Represent a simple package
"""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255, convert_unicode=True), nullable=False, unique=True)
summary = db.Column(db.String(255, convert_unicode=True))
created_at = db.Column(db.DateTime, default=datetime.datetime.utcnow)
updated_at = db.Column(db.DateTime, onupdate=datetime.datetime.utcnow)
maintainers = db.relationship('User', secondary=package_maintainers, lazy='dynamic', backref='packages')
@property
def sorted_releases(self):
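        """Return this package's releases, newest first per PEP 440."""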
releases = self.releases.all()
return sorted(
releases,
key=lambda r: parse_version(r.version),
reverse=True
)
@property
def latest_release(self):
return next(iter(self.sorted_releases), None)
def __repr__(self):
return "<Package {0.name}>".format(self)
| mit |
edoburu/django-parler | parler/tests/test_model_relations.py | 2 | 2255 | from .utils import AppTestCase
from .testapp.models import TranslationRelated, TranslationRelatedRelation
from .testapp.models import ForeignKeyTranslationModel, RegularModel
class TranslationRelationTestCase(AppTestCase):
def test_related_objects_in_translation_model(self):
instance = TranslationRelated()
instance.set_current_language(self.other_lang1)
# This should not raise errors
instance.title = 'Title Lang1'
instance.save()
instance.set_current_language(self.other_lang2)
# This should not raise errors
instance.title = 'Title Lang2'
instance.save()
translation1 = instance.get_translation(self.other_lang1)
translation1.translation_relations.create(name='relation 1.1')
translation1.translation_relations.create(name='relation 1.2')
translation2 = instance.get_translation(self.other_lang2)
translation2.translation_relations.create(name='relation 2.1')
total_related_objects = TranslationRelatedRelation.objects.filter(
translation__master=instance
).count()
lang1_related_object = TranslationRelatedRelation.objects.filter(
translation__language_code=self.other_lang1,
translation__master=instance,
).count()
lang2_related_objects = TranslationRelatedRelation.objects.filter(
translation__language_code=self.other_lang2,
translation__master=instance,
).count()
self.assertEqual(3, total_related_objects)
self.assertEqual(2, lang1_related_object)
self.assertEqual(1, lang2_related_objects)
def test_translation_is_modified(self):
r1 = RegularModel.objects.create(original_field='r1')
r2 = RegularModel.objects.create(original_field='r2')
instance = ForeignKeyTranslationModel.objects.create(
translated_foreign=r1,
shared='shared',
)
translation = instance.get_translation(instance.language_code)
self.assertFalse(translation.is_modified)
instance.translated_foreign = r2
translation = instance.get_translation(instance.language_code)
self.assertTrue(translation.is_modified)
| apache-2.0 |
ptoraskar/django | django/contrib/sites/migrations/0001_initial.py | 276 | 1096 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.sites.models
from django.contrib.sites.models import _simple_domain_name_validator
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
name='Site',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('domain', models.CharField(max_length=100, verbose_name='domain name', validators=[_simple_domain_name_validator])),
('name', models.CharField(max_length=50, verbose_name='display name')),
],
options={
'ordering': ('domain',),
'db_table': 'django_site',
'verbose_name': 'site',
'verbose_name_plural': 'sites',
},
bases=(models.Model,),
managers=[
('objects', django.contrib.sites.models.SiteManager()),
],
),
]
| bsd-3-clause |
jetty-project/FrameworkBenchmarks | frameworks/Python/blacksheep/app.py | 4 | 4534 | import os
import ujson
import asyncpg
from random import randint
from multiprocessing import cpu_count
from blacksheep.server import Application, ServerOptions
from blacksheep import Response, Headers, Header, Content
from jinja2 import Environment, PackageLoader, select_autoescape
json_dumps = ujson.dumps
_is_travis = os.environ.get('TRAVIS') == 'true'
workers = cpu_count()
if _is_travis:
workers = 2
db_pool = None
async def configure_db(app):
global db_pool
db_pool = await asyncpg.create_pool(
user=os.getenv('PGUSER', 'benchmarkdbuser'),
password=os.getenv('PGPASS', 'benchmarkdbpass'),
database='hello_world',
host='tfb-database',
port=5432
)
jinja_env = Environment(
loader=PackageLoader('app', 'templates'),
autoescape=select_autoescape(['html', 'xml'])
)
fortune_template = jinja_env.get_template('fortune.html')
app = Application(options=ServerOptions(host='', port=8080, processes_count=workers))
app.on_start += configure_db
def get_num_queries(request):
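    """Parse the 'queries' query-string parameter, clamped to 1..500."""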
try:
value = request.query.get('queries')
if value is None:
return 1
query_count = int(value[0])
except (KeyError, IndexError, ValueError):
return 1
if query_count < 1:
return 1
if query_count > 500:
return 500
return query_count
@app.route('/json')
async def json_test(request):
"""Test type 1: JSON Serialization"""
return Response(200, content=Content(b'application/json; charset=utf-8',
json_dumps({'message': 'Hello, world!'}).encode('utf-8')))
@app.route('/db')
async def single_db_query_test(request):
"""Test type 2: Single Database Query"""
row_id = randint(1, 10000)
connection = await db_pool.acquire()
try:
number = await connection.fetchval('SELECT "randomnumber" FROM "world" WHERE id = $1', row_id)
world = {'id': row_id, 'randomNumber': number}
finally:
await db_pool.release(connection)
return Response(200, content=Content(b'application/json; charset=utf-8',
json_dumps(world).encode('utf-8')))
@app.route('/queries')
async def multiple_db_queries_test(request):
"""Test type 3: Multiple Database Queries"""
num_queries = get_num_queries(request)
row_ids = [randint(1, 10000) for _ in range(num_queries)]
worlds = []
connection = await db_pool.acquire()
try:
statement = await connection.prepare('SELECT "randomnumber" FROM "world" WHERE id = $1')
for row_id in row_ids:
number = await statement.fetchval(row_id)
worlds.append({'id': row_id, 'randomNumber': number})
finally:
await db_pool.release(connection)
return Response(200, content=Content(b'application/json; charset=utf-8',
json_dumps(worlds).encode('utf-8')))
@app.route('/fortunes')
async def fortunes_test(request):
"""Test type 4: Fortunes"""
connection = await db_pool.acquire()
try:
fortunes = await connection.fetch('SELECT * FROM Fortune')
finally:
await db_pool.release(connection)
fortunes.append([0, 'Additional fortune added at request time.'])
fortunes.sort(key=lambda x: x[1])
return Response(200, Headers([
Header(b'Cache-Control', b'no-cache')
]), content=Content(b'text/html; charset=utf-8', fortune_template.render(fortunes=fortunes).encode('utf8')))
@app.route('/updates')
async def db_updates_test(request):
"""Test type 5: Database Updates"""
num_queries = get_num_queries(request)
updates = [(randint(1, 10000), randint(1, 10000)) for _ in range(num_queries)]
worlds = [{'id': row_id, 'randomNumber': number} for row_id, number in updates]
connection = await db_pool.acquire()
try:
statement = await connection.prepare('SELECT "randomnumber" FROM "world" WHERE id = $1')
for row_id, _ in updates:
await statement.fetchval(row_id)
await connection.executemany('UPDATE "world" SET "randomnumber"=$1 WHERE id=$2', updates)
finally:
await db_pool.release(connection)
return Response(200, content=Content(b'application/json',
json_dumps(worlds).encode('utf-8')))
@app.route('/plaintext')
async def plaintext_test(request):
"""Test type 6: Plaintext"""
return Response(200, content=Content(b'text/plain', b'Hello, World!'))
app.start()
| bsd-3-clause |
Sungup/Undine | undine/api/database/sqlite.py | 1 | 1434 | from undine.api.database.base_client import BaseClient
from undine.database.sqlite import SQLiteConnector
class SQLiteClient(BaseClient):
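    """SQLite-backed task client: each insert helper binds a dict of named
    parameters into the matching DML statement from _QUERY."""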
_QUERY = {
'task': '''
INSERT INTO task(tid, name, cid, iid, wid, reportable, state)
VALUES(:tid, :name, :cid, :iid, :wid, :reportable, 'R')
''',
'worker': '''
INSERT INTO worker(wid, name, command, arguments,
worker_dir, file_input)
VALUES(:wid, :name, :command,
:arguments, :worker_dir, :file_input)
''',
'input': '''
INSERT INTO input(iid, name, items)
VALUES(:iid, :name, :items)
''',
'config': '''
INSERT INTO config(cid, name, config)
VALUES (:cid, :name, :config)
'''
}
def __init__(self, config):
BaseClient.__init__(self)
self._sqlite = SQLiteConnector(config)
#
# Protected inherited methods
#
def _insert_worker(self, worker):
self._sqlite.execute_single_dml(self._QUERY['worker'], **worker)
def _insert_input(self, inputs):
self._sqlite.execute_single_dml(self._QUERY['input'], **inputs)
def _insert_config(self, config):
self._sqlite.execute_single_dml(self._QUERY['config'], **config)
def _insert_task(self, task):
self._sqlite.execute_single_dml(self._QUERY['task'], **task)
| mit |
fetzerch/xbmc | lib/freetype/src/tools/docmaker/formatter.py | 515 | 4962 | # Formatter (c) 2002, 2004, 2007, 2008 David Turner <david@freetype.org>
#
from sources import *
from content import *
from utils import *
# This is the base Formatter class. Its purpose is to convert
# a content processor's data into specific documents (i.e., table of
# contents, global index, and individual API reference indices).
#
# You need to sub-class it to output anything sensible. For example,
# the file tohtml.py contains the definition of the HtmlFormatter sub-class
# used to output -- you guessed it -- HTML.
#
class Formatter:
def __init__( self, processor ):
self.processor = processor
self.identifiers = {}
self.chapters = processor.chapters
self.sections = processor.sections.values()
self.block_index = []
# store all blocks in a dictionary
self.blocks = []
for section in self.sections:
for block in section.blocks.values():
self.add_identifier( block.name, block )
# add enumeration values to the index, since this is useful
for markup in block.markups:
if markup.tag == 'values':
for field in markup.fields:
self.add_identifier( field.name, block )
self.block_index = self.identifiers.keys()
self.block_index.sort( index_sort )
def add_identifier( self, name, block ):
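        """Register block under name, warning about duplicate definitions."""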
if self.identifiers.has_key( name ):
# duplicate name!
sys.stderr.write( \
"WARNING: duplicate definition for '" + name + "' in " + \
block.location() + ", previous definition in " + \
self.identifiers[name].location() + "\n" )
else:
self.identifiers[name] = block
#
# Formatting the table of contents
#
def toc_enter( self ):
pass
def toc_chapter_enter( self, chapter ):
pass
def toc_section_enter( self, section ):
pass
def toc_section_exit( self, section ):
pass
def toc_chapter_exit( self, chapter ):
pass
def toc_index( self, index_filename ):
pass
def toc_exit( self ):
pass
def toc_dump( self, toc_filename = None, index_filename = None ):
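        """Walk all chapters and sections, emitting table-of-contents
        events, optionally redirecting output to toc_filename."""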
output = None
if toc_filename:
output = open_output( toc_filename )
self.toc_enter()
for chap in self.processor.chapters:
self.toc_chapter_enter( chap )
for section in chap.sections:
self.toc_section_enter( section )
self.toc_section_exit( section )
self.toc_chapter_exit( chap )
self.toc_index( index_filename )
self.toc_exit()
if output:
close_output( output )
#
# Formatting the index
#
def index_enter( self ):
pass
def index_name_enter( self, name ):
pass
def index_name_exit( self, name ):
pass
def index_exit( self ):
pass
def index_dump( self, index_filename = None ):
output = None
if index_filename:
output = open_output( index_filename )
self.index_enter()
for name in self.block_index:
self.index_name_enter( name )
self.index_name_exit( name )
self.index_exit()
if output:
close_output( output )
#
# Formatting a section
#
def section_enter( self, section ):
pass
def block_enter( self, block ):
pass
def markup_enter( self, markup, block = None ):
pass
def field_enter( self, field, markup = None, block = None ):
pass
def field_exit( self, field, markup = None, block = None ):
pass
def markup_exit( self, markup, block = None ):
pass
def block_exit( self, block ):
pass
def section_exit( self, section ):
pass
def section_dump( self, section, section_filename = None ):
output = None
if section_filename:
output = open_output( section_filename )
self.section_enter( section )
for name in section.block_names:
block = self.identifiers[name]
self.block_enter( block )
for markup in block.markups[1:]: # always ignore first markup!
self.markup_enter( markup, block )
for field in markup.fields:
self.field_enter( field, markup, block )
self.field_exit( field, markup, block )
self.markup_exit( markup, block )
self.block_exit( block )
self.section_exit( section )
if output:
close_output( output )
def section_dump_all( self ):
for section in self.sections:
self.section_dump( section )
# eof
| gpl-2.0 |
pysb/pysb | pysb/examples/run_earm_hpp.py | 5 | 2377 | """ Run the Extrinsic Apoptosis Reaction Model (EARM) using BioNetGen's
Hybrid-Particle Population (HPP) algorithm.
NFsim provides stochastic simulation without reaction network generation,
allowing simulation of models with large (or infinite) reaction networks by
keeping track of species counts. However, it can fail when the number of
instances of a species gets too large (typically >200000). HPP circumvents
this problem by allowing the user to define species with large instance
counts as populations rather than NFsim particles.
This example runs the EARM 1.0 model with HPP, which fails to run on NFsim
with the default settings due to large initial concentration counts of
several species. By assigning population maps to these species, we can run
the simulation.
Reference: Hogg et al., PLoS Comput Biol 2014
https://doi.org/10.1371/journal.pcbi.1003544
"""
from pysb.examples.earm_1_0 import model
from pysb.simulator import BngSimulator
from pysb.simulator.bng import PopulationMap
from pysb import Parameter
import matplotlib.pyplot as plt
import numpy as np
def plot_mean_min_max(name, title=None):
x = np.array([tr[:][name] for tr in trajectories]).T
if not title:
title = name
plt.figure(title)
plt.plot(tout.T, x, '0.5', lw=2, alpha=0.25) # individual trajectories
plt.plot(tout[0], x.mean(1), 'k--', lw=3, label="Mean")
plt.plot(tout[0], x.min(1), 'b--', lw=3, label="Minimum")
plt.plot(tout[0], x.max(1), 'r--', lw=3, label="Maximum")
plt.legend(loc=0)
plt.xlabel('Time')
plt.ylabel('Population of %s' % name)
PARP, CPARP, Mito, mCytoC = [model.monomers[x] for x in
['PARP', 'CPARP', 'Mito', 'mCytoC']]
klump = Parameter('klump', 10000, _export=False)
model.add_component(klump)
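# Lump the four high-copy-number species into populations of size klump so
# that HPP simulates them as populations rather than individual NFsim
# particles, avoiding the particle-count limit noted above.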
population_maps = [
PopulationMap(PARP(b=None), klump),
PopulationMap(CPARP(b=None), klump),
PopulationMap(Mito(b=None), klump),
PopulationMap(mCytoC(b=None), klump)
]
sim = BngSimulator(model, tspan=np.linspace(0, 20000, 101))
simres = sim.run(n_runs=20, method='nf', population_maps=population_maps)
trajectories = simres.all
tout = simres.tout
plot_mean_min_max('Bid_unbound')
plot_mean_min_max('PARP_unbound')
plot_mean_min_max('mSmac_unbound')
plot_mean_min_max('tBid_total')
plot_mean_min_max('CPARP_total')
plot_mean_min_max('cSmac_total')
plt.show()
| bsd-2-clause |
kaiueo/octs | octs/commands.py | 23 | 4250 | # -*- coding: utf-8 -*-
"""Click commands."""
import os
from glob import glob
from subprocess import call
import click
from flask import current_app
from flask.cli import with_appcontext
from werkzeug.exceptions import MethodNotAllowed, NotFound
HERE = os.path.abspath(os.path.dirname(__file__))
PROJECT_ROOT = os.path.join(HERE, os.pardir)
TEST_PATH = os.path.join(PROJECT_ROOT, 'tests')
@click.command()
def test():
"""Run the tests."""
import pytest
rv = pytest.main([TEST_PATH, '--verbose'])
exit(rv)
@click.command()
@click.option('-f', '--fix-imports', default=False, is_flag=True,
help='Fix imports using isort, before linting')
def lint(fix_imports):
"""Lint and check code style with flake8 and isort."""
skip = ['requirements']
root_files = glob('*.py')
root_directories = [
name for name in next(os.walk('.'))[1] if not name.startswith('.')]
files_and_directories = [
arg for arg in root_files + root_directories if arg not in skip]
def execute_tool(description, *args):
"""Execute a checking tool with its arguments."""
command_line = list(args) + files_and_directories
click.echo('{}: {}'.format(description, ' '.join(command_line)))
rv = call(command_line)
if rv != 0:
exit(rv)
if fix_imports:
execute_tool('Fixing import order', 'isort', '-rc')
execute_tool('Checking code style', 'flake8')
@click.command()
def clean():
"""Remove *.pyc and *.pyo files recursively starting at current directory.
Borrowed from Flask-Script, converted to use Click.
"""
for dirpath, dirnames, filenames in os.walk('.'):
for filename in filenames:
if filename.endswith('.pyc') or filename.endswith('.pyo'):
full_pathname = os.path.join(dirpath, filename)
click.echo('Removing {}'.format(full_pathname))
os.remove(full_pathname)
@click.command()
@click.option('--url', default=None,
help='Url to test (ex. /static/image.png)')
@click.option('--order', default='rule',
help='Property on Rule to order by (default: rule)')
@with_appcontext
def urls(url, order):
"""Display all of the url matching routes for the project.
Borrowed from Flask-Script, converted to use Click.
"""
rows = []
column_length = 0
column_headers = ('Rule', 'Endpoint', 'Arguments')
if url:
try:
rule, arguments = (
current_app.url_map
.bind('localhost')
.match(url, return_rule=True))
rows.append((rule.rule, rule.endpoint, arguments))
column_length = 3
except (NotFound, MethodNotAllowed) as e:
rows.append(('<{}>'.format(e), None, None))
column_length = 1
else:
rules = sorted(
current_app.url_map.iter_rules(),
key=lambda rule: getattr(rule, order))
for rule in rules:
rows.append((rule.rule, rule.endpoint, None))
column_length = 2
str_template = ''
table_width = 0
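    # Build the row template column by column, padding each column to its
    # widest cell (with a small minimum width so the headers always fit).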
if column_length >= 1:
max_rule_length = max(len(r[0]) for r in rows)
max_rule_length = max_rule_length if max_rule_length > 4 else 4
str_template += '{:' + str(max_rule_length) + '}'
table_width += max_rule_length
if column_length >= 2:
max_endpoint_length = max(len(str(r[1])) for r in rows)
# max_endpoint_length = max(rows, key=len)
max_endpoint_length = (
max_endpoint_length if max_endpoint_length > 8 else 8)
str_template += ' {:' + str(max_endpoint_length) + '}'
table_width += 2 + max_endpoint_length
if column_length >= 3:
max_arguments_length = max(len(str(r[2])) for r in rows)
max_arguments_length = (
max_arguments_length if max_arguments_length > 9 else 9)
str_template += ' {:' + str(max_arguments_length) + '}'
table_width += 2 + max_arguments_length
click.echo(str_template.format(*column_headers[:column_length]))
click.echo('-' * table_width)
for row in rows:
click.echo(str_template.format(*row[:column_length]))
| bsd-3-clause |
inflector/atomspace | scripts/rule-engine/extract-bc-trace.py | 4 | 2867 | #!/usr/bin/env python2
# Given a log file and an FCS handle, filter that log file to retain
# only the iterations leading to that FCS.
import sys
import re
#############
# Constants #
#############
# Usage message
usage = "Usage: " + sys.argv[0] + " FCSHANDLE LOGFILE"
# Useful regex
timestamp_re = r'\[\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}:\d{3}\]'
debug_re = r'\[DEBUG\]'
ure_re = r'\[URE\]'
iter_re = r'Iteration (\d+)'
iteration_re = r'({} )?{} {} {}'.format(timestamp_re, debug_re, ure_re, iter_re)
iteration_cre = re.compile(iteration_re)
select_andbit_re = r'Selected and-BIT for expansion:'
from_re = r'({} )?{} {} {}'.format(timestamp_re, debug_re, ure_re, select_andbit_re)
from_cre = re.compile(from_re)
expand_andbit_re = r'Expanded forward chainer strategy:'
to_re = r'({} )?{} {} {}'.format(timestamp_re, debug_re, ure_re, expand_andbit_re)
to_cre = re.compile(to_re)
select_bn_re = r'Selected BIT-node for expansion:'
inter_re = r'({} )?{} {} {}'.format(timestamp_re, debug_re, ure_re, select_bn_re)
inter_cre = re.compile(inter_re)
handle_re = r'\) ; (\[\d+\]\[\d+\])'
handle_cre = re.compile(handle_re)
########
# Main #
########
if len(sys.argv) != 3:
print usage
exit(1)
fcs_handle = sys.argv[1]
logfile = sys.argv[2]
# Build map from iteration to (from, to) pair
i2ft = dict()
src = ''
for l in open(logfile):
ls = l.rstrip()
# Extract iteration
m = iteration_cre.match(ls)
if m:
iteration = int(m.group(2))
continue
# Extract from/to fcs handle
m = from_cre.match(ls)
if m:
src = 'from'
continue
m = to_cre.match(ls)
if m:
src = 'to'
continue
m = inter_cre.match(ls)
if m:
src = ''
continue
m = handle_cre.match(ls)
if m:
if src == 'from':
from_handle = m.group(1)
continue
if src == 'to':
to_handle = m.group(1)
i2ft[iteration] = (from_handle, to_handle)
if fcs_handle in to_handle:
fcs_handle_iteration = iteration
break
src = '' # reset src to not overwrite i2ft
# before the end of the iteration
continue
# Filter i2ft to contain only traces to fcs_handle
i2ft_trace = dict()
iteration = fcs_handle_iteration
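# Walk backwards from the target FCS: an iteration's parent is the earlier
# iteration whose 'to' handle matches this iteration's 'from' handle.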
while iteration != 0:
i2ft_trace[iteration] = i2ft[iteration]
for i in range(iteration):
if (i in i2ft and i2ft[i][1] == i2ft[iteration][0]):
iteration = i
break
if iteration != i:
iteration = 0
# Display all iterations leading to the FCS
iteration = -1
for l in open(logfile):
ls = l.rstrip()
# Extract iteration
m = iteration_cre.match(ls)
if m:
iteration = int(m.group(2))
if iteration == 0 or iteration in i2ft_trace:
print ls
| agpl-3.0 |
smn/onadata | onadata/apps/api/tests/viewsets/test_abstract_viewset.py | 5 | 14971 | import json
import os
import re
import requests
from django.conf import settings
from django.contrib.auth.models import Permission
from django.test import TestCase
from django_digest.test import Client as DigestClient
from tempfile import NamedTemporaryFile
from django.contrib.auth.models import User
from django_digest.test import DigestAuth
from django.contrib.auth import authenticate
from httmock import urlmatch, HTTMock
from rest_framework.test import APIRequestFactory
from onadata.apps.api.models import OrganizationProfile
from onadata.apps.api.viewsets.metadata_viewset import MetaDataViewSet
from onadata.apps.api.viewsets.organization_profile_viewset import\
OrganizationProfileViewSet
from onadata.apps.api.viewsets.project_viewset import ProjectViewSet
from onadata.apps.main.models import UserProfile, MetaData
from onadata.apps.main import tests as main_tests
from onadata.apps.logger.models import Attachment
from onadata.apps.logger.models import Instance
from onadata.apps.logger.models import XForm
from onadata.apps.logger.models import Project
from onadata.libs.serializers.project_serializer import ProjectSerializer
from onadata.apps.logger.views import submission
@urlmatch(netloc=r'(.*\.)?enketo\.ona\.io$')
def enketo_mock(url, request):
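    """HTTMock stub that fakes a successful Enketo form-publish response."""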
response = requests.Response()
response.status_code = 201
response._content = \
'{\n "url": "https:\\/\\/dmfrm.enketo.org\\/webform",\n'\
' "code": "200"\n}'
return response
class TestAbstractViewSet(TestCase):
surveys = ['transport_2011-07-25_19-05-49',
'transport_2011-07-25_19-05-36',
'transport_2011-07-25_19-06-01',
'transport_2011-07-25_19-06-14']
main_directory = os.path.dirname(main_tests.__file__)
profile_data = {
'username': 'bob',
'email': 'bob@columbia.edu',
'password1': 'bobbob',
'password2': 'bobbob',
'first_name': 'Bob',
'last_name': 'erama',
'city': 'Bobville',
'country': 'US',
'organization': 'Bob Inc.',
'home_page': 'bob.com',
'twitter': 'boberama',
'name': u'Bob erama'
}
def setUp(self):
TestCase.setUp(self)
self.factory = APIRequestFactory()
self._login_user_and_profile()
self.maxDiff = None
def user_profile_data(self):
return {
'id': self.user.pk,
'url': 'http://testserver/api/v1/profiles/bob',
'username': u'bob',
'first_name': u'Bob',
'last_name': 'erama',
'email': u'bob@columbia.edu',
'city': u'Bobville',
'country': u'US',
'organization': u'Bob Inc.',
'website': u'bob.com',
'twitter': u'boberama',
'gravatar': self.user.profile.gravatar,
'require_auth': False,
'user': 'http://testserver/api/v1/users/bob',
'is_org': False,
'metadata': {},
'joined_on': self.user.date_joined,
'name': u'Bob erama'
}
def _set_api_permissions(self, user):
add_userprofile = Permission.objects.get(
content_type__app_label='main', content_type__model='userprofile',
codename='add_userprofile')
user.user_permissions.add(add_userprofile)
def _create_user_profile(self, extra_post_data={}):
self.profile_data = dict(
self.profile_data.items() + extra_post_data.items())
user, created = User.objects.get_or_create(
username=self.profile_data['username'],
first_name=self.profile_data['first_name'],
last_name=self.profile_data['last_name'],
email=self.profile_data['email'])
user.set_password(self.profile_data['password1'])
user.save()
new_profile, created = UserProfile.objects.get_or_create(
user=user, name=self.profile_data['first_name'],
city=self.profile_data['city'],
country=self.profile_data['country'],
organization=self.profile_data['organization'],
home_page=self.profile_data['home_page'],
twitter=self.profile_data['twitter'],
require_auth=False
)
return new_profile
def _login_user_and_profile(self, extra_post_data={}):
profile = self._create_user_profile(extra_post_data)
self.user = profile.user
self.assertTrue(
self.client.login(username=self.user.username,
password=self.profile_data['password1']))
self.extra = {
'HTTP_AUTHORIZATION': 'Token %s' % self.user.auth_token}
def _org_create(self):
view = OrganizationProfileViewSet.as_view({
'get': 'list',
'post': 'create'
})
request = self.factory.get('/', **self.extra)
response = view(request)
self.assertEqual(response.status_code, 200)
data = {
'org': u'denoinc',
'name': u'Dennis',
'city': u'Denoville',
'country': u'US',
'home_page': u'deno.com',
'twitter': u'denoinc',
'description': u'',
'address': u'',
'phonenumber': u'',
'require_auth': False,
}
request = self.factory.post(
'/', data=json.dumps(data),
content_type="application/json", **self.extra)
response = view(request)
self.assertEqual(response.status_code, 201)
data['url'] = 'http://testserver/api/v1/orgs/denoinc'
data['user'] = 'http://testserver/api/v1/users/denoinc'
data['creator'] = 'http://testserver/api/v1/users/bob'
self.assertDictContainsSubset(data, response.data)
self.company_data = response.data
self.organization = OrganizationProfile.objects.get(
user__username=data['org'])
def _project_create(self, project_data={}, merge=True):
view = ProjectViewSet.as_view({
'post': 'create'
})
if merge:
data = {
'name': u'demo',
'owner':
'http://testserver/api/v1/users/%s' % self.user.username,
'metadata': {'description': 'Some description',
'location': 'Naivasha, Kenya',
'category': 'governance'},
'public': False
}
data.update(project_data)
else:
data = project_data
request = self.factory.post(
'/', data=json.dumps(data),
content_type="application/json", **self.extra)
response = view(request, owner=self.user.username)
self.assertEqual(response.status_code, 201)
self.project = Project.objects.filter(
name=data['name'], created_by=self.user)[0]
data['url'] = 'http://testserver/api/v1/projects/%s'\
% self.project.pk
self.assertDictContainsSubset(data, response.data)
request.user = self.user
self.project_data = ProjectSerializer(
self.project, context={'request': request}).data
def _publish_xls_form_to_project(self, publish_data={}, merge=True,
public=False, xlsform_path=None):
if not hasattr(self, 'project'):
self._project_create()
elif self.project.created_by != self.user:
self._project_create()
view = ProjectViewSet.as_view({
'post': 'forms'
})
project_id = self.project.pk
if merge:
data = {
'owner': 'http://testserver/api/v1/users/%s'
% self.project.organization.username,
'public': False,
'public_data': False,
'description': u'transportation_2011_07_25',
'downloadable': True,
'allows_sms': False,
'encrypted': False,
'sms_id_string': u'transportation_2011_07_25',
'id_string': u'transportation_2011_07_25',
'title': u'transportation_2011_07_25',
'bamboo_dataset': u''
}
data.update(publish_data)
else:
data = publish_data
path = xlsform_path or os.path.join(
settings.PROJECT_ROOT, "apps", "main", "tests", "fixtures",
"transportation", "transportation.xls")
with HTTMock(enketo_mock):
with open(path) as xls_file:
post_data = {'xls_file': xls_file}
request = self.factory.post('/', data=post_data, **self.extra)
response = view(request, pk=project_id)
self.assertEqual(response.status_code, 201)
self.xform = XForm.objects.all().order_by('pk').reverse()[0]
data.update({
'url':
'http://testserver/api/v1/forms/%s' % (self.xform.pk)
})
        # Input was private, so mark it public if the project is public
if public:
data['public_data'] = data['public'] = True
self.form_data = response.data
def _add_uuid_to_submission_xml(self, path, xform):
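        """Copy the submission XML at path into a temp file, injecting the
        form's uuid, and return the temp file's path; the caller is
        responsible for unlinking it."""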
tmp_file = NamedTemporaryFile(delete=False)
split_xml = None
with open(path) as _file:
split_xml = re.split(r'(<transport>)', _file.read())
split_xml[1:1] = [
'<formhub><uuid>%s</uuid></formhub>' % xform.uuid
]
tmp_file.write(''.join(split_xml))
path = tmp_file.name
tmp_file.close()
return path
def _make_submission(self, path, username=None, add_uuid=False,
forced_submission_time=None,
client=None, media_file=None, auth=None):
# store temporary file with dynamic uuid
self.factory = APIRequestFactory()
if auth is None:
auth = DigestAuth(self.profile_data['username'],
self.profile_data['password1'])
if add_uuid:
path = self._add_uuid_to_submission_xml(path, self.xform)
with open(path) as f:
post_data = {'xml_submission_file': f}
if media_file is not None:
if isinstance(media_file, list):
for c in range(len(media_file)):
post_data['media_file_{}'.format(c)] = media_file[c]
else:
post_data['media_file'] = media_file
if username is None:
username = self.user.username
url_prefix = '%s/' % username if username else ''
url = '/%ssubmission' % url_prefix
request = self.factory.post(url, post_data)
request.user = authenticate(username=auth.username,
password=auth.password)
self.response = submission(request, username=username)
if auth and self.response.status_code == 401:
request.META.update(auth(request.META, self.response))
self.response = submission(request, username=username)
if forced_submission_time:
instance = Instance.objects.order_by('-pk').all()[0]
instance.date_created = forced_submission_time
instance.save()
instance.parsed_instance.save()
        # remove the temporary file created by _add_uuid_to_submission_xml
        if add_uuid:
            os.unlink(path)
def _make_submissions(self, username=None, add_uuid=False,
should_store=True):
"""Make test fixture submissions to current xform.
:param username: submit under this username, default None.
:param add_uuid: add UUID to submission, default False.
:param should_store: should submissions be save, default True.
"""
paths = [os.path.join(
self.main_directory, 'fixtures', 'transportation',
'instances', s, s + '.xml') for s in self.surveys]
pre_count = Instance.objects.count()
auth = DigestAuth(self.profile_data['username'],
self.profile_data['password1'])
for path in paths:
self._make_submission(path, username, add_uuid, auth=auth)
post_count = pre_count + len(self.surveys) if should_store\
else pre_count
self.assertEqual(Instance.objects.count(), post_count)
self.assertEqual(self.xform.instances.count(), post_count)
xform = XForm.objects.get(pk=self.xform.pk)
self.assertEqual(xform.num_of_submissions, post_count)
self.assertEqual(xform.user.profile.num_of_submissions, post_count)
def _submit_transport_instance_w_attachment(self,
survey_at=0,
media_file=None):
s = self.surveys[survey_at]
if not media_file:
media_file = "1335783522563.jpg"
path = os.path.join(self.main_directory, 'fixtures',
'transportation', 'instances', s, media_file)
with open(path) as f:
self._make_submission(os.path.join(
self.main_directory, 'fixtures',
'transportation', 'instances', s, s + '.xml'), media_file=f)
attachment = Attachment.objects.all().reverse()[0]
self.attachment = attachment
def _post_form_metadata(self, data, test=True):
count = MetaData.objects.count()
view = MetaDataViewSet.as_view({'post': 'create'})
request = self.factory.post('/', data, **self.extra)
response = view(request)
if test:
self.assertEqual(response.status_code, 201)
another_count = MetaData.objects.count()
self.assertEqual(another_count, count + 1)
self.metadata = MetaData.objects.get(pk=response.data['id'])
self.metadata_data = response.data
return response
def _add_form_metadata(self, xform, data_type, data_value, path=None):
data = {
'data_type': data_type,
'data_value': data_value,
'xform': xform.pk
}
if path and data_value:
with open(path) as media_file:
data.update({
'data_file': media_file,
})
self._post_form_metadata(data)
else:
self._post_form_metadata(data)
def _get_digest_client(self):
self.user.profile.require_auth = True
self.user.profile.save()
client = DigestClient()
client.set_authorization(self.profile_data['username'],
self.profile_data['password1'],
'Digest')
return client
| bsd-2-clause |
azumimuo/family-xbmc-addon | script.module.youtube.dl/lib/youtube_dl/extractor/muenchentv.py | 64 | 2126 | # coding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
js_to_json,
)
class MuenchenTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?muenchen\.tv/livestream'
IE_DESC = 'münchen.tv'
_TEST = {
'url': 'http://www.muenchen.tv/livestream/',
'info_dict': {
'id': '5334',
'display_id': 'live',
'ext': 'mp4',
'title': 're:^münchen.tv-Livestream [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'is_live': True,
'thumbnail': r're:^https?://.*\.jpg$'
},
'params': {
'skip_download': True,
}
}
def _real_extract(self, url):
display_id = 'live'
webpage = self._download_webpage(url, display_id)
title = self._live_title(self._og_search_title(webpage))
data_js = self._search_regex(
r'(?s)\nplaylist:\s*(\[.*?}\]),',
webpage, 'playlist configuration')
data_json = js_to_json(data_js)
data = json.loads(data_json)[0]
video_id = data['mediaid']
thumbnail = data.get('image')
formats = []
for format_num, s in enumerate(data['sources']):
ext = determine_ext(s['file'], None)
label_str = s.get('label')
if label_str is None:
label_str = '_%d' % format_num
if ext is None:
format_id = label_str
else:
format_id = '%s-%s' % (ext, label_str)
formats.append({
'url': s['file'],
'tbr': int_or_none(s.get('label')),
'ext': 'mp4',
'format_id': format_id,
'preference': -100 if '.smil' in s['file'] else 0,
})
self._sort_formats(formats)
return {
'id': video_id,
'display_id': display_id,
'title': title,
'formats': formats,
'is_live': True,
'thumbnail': thumbnail,
}
| gpl-2.0 |
drxaero/calibre | src/html5lib/filters/lint.py | 979 | 4306 | from __future__ import absolute_import, division, unicode_literals
from gettext import gettext
_ = gettext
from . import _base
from ..constants import cdataElements, rcdataElements, voidElements
from ..constants import spaceCharacters
spaceCharacters = "".join(spaceCharacters)
class LintError(Exception):
pass
class Filter(_base.Filter):
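    """Sanity-check a token stream, raising LintError for malformed or
    inconsistent tokens."""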
def __iter__(self):
open_elements = []
contentModelFlag = "PCDATA"
for token in _base.Filter.__iter__(self):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("StartTag not in PCDATA content model flag: %(tag)s") % {"tag": name})
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
if not name:
raise LintError(_("Empty tag name"))
if type == "StartTag" and name in voidElements:
raise LintError(_("Void element reported as StartTag token: %(tag)s") % {"tag": name})
elif type == "EmptyTag" and name not in voidElements:
raise LintError(_("Non-void element reported as EmptyTag token: %(tag)s") % {"tag": token["name"]})
if type == "StartTag":
open_elements.append(name)
for name, value in token["data"]:
if not isinstance(name, str):
raise LintError(_("Attribute name is not a string: %(name)r") % {"name": name})
if not name:
raise LintError(_("Empty attribute name"))
if not isinstance(value, str):
raise LintError(_("Attribute value is not a string: %(value)r") % {"value": value})
if name in cdataElements:
contentModelFlag = "CDATA"
elif name in rcdataElements:
contentModelFlag = "RCDATA"
elif name == "plaintext":
contentModelFlag = "PLAINTEXT"
elif type == "EndTag":
name = token["name"]
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
if not name:
raise LintError(_("Empty tag name"))
if name in voidElements:
raise LintError(_("Void element reported as EndTag token: %(tag)s") % {"tag": name})
start_name = open_elements.pop()
if start_name != name:
raise LintError(_("EndTag (%(end)s) does not match StartTag (%(start)s)") % {"end": name, "start": start_name})
contentModelFlag = "PCDATA"
elif type == "Comment":
if contentModelFlag != "PCDATA":
raise LintError(_("Comment not in PCDATA content model flag"))
elif type in ("Characters", "SpaceCharacters"):
data = token["data"]
if not isinstance(data, str):
raise LintError(_("Attribute name is not a string: %(name)r") % {"name": data})
if not data:
raise LintError(_("%(type)s token with empty data") % {"type": type})
if type == "SpaceCharacters":
data = data.strip(spaceCharacters)
if data:
raise LintError(_("Non-space character(s) found in SpaceCharacters token: %(token)r") % {"token": data})
elif type == "Doctype":
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("Doctype not in PCDATA content model flag: %(name)s") % {"name": name})
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
# XXX: what to do with token["data"] ?
elif type in ("ParseError", "SerializeError"):
pass
else:
raise LintError(_("Unknown token type: %(type)s") % {"type": type})
yield token
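# A minimal, hypothetical usage sketch (the token dicts are illustrative and
# follow html5lib's tree-walker token format; assumes Python 3, where tag
# names are str). The filter yields each token unchanged and raises
# LintError on a malformed stream:
#
#     tokens = [
#         {"type": "StartTag", "name": "p", "data": []},
#         {"type": "Characters", "data": "hello"},
#         {"type": "EndTag", "name": "p", "data": []},
#     ]
#     for token in Filter(tokens):
#         pass  # raises LintError if, e.g., an EndTag mismatches its StartTag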
| gpl-3.0 |
deshbandhu-renovite/receipt-eCommerce | node_modules/node-gyp/gyp/pylib/gyp/generator/gypd.py | 1824 | 3474 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files such as such as "included_files" and "*_excluded" will be present.
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# These variables should just be spit back out as variable references.
_generator_identity_variables = [
'CONFIGURATION_NAME',
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'LIB_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
'SHARED_LIB_DIR',
'SHARED_LIB_PREFIX',
'SHARED_LIB_SUFFIX',
'STATIC_LIB_PREFIX',
'STATIC_LIB_SUFFIX',
]
# gypd doesn't define a default value for OS like many other generator
# modules. Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {
}
# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True
# TODO(mark): This always uses <, which isn't right. The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase. Bonus points for carrying @ back into the output too.
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
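# For example, generator_default_variables['PRODUCT_DIR'] is now the literal
# string '<(PRODUCT_DIR)', so the expansion machinery writes the variable
# reference itself into the .gypd output instead of an expanded value.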
def GenerateOutput(target_list, target_dicts, data, params):
output_files = {}
for qualified_target in target_list:
[input_file, target] = \
gyp.common.ParseQualifiedTarget(qualified_target)[0:2]
if input_file[-4:] != '.gyp':
continue
input_file_stem = input_file[:-4]
output_file = input_file_stem + params['options'].suffix + '.gypd'
if not output_file in output_files:
output_files[output_file] = input_file
for output_file, input_file in output_files.iteritems():
output = open(output_file, 'w')
pprint.pprint(data[input_file], output)
output.close()
| gpl-3.0 |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/numpy/polynomial/tests/test_legendre.py | 123 | 16522 | """Tests for legendre module.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.legendre as leg
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
TestCase, assert_almost_equal, assert_raises,
assert_equal, assert_, run_module_suite)
L0 = np.array([1])
L1 = np.array([0, 1])
L2 = np.array([-1, 0, 3])/2
L3 = np.array([0, -3, 0, 5])/2
L4 = np.array([3, 0, -30, 0, 35])/8
L5 = np.array([0, 15, 0, -70, 0, 63])/8
L6 = np.array([-5, 0, 105, 0, -315, 0, 231])/16
L7 = np.array([0, -35, 0, 315, 0, -693, 0, 429])/16
L8 = np.array([35, 0, -1260, 0, 6930, 0, -12012, 0, 6435])/128
L9 = np.array([0, 315, 0, -4620, 0, 18018, 0, -25740, 0, 12155])/128
Llist = [L0, L1, L2, L3, L4, L5, L6, L7, L8, L9]
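# L0..L9 above are the first ten Legendre polynomials in the power basis,
# lowest-degree coefficient first (the convention used by polyval below).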
def trim(x):
return leg.legtrim(x, tol=1e-6)
class TestConstants(TestCase):
def test_legdomain(self):
assert_equal(leg.legdomain, [-1, 1])
def test_legzero(self):
assert_equal(leg.legzero, [0])
def test_legone(self):
assert_equal(leg.legone, [1])
def test_legx(self):
assert_equal(leg.legx, [0, 1])
class TestArithmetic(TestCase):
x = np.linspace(-1, 1, 100)
def test_legadd(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = leg.legadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_legsub(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = leg.legsub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_legmulx(self):
assert_equal(leg.legmulx([0]), [0])
assert_equal(leg.legmulx([1]), [0, 1])
for i in range(1, 5):
tmp = 2*i + 1
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [i/tmp, 0, (i + 1)/tmp]
assert_equal(leg.legmulx(ser), tgt)
def test_legmul(self):
# check values of result
for i in range(5):
pol1 = [0]*i + [1]
val1 = leg.legval(self.x, pol1)
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
pol2 = [0]*j + [1]
val2 = leg.legval(self.x, pol2)
pol3 = leg.legmul(pol1, pol2)
val3 = leg.legval(self.x, pol3)
assert_(len(pol3) == i + j + 1, msg)
assert_almost_equal(val3, val1*val2, err_msg=msg)
def test_legdiv(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = leg.legadd(ci, cj)
quo, rem = leg.legdiv(tgt, ci)
res = leg.legadd(leg.legmul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(TestCase):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2., 2., 2.])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = polyval(x, [1., 2., 3.])
def test_legval(self):
#check empty input
assert_equal(leg.legval([], [1]).size, 0)
        #check normal input
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Llist]
for i in range(10):
msg = "At i=%d" % i
tgt = y[i]
res = leg.legval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
#check that shape is preserved
for i in range(3):
dims = [2]*i
x = np.zeros(dims)
assert_equal(leg.legval(x, [1]).shape, dims)
assert_equal(leg.legval(x, [1, 0]).shape, dims)
assert_equal(leg.legval(x, [1, 0, 0]).shape, dims)
def test_legval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
res = leg.legval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = leg.legval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_legval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = leg.legval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = leg.legval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_leggrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = leg.leggrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = leg.leggrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_leggrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = leg.leggrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = leg.leggrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
class TestIntegral(TestCase):
def test_legint(self):
# check exceptions
assert_raises(ValueError, leg.legint, [0], .5)
assert_raises(ValueError, leg.legint, [0], -1)
assert_raises(ValueError, leg.legint, [0], 1, [0, 0])
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = leg.legint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
legpol = leg.poly2leg(pol)
legint = leg.legint(legpol, m=1, k=[i])
res = leg.leg2poly(legint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
legpol = leg.poly2leg(pol)
legint = leg.legint(legpol, m=1, k=[i], lbnd=-1)
assert_almost_equal(leg.legval(-1, legint), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
legpol = leg.poly2leg(pol)
legint = leg.legint(legpol, m=1, k=[i], scl=2)
res = leg.leg2poly(legint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = leg.legint(tgt, m=1)
res = leg.legint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = leg.legint(tgt, m=1, k=[k])
res = leg.legint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1)
res = leg.legint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = leg.legint(tgt, m=1, k=[k], scl=2)
res = leg.legint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_legint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([leg.legint(c) for c in c2d.T]).T
res = leg.legint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([leg.legint(c) for c in c2d])
res = leg.legint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([leg.legint(c, k=3) for c in c2d])
res = leg.legint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
class TestDerivative(TestCase):
def test_legder(self):
# check exceptions
assert_raises(ValueError, leg.legder, [0], .5)
assert_raises(ValueError, leg.legder, [0], -1)
        # check that zeroth derivative does nothing
for i in range(5):
tgt = [0]*i + [1]
res = leg.legder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = leg.legder(leg.legint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_legder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([leg.legder(c) for c in c2d.T]).T
res = leg.legder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([leg.legder(c) for c in c2d])
res = leg.legder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander(TestCase):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
def test_legvander(self):
# check for 1d x
x = np.arange(3)
v = leg.legvander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], leg.legval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = leg.legvander(x, 3)
assert_(v.shape == (3, 2, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], leg.legval(x, coef))
def test_legvander2d(self):
# also tests polyval2d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3))
van = leg.legvander2d(x1, x2, [1, 2])
tgt = leg.legval2d(x1, x2, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = leg.legvander2d([x1], [x2], [1, 2])
assert_(van.shape == (1, 5, 6))
def test_legvander3d(self):
# also tests polyval3d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3, 4))
van = leg.legvander3d(x1, x2, x3, [1, 2, 3])
tgt = leg.legval3d(x1, x2, x3, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = leg.legvander3d([x1], [x2], [x3], [1, 2, 3])
assert_(van.shape == (1, 5, 24))
class TestFitting(TestCase):
def test_legfit(self):
def f(x):
return x*(x - 1)*(x - 2)
# Test exceptions
assert_raises(ValueError, leg.legfit, [1], [1], -1)
assert_raises(TypeError, leg.legfit, [[1]], [1], 0)
assert_raises(TypeError, leg.legfit, [], [1], 0)
assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0)
assert_raises(TypeError, leg.legfit, [1, 2], [1], 0)
assert_raises(TypeError, leg.legfit, [1], [1, 2], 0)
assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1])
# Test fit
x = np.linspace(0, 2)
y = f(x)
#
coef3 = leg.legfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(leg.legval(x, coef3), y)
#
coef4 = leg.legfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(leg.legval(x, coef4), y)
#
coef2d = leg.legfit(x, np.array([y, y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
        yw[0::2] = 0
wcoef3 = leg.legfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = leg.legfit(x, np.array([yw, yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
        # test scaling with complex x values whose squares sum to zero
x = [1, 1j, -1, -1j]
assert_almost_equal(leg.legfit(x, x, 1), [0, 1])
class TestCompanion(TestCase):
def test_raises(self):
assert_raises(ValueError, leg.legcompanion, [])
assert_raises(ValueError, leg.legcompanion, [1])
def test_dimensions(self):
for i in range(1, 5):
coef = [0]*i + [1]
assert_(leg.legcompanion(coef).shape == (i, i))
def test_linear_root(self):
assert_(leg.legcompanion([1, 2])[0, 0] == -.5)
class TestGauss(TestCase):
def test_100(self):
x, w = leg.leggauss(100)
# test orthogonality. Note that the results need to be normalized,
# otherwise the huge values that can arise from fast growing
# functions like Laguerre can be very confusing.
v = leg.legvander(x, 99)
vv = np.dot(v.T * w, v)
vd = 1/np.sqrt(vv.diagonal())
vv = vd[:, None] * vv * vd
assert_almost_equal(vv, np.eye(100))
# check that the integral of 1 is correct
tgt = 2.0
assert_almost_equal(w.sum(), tgt)
class TestMisc(TestCase):
def test_legfromroots(self):
res = leg.legfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
pol = leg.legfromroots(roots)
res = leg.legval(roots, pol)
tgt = 0
assert_(len(pol) == i + 1)
assert_almost_equal(leg.leg2poly(pol)[-1], 1)
assert_almost_equal(res, tgt)
def test_legroots(self):
assert_almost_equal(leg.legroots([1]), [])
assert_almost_equal(leg.legroots([1, 2]), [-.5])
for i in range(2, 5):
tgt = np.linspace(-1, 1, i)
res = leg.legroots(leg.legfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_legtrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, leg.legtrim, coef, -1)
# Test results
assert_equal(leg.legtrim(coef), coef[:-1])
assert_equal(leg.legtrim(coef, 1), coef[:-3])
assert_equal(leg.legtrim(coef, 2), [0])
def test_legline(self):
assert_equal(leg.legline(3, 4), [3, 4])
def test_leg2poly(self):
for i in range(10):
assert_almost_equal(leg.leg2poly([0]*i + [1]), Llist[i])
def test_poly2leg(self):
for i in range(10):
assert_almost_equal(leg.poly2leg(Llist[i]), [0]*i + [1])
def test_weight(self):
x = np.linspace(-1, 1, 11)
tgt = 1.
res = leg.legweight(x)
assert_almost_equal(res, tgt)
if __name__ == "__main__":
run_module_suite()
| gpl-2.0 |
SciDevs/heroku-buildpack-python-scikit-learn-git | vendor/virtualenv-1.7/docs/conf.py | 19 | 4191 | # -*- coding: utf-8 -*-
#
# Paste documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 22 22:08:49 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys
# If your extensions are in another directory, add it here.
#sys.path.append('some/directory')
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
## FIXME: disabled for now because I haven't figured out how to use this:
#templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'virtualenv'
copyright = '2007-2011, Ian Bicking, The Open Planning Project, The virtualenv developers'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
release = "1.7"
version = ".".join(release.split(".")[:2])
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'default.css'
html_theme = 'nature'
html_theme_path = ['_theme']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
#html_index = ''
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pastedoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
#latex_documents = []
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| mit |
mgree/sigplan | lda/by_year.py | 2 | 2049 | import codecs
import re
import sys
from operator import add
def quote(s):
return '"' + s + '"'
title = re.compile(u'(.*) \((.*) (\\d*)\)$')
def split_title(doc):
m = title.match(doc)
return [m.group(3),m.group(2),quote(m.group(1))]
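# For an illustrative document title:
#   split_title(u'Some Paper Title (POPL 2010)')
#     -> ['2010', 'POPL', '"Some Paper Title"']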
def run(docs,gammas,normalize=True):
    # infer the number of topics from the first gamma row
    num_topics = len(gammas[0].split())
# topics per document, collecting conference names
years = {}
all_years = set()
confs = set()
for d,g in zip(docs, gammas):
year,conf,title = split_title(d)
ts = map(float,g.split())
if year not in years:
years[year] = {}
confs.add(conf)
if conf not in years[year]:
years[year][conf] = [1] + ts
else:
years[year][conf] = map(add, [1] + ts, years[year][conf])
# fix a conference order
conf_order = list(confs)
conf_order.sort()
# print out the header
header = ["Year"]
for conf in conf_order:
header += [conf + " # of papers"] + [conf + " Topic " + str(i) for i in range(0,num_topics)]
print ','.join(header)
# print out the topics
for year in years:
tvals = []
for conf in conf_order:
if conf in years[year]:
if normalize:
ts = years[year][conf]
num_papers = ts[0]
for i in range(1,len(ts)):
ts[i] = ts[i] / float(num_papers)
tvals += ts
else:
tvals += years[year][conf]
else:
tvals += [0] + ["" for i in range(0,num_topics)]
print ','.join([year] + map(str,tvals))
def read(f, enc="utf8"):
return map(lambda s: s.strip(),codecs.open(f,"r",enc).readlines())
if (__name__ == '__main__'):
args = dict(enumerate(sys.argv))
gamma = read(args.get(1,"final.gamma"))
docs = read(args.get(2,"../docs.dat"))
nflag = args.get(3,"--normalize")
run(docs, gamma, normalize=(nflag == "--normalize"))
| mit |
CharlieCorner/manga_downloader | src/ConvertPackage/image.py | 3 | 5281 | #!/usr/bin/env python
# Copyright (C) 2010 Alex Yatskov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PIL import Image, ImageDraw
class ImageFlags:
Orient = 1 << 0
Resize = 1 << 1
Frame = 1 << 2
Quantize = 1 << 3
class KindleData:
Palette4 = [
0x00, 0x00, 0x00,
0x55, 0x55, 0x55,
0xaa, 0xaa, 0xaa,
0xff, 0xff, 0xff
]
Palette15a = [
0x00, 0x00, 0x00,
0x11, 0x11, 0x11,
0x22, 0x22, 0x22,
0x33, 0x33, 0x33,
0x44, 0x44, 0x44,
0x55, 0x55, 0x55,
0x66, 0x66, 0x66,
0x77, 0x77, 0x77,
0x88, 0x88, 0x88,
0x99, 0x99, 0x99,
0xaa, 0xaa, 0xaa,
0xbb, 0xbb, 0xbb,
0xcc, 0xcc, 0xcc,
0xdd, 0xdd, 0xdd,
0xff, 0xff, 0xff,
]
Palette15b = [
0x00, 0x00, 0x00,
0x11, 0x11, 0x11,
0x22, 0x22, 0x22,
0x33, 0x33, 0x33,
0x44, 0x44, 0x44,
0x55, 0x55, 0x55,
0x77, 0x77, 0x77,
0x88, 0x88, 0x88,
0x99, 0x99, 0x99,
0xaa, 0xaa, 0xaa,
0xbb, 0xbb, 0xbb,
0xcc, 0xcc, 0xcc,
0xdd, 0xdd, 0xdd,
0xee, 0xee, 0xee,
0xff, 0xff, 0xff,
]
Palette16 = [
0x00, 0x00, 0x00,
0x11, 0x11, 0x11,
0x22, 0x22, 0x22,
0x33, 0x33, 0x33,
0x44, 0x44, 0x44,
0x55, 0x55, 0x55,
0x66, 0x66, 0x66,
0x77, 0x77, 0x77,
0x88, 0x88, 0x88,
0x99, 0x99, 0x99,
0xaa, 0xaa, 0xaa,
0xbb, 0xbb, 0xbb,
0xcc, 0xcc, 0xcc,
0xdd, 0xdd, 0xdd,
0xee, 0xee, 0xee,
0xff, 0xff, 0xff,
]
Profiles = {
'Kindle 1': ((600, 800), Palette4),
'Kindle 2': ((600, 800), Palette15a),
'Kindle 3': ((600, 800), Palette15a),
'Kindle 4': ((600, 800), Palette15b),
'Kindle 5': ((758, 1024), Palette16),
'Kindle DX': ((824, 1200), Palette15a),
'Kindle DXG': ((824, 1200), Palette15a)
}
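# Each KindleData profile is (screen size in pixels, grayscale palette); e.g.
# KindleData.Profiles['Kindle DX'] == ((824, 1200), KindleData.Palette15a).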
def quantizeImage(image, palette):
colors = len(palette) / 3
if colors < 256:
palette = palette + palette[:3] * (256 - colors)
palImg = Image.new('P', (1, 1))
palImg.putpalette(palette)
return image.quantize(palette=palImg)
def resizeImage(image, size):
widthDev, heightDev = size
widthImg, heightImg = image.size
if widthImg <= widthDev and heightImg <= heightDev:
return image
ratioImg = float(widthImg) / float(heightImg)
ratioWidth = float(widthImg) / float(widthDev)
ratioHeight = float(heightImg) / float(heightDev)
if ratioWidth > ratioHeight:
widthImg = widthDev
heightImg = int(widthDev / ratioImg)
elif ratioWidth < ratioHeight:
heightImg = heightDev
widthImg = int(heightDev * ratioImg)
else:
widthImg, heightImg = size
return image.resize((widthImg, heightImg), Image.ANTIALIAS)
def formatImage(image):
if image.mode == 'RGB':
return image
return image.convert('RGB')
def orientImage(image, size):
widthDev, heightDev = size
widthImg, heightImg = image.size
if (widthImg > heightImg) != (widthDev > heightDev):
return image.rotate(90, Image.BICUBIC, True)
return image
def frameImage(image, foreground, background, size):
widthDev, heightDev = size
widthImg, heightImg = image.size
pastePt = (
max(0, (widthDev - widthImg) / 2),
max(0, (heightDev - heightImg) / 2)
)
corner1 = (
pastePt[0] - 1,
pastePt[1] - 1
)
corner2 = (
pastePt[0] + widthImg + 1,
pastePt[1] + heightImg + 1
)
imageBg = Image.new(image.mode, size, background)
imageBg.paste(image, pastePt)
draw = ImageDraw.Draw(imageBg)
draw.rectangle([corner1, corner2], outline=foreground)
return imageBg
def convertImage(source, target, device, flags):
try:
size, palette = KindleData.Profiles[device]
except KeyError:
raise RuntimeError('Unexpected output device %s' % device)
try:
image = Image.open(source)
except IOError:
raise RuntimeError('Cannot read image file %s' % source)
image = formatImage(image)
if flags & ImageFlags.Orient:
image = orientImage(image, size)
if flags & ImageFlags.Resize:
image = resizeImage(image, size)
if flags & ImageFlags.Frame:
image = frameImage(image, tuple(palette[:3]), tuple(palette[-3:]), size)
if flags & ImageFlags.Quantize:
image = quantizeImage(image, palette)
try:
image.save(target)
except IOError:
raise RuntimeError('Cannot write image file %s' % target)
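# A hedged usage sketch (the file names below are placeholders, not part of
# this script): a typical conversion enables every processing stage.
#
#     flags = (ImageFlags.Orient | ImageFlags.Resize |
#              ImageFlags.Frame | ImageFlags.Quantize)
#     convertImage('page_raw.png', 'page_kindle.png', 'Kindle 3', flags)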
| mit |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/bson/json_util.py | 12 | 9080 | # Copyright 2009-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for using Python's :mod:`json` module with BSON documents.
This module provides two helper methods `dumps` and `loads` that wrap the
native :mod:`json` methods and provide explicit BSON conversion to and from
json. This allows for specialized encoding and decoding of BSON documents
into `Mongo Extended JSON
<http://www.mongodb.org/display/DOCS/Mongo+Extended+JSON>`_'s *Strict*
mode. This lets you encode / decode BSON documents to JSON even when
they use special BSON types.
Example usage (serialization):
.. doctest::
>>> from bson import Binary, Code
>>> from bson.json_util import dumps
>>> dumps([{'foo': [1, 2]},
... {'bar': {'hello': 'world'}},
... {'code': Code("function x() { return 1; }")},
... {'bin': Binary("\x01\x02\x03\x04")}])
'[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }", "$scope": {}}}, {"bin": {"$binary": "AQIDBA==", "$type": "00"}}]'
Example usage (deserialization):
.. doctest::
>>> from bson.json_util import loads
>>> loads('[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$scope": {}, "$code": "function x() { return 1; }"}}, {"bin": {"$type": "00", "$binary": "AQIDBA=="}}]')
[{u'foo': [1, 2]}, {u'bar': {u'hello': u'world'}}, {u'code': Code('function x() { return 1; }', {})}, {u'bin': Binary('...', 0)}]
Alternatively, you can manually pass the `default` to :func:`json.dumps`.
It won't handle :class:`~bson.binary.Binary` and :class:`~bson.code.Code`
instances (as they are extended strings you can't provide custom defaults),
but it will be faster as there is less recursion.
.. versionchanged:: 2.7
Preserves order when rendering SON, Timestamp, Code, Binary, and DBRef
instances. (But not in Python 2.4.)
.. versionchanged:: 2.3
Added dumps and loads helpers to automatically handle conversion to and
from json and supports :class:`~bson.binary.Binary` and
:class:`~bson.code.Code`
.. versionchanged:: 1.9
Handle :class:`uuid.UUID` instances, whenever possible.
.. versionchanged:: 1.8
Handle timezone aware datetime instances on encode, decode to
timezone aware datetime instances.
.. versionchanged:: 1.8
Added support for encoding/decoding :class:`~bson.max_key.MaxKey`
and :class:`~bson.min_key.MinKey`, and for encoding
:class:`~bson.timestamp.Timestamp`.
.. versionchanged:: 1.2
Added support for encoding/decoding datetimes and regular expressions.
"""
import base64
import calendar
import datetime
import re
json_lib = True
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
json_lib = False
import bson
from bson import EPOCH_AWARE, RE_TYPE, SON
from bson.binary import Binary
from bson.code import Code
from bson.dbref import DBRef
from bson.max_key import MaxKey
from bson.min_key import MinKey
from bson.objectid import ObjectId
from bson.regex import Regex
from bson.timestamp import Timestamp
from bson.py3compat import PY3, binary_type, string_types
_RE_OPT_TABLE = {
"i": re.I,
"l": re.L,
"m": re.M,
"s": re.S,
"u": re.U,
"x": re.X,
}
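# e.g. a document {"$regex": "^a", "$options": "im"} decodes below with
# flags re.I | re.M.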
def dumps(obj, *args, **kwargs):
"""Helper function that wraps :class:`json.dumps`.
Recursive function that handles all BSON types including
:class:`~bson.binary.Binary` and :class:`~bson.code.Code`.
.. versionchanged:: 2.7
Preserves order when rendering SON, Timestamp, Code, Binary, and DBRef
instances. (But not in Python 2.4.)
"""
if not json_lib:
raise Exception("No json library available")
return json.dumps(_json_convert(obj), *args, **kwargs)
def loads(s, *args, **kwargs):
"""Helper function that wraps :class:`json.loads`.
Automatically passes the object_hook for BSON type conversion.
:Parameters:
- `compile_re` (optional): if ``False``, don't attempt to compile BSON
regular expressions into Python regular expressions. Return instances
of :class:`~bson.bsonregex.BSONRegex` instead.
.. versionchanged:: 2.7
Added ``compile_re`` option.
"""
if not json_lib:
raise Exception("No json library available")
compile_re = kwargs.pop('compile_re', True)
kwargs['object_hook'] = lambda dct: object_hook(dct, compile_re)
return json.loads(s, *args, **kwargs)
def _json_convert(obj):
"""Recursive helper method that converts BSON types so they can be
converted into json.
"""
if hasattr(obj, 'iteritems') or hasattr(obj, 'items'): # PY3 support
        return SON(((k, _json_convert(v))
                    for k, v in getattr(obj, 'iteritems', obj.items)()))
elif hasattr(obj, '__iter__') and not isinstance(obj, string_types):
return list((_json_convert(v) for v in obj))
try:
return default(obj)
except TypeError:
return obj
def object_hook(dct, compile_re=True):
if "$oid" in dct:
return ObjectId(str(dct["$oid"]))
if "$ref" in dct:
return DBRef(dct["$ref"], dct["$id"], dct.get("$db", None))
if "$date" in dct:
secs = float(dct["$date"]) / 1000.0
return EPOCH_AWARE + datetime.timedelta(seconds=secs)
if "$regex" in dct:
flags = 0
# PyMongo always adds $options but some other tools may not.
for opt in dct.get("$options", ""):
flags |= _RE_OPT_TABLE.get(opt, 0)
if compile_re:
return re.compile(dct["$regex"], flags)
else:
return Regex(dct["$regex"], flags)
if "$minKey" in dct:
return MinKey()
if "$maxKey" in dct:
return MaxKey()
if "$binary" in dct:
if isinstance(dct["$type"], int):
dct["$type"] = "%02x" % dct["$type"]
subtype = int(dct["$type"], 16)
if subtype >= 0xffffff80: # Handle mongoexport values
subtype = int(dct["$type"][6:], 16)
return Binary(base64.b64decode(dct["$binary"].encode()), subtype)
if "$code" in dct:
return Code(dct["$code"], dct.get("$scope"))
if bson.has_uuid() and "$uuid" in dct:
return bson.uuid.UUID(dct["$uuid"])
return dct
def default(obj):
# We preserve key order when rendering SON, DBRef, etc. as JSON by
# returning a SON for those types instead of a dict. This works with
# the "json" standard library in Python 2.6+ and with simplejson
# 2.1.0+ in Python 2.5+, because those libraries iterate the SON
# using PyIter_Next. Python 2.4 must use simplejson 2.0.9 or older,
# and those versions of simplejson use the lower-level PyDict_Next,
# which bypasses SON's order-preserving iteration, so we lose key
# order in Python 2.4.
if isinstance(obj, ObjectId):
return {"$oid": str(obj)}
if isinstance(obj, DBRef):
return _json_convert(obj.as_doc())
if isinstance(obj, datetime.datetime):
# TODO share this code w/ bson.py?
if obj.utcoffset() is not None:
obj = obj - obj.utcoffset()
millis = int(calendar.timegm(obj.timetuple()) * 1000 +
obj.microsecond / 1000)
return {"$date": millis}
if isinstance(obj, (RE_TYPE, Regex)):
flags = ""
if obj.flags & re.IGNORECASE:
flags += "i"
if obj.flags & re.LOCALE:
flags += "l"
if obj.flags & re.MULTILINE:
flags += "m"
if obj.flags & re.DOTALL:
flags += "s"
if obj.flags & re.UNICODE:
flags += "u"
if obj.flags & re.VERBOSE:
flags += "x"
        if isinstance(obj.pattern, binary_type):
            pattern = obj.pattern.decode('utf-8')
        else:
            pattern = obj.pattern
return SON([("$regex", pattern), ("$options", flags)])
if isinstance(obj, MinKey):
return {"$minKey": 1}
if isinstance(obj, MaxKey):
return {"$maxKey": 1}
if isinstance(obj, Timestamp):
return SON([("t", obj.time), ("i", obj.inc)])
if isinstance(obj, Code):
return SON([('$code', str(obj)), ('$scope', obj.scope)])
if isinstance(obj, Binary):
return SON([
('$binary', base64.b64encode(obj).decode()),
('$type', "%02x" % obj.subtype)])
if PY3 and isinstance(obj, binary_type):
return SON([
('$binary', base64.b64encode(obj).decode()),
('$type', "00")])
if bson.has_uuid() and isinstance(obj, bson.uuid.UUID):
return {"$uuid": obj.hex}
raise TypeError("%r is not JSON serializable" % obj)
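# A hedged round-trip sketch (values illustrative): because loads() installs
# object_hook, extended-JSON types survive a dumps/loads round trip.
#
#     >>> from bson.objectid import ObjectId
#     >>> oid = ObjectId()
#     >>> loads(dumps({'_id': oid}))['_id'] == oid
#     True
#
# Pass loads(..., compile_re=False) to get bson.regex.Regex instances back
# instead of compiled Python patterns for "$regex" documents.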
| agpl-3.0 |
hlzz/dotfiles | graphics/VTK-7.0.0/ThirdParty/Twisted/twisted/test/test_protocols.py | 2 | 7629 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.protocols package.
"""
from twisted.trial import unittest
from twisted.protocols import wire, portforward
from twisted.internet import reactor, defer, address, protocol
from twisted.test import proto_helpers
class WireTestCase(unittest.TestCase):
"""
Test wire protocols.
"""
def test_echo(self):
"""
        Test wire.Echo protocol: send some data and check it is echoed back.
"""
t = proto_helpers.StringTransport()
a = wire.Echo()
a.makeConnection(t)
a.dataReceived("hello")
a.dataReceived("world")
a.dataReceived("how")
a.dataReceived("are")
a.dataReceived("you")
self.assertEqual(t.value(), "helloworldhowareyou")
def test_who(self):
"""
Test wire.Who protocol.
"""
t = proto_helpers.StringTransport()
a = wire.Who()
a.makeConnection(t)
self.assertEqual(t.value(), "root\r\n")
def test_QOTD(self):
"""
Test wire.QOTD protocol.
"""
t = proto_helpers.StringTransport()
a = wire.QOTD()
a.makeConnection(t)
self.assertEqual(t.value(),
"An apple a day keeps the doctor away.\r\n")
def test_discard(self):
"""
Test wire.Discard protocol.
"""
t = proto_helpers.StringTransport()
a = wire.Discard()
a.makeConnection(t)
a.dataReceived("hello")
a.dataReceived("world")
a.dataReceived("how")
a.dataReceived("are")
a.dataReceived("you")
self.assertEqual(t.value(), "")
class TestableProxyClientFactory(portforward.ProxyClientFactory):
"""
Test proxy client factory that keeps the last created protocol instance.
@ivar protoInstance: the last instance of the protocol.
@type protoInstance: L{portforward.ProxyClient}
"""
def buildProtocol(self, addr):
"""
Create the protocol instance and keeps track of it.
"""
proto = portforward.ProxyClientFactory.buildProtocol(self, addr)
self.protoInstance = proto
return proto
class TestableProxyFactory(portforward.ProxyFactory):
"""
Test proxy factory that keeps the last created protocol instance.
@ivar protoInstance: the last instance of the protocol.
@type protoInstance: L{portforward.ProxyServer}
@ivar clientFactoryInstance: client factory used by C{protoInstance} to
create forward connections.
@type clientFactoryInstance: L{TestableProxyClientFactory}
"""
def buildProtocol(self, addr):
"""
Create the protocol instance, keeps track of it, and makes it use
C{clientFactoryInstance} as client factory.
"""
proto = portforward.ProxyFactory.buildProtocol(self, addr)
self.clientFactoryInstance = TestableProxyClientFactory()
# Force the use of this specific instance
proto.clientProtocolFactory = lambda: self.clientFactoryInstance
self.protoInstance = proto
return proto
class Portforwarding(unittest.TestCase):
"""
Test port forwarding.
"""
def setUp(self):
self.serverProtocol = wire.Echo()
self.clientProtocol = protocol.Protocol()
self.openPorts = []
def tearDown(self):
try:
self.proxyServerFactory.protoInstance.transport.loseConnection()
except AttributeError:
pass
try:
pi = self.proxyServerFactory.clientFactoryInstance.protoInstance
pi.transport.loseConnection()
except AttributeError:
pass
try:
self.clientProtocol.transport.loseConnection()
except AttributeError:
pass
try:
self.serverProtocol.transport.loseConnection()
except AttributeError:
pass
return defer.gatherResults(
[defer.maybeDeferred(p.stopListening) for p in self.openPorts])
def test_portforward(self):
"""
Test port forwarding through Echo protocol.
"""
realServerFactory = protocol.ServerFactory()
realServerFactory.protocol = lambda: self.serverProtocol
realServerPort = reactor.listenTCP(0, realServerFactory,
interface='127.0.0.1')
self.openPorts.append(realServerPort)
self.proxyServerFactory = TestableProxyFactory('127.0.0.1',
realServerPort.getHost().port)
proxyServerPort = reactor.listenTCP(0, self.proxyServerFactory,
interface='127.0.0.1')
self.openPorts.append(proxyServerPort)
nBytes = 1000
received = []
d = defer.Deferred()
def testDataReceived(data):
received.extend(data)
if len(received) >= nBytes:
self.assertEqual(''.join(received), 'x' * nBytes)
d.callback(None)
self.clientProtocol.dataReceived = testDataReceived
def testConnectionMade():
self.clientProtocol.transport.write('x' * nBytes)
self.clientProtocol.connectionMade = testConnectionMade
clientFactory = protocol.ClientFactory()
clientFactory.protocol = lambda: self.clientProtocol
reactor.connectTCP(
'127.0.0.1', proxyServerPort.getHost().port, clientFactory)
return d
def test_registerProducers(self):
"""
The proxy client registers itself as a producer of the proxy server and
vice versa.
"""
# create a ProxyServer instance
addr = address.IPv4Address('TCP', '127.0.0.1', 0)
server = portforward.ProxyFactory('127.0.0.1', 0).buildProtocol(addr)
# set the reactor for this test
reactor = proto_helpers.MemoryReactor()
server.reactor = reactor
# make the connection
serverTransport = proto_helpers.StringTransport()
server.makeConnection(serverTransport)
# check that the ProxyClientFactory is connecting to the backend
self.assertEqual(len(reactor.tcpClients), 1)
# get the factory instance and check it's the one we expect
host, port, clientFactory, timeout, _ = reactor.tcpClients[0]
self.assertIsInstance(clientFactory, portforward.ProxyClientFactory)
# Connect it
client = clientFactory.buildProtocol(addr)
clientTransport = proto_helpers.StringTransport()
client.makeConnection(clientTransport)
# check that the producers are registered
self.assertIdentical(clientTransport.producer, serverTransport)
self.assertIdentical(serverTransport.producer, clientTransport)
# check the streaming attribute in both transports
self.assertTrue(clientTransport.streaming)
self.assertTrue(serverTransport.streaming)
class StringTransportTestCase(unittest.TestCase):
"""
Test L{proto_helpers.StringTransport} helper behaviour.
"""
def test_noUnicode(self):
"""
Test that L{proto_helpers.StringTransport} doesn't accept unicode data.
"""
s = proto_helpers.StringTransport()
self.assertRaises(TypeError, s.write, u'foo')
| bsd-3-clause |
lightcn/odoo | addons/project_issue/__openerp__.py | 260 | 2329 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Issue Tracking',
'version': '1.0',
'category': 'Project Management',
'sequence': 9,
'summary': 'Support, Bug Tracker, Helpdesk',
'description': """
Track Issues/Bugs Management for Projects
=========================================
This application allows you to manage the issues you might face in a project like bugs in a system, client complaints or material breakdowns.
It allows the manager to quickly check the issues, assign them and decide on their status quickly as they evolve.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/project-management',
'depends': [
'sales_team',
'project',
],
'data': [
'project_issue_view.xml',
'project_issue_menu.xml',
'report/project_issue_report_view.xml',
'security/project_issue_security.xml',
'security/ir.model.access.csv',
'res_config_view.xml',
'project_issue_data.xml'
],
'demo': ['project_issue_demo.xml'],
'test': [
'test/issue_users.yml',
'test/subscribe_issue.yml',
'test/issue_process.yml',
'test/issue_demo.yml'
],
'installable': True,
'auto_install': False,
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jivesoftware/platform_cli | platform_cli/props.py | 1 | 3175 | #!/usr/bin/env python
# Copyright (C) 2013 Jive Software. All rights reserved.
"""High-level functions for manipulating a .properties file.
"""
import ConfigParser
import itertools
import os
import StringIO
FAKE_SECTION_NAME = 'fake_section'
class Error(Exception):
"""Base exception class for this module."""
class _AsPropsFile(object):
"""Read a .properties file with ConfigParser.
Fake a section heading so that ConfigParser can read the .properties file
as an .ini file.
"""
def __init__(self, file_obj, section_name='asection', allow_multiline_values=True):
"""Initialize the AsPropsFile .properties file wrappper."""
section_head = '[{}]\n'.format(section_name)
self.conf_lines = itertools.chain((section_head,), file_obj, ('',))
self.allow_multiline_values = allow_multiline_values
def readline(self):
"""Provide a readline method for readfp to call."""
line = self.conf_lines.next()
if not self.allow_multiline_values:
line = line.lstrip(' \t')
return line
def _open_props(file_path, create_new=False, allow_multiline_values=True):
"""Get a ConfigParser object with the contents of a .properties file."""
conf = ConfigParser.ConfigParser()
conf.optionxform = str # Otherwise ConfigParser auto-converts to lower.
if create_new and not os.path.exists(file_path):
try:
with open(file_path, 'w') as _:
pass
except IOError, err:
raise Error('Cannot create file at {}:\n{}'.format(
file_path, err))
try:
with open(file_path, 'r') as file_obj:
conf.readfp(_AsPropsFile(file_obj, FAKE_SECTION_NAME, allow_multiline_values))
except IOError, err:
raise Error('Cannot open file at {}:\n{}'.format(
file_path, err))
return conf
def _edit_props(file_path, mutation_func, create_new=False):
"""Safe edit to a .properties config file."""
conf = _open_props(file_path, create_new)
mutation_func(conf)
memoryfile = StringIO.StringIO()
conf.write(memoryfile)
temp_path = file_path + '.temp'
try:
with open(temp_path, 'w') as temp_conf_file:
            # drop the fake [section] header line that ConfigParser emitted
            for line in memoryfile.getvalue().split('\n')[1:]:
temp_conf_file.write('{}\n'.format(line))
except IOError, err:
raise Error('Cannot write to temp config file at {}:\n{}'.format(
temp_path, err))
try:
os.rename(temp_path, file_path)
except OSError, err:
raise Error('Cannot rename file {} to {}:\n{}'.format(
temp_path, file_path, err))
def get_items(file_path, create_new=False, allow_multiline_values=True):
"""Return a list of tuples in a .properties file."""
conf = _open_props(file_path, create_new, allow_multiline_values)
return conf.items(FAKE_SECTION_NAME)
def set_key(file_path, key, value, create_new=False):
"""Set key to value in a .properties file."""
_edit_props(file_path,
lambda x: x.set(FAKE_SECTION_NAME, key, value),
create_new)
def delete_key(file_path, key, create_new=False):
"""Delete a key from a .properties file."""
_edit_props(file_path,
lambda x: x.remove_option(FAKE_SECTION_NAME, key),
create_new)
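# A minimal usage sketch; the path and key below are hypothetical, not part
# of the module's API.
if __name__ == '__main__':
    _DEMO = '/tmp/demo.properties'  # hypothetical scratch file
    set_key(_DEMO, 'jive.instance.name', 'example', create_new=True)
    print get_items(_DEMO)  # -> [('jive.instance.name', 'example')]
    delete_key(_DEMO, 'jive.instance.name')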
| apache-2.0 |
xbmc/xbmc-antiquated | xbmc/lib/libPython/Python/Lib/test/test_errno.py | 39 | 2132 | #! /usr/bin/env python
"""Test the errno module
Roger E. Masse
"""
import errno
from test.test_support import verbose
errors = ['E2BIG', 'EACCES', 'EADDRINUSE', 'EADDRNOTAVAIL', 'EADV',
'EAFNOSUPPORT', 'EAGAIN', 'EALREADY', 'EBADE', 'EBADF',
'EBADFD', 'EBADMSG', 'EBADR', 'EBADRQC', 'EBADSLT',
'EBFONT', 'EBUSY', 'ECHILD', 'ECHRNG', 'ECOMM',
'ECONNABORTED', 'ECONNREFUSED', 'ECONNRESET',
'EDEADLK', 'EDEADLOCK', 'EDESTADDRREQ', 'EDOM',
'EDQUOT', 'EEXIST', 'EFAULT', 'EFBIG', 'EHOSTDOWN',
'EHOSTUNREACH', 'EIDRM', 'EILSEQ', 'EINPROGRESS',
'EINTR', 'EINVAL', 'EIO', 'EISCONN', 'EISDIR',
'EL2HLT', 'EL2NSYNC', 'EL3HLT', 'EL3RST', 'ELIBACC',
'ELIBBAD', 'ELIBEXEC', 'ELIBMAX', 'ELIBSCN', 'ELNRNG',
'ELOOP', 'EMFILE', 'EMLINK', 'EMSGSIZE', 'EMULTIHOP',
'ENAMETOOLONG', 'ENETDOWN', 'ENETRESET', 'ENETUNREACH',
'ENFILE', 'ENOANO', 'ENOBUFS', 'ENOCSI', 'ENODATA',
'ENODEV', 'ENOENT', 'ENOEXEC', 'ENOLCK', 'ENOLINK',
'ENOMEM', 'ENOMSG', 'ENONET', 'ENOPKG', 'ENOPROTOOPT',
'ENOSPC', 'ENOSR', 'ENOSTR', 'ENOSYS', 'ENOTBLK',
          'ENOTCONN', 'ENOTDIR', 'ENOTEMPTY', 'ENOTNAM', 'ENOTSOCK',
'ENOTTY', 'ENOTUNIQ', 'ENXIO', 'EOPNOTSUPP',
'EOVERFLOW', 'EPERM', 'EPFNOSUPPORT', 'EPIPE',
'EPROTO', 'EPROTONOSUPPORT', 'EPROTOTYPE',
'ERANGE', 'EREMCHG', 'EREMOTE', 'ERESTART',
'EROFS', 'ESHUTDOWN', 'ESOCKTNOSUPPORT', 'ESPIPE',
'ESRCH', 'ESRMNT', 'ESTALE', 'ESTRPIPE', 'ETIME',
'ETIMEDOUT', 'ETOOMANYREFS', 'ETXTBSY', 'EUNATCH',
'EUSERS', 'EWOULDBLOCK', 'EXDEV', 'EXFULL']
#
# This is a wee bit bogus since the module only conditionally adds
# errno constants if they have been defined by errno.h However, this
# test seems to work on SGI, Sparc & intel Solaris, and linux.
#
for error in errors:
try:
a = getattr(errno, error)
except AttributeError:
if verbose:
print '%s: not found' % error
else:
if verbose:
print '%s: %d' % (error, a)
| gpl-2.0 |
calfonso/ansible | lib/ansible/modules/network/f5/bigip_monitor_https.py | 7 | 18087 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_monitor_https
short_description: Manages F5 BIG-IP LTM https monitors
description: Manages F5 BIG-IP LTM https monitors.
version_added: 2.5
options:
name:
description:
- Monitor name.
required: True
parent:
description:
- The parent template of this monitor template. Once this value has
been set, it cannot be changed. By default, this value is the C(https)
parent on the C(Common) partition.
default: "/Common/https"
send:
description:
- The send string for the monitor call. When creating a new monitor, if
this value is not provided, the default C(GET /\\r\\n) will be used.
receive:
description:
- The receive string for the monitor call.
receive_disable:
description:
- This setting works like C(receive), except that the system marks the node
or pool member disabled when its response matches the C(receive_disable)
string but not C(receive). To use this setting, you must specify both
C(receive_disable) and C(receive).
ip:
description:
- IP address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'.
port:
description:
- Port address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
        '*'. Note that if an IP address is specified, a port number between
        1 and 65535 must also be specified.
interval:
description:
- The interval specifying how frequently the monitor instance of this
template will run. If this parameter is not provided when creating
a new monitor, then the default value will be 5. This value B(must)
be less than the C(timeout) value.
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request. If the target responds within the set time
period, it is considered up. If the target does not respond within
the set time period, it is considered down. You can change this
number to any number you want, however, it should be 3 times the
interval number of seconds plus 1 second. If this parameter is not
provided when creating a new monitor, then the default value will be 16.
time_until_up:
description:
- Specifies the amount of time in seconds after the first successful
response before a node will be marked up. A value of 0 will cause a
node to be marked up immediately after a valid response is received
from the node. If this parameter is not provided when creating
a new monitor, then the default value will be 0.
target_username:
description:
- Specifies the user name, if the monitored target requires authentication.
target_password:
description:
- Specifies the password, if the monitored target requires authentication.
partition:
description:
- Device partition to manage resources on.
default: Common
state:
description:
- When C(present), ensures that the monitor exists.
- When C(absent), ensures the monitor is removed.
default: present
choices:
- present
- absent
version_added: 2.5
notes:
- Requires BIG-IP software version >= 12
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create HTTPS Monitor
bigip_monitor_https:
state: present
ip: 10.10.10.10
server: lb.mydomain.com
user: admin
password: secret
name: my_http_monitor
delegate_to: localhost
- name: Remove HTTPS Monitor
bigip_monitor_https:
state: absent
server: lb.mydomain.com
user: admin
password: secret
name: my_http_monitor
delegate_to: localhost
'''
RETURN = r'''
parent:
description: New parent template of the monitor.
returned: changed
type: string
sample: https
ip:
description: The new IP of IP/port definition.
returned: changed
type: string
sample: 10.12.13.14
interval:
description: The new interval in which to run the monitor check.
returned: changed
type: int
sample: 2
timeout:
description: The new timeout in which the remote system must respond to the monitor.
returned: changed
type: int
sample: 10
time_until_up:
description: The new time in which to mark a system as up after first successful response.
returned: changed
type: int
sample: 2
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
try:
import netaddr
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
class Parameters(AnsibleF5Parameters):
api_map = {
'timeUntilUp': 'time_until_up',
'defaultsFrom': 'parent',
'recv': 'receive'
}
api_attributes = [
'timeUntilUp', 'defaultsFrom', 'interval', 'timeout', 'recv', 'send',
'destination', 'username', 'password'
]
returnables = [
'parent', 'send', 'receive', 'ip', 'port', 'interval', 'timeout',
'time_until_up'
]
updatables = [
'destination', 'send', 'receive', 'interval', 'timeout', 'time_until_up',
'target_username', 'target_password'
]
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
except Exception:
return result
@property
def username(self):
return self._values['target_username']
@property
def password(self):
return self._values['target_password']
@property
def destination(self):
if self.ip is None and self.port is None:
return None
destination = '{0}:{1}'.format(self.ip, self.port)
return destination
@destination.setter
def destination(self, value):
ip, port = value.split(':')
self._values['ip'] = ip
self._values['port'] = port
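        # e.g. assigning '10.10.10.10:443' stores ip '10.10.10.10', port '443'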
@property
def interval(self):
if self._values['interval'] is None:
return None
# Per BZ617284, the BIG-IP UI does not raise a warning about this.
# So I do
        if not 1 <= int(self._values['interval']) <= 86400:
raise F5ModuleError(
"Interval value must be between 1 and 86400"
)
return int(self._values['interval'])
@property
def timeout(self):
if self._values['timeout'] is None:
return None
return int(self._values['timeout'])
@property
def ip(self):
if self._values['ip'] is None:
return None
try:
if self._values['ip'] in ['*', '0.0.0.0']:
return '*'
result = str(netaddr.IPAddress(self._values['ip']))
return result
except netaddr.core.AddrFormatError:
raise F5ModuleError(
"The provided 'ip' parameter is not an IP address."
)
@property
def port(self):
if self._values['port'] is None:
return None
elif self._values['port'] == '*':
return '*'
return int(self._values['port'])
@property
def time_until_up(self):
if self._values['time_until_up'] is None:
return None
return int(self._values['time_until_up'])
@property
def parent(self):
if self._values['parent'] is None:
return None
result = fq_name(self.partition, self._values['parent'])
return result
@property
def type(self):
return 'https'
class Changes(Parameters):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
result = self.__default(param)
return result
@property
def parent(self):
if self.want.parent != self.have.parent:
raise F5ModuleError(
"The parent monitor cannot be changed"
)
@property
def destination(self):
if self.want.ip is None and self.want.port is None:
return None
if self.want.port is None:
self.want.update({'port': self.have.port})
if self.want.ip is None:
self.want.update({'ip': self.have.ip})
if self.want.port in [None, '*'] and self.want.ip != '*':
raise F5ModuleError(
"Specifying an IP address requires that a port number be specified"
)
if self.want.destination != self.have.destination:
return self.want.destination
@property
def interval(self):
if self.want.timeout is not None and self.want.interval is not None:
if self.want.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.timeout is not None:
if self.have.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.interval is not None:
if self.want.interval >= self.have.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
if self.want.interval != self.have.interval:
return self.want.interval
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
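# Comparison sketch (illustrative): Difference(want, have).compare('interval')
# hits the interval property above; any parameter without a dedicated property
# falls through to __default, which reports want's value only when it differs
# from have's.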
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.have = None
self.want = Parameters(params=self.module.params)
self.changes = Changes()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Changes(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
changed[k] = change
if changed:
self.changes = Changes(params=changed)
return True
return False
def _announce_deprecations(self):
warnings = []
if self.want:
warnings += self.want._values.get('__warnings', [])
if self.have:
warnings += self.have._values.get('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations()
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def create(self):
self._set_changed_options()
if self.want.timeout is None:
self.want.update({'timeout': 16})
if self.want.interval is None:
self.want.update({'interval': 5})
if self.want.time_until_up is None:
self.want.update({'time_until_up': 0})
if self.want.ip is None:
self.want.update({'ip': '*'})
if self.want.port is None:
self.want.update({'port': '*'})
if self.want.send is None:
self.want.update({'send': 'GET /\r\n'})
if self.module.check_mode:
return True
self.create_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def absent(self):
if self.exists():
return self.remove()
return False
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the monitor.")
return True
def read_current_from_device(self):
resource = self.client.api.tm.ltm.monitor.https_s.https.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
return Parameters(params=result)
def exists(self):
result = self.client.api.tm.ltm.monitor.https_s.https.exists(
name=self.want.name,
partition=self.want.partition
)
return result
def update_on_device(self):
params = self.want.api_params()
result = self.client.api.tm.ltm.monitor.https_s.https.load(
name=self.want.name,
partition=self.want.partition
)
result.modify(**params)
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.ltm.monitor.https_s.https.create(
name=self.want.name,
partition=self.want.partition,
**params
)
def remove_from_device(self):
result = self.client.api.tm.ltm.monitor.https_s.https.load(
name=self.want.name,
partition=self.want.partition
)
if result:
result.delete()
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent=dict(default='/Common/https'),
send=dict(),
receive=dict(),
receive_disable=dict(required=False),
ip=dict(),
port=dict(type='int'),
interval=dict(type='int'),
timeout=dict(type='int'),
time_until_up=dict(type='int'),
target_username=dict(),
target_password=dict(no_log=True),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
if not HAS_NETADDR:
module.fail_json(msg="The python netaddr module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as ex:
cleanup_tokens(client)
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
tschneidereit/servo | tests/wpt/web-platform-tests/tools/html5lib/setup.py | 418 | 1694 | from distutils.core import setup
import os
import codecs
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: HTML'
]
packages = ['html5lib'] + ['html5lib.'+name
for name in os.listdir(os.path.join('html5lib'))
if os.path.isdir(os.path.join('html5lib', name)) and
not name.startswith('.') and name != 'tests']
current_dir = os.path.dirname(__file__)
with codecs.open(os.path.join(current_dir, 'README.rst'), 'r', 'utf8') as readme_file:
with codecs.open(os.path.join(current_dir, 'CHANGES.rst'), 'r', 'utf8') as changes_file:
long_description = readme_file.read() + '\n' + changes_file.read()
setup(name='html5lib',
version='0.9999-dev',
url='https://github.com/html5lib/html5lib-python',
license="MIT License",
      description='HTML parser based on the WHATWG HTML specification',
long_description=long_description,
classifiers=classifiers,
maintainer='James Graham',
maintainer_email='james@hoppipolla.co.uk',
packages=packages,
install_requires=[
'six',
],
)
| mpl-2.0 |
iandev/HarvestMood | Requests/requests/packages/urllib3/exceptions.py | 77 | 2557 | # urllib3/exceptions.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
## Base Exceptions
class HTTPError(Exception):
"Base exception used by this module."
pass
class PoolError(HTTPError):
"Base exception for errors caused within a pool."
def __init__(self, pool, message):
self.pool = pool
HTTPError.__init__(self, "%s: %s" % (pool, message))
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, None)
class RequestError(PoolError):
"Base exception for PoolErrors that have associated URLs."
def __init__(self, pool, url, message):
self.url = url
PoolError.__init__(self, pool, message)
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
"Raised when SSL certificate fails in an HTTPS connection."
pass
class DecodeError(HTTPError):
"Raised when automatic decoding based on Content-Type fails."
pass
## Leaf Exceptions
class MaxRetryError(RequestError):
"Raised when the maximum number of retries is exceeded."
def __init__(self, pool, url, reason=None):
self.reason = reason
message = "Max retries exceeded with url: %s" % url
if reason:
message += " (Caused by %s: %s)" % (type(reason), reason)
else:
message += " (Caused by redirect)"
RequestError.__init__(self, pool, url, message)
class HostChangedError(RequestError):
"Raised when an existing pool gets a request for a foreign host."
def __init__(self, pool, url, retries=3):
message = "Tried to open a foreign host with url: %s" % url
RequestError.__init__(self, pool, url, message)
self.retries = retries
class TimeoutError(RequestError):
"Raised when a socket timeout occurs."
pass
class EmptyPoolError(PoolError):
"Raised when a pool runs out of connections and no more are allowed."
pass
class ClosedPoolError(PoolError):
"Raised when a request enters a pool after the pool has been closed."
pass
class LocationParseError(ValueError, HTTPError):
"Raised when get_host or similar fails to parse the URL input."
def __init__(self, location):
message = "Failed to parse: %s" % location
HTTPError.__init__(self, message)
self.location = location
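# Illustrative catch pattern (a sketch; `pool` stands for a hypothetical
# ConnectionPool instance, not code from this module):
#   try:
#       pool.urlopen('GET', '/')
#   except MaxRetryError as e:
#       print(e.url, e.reason)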
| mit |
jiajiax/crosswalk-test-suite | stability/stability-embeddingapi-android-tests/inst.apk.py | 1 | 3492 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARAMETERS = None
ADB_CMD = "adb"
def doCMD(cmd):
# Do not need handle timeout in this short script, let tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
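# Usage sketch (illustrative): doCMD echoes output as it streams and also
# returns it, e.g. (code, lines) = doCMD("%s devices" % ADB_CMD); code == 0
# indicates success.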
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s uninstall org.xwalk.%s" % (
ADB_CMD, PARAMETERS.device, os.path.basename(os.path.splitext(file)[0]))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
cmd = "%s -s %s uninstall org.xwalkview.maximum.app" % (
ADB_CMD, PARAMETERS.device)
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def instPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s install %s" % (ADB_CMD,
PARAMETERS.device, os.path.join(root, file))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception as e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.device:
(return_code, output) = doCMD("adb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
if not PARAMETERS.device:
print "No device found"
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
| bsd-3-clause |
sleibrock/discord-bots | junkyard/hacker-bot.py | 1 | 16271 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
Hacker bot
Designed as a way to interact with the host machine
Also comes with a Lis.py interpreter (thanks Peter Norvig)
Requires: cowsay, screenfetch, cowfortune
"""
from botinfo import *
from bs4 import BeautifulSoup as BS
from requests import get as re_get
from random import choice, random, randint
# Lisp interpreter imports
import math
import operator as op
from functools import reduce
from itertools import takewhile, dropwhile
# setting maximum recursion limits for Lisp evaluation
from sys import setrecursionlimit
setrecursionlimit(100)
bot_name = "hacker-bot"
client = discord.Client()
logger = create_logger(bot_name)
bot_data = create_filegen(bot_name)
max_slen = 300
max_data = 100
## Lisp Interpreter section
Symbol = str # A Lisp Symbol is implemented as a Python str
List = list # A Lisp List is implemented as a Python list
Number = (int, float) # A Lisp Number is implemented as a Python int or float
def tokenize(chars):
"""
Convert a string of characters into a list of tokens
TODO: work on joining strings together at this level
If mismatched quotes detected, raise SyntaxError
"""
old_tokens = chars.replace('(', ' ( ').replace(')', ' ) ').split()
return old_tokens
def parse(program):
"Read a Scheme expression from a string."
return read_from_tokens(tokenize(program))
def read_from_tokens(tokens):
"""
Read an expression from a sequence of tokens
TODO: add support for dicts, strings and lists
"""
if len(tokens) == 0:
raise SyntaxError('Unexpected EOF while reading')
token = tokens.pop(0)
if '(' == token:
L = []
while tokens[0] != ')':
L.append(read_from_tokens(tokens))
tokens.pop(0) # pop off ')'
return L
elif ')' == token:
raise SyntaxError('Unexpected )')
else:
return atom(token)
def atom(token):
"""
Numbers become numbers; every other token is a symbol
Order of checks: Bool, String, Int, Float, Symbol
Strings end with ' or "
"""
if token in ("True", "False"):
        return token == "True"
if token == "#t": return True
if token == "#f": return False
if token.startswith("\"") and token.endswith("\""):
return str(token)
if token.startswith("'") and token.endswith("'"):
return str(token)
try:
return int(token)
except ValueError:
try:
return float(token)
except ValueError:
return Symbol(token)
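# Parser sketch (illustrative): parse("(+ 1 (* 2 3))") tokenizes to
# ['(', '+', '1', '(', '*', '2', '3', ')', ')'] and nests into
# ['+', 1, ['*', 2, 3]], with atom() coercing numeric tokens to ints.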
class Procedure(object):
"A user-defined Scheme procedure."
def __init__(self, parms, body, env):
self.parms, self.body, self.env = parms, body, env
def __call__(self, *args):
return lispeval(self.body, Env(self.parms, args, self.env))
class Env(dict):
"An environment: a dict of {'var':val} pairs, with an outer Env."
def __init__(self, parms=(), args=(), outer=None):
self.update(zip(parms, args))
self.outer = outer
def find(self, var):
"Find the innermost Env where var appears."
return self if (var in self) else self.outer.find(var)
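# Scoping sketch (illustrative): lookups chain outward through Env frames:
#   inner = Env(('x',), (1,), outer=Env(('y',), (2,)))
#   inner.find('y')['y']  # -> 2, resolved in the outer frame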
def compose(*list_of_funcs):
"""
Compose a list of functions into a singular function
"""
if not isinstance(list_of_funcs, (tuple, list)):
raise SyntaxError("compose: No list given")
if not all(map(callable, list_of_funcs)):
raise SyntaxError("compose: Non-composable type given")
def whatever(in_var):
for func in list_of_funcs:
in_var = func(in_var)
return in_var
return whatever
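# Example sketch: compose applies its functions left-to-right, so
#   compose(lambda x: x + 1, lambda x: x * 2)(3)  # -> (3 + 1) * 2 == 8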
def compareduce(f, *lst):
"""
A reduce modified to work for `f :: a -> b -> Bool` functions
If `f` ever evaluates to False, return False, else return True
This is mandatory because functools.reduce doesn't store previous
values for each pair in the computation chain
"""
if len(lst) <= 1:
raise SyntaxError(f"{f.__name__}: List too small for evaluation")
first_value = lst[0]
for next_value in lst[1:]:
if not f(first_value, next_value):
return False
first_value = next_value
return True
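# Example sketch: compareduce chains pairwise checks, so
#   compareduce(op.lt, 1, 2, 3)  # -> True  (1 < 2 and 2 < 3)
#   compareduce(op.lt, 1, 3, 2)  # -> False (3 < 2 fails)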
def standard_env():
"""
An environment with some Scheme standard procedures
Updated for Python 3 specifics and a lot more Lispy stuff
"""
env = Env()
env.update(vars(math)) # sin, cos, sqrt, pi, ...
env.update({
'not' : op.not_,
'negate' : op.neg,
'int' : int,
'float' : float,
'bool' : bool,
'ord' : ord,
'chr' : chr,
'bin' : bin,
'oct' : oct,
'hex' : hex,
'hash' : hash,
'abs' : abs,
'length' : len,
'sum' : sum,
'any' : any,
'all' : all,
'max' : max,
'min' : min,
'reduce' : reduce,
'reverse' : reversed,
'random' : random,
'randint' : randint,
'choice' : choice,
'round' : round,
'compose' : compose,
'id' : lambda x : x,
        'eq?' : lambda *x : compareduce(op.is_, *x),
        'equal?' : lambda *x : compareduce(op.eq, *x),
'+' : lambda *x : reduce(op.add, x),
'append' : lambda *x : reduce(op.add, x),
'-' : lambda *x : reduce(op.sub, x),
'*' : lambda *x : reduce(op.mul, x),
'/' : lambda *x : reduce(op.truediv, x),
'%' : lambda *x : reduce(op.mod, x),
'mod' : lambda *x : reduce(op.mod, x),
'modulo' : lambda *x : reduce(op.mod, x),
        '>' : lambda *x : compareduce(op.gt, *x),
'<' : lambda *x : compareduce(op.lt, *x),
'>=' : lambda *x : compareduce(op.ge, *x),
'<=' : lambda *x : compareduce(op.le, *x),
'=' : lambda *x : compareduce(op.eq, *x),
'>>' : lambda x, y : op.rshift(x, y),
'<<' : lambda x, y : op.lshift(x, y),
'or' : lambda *x : reduce(lambda x, y: x or y, x),
'and' : lambda *x : reduce(lambda x, y: x and y, x),
'add1' : lambda x : x + 1,
'sub1' : lambda x : x - 1,
'range' : lambda x : list(range(x)),
'span' : lambda x, y : list(range(x, y)),
'enum' : lambda x : list(enumerate(x)),
'zip' : lambda x : list(zip(x)),
'^' : lambda x, y : pow(x, y),
'pow' : lambda x, y : pow(x, y),
'apply' : lambda f, x : f(*x),
'begin' : lambda *x : x[-1],
'cons' : lambda x, y : [x] + y,
'car' : lambda x : x[0],
'cdr' : lambda x : x[1:],
'head' : lambda x : x[0],
'tail' : lambda x : x[1:],
'take' : lambda x, y : x[:y],
'drop' : lambda x, y : x[y:],
        'takewhile' : lambda f, x : list(takewhile(f, x)),
        'dropwhile' : lambda f, x : list(dropwhile(f, x)),
'list' : lambda *x : list(x),
'tuple' : lambda *x : tuple(x),
'dict' : lambda *x : dict(x),
'set' : lambda *x : set(x),
'frozenset' : lambda *x : frozenset(x),
'map' : lambda f, x : list(map(f, x)),
'filter' : lambda f, x : list(filter(f, x)),
'ormap' : lambda f, x : any(map(f, x)),
'andmap' : lambda f, x : all(map(f, x)),
'zero?' : lambda x : x == 0,
'empty?' : lambda x : len(x) == 0,
'pair?' : lambda x : len(x) == 2,
'procedure?' : lambda x : callable(x),
'number?' : lambda x : isinstance(x, Number),
'symbol?' : lambda x : isinstance(x, Symbol),
'string?' : lambda x : isinstance(x, str),
'bool?' : lambda x : isinstance(x, bool),
'odd?' : lambda x : bool((x % 2) == 1),
        'even?' : lambda x : not bool((x % 2) == 1),
'list?' : lambda x : isinstance(x, list),
'tuple?' : lambda x : isinstance(x, tuple),
'set?' : lambda x : isinstance(x, set),
'frozenset?' : lambda x : isinstance(x, frozenset),
'dict?' : lambda x : isinstance(x, dict),
'contains?' : lambda x, y : op.contains(x, y),
'foldl' : lambda o, i, x: reduce(o, x, i),
})
return env
global_env = standard_env()
def lispeval(x, env=global_env):
"""
Evaluate an expression in an environment
"""
if isinstance(x, Symbol): # variable reference
return env.find(x)[x]
elif not isinstance(x, List): # constant literal
return x
elif x[0] == 'quote': # quotation
(_, exp) = x
return exp
elif x[0] == 'if': # conditional
(_, test, conseq, alt) = x
exp = (conseq if lispeval(test, env) else alt)
return lispeval(exp, env)
elif x[0] == 'define': # definition
(_, var, exp) = x
env[var] = lispeval(exp, env)
elif x[0] == 'set!': # assignment
(_, var, exp) = x
env.find(var)[var] = lispeval(exp, env)
elif x[0] == 'lambda': # procedure
(_, parms, body) = x
return Procedure(parms, body, env)
else: # procedure call
proc = lispeval(x[0], env)
args = [lispeval(arg, env) for arg in x[1:]]
return proc(*args)
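# Evaluator sketch (illustrative, against the default global_env):
#   lispeval(parse("(define square (lambda (x) (* x x)))"))
#   lispeval(parse("(square 7)"))  # -> 49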
@register_command
async def e(msg, mobj):
"""
Interpret a Lisp expression using a Lisp-to-Python translator
Ex1: !e (define x (+ 20 40 pi))
Ex2: !e (map (lambda (x) (+ x 5)) (range 10))
"""
try:
result = lispeval(parse(msg))
if len(str(result)) + len(msg) > 1900:
return await client.send_message(mobj.channel, "Output too large")
return await client.send_message(mobj.channel,
pre_text("{}\n => {}".format(msg, result), "lisp"))
except Exception as ex:
return await client.send_message(mobj.channel, pre_text("{}: {}".format(type(ex).__name__, ex)))
    return await client.send_message(mobj.channel, "Failed to compute expression")
## end lisp section
@register_command
async def info(msg, mobj):
"""
Return current channel/guild information
"""
chan = mobj.channel
serv = mobj.guild
chan_name, serv_name = None, None
chan_id, serv_id = 0, 0
if hasattr(chan, 'name'): chan_name = chan.name
if hasattr(serv, 'name'): serv_name = serv.name
if hasattr(chan, 'id'): chan_id = chan.id
if hasattr(serv, 'id'): serv_id = serv.id
msg = [
f"Channel: {chan.name}",
f"Channel ID: {chan_id}",
f"Server: {serv.name}",
f"Server ID: {serv_id}",
]
return await client.send_message(mobj.channel, pre_text("\n".join(msg)))
@register_command
async def uptime(msg, mobj):
"""
Return the uptime of the host system
"""
return await client.send_message(mobj.channel, pre_text(call("uptime")))
@register_command
async def free(msg, mobj):
"""
Return how much memory is free
"""
return await client.send_message(mobj.channel, pre_text(call("free -m")))
@register_command
async def vmstat(msg, mobj):
"""
Return raw memory stats from `vmstat`
"""
return await client.send_message(mobj.channel, pre_text(call("vmstat")))
@register_command
async def uname(msg, mobj):
"""
Return `uname -a` showing system and kernel version
"""
return await client.send_message(mobj.channel, pre_text(call("uname -a | cowsay")))
@register_command
async def cal(msg, mobj):
"""
Return the current month calendar
"""
return await client.send_message(mobj.channel, pre_text(call("cal")))
@register_command
async def screenfetch(msg, mobj):
"""
Return a screenfetch output
Example: !screenfetch
"""
return await client.send_message(mobj.channel, pre_text(call("screenfetch -N -n")))
@register_command
async def sed(msg, mobj):
"""
Sed the previous user's message (as opposed to just editing it)
Example: !sed s/hi/hello/g
"""
if msg == "":
return
auth = mobj.author.id
chan = mobj.channel
    last = get_last_message(client, chan, auth)
    if last is None:
        return await client.send_message(chan, "Couldn't find a message to sed")
    last_m = last.content.strip().replace("\"", "'")
return await client.send_message(chan, pre_text(call("echo \"{}\" | sed -s {}".format(last_m, msg))))
# The Git section
@register_command
async def update(msg, mobj):
"""
Execute a `git pull` to update the code
If there was a successful pull, the bot will quit and be restarted later
Example: !update
"""
print(msg)
result = call("git pull")
await client.send_message(mobj.channel, pre_text(result))
if result.strip() == "Already up-to-date.":
return
logger("Restarting self")
client.close()
return quit()
@register_command
async def commits(msg, mobj):
"""
Execute a `git log --oneline --graph --decorate=short | head -n 5`
Example: !commits
"""
return await client.send_message(
mobj.channel, pre_text(call("git log --oneline --graph --decorate=short | head -n 5")))
# The Cowsay section for cowtagging and cowsay
@register_command
async def cowtag(msg, mobj):
"""
Find the message before this one and add it to our cow list
Example: !cowtag
"""
chan = mobj.channel
cowlist = bot_data("{}.txt".format(chan.id))
last_m = get_last_message(client, chan)
if last_m is None:
return await client.send_message(chan, "Couldn't tag last message")
last_m = last_m.content.replace("\n", " ").replace("\r", " ").strip()[:max_slen]
last_m = last_m.replace("\"", "'")
lines = read_lines(cowlist)
lines.append(last_m)
if len(lines) > max_data:
lines.pop(0)
if not write_lines(cowlist, lines):
return await client.send_message(chan, "Error writing to file")
return await client.send_message(chan, "Bagged and tagged")
@register_command
async def cowsay(msg, mobj):
"""
Return a random cowsay message
Example: !cowsay
"""
chan = mobj.channel.id
cowlist = bot_data("{}.txt".format(chan))
cowlines = read_lines(cowlist)
if not cowlines:
return await client.send_message(mobj.channel, "No cow messages here")
rand = choice(cowlines)
if rand.strip() == "":
return await client.send_message(mobj.channel, "No cow messages here")
return await client.send_message(mobj.channel, pre_text(call("echo \"{}\" | cowsay".format(rand))))
@register_command
async def fortune(msg, mobj):
"""
Return a `fortune | cowsay` call
Example: !fortune
"""
return await client.send_message(mobj.channel, pre_text(call("fortune | cowsay")))
# search options
@register_command
async def hoogle(msg, mobj):
"""
Send a Haskell type signature to Hoogle and return the first result
If no results, error string
Ex1: !hoogle (a -> b) -> M a -> M b
Ex2: !hoogle >>=
"""
if contains_badwords(msg):
return
if msg == "":
return await client.send_message(mobj.channel, "Nothing sent")
base_url = "https://www.haskell.org/hoogle/?hoogle={}"
search = BS(re_get(base_url.format(url_replace(msg))).text, "html.parser")
answers = search.find_all("div", class_="ans")
if not answers:
return await client.send_message(mobj.channel, "Nothing was found, bud")
res = answers[0]
text = res.text
url = res.find("a", class_="a")["href"]
return await client.send_message(mobj.channel, "{}\n{}".format(text, url))
setup_all_events(client, bot_name, logger)
if __name__ == "__main__":
run_the_bot(client, bot_name, logger)
| mit |
yephper/django | tests/fixtures/tests.py | 1 | 81690 | from __future__ import unicode_literals
import os
import sys
import tempfile
import unittest
import warnings
from django.apps import apps
from django.contrib.sites.models import Site
from django.core import management
from django.core.files.temp import NamedTemporaryFile
from django.core.management import CommandError
from django.core.management.commands.dumpdata import ProxyModelWarning
from django.core.serializers.base import ProgressBar
from django.db import IntegrityError, connection
from django.test import (
TestCase, TransactionTestCase, mock, skipUnlessDBFeature,
)
from django.utils import six
from django.utils.encoding import force_text
from .models import Article, ProxySpy, Spy, Tag, Visa
class TestCaseFixtureLoadingTests(TestCase):
fixtures = ['fixture1.json', 'fixture2.json']
def testClassFixtures(self):
"Check that test case has installed 3 fixture objects"
self.assertEqual(Article.objects.count(), 3)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker has no place on ESPN>',
])
class SubclassTestCaseFixtureLoadingTests(TestCaseFixtureLoadingTests):
"""
Make sure that subclasses can remove fixtures from parent class (#21089).
"""
fixtures = []
def testClassFixtures(self):
"Check that there were no fixture objects installed"
self.assertEqual(Article.objects.count(), 0)
class DumpDataAssertMixin(object):
def _dumpdata_assert(self, args, output, format='json', filename=None,
natural_foreign_keys=False, natural_primary_keys=False,
use_base_manager=False, exclude_list=[], primary_keys=''):
new_io = six.StringIO()
if filename:
filename = os.path.join(tempfile.gettempdir(), filename)
management.call_command('dumpdata', *args, **{'format': format,
'stdout': new_io,
'stderr': new_io,
'output': filename,
'use_natural_foreign_keys': natural_foreign_keys,
'use_natural_primary_keys': natural_primary_keys,
'use_base_manager': use_base_manager,
'exclude': exclude_list,
'primary_keys': primary_keys})
if filename:
with open(filename, "r") as f:
command_output = f.read()
os.remove(filename)
else:
command_output = new_io.getvalue().strip()
if format == "json":
self.assertJSONEqual(command_output, output)
elif format == "xml":
self.assertXMLEqual(command_output, output)
else:
self.assertEqual(command_output, output)
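# Usage sketch (illustrative): test methods call the mixin helper as
#   self._dumpdata_assert(['fixtures.Category'], expected_json)
# which runs dumpdata and compares the output captured from stdout or the
# --output file.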
class FixtureLoadingTests(DumpDataAssertMixin, TestCase):
def test_loading_and_dumping(self):
apps.clear_cache()
Site.objects.all().delete()
# Load fixture 1. Single JSON file, with two objects.
management.call_command('loaddata', 'fixture1.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Try just dumping the contents of fixtures.Category
self._dumpdata_assert(
['fixtures.Category'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", '
'"title": "News Stories"}}]'
)
# ...and just fixtures.Article
self._dumpdata_assert(
['fixtures.Article'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": '
'"Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# ...and both
self._dumpdata_assert(
['fixtures.Category', 'fixtures.Article'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", '
'"title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has '
'no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", '
'"fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Specify a specific model twice
self._dumpdata_assert(
['fixtures.Article', 'fixtures.Article'],
(
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": '
'"Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
)
# Specify a dump that specifies Article both explicitly and implicitly
self._dumpdata_assert(
['fixtures.Article', 'fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Specify a dump that specifies Article both explicitly and implicitly,
# but lists the app first (#22025).
self._dumpdata_assert(
['fixtures', 'fixtures.Article'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Same again, but specify in the reverse order
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no '
'place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields":'
' {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Specify one model from one application, and an entire other application.
self._dumpdata_assert(
['fixtures.Category', 'sites'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": '
'"example.com"}}]'
)
# Load fixture 2. JSON file imported by default. Overwrites some existing objects
management.call_command('loaddata', 'fixture2.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker has no place on ESPN>',
])
# Load fixture 3, XML format.
management.call_command('loaddata', 'fixture3.xml', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: XML identified as leading cause of cancer>',
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker on TV is great!>',
])
# Load fixture 6, JSON file with dynamic ContentType fields. Testing ManyToOne.
management.call_command('loaddata', 'fixture6.json', verbosity=0)
self.assertQuerysetEqual(Tag.objects.all(), [
'<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
'<Tag: <Article: Copyright is fine the way it is> tagged "law">',
], ordered=False)
# Load fixture 7, XML file with dynamic ContentType fields. Testing ManyToOne.
management.call_command('loaddata', 'fixture7.xml', verbosity=0)
self.assertQuerysetEqual(Tag.objects.all(), [
'<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
'<Tag: <Article: Copyright is fine the way it is> tagged "legal">',
'<Tag: <Article: Django conquers world!> tagged "django">',
'<Tag: <Article: Django conquers world!> tagged "world domination">',
], ordered=False)
# Load fixture 8, JSON file with dynamic Permission fields. Testing ManyToMany.
management.call_command('loaddata', 'fixture8.json', verbosity=0)
self.assertQuerysetEqual(Visa.objects.all(), [
'<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
'<Visa: Stephane Grappelli Can add user>',
'<Visa: Prince >'
], ordered=False)
# Load fixture 9, XML file with dynamic Permission fields. Testing ManyToMany.
management.call_command('loaddata', 'fixture9.xml', verbosity=0)
self.assertQuerysetEqual(Visa.objects.all(), [
'<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
'<Visa: Stephane Grappelli Can add user, Can delete user>',
'<Visa: Artist formerly known as "Prince" Can change user>'
], ordered=False)
# object list is unaffected
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: XML identified as leading cause of cancer>',
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker on TV is great!>',
])
# By default, you get raw keys on dumpdata
self._dumpdata_assert(
['fixtures.book'],
'[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [3, 1]}}]'
)
# But you can get natural keys if you ask for them and they are available
self._dumpdata_assert(
['fixtures.book'],
'[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist '
'formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}]',
natural_foreign_keys=True
)
# You can also omit the primary keys for models that we can get later with natural keys.
self._dumpdata_assert(
['fixtures.person'],
'[{"fields": {"name": "Django Reinhardt"}, "model": "fixtures.person"}, {"fields": {"name": "Stephane '
'Grappelli"}, "model": "fixtures.person"}, {"fields": {"name": "Artist formerly known as '
'\\"Prince\\""}, "model": "fixtures.person"}]',
natural_primary_keys=True
)
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker on TV is '
'great!", "pub_date": "2006-06-16T11:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}, {"pk": 4, '
'"model": "fixtures.article", "fields": {"headline": "Django conquers world!", "pub_date": '
'"2006-06-16T15:00:00"}}, {"pk": 5, "model": "fixtures.article", "fields": {"headline": "XML '
'identified as leading cause of cancer", "pub_date": "2006-06-16T16:00:00"}}, {"pk": 1, "model": '
'"fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": '
'3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": '
'"legal", "tagged_id": 3}}, {"pk": 3, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", '
'"article"], "name": "django", "tagged_id": 4}}, {"pk": 4, "model": "fixtures.tag", "fields": '
'{"tagged_type": ["fixtures", "article"], "name": "world domination", "tagged_id": 4}}, {"pk": 1, '
'"model": "fixtures.person", "fields": {"name": "Django Reinhardt"}}, {"pk": 2, "model": '
'"fixtures.person", "fields": {"name": "Stephane Grappelli"}}, {"pk": 3, "model": "fixtures.person", '
'"fields": {"name": "Artist formerly known as \\"Prince\\""}}, {"pk": 1, "model": "fixtures.visa", '
'"fields": {"person": ["Django Reinhardt"], "permissions": [["add_user", "auth", "user"], '
'["change_user", "auth", "user"], ["delete_user", "auth", "user"]]}}, {"pk": 2, "model": '
'"fixtures.visa", "fields": {"person": ["Stephane Grappelli"], "permissions": [["add_user", "auth", '
'"user"], ["delete_user", "auth", "user"]]}}, {"pk": 3, "model": "fixtures.visa", "fields": {"person":'
' ["Artist formerly known as \\"Prince\\""], "permissions": [["change_user", "auth", "user"]]}}, '
'{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist '
'formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}]',
natural_foreign_keys=True
)
# Dump the current contents of the database as an XML fixture
self._dumpdata_assert(
['fixtures'],
'<?xml version="1.0" encoding="utf-8"?><django-objects version="1.0"><object pk="1" '
'model="fixtures.category"><field type="CharField" name="title">News Stories</field><field '
'type="TextField" name="description">Latest news stories</field></object><object pk="2" '
'model="fixtures.article"><field type="CharField" name="headline">Poker on TV is great!</field><field '
'type="DateTimeField" name="pub_date">2006-06-16T11:00:00</field></object><object pk="3" '
'model="fixtures.article"><field type="CharField" name="headline">Copyright is fine the way it '
'is</field><field type="DateTimeField" name="pub_date">2006-06-16T14:00:00</field></object><object '
'pk="4" model="fixtures.article"><field type="CharField" name="headline">Django conquers world!'
'</field><field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field></object><object '
'pk="5" model="fixtures.article"><field type="CharField" name="headline">XML identified as leading '
'cause of cancer</field><field type="DateTimeField" name="pub_date">2006-06-16T16:00:00</field>'
'</object><object pk="1" model="fixtures.tag"><field type="CharField" name="name">copyright</field>'
'<field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures'
'</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3'
'</field></object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">legal'
'</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>'
'fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" '
'name="tagged_id">3</field></object><object pk="3" model="fixtures.tag"><field type="CharField" '
'name="name">django</field><field to="contenttypes.contenttype" name="tagged_type" '
'rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field '
'type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="4" model="fixtures.tag">'
'<field type="CharField" name="name">world domination</field><field to="contenttypes.contenttype" '
'name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field>'
'<field type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="1" '
'model="fixtures.person"><field type="CharField" name="name">Django Reinhardt</field></object>'
'<object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane Grappelli'
'</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">'
'Artist formerly known as "Prince"</field></object><object pk="1" model="fixtures.visa"><field '
'to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Django Reinhardt</natural></field>'
'<field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>add_user'
'</natural><natural>auth</natural><natural>user</natural></object><object><natural>change_user'
'</natural><natural>auth</natural><natural>user</natural></object><object><natural>delete_user'
'</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="2" '
'model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Stephane'
' Grappelli</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel">'
'<object><natural>add_user</natural><natural>auth</natural><natural>user</natural></object><object>'
'<natural>delete_user</natural><natural>auth</natural><natural>user</natural></object></field>'
'</object><object pk="3" model="fixtures.visa"><field to="fixtures.person" name="person" '
'rel="ManyToOneRel"><natural>Artist formerly known as "Prince"</natural></field><field '
'to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>change_user</natural>'
'<natural>auth</natural><natural>user</natural></object></field></object><object pk="1" '
'model="fixtures.book"><field type="CharField" name="name">Music for all ages</field><field '
'to="fixtures.person" name="authors" rel="ManyToManyRel"><object><natural>Artist formerly known as '
'"Prince"</natural></object><object><natural>Django Reinhardt</natural></object></field></object>'
'</django-objects>',
format='xml', natural_foreign_keys=True
)
def test_dumpdata_with_excludes(self):
# Load fixture1 which has a site, two articles, and a category
Site.objects.all().delete()
management.call_command('loaddata', 'fixture1.json', verbosity=0)
# Excluding fixtures app should only leave sites
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}]',
exclude_list=['fixtures'])
# Excluding fixtures.Article/Book should leave fixtures.Category
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, '
'{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}]',
exclude_list=['fixtures.Article', 'fixtures.Book']
)
# Excluding fixtures and fixtures.Article/Book should be a no-op
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, '
'{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}]',
exclude_list=['fixtures.Article', 'fixtures.Book']
)
# Excluding sites and fixtures.Article/Book should only leave fixtures.Category
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}]',
exclude_list=['fixtures.Article', 'fixtures.Book', 'sites']
)
# Excluding a bogus app should throw an error
with self.assertRaisesMessage(management.CommandError, "No installed app with label 'foo_app'."):
self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['foo_app'])
# Excluding a bogus model should throw an error
with self.assertRaisesMessage(management.CommandError, "Unknown model in excludes: fixtures.FooModel"):
self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['fixtures.FooModel'])
@unittest.skipIf(sys.platform.startswith('win'), "Windows doesn't support '?' in filenames.")
def test_load_fixture_with_special_characters(self):
management.call_command('loaddata', 'fixture_with[special]chars', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), ['<Article: How To Deal With Special Characters>'])
def test_dumpdata_with_filtering_manager(self):
spy1 = Spy.objects.create(name='Paul')
spy2 = Spy.objects.create(name='Alex', cover_blown=True)
self.assertQuerysetEqual(Spy.objects.all(),
['<Spy: Paul>'])
# Use the default manager
self._dumpdata_assert(
['fixtures.Spy'],
'[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % spy1.pk
)
# Dump using Django's base manager. Should return all objects,
# even those normally filtered by the manager
self._dumpdata_assert(
['fixtures.Spy'],
'[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": true}}, {"pk": %d, "model": '
'"fixtures.spy", "fields": {"cover_blown": false}}]' % (spy2.pk, spy1.pk),
use_base_manager=True
)
def test_dumpdata_with_pks(self):
management.call_command('loaddata', 'fixture1.json', verbosity=0)
management.call_command('loaddata', 'fixture2.json', verbosity=0)
self._dumpdata_assert(
['fixtures.Article'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": '
'"Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
self._dumpdata_assert(
['fixtures.Article'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}]',
primary_keys='2'
)
with self.assertRaisesMessage(management.CommandError, "You can only use --pks option with one model"):
self._dumpdata_assert(
['fixtures'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
with self.assertRaisesMessage(management.CommandError, "You can only use --pks option with one model"):
self._dumpdata_assert(
'',
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
with self.assertRaisesMessage(management.CommandError, "You can only use --pks option with one model"):
self._dumpdata_assert(
['fixtures.Article', 'fixtures.category'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
def test_dumpdata_with_file_output(self):
management.call_command('loaddata', 'fixture1.json', verbosity=0)
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]',
filename='dumpdata.json'
)
def test_dumpdata_progressbar(self):
"""
Dumpdata shows a progress bar on the command line when --output is set,
stdout is a tty, and verbosity > 0.
"""
management.call_command('loaddata', 'fixture1.json', verbosity=0)
new_io = six.StringIO()
new_io.isatty = lambda: True
with NamedTemporaryFile() as file:
options = {
'format': 'json',
'stdout': new_io,
'stderr': new_io,
'output': file.name,
}
management.call_command('dumpdata', 'fixtures', **options)
self.assertTrue(new_io.getvalue().endswith('[' + '.' * ProgressBar.progress_width + ']\n'))
# Test no progress bar when verbosity = 0
options['verbosity'] = 0
new_io = six.StringIO()
new_io.isatty = lambda: True
management.call_command('dumpdata', 'fixtures', **options)
self.assertEqual(new_io.getvalue(), '')
def test_dumpdata_proxy_without_concrete(self):
"""
A warning is displayed if a proxy model is dumped without its concrete
parent.
"""
ProxySpy.objects.create(name='Paul')
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter('always')
self._dumpdata_assert(['fixtures.ProxySpy'], '[]')
warning = warning_list.pop()
self.assertEqual(warning.category, ProxyModelWarning)
self.assertEqual(
str(warning.message),
"fixtures.ProxySpy is a proxy model and won't be serialized."
)
def test_dumpdata_proxy_with_concrete(self):
"""
A warning isn't displayed if a proxy model is dumped with its concrete
parent.
"""
spy = ProxySpy.objects.create(name='Paul')
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter('always')
self._dumpdata_assert(
['fixtures.ProxySpy', 'fixtures.Spy'],
'[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % spy.pk
)
self.assertEqual(len(warning_list), 0)
def test_compress_format_loading(self):
# Load fixture 4 (compressed), using format specification
management.call_command('loaddata', 'fixture4.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django pets kitten>',
])
def test_compressed_specified_loading(self):
# Load fixture 5 (compressed), using format *and* compression specification
management.call_command('loaddata', 'fixture5.json.zip', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: WoW subscribers now outnumber readers>',
])
def test_compressed_loading(self):
# Load fixture 5 (compressed), only compression specification
management.call_command('loaddata', 'fixture5.zip', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: WoW subscribers now outnumber readers>',
])
def test_ambiguous_compressed_fixture(self):
# The name "fixture5" is ambiguous, so loading it will raise an error
with self.assertRaises(management.CommandError) as cm:
management.call_command('loaddata', 'fixture5', verbosity=0)
self.assertIn("Multiple fixtures named 'fixture5'", cm.exception.args[0])
def test_db_loading(self):
# Load db fixtures 1 and 2. These will load using the 'default' database identifier implicitly
management.call_command('loaddata', 'db_fixture_1', verbosity=0)
management.call_command('loaddata', 'db_fixture_2', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Who needs more than one database?>',
'<Article: Who needs to use compressed data?>',
])
def test_loaddata_error_message(self):
"""
Verifies that loading a fixture which contains an invalid object
outputs an error message which contains the pk of the object
that triggered the error.
"""
# MySQL needs a little prodding to reject invalid data.
# This won't affect other tests because the database connection
# is closed at the end of each test.
if connection.vendor == 'mysql':
connection.cursor().execute("SET sql_mode = 'TRADITIONAL'")
with self.assertRaises(IntegrityError) as cm:
management.call_command('loaddata', 'invalid.json', verbosity=0)
self.assertIn("Could not load fixtures.Article(pk=1):", cm.exception.args[0])
def test_loaddata_app_option(self):
"""
Verifies that the --app option works.
"""
with self.assertRaisesMessage(CommandError, "No fixture named 'db_fixture_1' found."):
management.call_command('loaddata', 'db_fixture_1', verbosity=0, app_label="someotherapp")
self.assertQuerysetEqual(Article.objects.all(), [])
management.call_command('loaddata', 'db_fixture_1', verbosity=0, app_label="fixtures")
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Who needs more than one database?>',
])
def test_loaddata_verbosity_three(self):
output = six.StringIO()
management.call_command('loaddata', 'fixture1.json', verbosity=3, stdout=output, stderr=output)
command_output = force_text(output.getvalue())
self.assertIn(
"\rProcessed 1 object(s).\rProcessed 2 object(s)."
"\rProcessed 3 object(s).\rProcessed 4 object(s).\n",
command_output
)
def test_loading_using(self):
# Load db fixtures 1 and 2. These will load using the 'default' database identifier explicitly
management.call_command('loaddata', 'db_fixture_1', verbosity=0, using='default')
management.call_command('loaddata', 'db_fixture_2', verbosity=0, using='default')
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Who needs more than one database?>',
'<Article: Who needs to use compressed data?>',
])
def test_unmatched_identifier_loading(self):
# Try to load db fixture 3. This won't load because the database identifier doesn't match
with self.assertRaisesMessage(CommandError, "No fixture named 'db_fixture_3' found."):
management.call_command('loaddata', 'db_fixture_3', verbosity=0)
with self.assertRaisesMessage(CommandError, "No fixture named 'db_fixture_3' found."):
management.call_command('loaddata', 'db_fixture_3', verbosity=0, using='default')
self.assertQuerysetEqual(Article.objects.all(), [])
def test_output_formats(self):
# Load back in fixture 1, we need the articles from it
management.call_command('loaddata', 'fixture1', verbosity=0)
# Try to load fixture 6 using format discovery
management.call_command('loaddata', 'fixture6', verbosity=0)
self.assertQuerysetEqual(Tag.objects.all(), [
'<Tag: <Article: Time to reform copyright> tagged "copyright">',
'<Tag: <Article: Time to reform copyright> tagged "law">'
], ordered=False)
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}, {"pk": 1, "model": '
'"fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": '
'3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": '
'"law", "tagged_id": 3}}, {"pk": 1, "model": "fixtures.person", "fields": {"name": "Django '
'Reinhardt"}}, {"pk": 2, "model": "fixtures.person", "fields": {"name": "Stephane Grappelli"}}, '
'{"pk": 3, "model": "fixtures.person", "fields": {"name": "Prince"}}]',
natural_foreign_keys=True
)
# Dump the current contents of the database as an XML fixture
self._dumpdata_assert(
['fixtures'],
'<?xml version="1.0" encoding="utf-8"?><django-objects version="1.0"><object pk="1" '
'model="fixtures.category"><field type="CharField" name="title">News Stories</field><field '
'type="TextField" name="description">Latest news stories</field></object><object pk="2" '
'model="fixtures.article"><field type="CharField" name="headline">Poker has no place on ESPN</field>'
'<field type="DateTimeField" name="pub_date">2006-06-16T12:00:00</field></object><object pk="3" '
'model="fixtures.article"><field type="CharField" name="headline">Time to reform copyright</field>'
'<field type="DateTimeField" name="pub_date">2006-06-16T13:00:00</field></object><object pk="1" '
'model="fixtures.tag"><field type="CharField" name="name">copyright</field><field '
'to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural>'
'<natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field>'
'</object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">law</field><field '
'to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural>'
'<natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field>'
'</object><object pk="1" model="fixtures.person"><field type="CharField" name="name">Django Reinhardt'
'</field></object><object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane '
'Grappelli</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">'
'Prince</field></object></django-objects>',
format='xml', natural_foreign_keys=True
)
class NonExistentFixtureTests(TestCase):
"""
Custom class to limit fixture dirs.
"""
available_apps = ['django.contrib.auth', 'django.contrib.contenttypes']
def test_loaddata_not_existent_fixture_file(self):
stdout_output = six.StringIO()
with self.assertRaisesMessage(CommandError, "No fixture named 'this_fixture_doesnt_exist' found."):
management.call_command('loaddata', 'this_fixture_doesnt_exist', stdout=stdout_output)
@mock.patch('django.db.connection.enable_constraint_checking')
@mock.patch('django.db.connection.disable_constraint_checking')
def test_nonexistent_fixture_no_constraint_checking(self,
disable_constraint_checking, enable_constraint_checking):
"""
If no fixtures match the loaddata command, constraints checks on the
database shouldn't be disabled. This is performance critical on MSSQL.
"""
with self.assertRaisesMessage(CommandError, "No fixture named 'this_fixture_doesnt_exist' found."):
management.call_command('loaddata', 'this_fixture_doesnt_exist', verbosity=0)
disable_constraint_checking.assert_not_called()
enable_constraint_checking.assert_not_called()
class FixtureTransactionTests(DumpDataAssertMixin, TransactionTestCase):
available_apps = [
'fixtures',
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
]
@skipUnlessDBFeature('supports_forward_references')
def test_format_discovery(self):
# Load fixture 1 again, using format discovery
management.call_command('loaddata', 'fixture1', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
# Try to load fixture 2 using format discovery; this will fail
# because there are two fixture2's in the fixtures directory
with self.assertRaises(management.CommandError) as cm:
management.call_command('loaddata', 'fixture2', verbosity=0)
self.assertIn("Multiple fixtures named 'fixture2'", cm.exception.args[0])
# object list is unaffected
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Load fixture 4 (compressed), using format discovery
management.call_command('loaddata', 'fixture4', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django pets kitten>',
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
| bsd-3-clause |
echohenry2006/tvb-library | tvb/tests/library/datatypes/spectral_test.py | 3 | 6667 | # -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Scientific Package. This package holds all simulators, and
# analysers necessary to run brain-simulations. You can use it stand alone or
# in conjunction with TheVirtualBrain-Framework Package. See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
Created on Mar 20, 2013
.. moduleauthor:: Bogdan Neacsa <bogdan.neacsa@codemart.ro>
"""
if __name__ == "__main__":
from tvb.tests.library import setup_test_console_env
setup_test_console_env()
import numpy
import unittest
from tvb.datatypes import spectral, time_series
from tvb.tests.library.base_testcase import BaseTestCase
class SpectralTest(BaseTestCase):
"""
Tests the defaults for `tvb.datatypes.spectral` module.
"""
def test_fourierspectrum(self):
data = numpy.random.random((10, 10))
ts = time_series.TimeSeries(data=data)
dt = spectral.FourierSpectrum(source=ts,
segment_length=100)
dt.configure()
summary_info = dt.summary_info
self.assertEqual(summary_info['Frequency step'], 0.01)
self.assertEqual(summary_info['Maximum frequency'], 0.5)
self.assertEqual(summary_info['Segment length'], 100)
self.assertEqual(summary_info['Windowing function'], '')
self.assertEqual(summary_info['Source'], '')
self.assertEqual(summary_info['Spectral type'], 'FourierSpectrum')
self.assertTrue(dt.aggregation_functions is None)
self.assertEqual(dt.normalised_average_power.shape, (0, ))
self.assertEqual(dt.segment_length, 100.0)
self.assertEqual(dt.shape, (0, ))
self.assertTrue(dt.source is not None)
self.assertEqual(dt.windowing_function, '')
def test_waveletcoefficients(self):
data = numpy.random.random((10, 10))
ts = time_series.TimeSeries(data=data)
dt = spectral.WaveletCoefficients(source=ts,
mother='morlet',
sample_period=7.8125,
frequencies=[0.008, 0.028, 0.048, 0.068],
normalisation="energy",
q_ratio=5.0,
array_data=numpy.random.random((10, 10)),)
dt.configure()
summary_info = dt.summary_info
self.assertEqual(summary_info['Maximum frequency'], 0.068)
self.assertEqual(summary_info['Minimum frequency'], 0.008)
self.assertEqual(summary_info['Normalisation'], 'energy')
self.assertEqual(summary_info['Number of scales'], 4)
self.assertEqual(summary_info['Q-ratio'], 5.0)
self.assertEqual(summary_info['Sample period'], 7.8125)
self.assertEqual(summary_info['Spectral type'], 'WaveletCoefficients')
self.assertEqual(summary_info['Wavelet type'], 'morlet')
self.assertEqual(dt.q_ratio, 5.0)
self.assertEqual(dt.sample_period, 7.8125)
self.assertEqual(dt.shape, (10, 10))
self.assertTrue(dt.source is not None)
def test_coherencespectrum(self):
data = numpy.random.random((10, 10))
ts = time_series.TimeSeries(data=data)
dt = spectral.CoherenceSpectrum(source=ts,
nfft = 4,
array_data = numpy.random.random((10, 10)),
frequency = numpy.random.random((10,)))
summary_info = dt.summary_info
self.assertEqual(summary_info['Number of frequencies'], 10)
self.assertEqual(summary_info['Spectral type'], 'CoherenceSpectrum')
self.assertEqual(summary_info['FFT length (time-points)'], 4)
self.assertEqual(summary_info['Source'], '')
self.assertEqual(dt.nfft, 4)
self.assertEqual(dt.shape, (10, 10))
self.assertTrue(dt.source is not None)
def test_complexcoherence(self):
data = numpy.random.random((10, 10))
ts = time_series.TimeSeries(data=data)
dt = spectral.ComplexCoherenceSpectrum(source=ts,
array_data = numpy.random.random((10, 10)),
cross_spectrum = numpy.random.random((10, 10)),
epoch_length = 10,
segment_length = 5)
summary_info = dt.summary_info
self.assertEqual(summary_info['Frequency step'], 0.2)
self.assertEqual(summary_info['Maximum frequency'], 0.5)
self.assertEqual(summary_info['Source'], '')
self.assertEqual(summary_info['Spectral type'], 'ComplexCoherenceSpectrum')
self.assertTrue(dt.aggregation_functions is None)
self.assertEqual(dt.epoch_length, 10)
self.assertEqual(dt.segment_length, 5)
self.assertEqual(dt.shape, (10, 10))
self.assertTrue(dt.source is not None)
self.assertEqual(dt.windowing_function, '')
def suite():
"""
Gather all the tests in a test suite.
"""
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(SpectralTest))
return test_suite
if __name__ == "__main__":
#So you can run tests from this package individually.
TEST_RUNNER = unittest.TextTestRunner()
TEST_SUITE = suite()
TEST_RUNNER.run(TEST_SUITE) | gpl-2.0 |
wikimedia/analytics-user-metrics | user_metrics/api/engine/request_manager.py | 1 | 18101 | """
This module implements the request manager functionality.
Job Queue and Processing
^^^^^^^^^^^^^^^^^^^^^^^^
As requests are issued via HTTP to the API, a process queue stores all
active jobs. Processes are created and assume one of the following
states throughout their existence: ::
* 'pending' - The request has yet to be begin being processed
* 'running' - The request is being processed
* 'success' - The request has finished processing and is exposed at
the url
* 'failure' - The request has finished processing but failed to expose
results
When a request is received and a job is created to service it, the job
enters the 'pending' state. If the job returns without exception it
enters the 'success' state; otherwise it enters the 'failure' state. The
job remains in either of these states until it is cleared from the
process queue.
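For illustration, the lifecycle sketched as transitions (the 'running'
state sits between 'pending' and the two terminal states): ::

    'pending' -> 'running' -> 'success'  (job returned without exception)
    'pending' -> 'running' -> 'failure'  (job raised an exception)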
Response Data
^^^^^^^^^^^^^
As requests are made to the API, the data is generated and formatted as
JSON. The definition is as follows: ::
{ header : header_list,
cohort_expr : cohort_gen_timestamp : metric : timeseries :
aggregator : start : end : [ metric_param : ]* : data
}
Where each component is defined: ::
header_list := list(str), list of header values
cohort_expr := str, cohort ID expression
cohort_gen_timestamp := str, cohort generation timestamp (earliest of
all cohorts in expression)
metric := str, user metric handle
timeseries := boolean, indicates if this is a timeseries
aggregator := str, aggregator used
start := str, start datetime of request
end := str, end datetime of request
metric_param := -, optional metric parameters
data := list(tuple), set of data points
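For illustration only, a response under this definition might nest as
below; every key and value shown here is hypothetical: ::

    { 'header' : ['user_id', 'edit_count'],
      'cohort_xyz' :                      # cohort ID expression
          { '20130101000000' :            # cohort generation timestamp
              { 'edit_count' :            # metric handle
                  ...                     # timeseries/aggregator/start/end
                      [(1001, 5), (1002, 12)] }}}   # data points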
Request data is mapped to a query via metric objects and hashed in the
dictionary `api_data`.
Request Flow Management
^^^^^^^^^^^^^^^^^^^^^^^
This portion of the module defines a set of methods useful in handling
series of metrics objects to build more complex results. This generally
involves creating one or more UserMetric derived objects with passed
parameters to service a request. The primary entry point is the
``process_data_request`` method. This method coordinates requests for
three different top-level request types:
- **Raw requests**. Output is a set of datapoints that consist of the
user IDs accompanied by metric results.
- **Aggregate requests**. Output is an aggregate of all user results based
on the type of aggregation as defined in the aggregator module.
- **Time series requests**. Outputs a time series list of data. For this
type of request a start and end time must be defined along with an
interval length. Further an aggregator must be provided which operates
on each time interval.
Also defined are the metric types for which requests may be made
(``metric_dict``), the types of aggregators that may be called on metrics
(``aggregator_dict``), and the thread limits governing how many threads
may be used to process metrics (``USER_THREADS`` and ``REVISION_THREADS``).
"""
__author__ = {
"ryan faulkner": "rfaulkner@wikimedia.org"
}
__date__ = "2013-03-05"
__license__ = "GPL (version 2 or later)"
from user_metrics.config import logging, settings
from user_metrics.api import MetricsAPIError, error_codes, query_mod, \
REQUEST_BROKER_TARGET, umapi_broker_context,\
RESPONSE_BROKER_TARGET, PROCESS_BROKER_TARGET
from user_metrics.api.engine import pack_response_for_broker, \
RESQUEST_TIMEOUT, MAX_BLOCK_SIZE, MAX_CONCURRENT_JOBS
from user_metrics.api.engine.data import get_users
from user_metrics.api.engine.request_meta import build_request_obj
from user_metrics.metrics.users import MediaWikiUser
from user_metrics.metrics.user_metric import UserMetricError
from user_metrics.etl.aggregator import aggregator as agg_engine
from multiprocessing import Process, Queue
from collections import namedtuple
from os import getpid
from sys import getsizeof
import time
from hashlib import sha1
# API JOB HANDLER
# ###############
# Defines the job item type used to temporarily store job progress
job_item_type = namedtuple('JobItem', 'id process request queue')
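# Field semantics, as used below: ``id`` - monotonically increasing job
# counter; ``process`` - the multiprocessing.Process servicing the request;
# ``request`` - the originating request URL; ``queue`` - the Queue on which
# the worker posts its response data.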
def job_control():
"""
    Controls the execution of user metrics requests. Incoming requests are
    popped from the request broker target and dispatched to worker
    processes; completed results are pushed to the response broker target.
"""
# Store executed and pending jobs respectively
job_queue = list()
# Global job ID number
job_id = 0
# Tallies the number of concurrently running jobs
concurrent_jobs = 0
log_name = '{0} :: {1}'.format(__name__, job_control.__name__)
logging.debug('{0} - STARTING...'.format(log_name))
while 1:
time.sleep(RESQUEST_TIMEOUT)
# Request Queue Processing
# ------------------------
# logging.debug(log_name + ' :: POLLING REQUESTS...')
logging.debug(log_name + ' :: JOB QUEUE - {0}'.format(str(job_queue)))
req_item = None
# Only process if there are fewer than the maximum number of concurrent
# jobs
if concurrent_jobs < MAX_CONCURRENT_JOBS:
# Pop from request target
req_item = umapi_broker_context.pop(REQUEST_BROKER_TARGET)
# Push to process target
if req_item:
url_hash = sha1(req_item.encode('utf-8')).hexdigest()
umapi_broker_context.add(PROCESS_BROKER_TARGET, url_hash,
req_item)
logging.debug(log_name + ' :: PULLING item from request queue -> '
'\n\t{0}'
.format(req_item))
# Process complete jobs
# ---------------------
if concurrent_jobs:
for job_item in job_queue:
if not job_item.queue.empty():
logging.info(log_name + ' :: READING RESPONSE - {0}'.
format(job_item.request))
# Pull data off of the queue and add it to response queue
data = ''
while not job_item.queue.empty():
data += job_item.queue.get(True)
# Remove from process target
url_hash = sha1(job_item.request.encode('utf-8')).hexdigest()
try:
umapi_broker_context.remove(PROCESS_BROKER_TARGET,
url_hash)
except Exception as e:
logging.error(log_name + ' :: Could not process '
'{0} from {1} -- {2}'.
format(job_item.request,
PROCESS_BROKER_TARGET,
e.message))
# Add to response target
umapi_broker_context.add(RESPONSE_BROKER_TARGET, url_hash,
pack_response_for_broker(
job_item.request, data))
del job_queue[job_queue.index(job_item)]
concurrent_jobs -= 1
logging.debug(log_name + ' :: RUN -> RESPONSE - Job ID {0}'
'\n\tConcurrent jobs = {1}'
.format(str(job_item.id), concurrent_jobs))
# Process request
# ---------------
if req_item:
req_q = Queue()
proc = Process(target=process_metrics, args=(req_q, req_item))
proc.start()
job_item = job_item_type(job_id, proc, req_item, req_q)
job_queue.append(job_item)
concurrent_jobs += 1
job_id += 1
logging.debug(log_name + ' :: WAIT -> RUN - Job ID {0}'
'\n\tConcurrent jobs = {1}, REQ = {2}'
.format(str(job_id), concurrent_jobs, req_item))
logging.debug('{0} - FINISHING.'.format(log_name))
def process_metrics(p, request_url):
"""
Worker process for requests, forked from the job controller. This
method handles:
* Filtering cohort type: "regular" cohort, single user, user group
* Secondary validation
* Processing the metric request and writing results to the job queue
"""
log_name = '{0} :: {1}'.format(__name__, process_metrics.__name__)
logging.info(log_name + ' :: START JOB'
'\n\t{0} - PID = {1})'.
format(request_url, getpid()))
err_msg = __name__ + ' :: Request failed.'
users = list()
try:
request_obj = build_request_obj(request_url)
except MetricsAPIError as e:
# TODO - flag job as failed
return
# obtain user list - handle the case where a lone user ID is passed
# !! The username should already be validated
if request_obj.is_user:
uid = MediaWikiUser.is_user_name(request_obj.cohort_expr,
request_obj.project)
if uid:
valid = True
users = [uid]
else:
valid = False
err_msg = error_codes[3]
# The "all" user group. All users within a time period.
elif request_obj.cohort_expr == 'all':
users = MediaWikiUser(query_type=1)
try:
users = [u for u in users.get_users(
request_obj.start, request_obj.end,
project=request_obj.project)]
valid = True
except Exception:
valid = False
err_msg = error_codes[5]
# "TYPICAL" COHORT PROCESSING
else:
users = get_users(request_obj.cohort_expr)
# Default project is what is stored in usertags_meta
project = query_mod.get_cohort_project_by_meta(
request_obj.cohort_expr)
if project:
request_obj.project = project
logging.debug(log_name + ' :: Using default project from '
'usertags_meta {0}.'.format(project))
valid = True
err_msg = ''
if valid:
# process request
results = process_data_request(request_obj, users)
results = str(results)
response_size = getsizeof(results, None)
if response_size > MAX_BLOCK_SIZE:
index = 0
# Dump the data in pieces - block until it is picked up
while index < response_size:
p.put(results[index:index+MAX_BLOCK_SIZE], block=True)
index += MAX_BLOCK_SIZE
else:
p.put(results, block=True)
logging.info(log_name + ' :: END JOB'
'\n\tCOHORT = {0}- METRIC = {1} - PID = {2})'.
format(request_obj.cohort_expr, request_obj.metric,
getpid()))
else:
p.put(err_msg, block=True)
logging.info(log_name + ' :: END JOB - FAILED.'
'\n\tCOHORT = {0}- METRIC = {1} - PID = {2})'.
format(request_obj.cohort_expr, request_obj.metric,
getpid()))
# REQUEST FLOW HANDLER
# ###################
from dateutil.parser import parse as date_parse
from copy import deepcopy
from user_metrics.etl.data_loader import DataLoader
import user_metrics.metrics.user_metric as um
import user_metrics.etl.time_series_process_methods as tspm
from user_metrics.api.engine.request_meta import ParameterMapping
from user_metrics.api.engine.response_meta import format_response
from user_metrics.api.engine import DATETIME_STR_FORMAT
from user_metrics.api.engine.request_meta import get_agg_key, \
get_aggregator_type, request_types
INTERVALS_PER_THREAD = 10
MAX_THREADS = 5
USER_THREADS = settings.__user_thread_max__
REVISION_THREADS = settings.__rev_thread_max__
DEFAULT_INERVAL_LENGTH = 24
# create shorthand method refs
to_string = DataLoader().cast_elems_to_string
def process_data_request(request_meta, users):
"""
Main entry point of the module, prepares results for a given request.
Coordinates a request based on the following parameters::
metric_handle (string) - determines the type of metric object to
build. Keys metric_dict.
users (list) - list of user IDs.
**kwargs - Keyword arguments may contain a variety of variables.
Most notably, "aggregator" if the request requires aggregation,
"time_series" flag indicating a time series request. The
remaining kwargs specify metric object parameters.
"""
# Set interval length in hours if not present
if not request_meta.slice:
request_meta.slice = DEFAULT_INERVAL_LENGTH
else:
request_meta.slice = float(request_meta.slice)
# Get the aggregator key
agg_key = get_agg_key(request_meta.aggregator, request_meta.metric) if \
request_meta.aggregator else None
args = ParameterMapping.map(request_meta)
# Initialize the results
results, metric_class, metric_obj = format_response(request_meta)
start = metric_obj.datetime_start
end = metric_obj.datetime_end
if results['type'] == request_types.time_series:
# Get aggregator
try:
aggregator_func = get_aggregator_type(agg_key)
except MetricsAPIError as e:
results['data'] = 'Request failed. ' + e.message
return results
# Determine intervals and thread allocation
total_intervals = (date_parse(end) - date_parse(start)).\
total_seconds() / (3600 * request_meta.slice)
time_threads = max(1, int(total_intervals / INTERVALS_PER_THREAD))
time_threads = min(MAX_THREADS, time_threads)
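        # Worked example (sketch): a 30-day window with the default 24-hour
        # slice gives 30 intervals; 30 / INTERVALS_PER_THREAD (10) -> 3
        # threads, which is within the MAX_THREADS (5) cap.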
logging.info(__name__ + ' :: Initiating time series for %(metric)s\n'
'\tAGGREGATOR = %(agg)s\n'
'\tFROM: %(start)s,\tTO: %(end)s.' %
{
'metric': metric_class.__name__,
'agg': request_meta.aggregator,
'start': str(start),
'end': str(end),
})
metric_threads = '"k_" : {0}, "kr_" : {1}'.format(USER_THREADS,
REVISION_THREADS)
metric_threads = '{' + metric_threads + '}'
new_kwargs = deepcopy(args)
del new_kwargs['slice']
del new_kwargs['aggregator']
del new_kwargs['datetime_start']
del new_kwargs['datetime_end']
out = tspm.build_time_series(start,
end,
request_meta.slice,
metric_class,
aggregator_func,
users,
kt_=time_threads,
metric_threads=metric_threads,
log=True,
**new_kwargs)
results['header'] = ['timestamp'] + \
getattr(aggregator_func, um.METRIC_AGG_METHOD_HEAD)
for row in out:
timestamp = date_parse(row[0][:19]).strftime(
DATETIME_STR_FORMAT)
results['data'][timestamp] = row[3:]
elif results['type'] == request_types.aggregator:
# Get aggregator
try:
aggregator_func = get_aggregator_type(agg_key)
except MetricsAPIError as e:
results['data'] = 'Request failed. ' + e.message
return results
logging.info(__name__ + ' :: Initiating aggregator for %(metric)s\n'
'\tAGGREGATOR = %(agg)s\n'
'\tFROM: %(start)s,\tTO: %(end)s.' %
{
'metric': metric_class.__name__,
'agg': request_meta.aggregator,
'start': str(start),
'end': str(end),
})
try:
metric_obj.process(users,
k_=USER_THREADS,
kr_=REVISION_THREADS,
log_=True,
**args)
except UserMetricError as e:
logging.error(__name__ + ' :: Metrics call failed: ' + str(e))
results['data'] = str(e)
return results
r = agg_engine(aggregator_func, metric_obj, metric_obj.header())
results['header'] = to_string(r.header)
results['data'] = r.data[1:]
elif results['type'] == request_types.raw:
logging.info(__name__ + ' :: Initiating raw request for %(metric)s\n'
'\tFROM: %(start)s,\tTO: %(end)s.' %
{
'metric': metric_class.__name__,
'start': str(start),
'end': str(end),
})
try:
metric_obj.process(users,
k_=USER_THREADS,
kr_=REVISION_THREADS,
log_=True,
**args)
except UserMetricError as e:
logging.error(__name__ + ' :: Metrics call failed: ' + str(e))
results['data'] = str(e)
return results
for m in metric_obj.__iter__():
results['data'][m[0]] = m[1:]
return results
| bsd-3-clause |
ahmed-mahran/hue | desktop/core/ext-py/Django-1.6.10/tests/backends/tests.py | 46 | 40386 | # -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
from __future__ import absolute_import, unicode_literals
import copy
import datetime
from decimal import Decimal
import threading
from django.conf import settings
from django.core.management.color import no_style
from django.db import (connection, connections, DEFAULT_DB_ALIAS,
DatabaseError, IntegrityError, transaction)
from django.db.backends.signals import connection_created
from django.db.backends.sqlite3.base import DatabaseOperations
from django.db.backends.postgresql_psycopg2 import version as pg_version
from django.db.backends.util import format_number
from django.db.models import Sum, Avg, Variance, StdDev
from django.db.models.fields import (AutoField, DateField, DateTimeField,
DecimalField, IntegerField, TimeField)
from django.db.utils import ConnectionHandler
from django.test import (TestCase, skipUnlessDBFeature, skipIfDBFeature,
TransactionTestCase)
from django.test.utils import override_settings, str_prefix
from django.utils import six, unittest
from django.utils.six.moves import xrange
from . import models
class DummyBackendTest(TestCase):
def test_no_databases(self):
"""
        Test that an empty DATABASES setting defaults to the dummy backend.
"""
DATABASES = {}
conns = ConnectionHandler(DATABASES)
self.assertEqual(conns[DEFAULT_DB_ALIAS].settings_dict['ENGINE'],
'django.db.backends.dummy')
class OracleChecks(unittest.TestCase):
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle quote_name semantics")
def test_quote_name(self):
# Check that '%' chars are escaped for query execution.
name = '"SOME%NAME"'
quoted_name = connection.ops.quote_name(name)
self.assertEqual(quoted_name % (), name)
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle cursor semantics")
def test_dbms_session(self):
# If the backend is Oracle, test that we can call a standard
# stored procedure through our cursor wrapper.
from django.db.backends.oracle.base import convert_unicode
cursor = connection.cursor()
cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
[convert_unicode('_django_testing!')])
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle cursor semantics")
def test_cursor_var(self):
# If the backend is Oracle, test that we can pass cursor variables
# as query parameters.
from django.db.backends.oracle.base import Database
cursor = connection.cursor()
var = cursor.var(Database.STRING)
cursor.execute("BEGIN %s := 'X'; END; ", [var])
self.assertEqual(var.getvalue(), 'X')
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle cursor semantics")
def test_long_string(self):
        # If the backend is Oracle, test that we can save text longer
        # than 4000 chars and read it back properly.
c = connection.cursor()
c.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
long_str = ''.join([six.text_type(x) for x in xrange(4000)])
c.execute('INSERT INTO ltext VALUES (%s)', [long_str])
c.execute('SELECT text FROM ltext')
row = c.fetchone()
self.assertEqual(long_str, row[0].read())
c.execute('DROP TABLE ltext')
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle connection semantics")
def test_client_encoding(self):
# If the backend is Oracle, test that the client encoding is set
# correctly. This was broken under Cygwin prior to r14781.
connection.cursor() # Ensure the connection is initialized.
self.assertEqual(connection.connection.encoding, "UTF-8")
self.assertEqual(connection.connection.nencoding, "UTF-8")
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle connection semantics")
def test_order_of_nls_parameters(self):
# an 'almost right' datetime should work with configured
# NLS parameters as per #18465.
c = connection.cursor()
query = "select 1 from dual where '1936-12-29 00:00' < sysdate"
# Test that the query succeeds without errors - pre #18465 this
# wasn't the case.
c.execute(query)
self.assertEqual(c.fetchone()[0], 1)
class MySQLTests(TestCase):
@unittest.skipUnless(connection.vendor == 'mysql',
"Test valid only for MySQL")
def test_autoincrement(self):
"""
Check that auto_increment fields are reset correctly by sql_flush().
        Before MySQL version 5.0.13, TRUNCATE did not reset auto_increment.
Refs #16961.
"""
statements = connection.ops.sql_flush(no_style(),
tables=['test'],
sequences=[{
'table': 'test',
'col': 'somecol',
}])
found_reset = False
for sql in statements:
found_reset = found_reset or 'ALTER TABLE' in sql
if connection.mysql_version < (5, 0, 13):
self.assertTrue(found_reset)
else:
self.assertFalse(found_reset)
class DateQuotingTest(TestCase):
def test_django_date_trunc(self):
"""
        Test the custom ``django_date_trunc`` method, in particular against
fields which clash with strings passed to it (e.g. 'year') - see
#12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
years = models.SchoolClass.objects.dates('last_updated', 'year')
self.assertEqual(list(years), [datetime.date(2010, 1, 1)])
def test_django_date_extract(self):
"""
        Test the custom ``django_date_extract`` method, in particular against fields
which clash with strings passed to it (e.g. 'day') - see #12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
classes = models.SchoolClass.objects.filter(last_updated__day=20)
self.assertEqual(len(classes), 1)
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
def test_last_executed_query(self):
"""
last_executed_query should not raise an exception even if no previous
query has been run.
"""
cursor = connection.cursor()
try:
connection.ops.last_executed_query(cursor, '', ())
except Exception:
self.fail("'last_executed_query' should not raise an exception.")
def test_debug_sql(self):
list(models.Reporter.objects.filter(first_name="test"))
sql = connection.queries[-1]['sql'].lower()
self.assertIn("select", sql)
self.assertIn(models.Reporter._meta.db_table, sql)
def test_query_encoding(self):
"""
        Test that last_executed_query() returns a Unicode string
"""
persons = models.Reporter.objects.filter(raw_data=b'\x00\x46 \xFE').extra(select={'föö': 1})
sql, params = persons.query.sql_with_params()
cursor = persons.query.get_compiler('default').execute_sql(None)
last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
self.assertIsInstance(last_sql, six.text_type)
@unittest.skipUnless(connection.vendor == 'sqlite',
"This test is specific to SQLite.")
def test_no_interpolation_on_sqlite(self):
# Regression for #17158
# This shouldn't raise an exception
query = "SELECT strftime('%Y', 'now');"
connection.cursor().execute(query)
self.assertEqual(connection.queries[-1]['sql'],
str_prefix("QUERY = %(_)s\"SELECT strftime('%%Y', 'now');\" - PARAMS = ()"))
class ParameterHandlingTest(TestCase):
def test_bad_parameter_count(self):
"An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
cursor = connection.cursor()
query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
connection.introspection.table_name_converter('backends_square'),
connection.ops.quote_name('root'),
connection.ops.quote_name('square')
))
self.assertRaises(Exception, cursor.executemany, query, [(1, 2, 3)])
self.assertRaises(Exception, cursor.executemany, query, [(1,)])
# Unfortunately, the following test would be a good one to run on all
# backends, but it breaks MySQL hard. Until #13711 is fixed, it can't be run
# everywhere (although it would be an effective test of #13711).
class LongNameTest(TestCase):
"""Long primary keys and model names can result in a sequence name
that exceeds the database limits, which will result in truncation
on certain databases (e.g., Postgres). The backend needs to use
the correct sequence name in last_insert_id and other places, so
check it is. Refs #8901.
"""
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_create(self):
"""Test creation of model with long name and long pk name doesn't error. Ref #8901"""
models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_m2m(self):
"""Test an m2m save of a model with a long name and a long m2m field name doesn't error as on Django >=1.2 this now uses object saves. Ref #8901"""
obj = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
rel_obj = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_flush(self):
"""Test that sequence resetting as part of a flush with model with long name and long pk name doesn't error. Ref #8901"""
        # A full flush is too expensive for this test, so we dig into the
# internals to generate the likely offending SQL and run it manually
# Some convenience aliases
VLM = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
tables = [
VLM._meta.db_table,
VLM_m2m._meta.db_table,
]
sequences = [
{
'column': VLM._meta.pk.column,
'table': VLM._meta.db_table
},
]
cursor = connection.cursor()
for statement in connection.ops.sql_flush(no_style(), tables, sequences):
cursor.execute(statement)
class SequenceResetTest(TestCase):
def test_generic_relation(self):
"Sequence names are correct when resetting generic relations (Ref #13941)"
# Create an object with a manually specified PK
models.Post.objects.create(id=10, name='1st post', text='hello world')
# Reset the sequences for the database
cursor = connection.cursor()
commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [models.Post])
for sql in commands:
cursor.execute(sql)
# If we create a new object now, it should have a PK greater
# than the PK we specified manually.
obj = models.Post.objects.create(name='New post', text='goodbye world')
self.assertTrue(obj.pk > 10)
class PostgresVersionTest(TestCase):
def assert_parses(self, version_string, version):
self.assertEqual(pg_version._parse_version(version_string), version)
def test_parsing(self):
"""Test PostgreSQL version parsing from `SELECT version()` output"""
self.assert_parses("PostgreSQL 8.3 beta4", 80300)
self.assert_parses("PostgreSQL 8.3", 80300)
self.assert_parses("EnterpriseDB 8.3", 80300)
self.assert_parses("PostgreSQL 8.3.6", 80306)
self.assert_parses("PostgreSQL 8.4beta1", 80400)
self.assert_parses("PostgreSQL 8.3.1 on i386-apple-darwin9.2.2, compiled by GCC i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)", 80301)
def test_version_detection(self):
"""Test PostgreSQL version detection"""
# Helper mocks
class CursorMock(object):
"Very simple mock of DB-API cursor"
def execute(self, arg):
pass
def fetchone(self):
return ["PostgreSQL 8.3"]
class OlderConnectionMock(object):
"Mock of psycopg2 (< 2.0.12) connection"
def cursor(self):
return CursorMock()
# psycopg2 < 2.0.12 code path
conn = OlderConnectionMock()
self.assertEqual(pg_version.get_version(conn), 80300)
class PostgresNewConnectionTests(TestCase):
@unittest.skipUnless(
connection.vendor == 'postgresql',
"This test applies only to PostgreSQL")
def test_connect_and_rollback(self):
"""
PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
transaction is rolled back (#17062).
"""
databases = copy.deepcopy(settings.DATABASES)
new_connections = ConnectionHandler(databases)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
# Ensure the database default time zone is different than
# the time zone in new_connection.settings_dict. We can
# get the default time zone by reset & show.
cursor = new_connection.cursor()
cursor.execute("RESET TIMEZONE")
cursor.execute("SHOW TIMEZONE")
db_default_tz = cursor.fetchone()[0]
new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
new_connection.close()
# Fetch a new connection with the new_tz as default
# time zone, run a query and rollback.
new_connection.settings_dict['TIME_ZONE'] = new_tz
new_connection.enter_transaction_management()
cursor = new_connection.cursor()
new_connection.rollback()
# Now let's see if the rollback rolled back the SET TIME ZONE.
cursor.execute("SHOW TIMEZONE")
tz = cursor.fetchone()[0]
self.assertEqual(new_tz, tz)
finally:
new_connection.close()
@unittest.skipUnless(
connection.vendor == 'postgresql',
"This test applies only to PostgreSQL")
def test_connect_non_autocommit(self):
"""
The connection wrapper shouldn't believe that autocommit is enabled
after setting the time zone when AUTOCOMMIT is False (#21452).
"""
databases = copy.deepcopy(settings.DATABASES)
databases[DEFAULT_DB_ALIAS]['AUTOCOMMIT'] = False
new_connections = ConnectionHandler(databases)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
# Open a database connection.
new_connection.cursor()
self.assertFalse(new_connection.get_autocommit())
finally:
new_connection.close()
# This test needs to run outside of a transaction, otherwise closing the
# connection would implicitly rollback and cause problems during teardown.
class ConnectionCreatedSignalTest(TransactionTestCase):
available_apps = []
# Unfortunately with sqlite3 the in-memory test database cannot be closed,
# and so it cannot be re-opened during testing.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_signal(self):
data = {}
def receiver(sender, connection, **kwargs):
data["connection"] = connection
connection_created.connect(receiver)
connection.close()
connection.cursor()
self.assertTrue(data["connection"].connection is connection.connection)
connection_created.disconnect(receiver)
data.clear()
connection.cursor()
self.assertTrue(data == {})
class EscapingChecks(TestCase):
"""
All tests in this test case are also run with settings.DEBUG=True in
EscapingChecksDebug test case, to also test CursorDebugWrapper.
"""
# For Oracle, when you want to select a value, you need to specify the
# special pseudo-table 'dual'; a select with no from clause is invalid.
bare_select_suffix = " FROM DUAL" if connection.vendor == 'oracle' else ""
def test_paramless_no_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%s'" + self.bare_select_suffix)
self.assertEqual(cursor.fetchall()[0][0], '%s')
def test_parameter_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%%', %s" + self.bare_select_suffix, ('%d',))
self.assertEqual(cursor.fetchall()[0], ('%', '%d'))
@unittest.skipUnless(connection.vendor == 'sqlite',
"This is an sqlite-specific issue")
def test_sqlite_parameter_escaping(self):
#13648: '%s' escaping support for sqlite3
cursor = connection.cursor()
cursor.execute("select strftime('%s', date('now'))")
response = cursor.fetchall()[0][0]
        # response should be a non-zero integer
self.assertTrue(int(response))
@override_settings(DEBUG=True)
class EscapingChecksDebug(EscapingChecks):
pass
class SqliteAggregationTests(TestCase):
"""
#19360: Raise NotImplementedError when aggregating on date/time fields.
"""
@unittest.skipUnless(connection.vendor == 'sqlite',
"No need to check SQLite aggregation semantics")
def test_aggregation(self):
for aggregate in (Sum, Avg, Variance, StdDev):
self.assertRaises(NotImplementedError,
models.Item.objects.all().aggregate, aggregate('time'))
self.assertRaises(NotImplementedError,
models.Item.objects.all().aggregate, aggregate('date'))
self.assertRaises(NotImplementedError,
models.Item.objects.all().aggregate, aggregate('last_modified'))
class SqliteChecks(TestCase):
@unittest.skipUnless(connection.vendor == 'sqlite',
"No need to do SQLite checks")
def test_convert_values_to_handle_null_value(self):
database_operations = DatabaseOperations(connection)
self.assertEqual(
None,
database_operations.convert_values(None, AutoField(primary_key=True))
)
self.assertEqual(
None,
database_operations.convert_values(None, DateField())
)
self.assertEqual(
None,
database_operations.convert_values(None, DateTimeField())
)
self.assertEqual(
None,
database_operations.convert_values(None, DecimalField())
)
self.assertEqual(
None,
database_operations.convert_values(None, IntegerField())
)
self.assertEqual(
None,
database_operations.convert_values(None, TimeField())
)
class BackendTestCase(TestCase):
def create_squares_with_executemany(self, args):
self.create_squares(args, 'format', True)
def create_squares(self, args, paramstyle, multiple):
cursor = connection.cursor()
opts = models.Square._meta
tbl = connection.introspection.table_name_converter(opts.db_table)
f1 = connection.ops.quote_name(opts.get_field('root').column)
f2 = connection.ops.quote_name(opts.get_field('square').column)
        if paramstyle == 'format':
query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
        elif paramstyle == 'pyformat':
query = 'INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)' % (tbl, f1, f2)
else:
raise ValueError("unsupported paramstyle in test")
if multiple:
cursor.executemany(query, args)
else:
cursor.execute(query, args)
def test_cursor_executemany(self):
#4896: Test cursor.executemany
args = [(i, i**2) for i in range(-5, 6)]
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i**2)
def test_cursor_executemany_with_empty_params_list(self):
#4765: executemany with params=[] does nothing
args = []
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
#10320: executemany accepts iterators
args = iter((i, i**2) for i in range(-3, 2))
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 5)
args = iter((i, i**2) for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 9)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_execute_with_pyformat(self):
        #10070: Support pyformat style passing of parameters
args = {'root': 3, 'square': 9}
self.create_squares(args, 'pyformat', multiple=False)
self.assertEqual(models.Square.objects.count(), 1)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat(self):
        #10070: Support pyformat style passing of parameters
args = [{'root': i, 'square': i**2} for i in range(-5, 6)]
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i**2)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat_iterator(self):
args = iter({'root': i, 'square': i**2} for i in range(-3, 2))
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 5)
args = iter({'root': i, 'square': i**2} for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 9)
def test_unicode_fetches(self):
#6254: fetchone, fetchmany, fetchall return strings as unicode objects
qn = connection.ops.quote_name
models.Person(first_name="John", last_name="Doe").save()
models.Person(first_name="Jane", last_name="Doe").save()
models.Person(first_name="Mary", last_name="Agnelline").save()
models.Person(first_name="Peter", last_name="Parker").save()
models.Person(first_name="Clark", last_name="Kent").save()
opts2 = models.Person._meta
f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
query2 = ('SELECT %s, %s FROM %s ORDER BY %s'
% (qn(f3.column), qn(f4.column), connection.introspection.table_name_converter(opts2.db_table),
qn(f3.column)))
cursor = connection.cursor()
cursor.execute(query2)
self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])
def test_unicode_password(self):
old_password = connection.settings_dict['PASSWORD']
connection.settings_dict['PASSWORD'] = "françois"
try:
connection.cursor()
except DatabaseError:
# As password is probably wrong, a database exception is expected
pass
except Exception as e:
self.fail("Unexpected error raised with unicode password: %s" % e)
finally:
connection.settings_dict['PASSWORD'] = old_password
def test_database_operations_helper_class(self):
# Ticket #13630
self.assertTrue(hasattr(connection, 'ops'))
self.assertTrue(hasattr(connection.ops, 'connection'))
self.assertEqual(connection, connection.ops.connection)
def test_cached_db_features(self):
self.assertIn(connection.features.supports_transactions, (True, False))
self.assertIn(connection.features.supports_stddev, (True, False))
self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))
def test_duplicate_table_error(self):
""" Test that creating an existing table returns a DatabaseError """
cursor = connection.cursor()
query = 'CREATE TABLE %s (id INTEGER);' % models.Article._meta.db_table
with self.assertRaises(DatabaseError):
cursor.execute(query)
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_is_usable_after_database_disconnects(self):
"""
Test that is_usable() doesn't crash when the database disconnects.
Regression for #21553.
"""
# Open a connection to the database.
connection.cursor().close()
# Emulate a connection close by the database.
connection._close()
# Even then is_usable() should not raise an exception.
try:
self.assertFalse(connection.is_usable())
finally:
# Clean up the mess created by connection._close(). Since the
# connection is already closed, this crashes on some backends.
try:
connection.close()
except Exception:
pass
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MYISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
# on or not, something that would be controlled by runtime support and user
# preference.
# Instead, the tests always run and simply verify that the exception
# raised is django.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
available_apps = ['backends']
def setUp(self):
# Create a Reporter.
self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')
def test_integrity_checks_on_creation(self):
"""
Try to create a model instance that violates a FK constraint. If it
fails it should fail with IntegrityError.
"""
a1 = models.Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
# constraints are also enforced for proxy models. Refs #17519
a2 = models.Article(headline='This is another test', reporter=self.r,
pub_date=datetime.datetime(2012, 8, 3),
reporter_proxy_id=30)
self.assertRaises(IntegrityError, a2.save)
def test_integrity_checks_on_update(self):
"""
Try to update a model instance introducing a FK constraint violation.
If it fails it should fail with IntegrityError.
"""
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a1 = models.Article.objects.get(headline="Test article")
a1.reporter_id = 30
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
# constraints are also enforced for proxy models. Refs #17519
# Create another article
r_proxy = models.ReporterProxy.objects.get(pk=self.r.pk)
models.Article.objects.create(headline='Another article',
pub_date=datetime.datetime(1988, 5, 15),
reporter=self.r, reporter_proxy=r_proxy)
        # Retrieve the second article from the DB
a2 = models.Article.objects.get(headline='Another article')
a2.reporter_proxy_id = 30
self.assertRaises(IntegrityError, a2.save)
def test_disable_constraint_checks_manually(self):
"""
        When constraint checks are disabled, we should be able to write bad data without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
connection.disable_constraint_checking()
a.save()
connection.enable_constraint_checking()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_disable_constraint_checks_context_manager(self):
"""
        When constraint checks are disabled (using the context manager), we should be able to write bad data without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_check_constraints(self):
"""
Constraint checks should raise an IntegrityError when bad data is in the DB.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
with connection.constraint_checks_disabled():
a.save()
with self.assertRaises(IntegrityError):
connection.check_constraints()
transaction.set_rollback(True)
class ThreadTests(TestCase):
def test_default_connection_thread_local(self):
"""
Ensure that the default connection (i.e. django.db.connection) is
different for each thread.
Refs #17258.
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
connection.cursor()
connections_dict[id(connection)] = connection
def runner():
# Passing django.db.connection between threads doesn't work while
# connections[DEFAULT_DB_ALIAS] does.
from django.db import connections
connection = connections[DEFAULT_DB_ALIAS]
# Allow thread sharing so the connection can be closed by the
# main thread.
connection.allow_thread_sharing = True
connection.cursor()
connections_dict[id(connection)] = connection
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
        # Check that each created connection got a different inner connection.
self.assertEqual(
len(set(conn.connection for conn in connections_dict.values())),
3)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_connections_thread_local(self):
"""
Ensure that the connections are different for each thread.
Refs #17258.
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
for conn in connections.all():
connections_dict[id(conn)] = conn
def runner():
from django.db import connections
for conn in connections.all():
# Allow thread sharing so the connection can be closed by the
# main thread.
conn.allow_thread_sharing = True
connections_dict[id(conn)] = conn
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertEqual(len(connections_dict), 6)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_pass_connection_between_threads(self):
"""
Ensure that a connection can be passed from one thread to the other.
Refs #17258.
"""
models.Person.objects.create(first_name="John", last_name="Doe")
def do_thread():
def runner(main_thread_connection):
from django.db import connections
connections['default'] = main_thread_connection
try:
models.Person.objects.get(first_name="John", last_name="Doe")
except Exception as e:
exceptions.append(e)
t = threading.Thread(target=runner, args=[connections['default']])
t.start()
t.join()
# Without touching allow_thread_sharing, which should be False by default.
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to False
connections['default'].allow_thread_sharing = False
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to True
connections['default'].allow_thread_sharing = True
exceptions = []
do_thread()
# All good
self.assertEqual(exceptions, [])
def test_closing_non_shared_connections(self):
"""
Ensure that a connection that is not explicitly shareable cannot be
closed by another thread.
Refs #17258.
"""
# First, without explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# The exception was raised
self.assertEqual(len(exceptions), 1)
# Then, with explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
# Enable thread sharing
connections['default'].allow_thread_sharing = True
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# No exception was raised
self.assertEqual(len(exceptions), 0)
class MySQLPKZeroTests(TestCase):
"""
    Zero as id for AutoField should raise an exception in MySQL, because
    MySQL does not allow zero for an automatic primary key.
"""
@skipIfDBFeature('allows_primary_key_0')
def test_zero_as_autoval(self):
with self.assertRaises(ValueError):
models.Square.objects.create(id=0, root=0, square=1)
class DBConstraintTestCase(TransactionTestCase):
available_apps = ['backends']
def test_can_reference_existant(self):
obj = models.Object.objects.create()
ref = models.ObjectReference.objects.create(obj=obj)
self.assertEqual(ref.obj, obj)
ref = models.ObjectReference.objects.get(obj=obj)
self.assertEqual(ref.obj, obj)
def test_can_reference_non_existant(self):
self.assertFalse(models.Object.objects.filter(id=12345).exists())
ref = models.ObjectReference.objects.create(obj_id=12345)
ref_new = models.ObjectReference.objects.get(obj_id=12345)
self.assertEqual(ref, ref_new)
with self.assertRaises(models.Object.DoesNotExist):
ref.obj
def test_many_to_many(self):
obj = models.Object.objects.create()
obj.related_objects.create()
self.assertEqual(models.Object.objects.count(), 2)
self.assertEqual(obj.related_objects.count(), 1)
intermediary_model = models.Object._meta.get_field_by_name("related_objects")[0].rel.through
intermediary_model.objects.create(from_object_id=obj.id, to_object_id=12345)
self.assertEqual(obj.related_objects.count(), 1)
self.assertEqual(intermediary_model.objects.count(), 2)
class BackendUtilTests(TestCase):
def test_format_number(self):
"""
Test the format_number converter utility
"""
def equal(value, max_d, places, result):
self.assertEqual(format_number(Decimal(value), max_d, places), result)
equal('0', 12, 3,
'0.000')
equal('0', 12, 8,
'0.00000000')
equal('1', 12, 9,
'1.000000000')
equal('0.00000000', 12, 8,
'0.00000000')
equal('0.000000004', 12, 8,
'0.00000000')
equal('0.000000008', 12, 8,
'0.00000001')
equal('0.000000000000000000999', 10, 8,
'0.00000000')
equal('0.1234567890', 12, 10,
'0.1234567890')
equal('0.1234567890', 12, 9,
'0.123456789')
equal('0.1234567890', 12, 8,
'0.12345679')
equal('0.1234567890', 12, 5,
'0.12346')
equal('0.1234567890', 12, 3,
'0.123')
equal('0.1234567890', 12, 1,
'0.1')
equal('0.1234567890', 12, 0,
'0')
| apache-2.0 |
grlee77/nipype | nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py | 9 | 1171 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.slicer.surface import ProbeVolumeWithModel
def test_ProbeVolumeWithModel_inputs():
input_map = dict(InputModel=dict(argstr='%s',
position=-2,
),
InputVolume=dict(argstr='%s',
position=-3,
),
OutputModel=dict(argstr='%s',
hash_files=False,
position=-1,
),
args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
terminal_output=dict(nohash=True,
),
)
inputs = ProbeVolumeWithModel.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_ProbeVolumeWithModel_outputs():
output_map = dict(OutputModel=dict(position=-1,
),
)
outputs = ProbeVolumeWithModel.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
natecook1000/swift | utils/gyb_syntax_support/Token.py | 3 | 13979 | from Classification import classification_by_name
from Node import error
from kinds import lowercase_first_word
class Token(object):
"""
Represents the specification for a Token in the TokenSyntax file.
"""
def __init__(self, name, kind, serialization_code, unprefixed_kind=None,
text=None, classification='None', is_keyword=False):
self.name = name
self.kind = kind
if unprefixed_kind is None:
self.unprefixed_kind = kind
else:
self.unprefixed_kind = unprefixed_kind
self.serialization_code = serialization_code
self.text = text or ""
self.classification = classification_by_name(classification)
self.is_keyword = is_keyword
def swift_kind(self):
name = lowercase_first_word(self.name)
if self.is_keyword:
return name + 'Keyword'
return name
class Keyword(Token):
"""
Represents a keyword token.
"""
def __init__(self, name, text, serialization_code,
classification='Keyword'):
Token.__init__(self, name, 'kw_' + text, serialization_code,
unprefixed_kind=text, text=text,
classification=classification, is_keyword=True)
def macro_name(self):
return "KEYWORD"
class SwiftKeyword(Keyword):
def macro_name(self):
return "SWIFT_KEYWORD"
class DeclKeyword(SwiftKeyword):
def macro_name(self):
return "DECL_KEYWORD"
class StmtKeyword(SwiftKeyword):
def macro_name(self):
return "STMT_KEYWORD"
class ExprKeyword(SwiftKeyword):
def macro_name(self):
return "EXPR_KEYWORD"
class PatternKeyword(SwiftKeyword):
def macro_name(self):
return "PAT_KEYWORD"
class SilKeyword(Keyword):
def macro_name(self):
return "SIL_KEYWORD"
class PoundKeyword(Token):
def __init__(self, name, kind, text, serialization_code,
classification='Keyword'):
Token.__init__(self, name, 'pound_' + kind, serialization_code,
unprefixed_kind=kind, text=text,
classification=classification, is_keyword=True)
def macro_name(self):
return "POUND_KEYWORD"
class PoundObjectLiteral(PoundKeyword):
def __init__(self, name, kind, text, serialization_code, description,
protocol, classification='ObjectLiteral'):
PoundKeyword.__init__(self, name, kind, text, serialization_code,
classification)
self.description = description
self.protocol = protocol
def macro_name(self):
return "POUND_OBJECT_LITERAL"
class PoundConfig(PoundKeyword):
def macro_name(self):
return "POUND_CONFIG"
class PoundDirectiveKeyword(PoundKeyword):
def __init__(self, name, kind, text, serialization_code,
classification='PoundDirectiveKeyword'):
PoundKeyword.__init__(self, name, kind, text, serialization_code,
classification)
def macro_name(self):
return "POUND_DIRECTIVE_KEYWORD"
class PoundConditionalDirectiveKeyword(PoundDirectiveKeyword):
def __init__(self, name, kind, text, serialization_code,
classification='PoundDirectiveKeyword'):
PoundKeyword.__init__(self, name, kind, text, serialization_code,
classification)
def macro_name(self):
return "POUND_COND_DIRECTIVE_KEYWORD"
class Punctuator(Token):
def macro_name(self):
return "PUNCTUATOR"
class Literal(Token):
def macro_name(self):
return "LITERAL"
class Misc(Token):
def macro_name(self):
return "MISC"
SYNTAX_TOKENS = [
# Keywords that start decls
DeclKeyword('Associatedtype', 'associatedtype', serialization_code=1),
DeclKeyword('Class', 'class', serialization_code=2),
DeclKeyword('Deinit', 'deinit', serialization_code=3),
DeclKeyword('Enum', 'enum', serialization_code=4),
DeclKeyword('Extension', 'extension', serialization_code=5),
DeclKeyword('Func', 'func', serialization_code=6),
DeclKeyword('Import', 'import', serialization_code=7),
DeclKeyword('Init', 'init', serialization_code=8),
DeclKeyword('Inout', 'inout', serialization_code=9),
DeclKeyword('Let', 'let', serialization_code=10),
DeclKeyword('Operator', 'operator', serialization_code=11),
DeclKeyword('Precedencegroup', 'precedencegroup', serialization_code=12),
DeclKeyword('Protocol', 'protocol', serialization_code=13),
DeclKeyword('Struct', 'struct', serialization_code=14),
DeclKeyword('Subscript', 'subscript', serialization_code=15),
DeclKeyword('Typealias', 'typealias', serialization_code=16),
DeclKeyword('Var', 'var', serialization_code=17),
DeclKeyword('Fileprivate', 'fileprivate', serialization_code=18),
DeclKeyword('Internal', 'internal', serialization_code=19),
DeclKeyword('Private', 'private', serialization_code=20),
DeclKeyword('Public', 'public', serialization_code=21),
DeclKeyword('Static', 'static', serialization_code=22),
# Statement keywords
StmtKeyword('Defer', 'defer', serialization_code=23),
StmtKeyword('If', 'if', serialization_code=24),
StmtKeyword('Guard', 'guard', serialization_code=25),
StmtKeyword('Do', 'do', serialization_code=26),
StmtKeyword('Repeat', 'repeat', serialization_code=27),
StmtKeyword('Else', 'else', serialization_code=28),
StmtKeyword('For', 'for', serialization_code=29),
StmtKeyword('In', 'in', serialization_code=30),
StmtKeyword('While', 'while', serialization_code=31),
StmtKeyword('Return', 'return', serialization_code=32),
StmtKeyword('Break', 'break', serialization_code=33),
StmtKeyword('Continue', 'continue', serialization_code=34),
StmtKeyword('Fallthrough', 'fallthrough', serialization_code=35),
StmtKeyword('Switch', 'switch', serialization_code=36),
StmtKeyword('Case', 'case', serialization_code=37),
StmtKeyword('Default', 'default', serialization_code=38),
StmtKeyword('Where', 'where', serialization_code=39),
StmtKeyword('Catch', 'catch', serialization_code=40),
StmtKeyword('Throw', 'throw', serialization_code=50),
# Expression keywords
ExprKeyword('As', 'as', serialization_code=41),
ExprKeyword('Any', 'Any', serialization_code=42),
ExprKeyword('False', 'false', serialization_code=43),
ExprKeyword('Is', 'is', serialization_code=44),
ExprKeyword('Nil', 'nil', serialization_code=45),
ExprKeyword('Rethrows', 'rethrows', serialization_code=46),
ExprKeyword('Super', 'super', serialization_code=47),
ExprKeyword('Self', 'self', serialization_code=48),
ExprKeyword('CapitalSelf', 'Self', serialization_code=49),
ExprKeyword('True', 'true', serialization_code=51),
ExprKeyword('Try', 'try', serialization_code=52),
ExprKeyword('Throws', 'throws', serialization_code=53),
Keyword('__FILE__', '__FILE__', serialization_code=54),
Keyword('__LINE__', '__LINE__', serialization_code=55),
Keyword('__COLUMN__', '__COLUMN__', serialization_code=56),
Keyword('__FUNCTION__', '__FUNCTION__', serialization_code=57),
Keyword('__DSO_HANDLE__', '__DSO_HANDLE__', serialization_code=58),
# Pattern keywords
PatternKeyword('Wildcard', '_', serialization_code=59),
# Punctuators
Punctuator('LeftParen', 'l_paren', text='(', serialization_code=88),
Punctuator('RightParen', 'r_paren', text=')', serialization_code=89),
Punctuator('LeftBrace', 'l_brace', text='{', serialization_code=90),
Punctuator('RightBrace', 'r_brace', text='}', serialization_code=91),
Punctuator('LeftSquareBracket', 'l_square', text='[',
serialization_code=92),
Punctuator('RightSquareBracket', 'r_square', text=']',
serialization_code=93),
Punctuator('LeftAngle', 'l_angle', text='<', serialization_code=94),
Punctuator('RightAngle', 'r_angle', text='>', serialization_code=95),
Punctuator('Period', 'period', text='.', serialization_code=85),
Punctuator('PrefixPeriod', 'period_prefix', text='.',
serialization_code=87),
Punctuator('Comma', 'comma', text=',', serialization_code=84),
Punctuator('Colon', 'colon', text=':', serialization_code=82),
Punctuator('Semicolon', 'semi', text=';', serialization_code=83),
Punctuator('Equal', 'equal', text='=', serialization_code=86),
Punctuator('AtSign', 'at_sign', text='@', classification='Attribute',
serialization_code=80),
Punctuator('Pound', 'pound', text='#', serialization_code=81),
Punctuator('PrefixAmpersand', 'amp_prefix', text='&',
serialization_code=96),
Punctuator('Arrow', 'arrow', text='->', serialization_code=78),
Punctuator('Backtick', 'backtick', text='`', serialization_code=79),
Punctuator('Backslash', 'backslash', text='\\\\', serialization_code=100),
Punctuator('ExclamationMark', 'exclaim_postfix', text='!',
serialization_code=99),
Punctuator('PostfixQuestionMark', 'question_postfix', text='?',
serialization_code=97),
Punctuator('InfixQuestionMark', 'question_infix', text='?',
serialization_code=98),
Punctuator('StringQuote', 'string_quote', text='\\\"',
classification='StringLiteral', serialization_code=102),
Punctuator('MultilineStringQuote', 'multiline_string_quote',
text='\\\"\\\"\\\"', classification='StringLiteral',
serialization_code=103),
# Keywords prefixed with a '#'.
PoundKeyword('PoundKeyPath', 'keyPath', text='#keyPath',
serialization_code=74),
PoundKeyword('PoundLine', 'line', text='#line',
serialization_code=69),
PoundKeyword('PoundSelector', 'selector', text='#selector',
serialization_code=73),
PoundKeyword('PoundFile', 'file', text='#file',
serialization_code=68),
PoundKeyword('PoundColumn', 'column', text='#column',
serialization_code=70),
PoundKeyword('PoundFunction', 'function', text='#function',
serialization_code=72),
PoundKeyword('PoundDsohandle', 'dsohandle', text='#dsohandle',
serialization_code=71),
PoundDirectiveKeyword('PoundSourceLocation', 'sourceLocation',
text='#sourceLocation', serialization_code=65),
PoundDirectiveKeyword('PoundWarning', 'warning', text='#warning',
serialization_code=66),
PoundDirectiveKeyword('PoundError', 'error', text='#error',
serialization_code=67),
PoundConditionalDirectiveKeyword('PoundIf', 'if', text='#if',
serialization_code=64),
PoundConditionalDirectiveKeyword('PoundElse', 'else', text='#else',
serialization_code=62),
PoundConditionalDirectiveKeyword('PoundElseif', 'elseif',
text='#elseif', serialization_code=63),
PoundConditionalDirectiveKeyword('PoundEndif', 'endif',
text='#endif', serialization_code=61),
PoundConfig('PoundAvailable', 'available', text='#available',
serialization_code=60),
PoundObjectLiteral('PoundFileLiteral', 'fileLiteral',
text='#fileLiteral', serialization_code=76,
description='file reference',
protocol='ExpressibleByFileReferenceLiteral'),
PoundObjectLiteral('PoundImageLiteral', 'imageLiteral',
text='#imageLiteral', serialization_code=77,
description='image',
protocol='ExpressibleByImageLiteral'),
PoundObjectLiteral('PoundColorLiteral', 'colorLiteral',
text='#colorLiteral', serialization_code=75,
description='color',
protocol='ExpressibleByColorLiteral'),
Literal('IntegerLiteral', 'integer_literal',
classification='IntegerLiteral', serialization_code=111),
Literal('FloatingLiteral', 'floating_literal',
classification='FloatingLiteral', serialization_code=112),
Literal('StringLiteral', 'string_literal',
classification='StringLiteral', serialization_code=113),
Misc('Unknown', 'unknown', serialization_code=115),
Misc('Identifier', 'identifier', classification=None,
serialization_code=105),
Misc('UnspacedBinaryOperator', 'oper_binary_unspaced',
serialization_code=107),
Misc('SpacedBinaryOperator', 'oper_binary_spaced', serialization_code=108),
Misc('PostfixOperator', 'oper_postfix', serialization_code=110),
Misc('PrefixOperator', 'oper_prefix', serialization_code=109),
Misc('DollarIdentifier', 'dollarident', classification='DollarIdentifier',
serialization_code=106),
Misc('ContextualKeyword', 'contextual_keyword', classification='Keyword',
serialization_code=114),
Misc('StringSegment', 'string_segment', classification='StringLiteral',
serialization_code=104),
Misc('StringInterpolationAnchor', 'string_interpolation_anchor',
text=')', classification='StringInterpolationAnchor',
serialization_code=101),
Misc('Yield', 'kw_yield', serialization_code=116, text='yield'),
]
SYNTAX_TOKEN_MAP = {token.name + 'Token': token for token in SYNTAX_TOKENS}
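# Illustrative lookups (not executed here): the map is keyed by the token
# name with a 'Token' suffix, e.g.
#   SYNTAX_TOKEN_MAP['IdentifierToken'].kind == 'identifier'
#   SYNTAX_TOKEN_MAP['StructToken'].swift_kind() == 'structKeyword'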
def verify_no_duplicate_serialization_codes(tokens):
used_codes = set()
for token in tokens:
if token.serialization_code in used_codes:
error("Serialization code %d used twice for tokens" %
token.serialization_code)
used_codes.add(token.serialization_code)
verify_no_duplicate_serialization_codes(SYNTAX_TOKENS)
| apache-2.0 |
ramitalat/odoo | addons/l10n_ma/__openerp__.py | 260 | 2074 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010 kazacube (http://kazacube.com).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Maroc - Accounting',
'version' : '1.0',
'author' : 'kazacube',
'category' : 'Localization/Account Charts',
'description': """
This is the base module to manage the accounting chart for Maroc.
=================================================================
This module loads the template of the standard Moroccan chart of accounts and
makes it possible to generate accounting statements that comply with Moroccan
standards (balance sheet, CPC (statement of income and expenses), six-column
general trial balance, cumulative general ledger, ...). The accounting
integration was validated with the help of the chartered accountancy firm
Cabinet Seddik during the third quarter of 2010.""",
'website': 'http://www.kazacube.com',
'depends' : ['base', 'account'],
'data' : [
'security/ir.model.access.csv',
'account_type.xml',
'account_pcg_morocco.xml',
'l10n_ma_wizard.xml',
'l10n_ma_tax.xml',
'l10n_ma_journal.xml',
],
'demo' : [],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
waveFrontSet/box_management | config/wsgi.py | 1 | 1714 | """
WSGI config for box_management project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
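# A minimal pass-through middleware (a hypothetical sketch, shown only to
# illustrate the wrapping pattern) might look like:
#
# class PassThroughMiddleware(object):
#     def __init__(self, app):
#         self.app = app
#     def __call__(self, environ, start_response):
#         # Inspect or modify environ here before delegating.
#         return self.app(environ, start_response)
#
# application = PassThroughMiddleware(application)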
| mit |
senttech/Cura | plugins/RemovableDriveOutputDevice/WindowsRemovableDrivePlugin.py | 2 | 5890 | # Copyright (c) 2015 Ultimaker B.V.
# Copyright (c) 2013 David Braam
# Uranium is released under the terms of the AGPLv3 or higher.
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
from . import RemovableDrivePlugin
import string
import ctypes
from ctypes import wintypes # Using ctypes.wintypes in the code below does not seem to work
# WinAPI Constants that we need
# Hardcoded here due to stupid WinDLL stuff that does not give us access to these values.
DRIVE_REMOVABLE = 2 # [CodeStyle: Windows Enum value]
GENERIC_READ = 2147483648 # [CodeStyle: Windows Enum value]
GENERIC_WRITE = 1073741824 # [CodeStyle: Windows Enum value]
FILE_SHARE_READ = 1 # [CodeStyle: Windows Enum value]
FILE_SHARE_WRITE = 2 # [CodeStyle: Windows Enum value]
IOCTL_STORAGE_EJECT_MEDIA = 2967560 # [CodeStyle: Windows Enum value]
OPEN_EXISTING = 3 # [CodeStyle: Windows Enum value]
# Setup the DeviceIoControl function arguments and return type.
# See ctypes documentation for details on how to call C functions from python, and why this is important.
ctypes.windll.kernel32.DeviceIoControl.argtypes = [
wintypes.HANDLE, # _In_ HANDLE hDevice
wintypes.DWORD, # _In_ DWORD dwIoControlCode
wintypes.LPVOID, # _In_opt_ LPVOID lpInBuffer
wintypes.DWORD, # _In_ DWORD nInBufferSize
wintypes.LPVOID, # _Out_opt_ LPVOID lpOutBuffer
wintypes.DWORD, # _In_ DWORD nOutBufferSize
ctypes.POINTER(wintypes.DWORD), # _Out_opt_ LPDWORD lpBytesReturned
wintypes.LPVOID # _Inout_opt_ LPOVERLAPPED lpOverlapped
]
ctypes.windll.kernel32.DeviceIoControl.restype = wintypes.BOOL
## Removable drive support for windows
class WindowsRemovableDrivePlugin(RemovableDrivePlugin.RemovableDrivePlugin):
def checkRemovableDrives(self):
drives = {}
bitmask = ctypes.windll.kernel32.GetLogicalDrives()
# Check possible drive letters, from A to Z
# Note: using ascii_uppercase because we do not want this to change with locale!
for letter in string.ascii_uppercase:
drive = "{0}:/".format(letter)
# Do we really want to skip A and B?
            # GetDriveTypeA explicitly wants a byte array of type ascii. It will accept a string, but this won't work
if bitmask & 1 and ctypes.windll.kernel32.GetDriveTypeA(drive.encode("ascii")) == DRIVE_REMOVABLE:
volume_name = ""
name_buffer = ctypes.create_unicode_buffer(1024)
filesystem_buffer = ctypes.create_unicode_buffer(1024)
error = ctypes.windll.kernel32.GetVolumeInformationW(ctypes.c_wchar_p(drive), name_buffer, ctypes.sizeof(name_buffer), None, None, None, filesystem_buffer, ctypes.sizeof(filesystem_buffer))
if error != 0:
volume_name = name_buffer.value
if not volume_name:
volume_name = catalog.i18nc("@item:intext", "Removable Drive")
# Certain readers will report themselves as a volume even when there is no card inserted, but will show an
# "No volume in drive" warning when trying to call GetDiskFreeSpace. However, they will not report a valid
# filesystem, so we can filter on that. In addition, this excludes other things with filesystems Windows
# does not support.
if filesystem_buffer.value == "":
continue
# Check for the free space. Some card readers show up as a drive with 0 space free when there is no card inserted.
free_bytes = ctypes.c_longlong(0)
if ctypes.windll.kernel32.GetDiskFreeSpaceExA(drive.encode("ascii"), ctypes.byref(free_bytes), None, None) == 0:
continue
if free_bytes.value < 1:
continue
drives[drive] = "{0} ({1}:)".format(volume_name, letter)
bitmask >>= 1
return drives
def performEjectDevice(self, device):
# Magic WinAPI stuff
# First, open a handle to the Device
handle = ctypes.windll.kernel32.CreateFileA("\\\\.\\{0}".format(device.getId()[:-1]).encode("ascii"), GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE, None, OPEN_EXISTING, 0, None )
if handle == -1:
            # ctypes.WinError sets up a GetLastError API call for Windows as a Python OSError exception.
            # So we use this to raise the error to our caller.
raise ctypes.WinError()
        # DeviceIoControl requires bytes_returned to be a valid DWORD pointer.
        # So create a ctypes DWORD to reference. (Without this pointer the DeviceIoControl function will crash with an access violation after doing its job.)
bytes_returned = wintypes.DWORD(0)
error = None
# Then, try and tell it to eject
return_code = ctypes.windll.kernel32.DeviceIoControl(handle, IOCTL_STORAGE_EJECT_MEDIA, None, 0, None, 0, ctypes.pointer(bytes_returned), None)
        # DeviceIoControl with IOCTL_STORAGE_EJECT_MEDIA returns 0 on error.
if return_code == 0:
            # ctypes.WinError sets up a GetLastError API call for Windows as a Python OSError exception.
            # So we use this to raise the error to our caller.
error = ctypes.WinError()
# Do not raise an error here yet, so we can properly close the handle.
# Finally, close the handle
ctypes.windll.kernel32.CloseHandle(handle)
# If an error happened in the DeviceIoControl, raise it now.
if error:
raise error
# Return success
        return True
 | agpl-3.0 |
RaoUmer/cudamat | examples/rbm_cudamat.py | 6 | 2457 | from __future__ import division
import time
import numpy as np
import cudamat as cm
import util
# initialize CUDA
cm.cublas_init()
cm.CUDAMatrix.init_random(1)
# load data
util.load('mnist.dat', globals())
dev_dat = cm.CUDAMatrix(cm.reformat(dat/255.))
# training parameters
epsilon = 0.1
momentum = 0.9
num_epochs = 30
batch_size = 128
num_batches = dat.shape[1]//batch_size
# model parameters
num_vis = dat.shape[0]
num_hid = 4096
# initialize weights
w_vh = cm.CUDAMatrix(0.1 * np.random.randn(num_vis, num_hid))
w_v = cm.CUDAMatrix(np.zeros((num_vis, 1)))
w_h = cm.CUDAMatrix(-4.*np.ones((num_hid, 1)))
# initialize weight updates
wu_vh = cm.CUDAMatrix(np.zeros((num_vis, num_hid)))
wu_v = cm.CUDAMatrix(np.zeros((num_vis, 1)))
wu_h = cm.CUDAMatrix(np.zeros((num_hid, 1)))
# initialize temporary storage
v = cm.empty((num_vis, batch_size))
h = cm.empty((num_hid, batch_size))
r = cm.empty((num_hid, batch_size))
start_time = time.time()
for epoch in range(num_epochs):
print("Epoch %i" % (epoch + 1))
err = []
for batch in range(num_batches):
# get current minibatch
v_true = dev_dat.slice(batch*batch_size,(batch + 1)*batch_size)
v.assign(v_true)
# apply momentum
wu_vh.mult(momentum)
wu_v.mult(momentum)
wu_h.mult(momentum)
# positive phase
cm.dot(w_vh.T, v, target = h)
h.add_col_vec(w_h)
h.apply_sigmoid()
wu_vh.add_dot(v, h.T)
wu_v.add_sums(v, axis = 1)
wu_h.add_sums(h, axis = 1)
# sample hiddens
r.fill_with_rand()
r.less_than(h, target = h)
# negative phase
cm.dot(w_vh, h, target = v)
v.add_col_vec(w_v)
v.apply_sigmoid()
cm.dot(w_vh.T, v, target = h)
h.add_col_vec(w_h)
h.apply_sigmoid()
wu_vh.subtract_dot(v, h.T)
wu_v.add_sums(v, axis = 1, mult = -1.)
wu_h.add_sums(h, axis = 1, mult = -1.)
# update weights
w_vh.add_mult(wu_vh, epsilon/batch_size)
w_v.add_mult(wu_v, epsilon/batch_size)
w_h.add_mult(wu_h, epsilon/batch_size)
# calculate reconstruction error
v.subtract(v_true)
err.append(v.euclid_norm()**2/(num_vis*batch_size))
print("Mean squared error: %f" % np.mean(err))
print("Time: %f" % (time.time() - start_time))
w_vh.copy_to_host()
util.save('weights.dat', 'w_vh', {'w_vh': w_vh.numpy_array})
cm.cublas_shutdown()
| bsd-3-clause |
loopCM/chromium | chrome/test/functional/perf/endure_result_parser.py | 59 | 29633 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to parse perf data from Chrome Endure test executions, to be graphed.
This script connects via HTTP to a buildbot master in order to scrape and parse
perf data from Chrome Endure tests that have been run. The perf data is then
stored in local text files to be graphed by the Chrome Endure graphing code.
It is assumed that any Chrome Endure tests that show up on the waterfall have
names that are of the following form:
"endure_<webapp_name>-<test_name>" (non-Web Page Replay tests)
or
"endure_<webapp_name>_wpr-<test_name>" (Web Page Replay tests)
For example: "endure_gmail_wpr-testGmailComposeDiscard"
This script accepts either a URL or a local path as a buildbot location.
It switches its behavior depending on whether a URL or a local path is given.
When a URL is given, it gets buildbot logs from the buildbot builders URL
e.g. http://build.chromium.org/p/chromium.endure/builders/.
When a local path is given, it gets buildbot logs from buildbot's internal
files in the directory e.g. /home/chrome-bot/buildbot.
"""
import cPickle
import getpass
import logging
import optparse
import os
import re
import simplejson
import socket
import string
import sys
import time
import urllib
import urllib2
CHROME_ENDURE_SLAVE_NAMES = [
'Linux QA Perf (0)',
'Linux QA Perf (1)',
'Linux QA Perf (2)',
'Linux QA Perf (3)',
'Linux QA Perf (4)',
'Linux QA Perf (dbg)(0)',
'Linux QA Perf (dbg)(1)',
'Linux QA Perf (dbg)(2)',
'Linux QA Perf (dbg)(3)',
'Linux QA Perf (dbg)(4)',
]
BUILDER_URL_BASE = 'http://build.chromium.org/p/chromium.endure/builders/'
LAST_BUILD_NUM_PROCESSED_FILE = os.path.join(os.path.dirname(__file__),
'_parser_last_processed.txt')
LOCAL_GRAPH_DIR = '/home/%s/www/chrome_endure_clean' % getpass.getuser()
MANGLE_TRANSLATION = string.maketrans(' ()', '___')
def SetupBaseGraphDirIfNeeded(webapp_name, test_name, dest_dir):
"""Sets up the directory containing results for a particular test, if needed.
Args:
webapp_name: The string name of the webapp associated with the given test.
test_name: The string name of the test.
dest_dir: The name of the destination directory that needs to be set up.
"""
if not os.path.exists(dest_dir):
os.mkdir(dest_dir) # Test name directory.
os.chmod(dest_dir, 0755)
# Create config file.
config_file = os.path.join(dest_dir, 'config.js')
if not os.path.exists(config_file):
with open(config_file, 'w') as f:
f.write('var Config = {\n')
f.write('buildslave: "Chrome Endure Bots",\n')
f.write('title: "Chrome Endure %s Test: %s",\n' % (webapp_name.upper(),
test_name))
f.write('};\n')
os.chmod(config_file, 0755)
# Set up symbolic links to the real graphing files.
link_file = os.path.join(dest_dir, 'index.html')
if not os.path.exists(link_file):
os.symlink('../../endure_plotter.html', link_file)
link_file = os.path.join(dest_dir, 'endure_plotter.js')
if not os.path.exists(link_file):
os.symlink('../../endure_plotter.js', link_file)
link_file = os.path.join(dest_dir, 'js')
if not os.path.exists(link_file):
os.symlink('../../js', link_file)
def WriteToDataFile(new_line, existing_lines, revision, data_file):
"""Writes a new entry to an existing perf data file to be graphed.
If there's an existing line with the same revision number, overwrite its data
with the new line. Else, prepend the info for the new revision.
Args:
new_line: A dictionary representing perf information for the new entry.
existing_lines: A list of string lines from the existing perf data file.
revision: The string revision number associated with the new perf entry.
data_file: The string name of the perf data file to which to write.
"""
overwritten = False
for i, line in enumerate(existing_lines):
line_dict = simplejson.loads(line)
if line_dict['rev'] == revision:
existing_lines[i] = simplejson.dumps(new_line)
overwritten = True
break
elif int(line_dict['rev']) < int(revision):
break
if not overwritten:
existing_lines.insert(0, simplejson.dumps(new_line))
with open(data_file, 'w') as f:
f.write('\n'.join(existing_lines))
os.chmod(data_file, 0755)
def OutputPerfData(revision, graph_name, values, units, units_x, dest_dir,
is_stacked=False, stack_order=[]):
"""Outputs perf data to a local text file to be graphed.
Args:
revision: The string revision number associated with the perf data.
graph_name: The string name of the graph on which to plot the data.
values: A dict which maps a description to a value. A value is either a
single data value to be graphed, or a list of 2-tuples
representing (x, y) points to be graphed for long-running tests.
units: The string description for the y-axis units on the graph.
units_x: The string description for the x-axis units on the graph. Should
be set to None if the results are not for long-running graphs.
dest_dir: The name of the destination directory to which to write.
is_stacked: True to draw a "stacked" graph. First-come values are
stacked at bottom by default.
stack_order: A list that contains key strings in the order to stack values
in the graph.
"""
# Update graphs.dat, which contains metadata associated with each graph.
existing_graphs = []
graphs_file = os.path.join(dest_dir, 'graphs.dat')
if os.path.exists(graphs_file):
with open(graphs_file, 'r') as f:
existing_graphs = simplejson.loads(f.read())
is_new_graph = True
for graph in existing_graphs:
if graph['name'] == graph_name:
is_new_graph = False
break
if is_new_graph:
new_graph = {
'name': graph_name,
'units': units,
'important': False,
}
if units_x:
new_graph['units_x'] = units_x
existing_graphs.append(new_graph)
existing_graphs = sorted(existing_graphs, key=lambda x: x['name'])
with open(graphs_file, 'w') as f:
f.write(simplejson.dumps(existing_graphs, indent=2))
os.chmod(graphs_file, 0755)
# Update summary data file, containing the actual data to be graphed.
data_file_name = graph_name + '-summary.dat'
existing_lines = []
data_file = os.path.join(dest_dir, data_file_name)
if os.path.exists(data_file):
with open(data_file, 'r') as f:
existing_lines = f.readlines()
existing_lines = map(lambda x: x.strip(), existing_lines)
new_traces = {}
for description in values:
value = values[description]
if units_x:
points = []
for point in value:
points.append([str(point[0]), str(point[1])])
new_traces[description] = points
else:
new_traces[description] = [str(value), str(0.0)]
new_line = {
'traces': new_traces,
'rev': revision
}
if is_stacked:
new_line['stack'] = True
new_line['stack_order'] = stack_order
WriteToDataFile(new_line, existing_lines, revision, data_file)
def OutputEventData(revision, event_dict, dest_dir):
"""Outputs event data to a local text file to be graphed.
Args:
revision: The string revision number associated with the event data.
event_dict: A dict which maps a description to an array of tuples
representing event data to be graphed.
dest_dir: The name of the destination directory to which to write.
"""
data_file_name = '_EVENT_-summary.dat'
existing_lines = []
data_file = os.path.join(dest_dir, data_file_name)
if os.path.exists(data_file):
with open(data_file, 'r') as f:
existing_lines = f.readlines()
existing_lines = map(lambda x: x.strip(), existing_lines)
new_events = {}
for description in event_dict:
event_list = event_dict[description]
value_list = []
for event_time, event_data in event_list:
value_list.append([str(event_time), event_data])
new_events[description] = value_list
new_line = {
'rev': revision,
'events': new_events
}
WriteToDataFile(new_line, existing_lines, revision, data_file)
def UpdatePerfDataFromFetchedContent(
revision, content, webapp_name, test_name, graph_dir, only_dmp=False):
"""Update perf data from fetched stdio data.
Args:
revision: The string revision number associated with the new perf entry.
content: Fetched stdio data.
webapp_name: A name of the webapp.
test_name: A name of the test.
graph_dir: A path to the graph directory.
only_dmp: True if only Deep Memory Profiler results should be used.
"""
perf_data_raw = []
def AppendRawPerfData(graph_name, description, value, units, units_x,
webapp_name, test_name, is_stacked=False):
perf_data_raw.append({
'graph_name': graph_name,
'description': description,
'value': value,
'units': units,
'units_x': units_x,
'webapp_name': webapp_name,
'test_name': test_name,
'stack': is_stacked,
})
# First scan for short-running perf test results.
for match in re.findall(
r'RESULT ([^:]+): ([^=]+)= ([-\d\.]+) (\S+)', content):
if (not only_dmp) or match[0].endswith('-DMP'):
try:
match2 = eval(match[2])
except SyntaxError:
match2 = None
if match2:
AppendRawPerfData(match[0], match[1], match2, match[3], None,
webapp_name, webapp_name)
# Next scan for long-running perf test results.
for match in re.findall(
r'RESULT ([^:]+): ([^=]+)= (\[[^\]]+\]) (\S+) (\S+)', content):
if (not only_dmp) or match[0].endswith('-DMP'):
try:
match2 = eval(match[2])
except SyntaxError:
match2 = None
# TODO(dmikurube): Change the condition to use stacked graph when we
# determine how to specify it.
if match2:
AppendRawPerfData(match[0], match[1], match2, match[3], match[4],
webapp_name, test_name, match[0].endswith('-DMP'))
# Next scan for events in the test results.
for match in re.findall(
r'RESULT _EVENT_: ([^=]+)= (\[[^\]]+\])', content):
try:
match1 = eval(match[1])
except SyntaxError:
match1 = None
if match1:
AppendRawPerfData('_EVENT_', match[0], match1, None, None,
webapp_name, test_name)
# For each graph_name/description pair that refers to a long-running test
# result or an event, concatenate all the results together (assume results
# in the input file are in the correct order). For short-running test
# results, keep just one if more than one is specified.
perf_data = {} # Maps a graph-line key to a perf data dictionary.
for data in perf_data_raw:
key_graph = data['graph_name']
key_description = data['description']
if not key_graph in perf_data:
perf_data[key_graph] = {
'graph_name': data['graph_name'],
'value': {},
'units': data['units'],
'units_x': data['units_x'],
'webapp_name': data['webapp_name'],
'test_name': data['test_name'],
}
perf_data[key_graph]['stack'] = data['stack']
if 'stack_order' not in perf_data[key_graph]:
perf_data[key_graph]['stack_order'] = []
if (data['stack'] and
data['description'] not in perf_data[key_graph]['stack_order']):
perf_data[key_graph]['stack_order'].append(data['description'])
if data['graph_name'] != '_EVENT_' and not data['units_x']:
# Short-running test result.
perf_data[key_graph]['value'][key_description] = data['value']
else:
# Long-running test result or event.
if key_description in perf_data[key_graph]['value']:
perf_data[key_graph]['value'][key_description] += data['value']
else:
perf_data[key_graph]['value'][key_description] = data['value']
# Finally, for each graph-line in |perf_data|, update the associated local
# graph data files if necessary.
for perf_data_key in perf_data:
perf_data_dict = perf_data[perf_data_key]
dest_dir = os.path.join(graph_dir, perf_data_dict['webapp_name'])
if not os.path.exists(dest_dir):
os.mkdir(dest_dir) # Webapp name directory.
os.chmod(dest_dir, 0755)
dest_dir = os.path.join(dest_dir, perf_data_dict['test_name'])
SetupBaseGraphDirIfNeeded(perf_data_dict['webapp_name'],
perf_data_dict['test_name'], dest_dir)
if perf_data_dict['graph_name'] == '_EVENT_':
OutputEventData(revision, perf_data_dict['value'], dest_dir)
else:
OutputPerfData(revision, perf_data_dict['graph_name'],
perf_data_dict['value'],
perf_data_dict['units'], perf_data_dict['units_x'],
dest_dir,
perf_data_dict['stack'], perf_data_dict['stack_order'])
def SlaveLocation(master_location, slave_info):
"""Returns slave location for |master_location| and |slave_info|."""
if master_location.startswith('http://'):
return master_location + urllib.quote(slave_info['slave_name'])
else:
return os.path.join(master_location,
slave_info['slave_name'].translate(MANGLE_TRANSLATION))
def GetRevisionAndLogs(slave_location, build_num):
"""Get a revision number and log locations.
Args:
slave_location: A URL or a path to the build slave data.
build_num: A build number.
Returns:
A pair of the revision number and a list of strings that contain locations
of logs. (False, []) in case of error.
"""
if slave_location.startswith('http://'):
location = slave_location + '/builds/' + str(build_num)
else:
location = os.path.join(slave_location, str(build_num))
revision = False
logs = []
fp = None
try:
if location.startswith('http://'):
fp = urllib2.urlopen(location)
contents = fp.read()
revisions = re.findall(r'<td class="left">got_revision</td>\s+'
'<td>(\d+)</td>\s+<td>Source</td>', contents)
if revisions:
revision = revisions[0]
logs = [location + link + '/text' for link
in re.findall(r'(/steps/endure[^/]+/logs/stdio)', contents)]
else:
fp = open(location, 'rb')
build = cPickle.load(fp)
properties = build.getProperties()
if properties.has_key('got_revision'):
revision = build.getProperty('got_revision')
candidates = os.listdir(slave_location)
logs = [os.path.join(slave_location, filename)
for filename in candidates
if re.match(r'%d-log-endure[^/]+-stdio' % build_num, filename)]
except urllib2.URLError, e:
logging.exception('Error reading build URL "%s": %s', location, str(e))
return False, []
except (IOError, OSError), e:
logging.exception('Error reading build file "%s": %s', location, str(e))
return False, []
finally:
if fp:
fp.close()
return revision, logs
def ExtractTestNames(log_location, is_dbg):
"""Extract test names from |log_location|.
Returns:
A dict of a log location, webapp's name and test's name. False if error.
"""
if log_location.startswith('http://'):
location = urllib.unquote(log_location)
test_pattern = r'endure_([^_]+)(_test |-)([^/]+)/'
wpr_test_pattern = r'endure_([^_]+)_wpr(_test |-)([^/]+)/'
else:
location = log_location
test_pattern = r'endure_([^_]+)(_test_|-)([^/]+)-stdio'
    wpr_test_pattern = r'endure_([^_]+)_wpr(_test_|-)([^/]+)-stdio'
found_wpr_result = False
match = re.findall(test_pattern, location)
if not match:
match = re.findall(wpr_test_pattern, location)
if match:
found_wpr_result = True
else:
logging.error('Test name not in expected format: ' + location)
return False
match = match[0]
webapp_name = match[0] + '_wpr' if found_wpr_result else match[0]
webapp_name = webapp_name + '_dbg' if is_dbg else webapp_name
test_name = match[2]
return {
'location': log_location,
'webapp_name': webapp_name,
'test_name': test_name,
}
def GetStdioContents(stdio_location):
"""Gets appropriate stdio contents.
Returns:
A content string of the stdio log. None in case of error.
"""
fp = None
contents = ''
try:
if stdio_location.startswith('http://'):
fp = urllib2.urlopen(stdio_location, timeout=60)
# Since in-progress test output is sent chunked, there's no EOF. We need
# to specially handle this case so we don't hang here waiting for the
# test to complete.
start_time = time.time()
while True:
data = fp.read(1024)
if not data:
break
contents += data
if time.time() - start_time >= 30: # Read for at most 30 seconds.
break
else:
fp = open(stdio_location)
data = fp.read()
contents = ''
index = 0
# Buildbot log files are stored in the netstring format.
# http://en.wikipedia.org/wiki/Netstring
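      # As parsed below, each record is '<len>:<channel><payload>,' where
      # <len> counts the channel digit plus the payload; e.g. '3:0ab,'
      # carries payload 'ab' on channel 0 (stdout).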
while index < len(data):
index2 = index
while data[index2].isdigit():
index2 += 1
if data[index2] != ':':
logging.error('Log file is not in expected format: %s' %
stdio_location)
contents = None
break
length = int(data[index:index2])
index = index2 + 1
channel = int(data[index])
index += 1
if data[index+length-1] != ',':
logging.error('Log file is not in expected format: %s' %
stdio_location)
contents = None
break
if channel == 0:
contents += data[index:(index+length-1)]
index += length
except (urllib2.URLError, socket.error, IOError, OSError), e:
# Issue warning but continue to the next stdio link.
logging.warning('Error reading test stdio data "%s": %s',
stdio_location, str(e))
finally:
if fp:
fp.close()
return contents
def UpdatePerfDataForSlaveAndBuild(
slave_info, build_num, graph_dir, master_location):
"""Process updated perf data for a particular slave and build number.
Args:
slave_info: A dictionary containing information about the slave to process.
build_num: The particular build number on the slave to process.
graph_dir: A path to the graph directory.
master_location: A URL or a path to the build master data.
Returns:
True if the perf data for the given slave/build is updated properly, or
False if any critical error occurred.
"""
if not master_location.startswith('http://'):
# Source is a file.
from buildbot.status import builder
slave_location = SlaveLocation(master_location, slave_info)
logging.debug(' %s, build %d.', slave_info['slave_name'], build_num)
is_dbg = '(dbg)' in slave_info['slave_name']
revision, logs = GetRevisionAndLogs(slave_location, build_num)
if not revision:
return False
stdios = []
for log_location in logs:
stdio = ExtractTestNames(log_location, is_dbg)
if not stdio:
return False
stdios.append(stdio)
for stdio in stdios:
stdio_location = stdio['location']
contents = GetStdioContents(stdio_location)
if contents:
UpdatePerfDataFromFetchedContent(revision, contents,
stdio['webapp_name'],
stdio['test_name'],
graph_dir, is_dbg)
return True
def GetMostRecentBuildNum(master_location, slave_name):
"""Gets the most recent buld number for |slave_name| in |master_location|."""
most_recent_build_num = None
if master_location.startswith('http://'):
slave_url = master_location + urllib.quote(slave_name)
url_contents = ''
fp = None
try:
fp = urllib2.urlopen(slave_url, timeout=60)
url_contents = fp.read()
except urllib2.URLError, e:
logging.exception('Error reading builder URL: %s', str(e))
return None
finally:
if fp:
fp.close()
matches = re.findall(r'/(\d+)/stop', url_contents)
if matches:
most_recent_build_num = int(matches[0])
else:
matches = re.findall(r'#(\d+)</a></td>', url_contents)
if matches:
most_recent_build_num = sorted(map(int, matches), reverse=True)[0]
else:
slave_path = os.path.join(master_location,
slave_name.translate(MANGLE_TRANSLATION))
files = os.listdir(slave_path)
number_files = [int(filename) for filename in files if filename.isdigit()]
if number_files:
most_recent_build_num = sorted(number_files, reverse=True)[0]
if most_recent_build_num:
logging.debug('%s most recent build number: %s',
slave_name, most_recent_build_num)
else:
logging.error('Could not identify latest build number for slave %s.',
slave_name)
return most_recent_build_num
def UpdatePerfDataFiles(graph_dir, master_location):
"""Updates the Chrome Endure graph data files with the latest test results.
For each known Chrome Endure slave, we scan its latest test results looking
for any new test data. Any new data that is found is then appended to the
data files used to display the Chrome Endure graphs.
Args:
graph_dir: A path to the graph directory.
master_location: A URL or a path to the build master data.
Returns:
True if all graph data files are updated properly, or
False if any error occurred.
"""
slave_list = []
for slave_name in CHROME_ENDURE_SLAVE_NAMES:
slave_info = {}
slave_info['slave_name'] = slave_name
slave_info['most_recent_build_num'] = None
slave_info['last_processed_build_num'] = None
slave_list.append(slave_info)
# Identify the most recent build number for each slave.
logging.debug('Searching for latest build numbers for each slave...')
for slave in slave_list:
slave_name = slave['slave_name']
slave['most_recent_build_num'] = GetMostRecentBuildNum(
master_location, slave_name)
# Identify the last-processed build number for each slave.
logging.debug('Identifying last processed build numbers...')
if not os.path.exists(LAST_BUILD_NUM_PROCESSED_FILE):
for slave_info in slave_list:
slave_info['last_processed_build_num'] = 0
else:
with open(LAST_BUILD_NUM_PROCESSED_FILE, 'r') as fp:
file_contents = fp.read()
for match in re.findall(r'([^:]+):(\d+)', file_contents):
slave_name = match[0].strip()
last_processed_build_num = match[1].strip()
for slave_info in slave_list:
if slave_info['slave_name'] == slave_name:
slave_info['last_processed_build_num'] = int(
last_processed_build_num)
for slave_info in slave_list:
if not slave_info['last_processed_build_num']:
slave_info['last_processed_build_num'] = 0
logging.debug('Done identifying last processed build numbers.')
# For each Chrome Endure slave, process each build in-between the last
# processed build num and the most recent build num, inclusive. To process
# each one, first get the revision number for that build, then scan the test
# result stdio for any performance data, and add any new performance data to
# local files to be graphed.
for slave_info in slave_list:
logging.debug('Processing %s, builds %d-%d...',
slave_info['slave_name'],
slave_info['last_processed_build_num'],
slave_info['most_recent_build_num'])
curr_build_num = slave_info['last_processed_build_num']
while curr_build_num <= slave_info['most_recent_build_num']:
if not UpdatePerfDataForSlaveAndBuild(slave_info, curr_build_num,
graph_dir, master_location):
# Do not give up. The first files might be removed by buildbot.
logging.warning('Logs do not exist in buildbot for #%d of %s.' %
(curr_build_num, slave_info['slave_name']))
curr_build_num += 1
# Log the newly-processed build numbers.
logging.debug('Logging the newly-processed build numbers...')
with open(LAST_BUILD_NUM_PROCESSED_FILE, 'w') as f:
for slave_info in slave_list:
f.write('%s:%s\n' % (slave_info['slave_name'],
slave_info['most_recent_build_num']))
return True
def GenerateIndexPage(graph_dir):
"""Generates a summary (landing) page for the Chrome Endure graphs.
Args:
graph_dir: A path to the graph directory.
"""
logging.debug('Generating new index.html page...')
# Page header.
page = """
<html>
<head>
<title>Chrome Endure Overview</title>
<script language="javascript">
function DisplayGraph(name, graph) {
document.write(
'<td><iframe scrolling="no" height="438" width="700" src="');
document.write(name);
document.write('"></iframe></td>');
}
</script>
</head>
<body>
<center>
<h1>
Chrome Endure
</h1>
"""
# Print current time.
page += '<p>Updated: %s</p>\n' % (
time.strftime('%A, %B %d, %Y at %I:%M:%S %p %Z'))
# Links for each webapp.
webapp_names = [x for x in os.listdir(graph_dir) if
x not in ['js', 'old_data', '.svn', '.git'] and
os.path.isdir(os.path.join(graph_dir, x))]
webapp_names = sorted(webapp_names)
page += '<p> ['
for i, name in enumerate(webapp_names):
page += '<a href="#%s">%s</a>' % (name.upper(), name.upper())
if i < len(webapp_names) - 1:
page += ' | '
page += '] </p>\n'
# Print out the data for each webapp.
for webapp_name in webapp_names:
page += '\n<h1 id="%s">%s</h1>\n' % (webapp_name.upper(),
webapp_name.upper())
# Links for each test for this webapp.
test_names = [x for x in
os.listdir(os.path.join(graph_dir, webapp_name))]
test_names = sorted(test_names)
page += '<p> ['
for i, name in enumerate(test_names):
page += '<a href="#%s">%s</a>' % (name, name)
if i < len(test_names) - 1:
page += ' | '
page += '] </p>\n'
# Print out the data for each test for this webapp.
for test_name in test_names:
# Get the set of graph names for this test.
graph_names = [x[:x.find('-summary.dat')] for x in
os.listdir(os.path.join(graph_dir,
webapp_name, test_name))
if '-summary.dat' in x and '_EVENT_' not in x]
graph_names = sorted(graph_names)
page += '<h2 id="%s">%s</h2>\n' % (test_name, test_name)
page += '<table>\n'
for i, graph_name in enumerate(graph_names):
if i % 2 == 0:
page += ' <tr>\n'
page += (' <script>DisplayGraph("%s/%s?graph=%s&lookout=1");'
'</script>\n' % (webapp_name, test_name, graph_name))
if i % 2 == 1:
page += ' </tr>\n'
if len(graph_names) % 2 == 1:
page += ' </tr>\n'
page += '</table>\n'
# Page footer.
page += """
</center>
</body>
</html>
"""
index_file = os.path.join(graph_dir, 'index.html')
with open(index_file, 'w') as f:
f.write(page)
os.chmod(index_file, 0755)
def main():
parser = optparse.OptionParser()
parser.add_option(
'-v', '--verbose', action='store_true', default=False,
help='Use verbose logging.')
parser.add_option(
'-s', '--stdin', action='store_true', default=False,
help='Input from stdin instead of slaves for testing this script.')
parser.add_option(
'-b', '--buildbot', dest='buildbot', metavar="BUILDBOT",
default=BUILDER_URL_BASE,
help='Use log files in a buildbot at BUILDBOT. BUILDBOT can be a '
'buildbot\'s builder URL or a local path to a buildbot directory. '
'Both an absolute path and a relative path are available, e.g. '
'"/home/chrome-bot/buildbot" or "../buildbot". '
'[default: %default]')
parser.add_option(
'-g', '--graph', dest='graph_dir', metavar="DIR", default=LOCAL_GRAPH_DIR,
help='Output graph data files to DIR. [default: %default]')
options, _ = parser.parse_args(sys.argv)
logging_level = logging.DEBUG if options.verbose else logging.INFO
logging.basicConfig(level=logging_level,
format='[%(asctime)s] %(levelname)s: %(message)s')
if options.stdin:
content = sys.stdin.read()
UpdatePerfDataFromFetchedContent(
'12345', content, 'webapp', 'test', options.graph_dir)
else:
if options.buildbot.startswith('http://'):
master_location = options.buildbot
else:
build_dir = os.path.join(options.buildbot, 'build')
third_party_dir = os.path.join(build_dir, 'third_party')
sys.path.append(third_party_dir)
sys.path.append(os.path.join(third_party_dir, 'buildbot_8_4p1'))
sys.path.append(os.path.join(third_party_dir, 'twisted_10_2'))
master_location = os.path.join(build_dir, 'masters',
'master.chromium.endure')
success = UpdatePerfDataFiles(options.graph_dir, master_location)
if not success:
logging.error('Failed to update perf data files.')
sys.exit(0)
GenerateIndexPage(options.graph_dir)
logging.debug('All done!')
if __name__ == '__main__':
main()
| bsd-3-clause |
ghchinoy/tensorflow | tensorflow/python/keras/backend_config_test.py | 16 | 2087 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for backend_config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class BackendConfigTest(test.TestCase):
def test_backend(self):
self.assertEqual(keras.backend.backend(), 'tensorflow')
  def test_epsilon(self):
epsilon = 1e-2
keras.backend_config.set_epsilon(epsilon)
self.assertEqual(keras.backend_config.epsilon(), epsilon)
keras.backend_config.set_epsilon(1e-7)
self.assertEqual(keras.backend_config.epsilon(), 1e-7)
def test_floatx(self):
floatx = 'float64'
keras.backend_config.set_floatx(floatx)
self.assertEqual(keras.backend_config.floatx(), floatx)
keras.backend_config.set_floatx('float32')
self.assertEqual(keras.backend_config.floatx(), 'float32')
def test_image_data_format(self):
image_data_format = 'channels_first'
keras.backend_config.set_image_data_format(image_data_format)
self.assertEqual(keras.backend_config.image_data_format(),
image_data_format)
keras.backend_config.set_image_data_format('channels_last')
self.assertEqual(keras.backend_config.image_data_format(), 'channels_last')
if __name__ == '__main__':
test.main()
| apache-2.0 |
ayesandarmoe/microblog_flask_tutorial | flask/lib/python2.7/site-packages/pip/_vendor/__init__.py | 252 | 2508 | """
pip._vendor is for vendoring dependencies of pip to prevent needing pip to
depend on something external.
Files inside of pip._vendor should be considered immutable and should only be
updated to versions from upstream.
"""
from __future__ import absolute_import
import glob
import os.path
import sys
# Downstream redistributors which have debundled our dependencies should also
# patch this value to be true. This will trigger the additional patching
# to cause things like "six" to be available as pip.
DEBUNDLED = False
# By default, look in this directory for a bunch of .whl files which we will
# add to the beginning of sys.path before attempting to import anything. This
# is done to support downstream re-distributors like Debian and Fedora who
# wish to create their own Wheels for our dependencies to aid in debundling.
WHEEL_DIR = os.path.abspath(os.path.dirname(__file__))
# Define a small helper function to alias our vendored modules to the real ones
# if the vendored ones do not exist. The idea for this was taken from
# https://github.com/kennethreitz/requests/pull/2567.
def vendored(modulename):
vendored_name = "{0}.{1}".format(__name__, modulename)
try:
__import__(vendored_name, globals(), locals(), level=0)
except ImportError:
__import__(modulename, globals(), locals(), level=0)
sys.modules[vendored_name] = sys.modules[modulename]
base, head = vendored_name.rsplit(".", 1)
setattr(sys.modules[base], head, sys.modules[modulename])
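# For example, after vendored("six") on a debundled system, importing
# "pip._vendor.six" succeeds and yields the very same module object as a
# plain "import six".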
# If we're operating in a debundled setup, then we want to go ahead and trigger
# the aliasing of our vendored libraries as well as looking for wheels to add
# to our sys.path. This will cause all of this code to be a no-op typically
# however downstream redistributors can enable it in a consistent way across
# all platforms.
if DEBUNDLED:
# Actually look inside of WHEEL_DIR to find .whl files and add them to the
# front of our sys.path.
sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path
# Actually alias all of our vendored dependencies.
vendored("cachecontrol")
vendored("colorama")
vendored("distlib")
vendored("html5lib")
vendored("lockfile")
vendored("six")
vendored("six.moves")
vendored("six.moves.urllib")
vendored("packaging")
vendored("packaging.version")
vendored("packaging.specifiers")
vendored("pkg_resources")
vendored("progress")
vendored("retrying")
vendored("requests")
| gpl-2.0 |
Dino0631/RedRain-Bot | cogs/lib/youtube_dl/extractor/cctv.py | 64 | 6903 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
float_or_none,
try_get,
unified_timestamp,
)
class CCTVIE(InfoExtractor):
IE_DESC = '央视网'
_VALID_URL = r'https?://(?:(?:[^/]+)\.(?:cntv|cctv)\.(?:com|cn)|(?:www\.)?ncpa-classic\.com)/(?:[^/]+/)*?(?P<id>[^/?#&]+?)(?:/index)?(?:\.s?html|[?#&]|$)'
_TESTS = [{
# fo.addVariable("videoCenterId","id")
'url': 'http://sports.cntv.cn/2016/02/12/ARTIaBRxv4rTT1yWf1frW2wi160212.shtml',
'md5': 'd61ec00a493e09da810bf406a078f691',
'info_dict': {
'id': '5ecdbeab623f4973b40ff25f18b174e8',
'ext': 'mp4',
'title': '[NBA]二少联手砍下46分 雷霆主场击败鹈鹕(快讯)',
'description': 'md5:7e14a5328dc5eb3d1cd6afbbe0574e95',
'duration': 98,
'uploader': 'songjunjie',
'timestamp': 1455279956,
'upload_date': '20160212',
},
}, {
# var guid = "id"
'url': 'http://tv.cctv.com/2016/02/05/VIDEUS7apq3lKrHG9Dncm03B160205.shtml',
'info_dict': {
'id': 'efc5d49e5b3b4ab2b34f3a502b73d3ae',
'ext': 'mp4',
'title': '[赛车]“车王”舒马赫恢复情况成谜(快讯)',
'description': '2月4日,蒙特泽莫罗透露了关于“车王”舒马赫恢复情况,但情况是否属实遭到了质疑。',
'duration': 37,
'uploader': 'shujun',
'timestamp': 1454677291,
'upload_date': '20160205',
},
'params': {
'skip_download': True,
},
}, {
# changePlayer('id')
'url': 'http://english.cntv.cn/special/four_comprehensives/index.shtml',
'info_dict': {
'id': '4bb9bb4db7a6471ba85fdeda5af0381e',
'ext': 'mp4',
'title': 'NHnews008 ANNUAL POLITICAL SEASON',
'description': 'Four Comprehensives',
'duration': 60,
'uploader': 'zhangyunlei',
'timestamp': 1425385521,
'upload_date': '20150303',
},
'params': {
'skip_download': True,
},
}, {
# loadvideo('id')
'url': 'http://cctv.cntv.cn/lm/tvseries_russian/yilugesanghua/index.shtml',
'info_dict': {
'id': 'b15f009ff45c43968b9af583fc2e04b2',
'ext': 'mp4',
'title': 'Путь,усыпанный космеями Серия 1',
'description': 'Путь, усыпанный космеями',
'duration': 2645,
'uploader': 'renxue',
'timestamp': 1477479241,
'upload_date': '20161026',
},
'params': {
'skip_download': True,
},
}, {
# var initMyAray = 'id'
'url': 'http://www.ncpa-classic.com/2013/05/22/VIDE1369219508996867.shtml',
'info_dict': {
'id': 'a194cfa7f18c426b823d876668325946',
'ext': 'mp4',
'title': '小泽征尔音乐塾 音乐梦想无国界',
'duration': 2173,
'timestamp': 1369248264,
'upload_date': '20130522',
},
'params': {
'skip_download': True,
},
}, {
# var ids = ["id"]
'url': 'http://www.ncpa-classic.com/clt/more/416/index.shtml',
'info_dict': {
'id': 'a8606119a4884588a79d81c02abecc16',
'ext': 'mp3',
'title': '来自维也纳的新年贺礼',
'description': 'md5:f13764ae8dd484e84dd4b39d5bcba2a7',
'duration': 1578,
'uploader': 'djy',
'timestamp': 1482942419,
'upload_date': '20161228',
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Failed to download m3u8 information'],
}, {
'url': 'http://ent.cntv.cn/2016/01/18/ARTIjprSSJH8DryTVr5Bx8Wb160118.shtml',
'only_matching': True,
}, {
'url': 'http://tv.cntv.cn/video/C39296/e0210d949f113ddfb38d31f00a4e5c44',
'only_matching': True,
}, {
'url': 'http://english.cntv.cn/2016/09/03/VIDEhnkB5y9AgHyIEVphCEz1160903.shtml',
'only_matching': True,
}, {
'url': 'http://tv.cctv.com/2016/09/07/VIDE5C1FnlX5bUywlrjhxXOV160907.shtml',
'only_matching': True,
}, {
'url': 'http://tv.cntv.cn/video/C39296/95cfac44cabd3ddc4a9438780a4e5c44',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_id = self._search_regex(
[r'var\s+guid\s*=\s*["\']([\da-fA-F]+)',
r'videoCenterId["\']\s*,\s*["\']([\da-fA-F]+)',
r'changePlayer\s*\(\s*["\']([\da-fA-F]+)',
r'load[Vv]ideo\s*\(\s*["\']([\da-fA-F]+)',
r'var\s+initMyAray\s*=\s*["\']([\da-fA-F]+)',
r'var\s+ids\s*=\s*\[["\']([\da-fA-F]+)'],
webpage, 'video id')
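        # Each alternative above corresponds to one of the page-embedding
        # styles exercised in _TESTS (fo.addVariable, var guid,
        # changePlayer, loadvideo, var initMyAray, var ids).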
data = self._download_json(
'http://vdn.apps.cntv.cn/api/getHttpVideoInfo.do', video_id,
query={
'pid': video_id,
'url': url,
'idl': 32,
'idlr': 32,
'modifyed': 'false',
})
title = data['title']
formats = []
video = data.get('video')
if isinstance(video, dict):
for quality, chapters_key in enumerate(('lowChapters', 'chapters')):
video_url = try_get(
video, lambda x: x[chapters_key][0]['url'], compat_str)
if video_url:
formats.append({
'url': video_url,
'format_id': 'http',
'quality': quality,
'preference': -1,
})
hls_url = try_get(data, lambda x: x['hls_url'], compat_str)
if hls_url:
hls_url = re.sub(r'maxbr=\d+&?', '', hls_url)
formats.extend(self._extract_m3u8_formats(
hls_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
self._sort_formats(formats)
uploader = data.get('editer_name')
description = self._html_search_meta(
'description', webpage, default=None)
timestamp = unified_timestamp(data.get('f_pgmtime'))
duration = float_or_none(try_get(video, lambda x: x['totalLength']))
return {
'id': video_id,
'title': title,
'description': description,
'uploader': uploader,
'timestamp': timestamp,
'duration': duration,
'formats': formats,
}
| gpl-3.0 |
killbug2004/amoco | amoco/arch/x86/env.py | 5 | 2585 | # -*- coding: utf-8 -*-
# This code is part of Amoco
# Copyright (C) 2006-2011 Axel Tillequin (bdcht3@gmail.com)
# published under GPLv2 license
# import expressions:
from amoco.cas.expressions import *
# 32bits registers :
#-------------------
eax = reg('eax',32) # accumulator for operands and results data
ebx = reg('ebx',32) # pointer to data in the DS segment
ecx = reg('ecx',32) # counter for string and loop operations
edx = reg('edx',32) # I/O pointer
ebp = reg('ebp',32) # pointer to data in the stack (SS segment)
esp = reg('esp',32) # stack pointer (SS segment)
esi = reg('esi',32) # ptr to data in segment pointed by DS; src ptr for strings
edi = reg('edi',32) # ptr to data in segment pointed by ES; dst ptr for strings
eip = reg('eip',32) # instruction pointer in 32 bit mode
eflags = reg('eflags',32)
ax = slc(eax,0,16,'ax')
bx = slc(ebx,0,16,'bx')
cx = slc(ecx,0,16,'cx')
dx = slc(edx,0,16,'dx')
bp = slc(ebp,0,16,'bp')
sp = slc(esp,0,16,'sp')
si = slc(esi,0,16,'si')
di = slc(edi,0,16,'di')
al = slc(eax,0,8,'al')
bl = slc(ebx,0,8,'bl')
cl = slc(ecx,0,8,'cl')
dl = slc(edx,0,8,'dl')
ah = slc(eax,8,8,'ah')
bh = slc(ebx,8,8,'bh')
ch = slc(ecx,8,8,'ch')
dh = slc(edx,8,8,'dh')
cf = slc(eflags,0,1,'cf') # carry/borrow flag
pf = slc(eflags,2,1,'pf') # parity flag
af = slc(eflags,4,1,'af') # aux carry flag
zf = slc(eflags,6,1,'zf') # zero flag
sf = slc(eflags,7,1,'sf') # sign flag
tf = slc(eflags,8,1,'tf') # trap flag
df = slc(eflags,10,1,'df') # direction flag
of = slc(eflags,11,1,'of') # overflow flag
# segment registers & other mappings:
cs = reg('cs',16) # segment selector for the code segment
ds = reg('ds',16) # segment selector to a data segment
ss = reg('ss',16) # segment selector to the stack segment
es = reg('es',16) # (data)
fs = reg('fs',16) # (data)
gs = reg('gs',16) # (data)
# fpu registers (80 bits holds double extended floats see Intel Vol1--4.4.2):
def st(num):
return reg('st%d'%num,80)
mmregs = [reg('mm%d'%n,64) for n in range(8)]
xmmregs = [reg('xmm%d'%n, 128) for n in range(16)]
# return R/M register (see ModR/M Byte encoding) :
def getreg(i,size=32):
return {8 : (al,cl,dl,bl,ah,ch,dh,bh),
16 : (ax,cx,dx,bx,sp,bp,si,di),
32 : (eax,ecx,edx,ebx,esp,ebp,esi,edi),
64 : mmregs,
128 : xmmregs[:8],
}[size][i]
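# For example (illustrative): getreg(0,8) is al, getreg(4,32) is esp, and
# getreg(1,64) is the MMX register mm1.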
# system registers:
# control regs:
def cr(num):
return reg('cr%d'%num,32)
# debug regs:
def dr(num):
return reg('dr%d'%num,32)
| gpl-2.0 |
andrewcmyers/tensorflow | tensorflow/python/summary/summary.py | 4 | 12422 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor summaries for exporting information about a model.
See the @{$python/summary} guide.
@@FileWriter
@@FileWriterCache
@@tensor_summary
@@_tensor_summary_v2
@@scalar
@@histogram
@@audio
@@image
@@text
@@merge
@@merge_all
@@get_summary_description
@@PluginAsset
@@get_plugin_asset
@@get_all_plugin_assets
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import json_format as _json_format
# exports Summary, SummaryDescription, Event, TaggedRunMetadata, SessionLog
# pylint: disable=unused-import
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.framework.summary_pb2 import SummaryDescription
from tensorflow.core.util.event_pb2 import Event
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.core.util.event_pb2 import TaggedRunMetadata
# pylint: enable=unused-import
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import gen_logging_ops as _gen_logging_ops
from tensorflow.python.ops import summary_op_util as _summary_op_util
# exports tensor-related summaries
# pylint: disable=unused-import
from tensorflow.python.ops.summary_ops import _tensor_summary_v2
from tensorflow.python.ops.summary_ops import tensor_summary
# pylint: enable=unused-import
# exports text
# pylint: disable=unused-import
from tensorflow.python.summary.text_summary import text_summary as text
# pylint: enable=unused-import
# exports FileWriter, FileWriterCache
# pylint: disable=unused-import
from tensorflow.python.summary.writer.writer import FileWriter
from tensorflow.python.summary.writer.writer_cache import FileWriterCache
# pylint: enable=unused-import
from tensorflow.python.util import compat as _compat
from tensorflow.python.util.all_util import remove_undocumented
def scalar(name, tensor, collections=None, family=None):
"""Outputs a `Summary` protocol buffer containing a single scalar value.
The generated Summary has a Tensor.proto containing the input Tensor.
Args:
name: A name for the generated node. Will also serve as the series name in
TensorBoard.
tensor: A real numeric Tensor containing a single value.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. Which contains a `Summary` protobuf.
Raises:
ValueError: If tensor has the wrong shape or type.
"""
with _summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
# pylint: disable=protected-access
val = _gen_logging_ops._scalar_summary(tags=tag, values=tensor, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
def image(name, tensor, max_outputs=3, collections=None, family=None):
"""Outputs a `Summary` protocol buffer with images.
The summary has up to `max_outputs` summary values containing images. The
images are built from `tensor` which must be 4-D with shape `[batch_size,
height, width, channels]` and where `channels` can be:
* 1: `tensor` is interpreted as Grayscale.
* 3: `tensor` is interpreted as RGB.
* 4: `tensor` is interpreted as RGBA.
The images have the same number of channels as the input tensor. For float
input, the values are normalized one image at a time to fit in the range
`[0, 255]`. `uint8` values are unchanged. The op uses two different
normalization algorithms:
* If the input values are all positive, they are rescaled so the largest one
is 255.
* If any input value is negative, the values are shifted so input value 0.0
is at 127. They are then rescaled so that either the smallest value is 0,
or the largest one is 255.
The `tag` in the outputted Summary.Value protobufs is generated based on the
name, with a suffix depending on the max_outputs setting:
* If `max_outputs` is 1, the summary value tag is '*name*/image'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*name*/image/0', '*name*/image/1', etc.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,
width, channels]` where `channels` is 1, 3, or 4.
max_outputs: Max number of batch elements to generate images for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with _summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
# pylint: disable=protected-access
val = _gen_logging_ops._image_summary(
tag=tag, tensor=tensor, max_images=max_outputs, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
def histogram(name, values, collections=None, family=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with a histogram.
Adding a histogram summary makes it possible to visualize your data's
distribution in TensorBoard. You can see a detailed explanation of the
TensorBoard histogram dashboard
[here](https://www.tensorflow.org/get_started/tensorboard_histograms).
The generated
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
has one summary value containing a histogram for `values`.
This op reports an `InvalidArgument` error if any value is not finite.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
values: A real numeric `Tensor`. Any shape. Values to use to
build the histogram.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with _summary_op_util.summary_scope(
name, family, values=[values],
default_name='HistogramSummary') as (tag, scope):
# pylint: disable=protected-access
val = _gen_logging_ops._histogram_summary(
tag=tag, values=values, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
def audio(name, tensor, sample_rate, max_outputs=3, collections=None,
family=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with audio.
The summary has up to `max_outputs` summary values containing audio. The
audio is built from `tensor` which must be 3-D with shape `[batch_size,
frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
assumed to be in the range of `[-1.0, 1.0]` with a sample rate of
`sample_rate`.
The `tag` in the outputted Summary.Value protobufs is generated based on the
name, with a suffix depending on the max_outputs setting:
* If `max_outputs` is 1, the summary value tag is '*name*/audio'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*name*/audio/0', '*name*/audio/1', etc
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.
sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
signal in hertz.
max_outputs: Max number of batch elements to generate audio for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with _summary_op_util.summary_scope(
name, family=family, values=[tensor]) as (tag, scope):
# pylint: disable=protected-access
sample_rate = _ops.convert_to_tensor(
sample_rate, dtype=_dtypes.float32, name='sample_rate')
val = _gen_logging_ops._audio_summary_v2(
tag=tag, tensor=tensor, max_outputs=max_outputs,
sample_rate=sample_rate, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
def merge(inputs, collections=None, name=None):
# pylint: disable=line-too-long
"""Merges summaries.
This op creates a
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
protocol buffer that contains the union of all the values in the input
summaries.
When the Op is run, it reports an `InvalidArgument` error if multiple values
in the summaries to merge use the same tag.
Args:
inputs: A list of `string` `Tensor` objects containing serialized `Summary`
protocol buffers.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer resulting from the merging.
"""
# pylint: enable=line-too-long
name = _summary_op_util.clean_tag(name)
with _ops.name_scope(name, 'Merge', inputs):
# pylint: disable=protected-access
val = _gen_logging_ops._merge_summary(inputs=inputs, name=name)
_summary_op_util.collect(val, collections, [])
return val
def merge_all(key=_ops.GraphKeys.SUMMARIES):
"""Merges all summaries collected in the default graph.
Args:
key: `GraphKey` used to collect the summaries. Defaults to
`GraphKeys.SUMMARIES`.
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
`Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
"""
summary_ops = _ops.get_collection(key)
if not summary_ops:
return None
else:
return merge(summary_ops)
def get_summary_description(node_def):
"""Given a TensorSummary node_def, retrieve its SummaryDescription.
When a Summary op is instantiated, a SummaryDescription of associated
metadata is stored in its NodeDef. This method retrieves the description.
Args:
node_def: the node_def_pb2.NodeDef of a TensorSummary op
Returns:
a summary_pb2.SummaryDescription
Raises:
ValueError: if the node is not a summary op.
"""
if node_def.op != 'TensorSummary':
raise ValueError("Can't get_summary_description on %s" % node_def.op)
description_str = _compat.as_str_any(node_def.attr['description'].s)
summary_description = SummaryDescription()
_json_format.Parse(description_str, summary_description)
return summary_description
_allowed_symbols = [
'Summary', 'SummaryDescription', 'Event', 'TaggedRunMetadata', 'SessionLog'
]
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
yakky/django-localflavor | tests/test_id.py | 4 | 7045 | from __future__ import unicode_literals
from django.test import SimpleTestCase
from localflavor.id_.forms import (IDPhoneNumberField, IDPostCodeField,
IDNationalIdentityNumberField,
IDLicensePlateField,
IDProvinceSelect,
IDLicensePlatePrefixSelect)
class IDLocalFlavorTests(SimpleTestCase):
def test_IDProvinceSelect(self):
f = IDProvinceSelect()
out = '''<select name="provinces">
<option value="ACE">Aceh</option>
<option value="BLI">Bali</option>
<option value="BTN">Banten</option>
<option value="BKL">Bengkulu</option>
<option value="DIY">Yogyakarta</option>
<option value="JKT">Jakarta</option>
<option value="GOR">Gorontalo</option>
<option value="JMB">Jambi</option>
<option value="JBR">Jawa Barat</option>
<option value="JTG">Jawa Tengah</option>
<option value="JTM">Jawa Timur</option>
<option value="KBR">Kalimantan Barat</option>
<option value="KSL">Kalimantan Selatan</option>
<option value="KTG">Kalimantan Tengah</option>
<option value="KTM">Kalimantan Timur</option>
<option value="BBL">Kepulauan Bangka-Belitung</option>
<option value="KRI">Kepulauan Riau</option>
<option value="LPG" selected="selected">Lampung</option>
<option value="MLK">Maluku</option>
<option value="MUT">Maluku Utara</option>
<option value="NTB">Nusa Tenggara Barat</option>
<option value="NTT">Nusa Tenggara Timur</option>
<option value="PPA">Papua</option>
<option value="PPB">Papua Barat</option>
<option value="RIU">Riau</option>
<option value="SLB">Sulawesi Barat</option>
<option value="SLS">Sulawesi Selatan</option>
<option value="SLT">Sulawesi Tengah</option>
<option value="SLR">Sulawesi Tenggara</option>
<option value="SLU">Sulawesi Utara</option>
<option value="SMB">Sumatera Barat</option>
<option value="SMS">Sumatera Selatan</option>
<option value="SMU">Sumatera Utara</option>
</select>'''
self.assertHTMLEqual(f.render('provinces', 'LPG'), out)
def test_IDLicensePlatePrefixSelect(self):
f = IDLicensePlatePrefixSelect()
out = '''<select name="codes">
<option value="A">Banten</option>
<option value="AA">Magelang</option>
<option value="AB">Yogyakarta</option>
<option value="AD">Surakarta - Solo</option>
<option value="AE">Madiun</option>
<option value="AG">Kediri</option>
<option value="B">Jakarta</option>
<option value="BA">Sumatera Barat</option>
<option value="BB">Tapanuli</option>
<option value="BD">Bengkulu</option>
<option value="BE" selected="selected">Lampung</option>
<option value="BG">Sumatera Selatan</option>
<option value="BH">Jambi</option>
<option value="BK">Sumatera Utara</option>
<option value="BL">Nanggroe Aceh Darussalam</option>
<option value="BM">Riau</option>
<option value="BN">Kepulauan Bangka Belitung</option>
<option value="BP">Kepulauan Riau</option>
<option value="CC">Corps Consulate</option>
<option value="CD">Corps Diplomatic</option>
<option value="D">Bandung</option>
<option value="DA">Kalimantan Selatan</option>
<option value="DB">Sulawesi Utara Daratan</option>
<option value="DC">Sulawesi Barat</option>
<option value="DD">Sulawesi Selatan</option>
<option value="DE">Maluku</option>
<option value="DG">Maluku Utara</option>
<option value="DH">NTT - Timor</option>
<option value="DK">Bali</option>
<option value="DL">Sulawesi Utara Kepulauan</option>
<option value="DM">Gorontalo</option>
<option value="DN">Sulawesi Tengah</option>
<option value="DR">NTB - Lombok</option>
<option value="DS">Papua dan Papua Barat</option>
<option value="DT">Sulawesi Tenggara</option>
<option value="E">Cirebon</option>
<option value="EA">NTB - Sumbawa</option>
<option value="EB">NTT - Flores</option>
<option value="ED">NTT - Sumba</option>
<option value="F">Bogor</option>
<option value="G">Pekalongan</option>
<option value="H">Semarang</option>
<option value="K">Pati</option>
<option value="KB">Kalimantan Barat</option>
<option value="KH">Kalimantan Tengah</option>
<option value="KT">Kalimantan Timur</option>
<option value="L">Surabaya</option>
<option value="M">Madura</option>
<option value="N">Malang</option>
<option value="P">Jember</option>
<option value="R">Banyumas</option>
<option value="RI">Federal Government</option>
<option value="S">Bojonegoro</option>
<option value="T">Purwakarta</option>
<option value="W">Sidoarjo</option>
<option value="Z">Garut</option>
</select>'''
self.assertHTMLEqual(f.render('codes', 'BE'), out)
def test_IDPhoneNumberField(self):
error_invalid = ['Enter a valid phone number']
valid = {
'0812-3456789': '0812-3456789',
'081234567890': '081234567890',
'021 345 6789': '021 345 6789',
'0213456789': '0213456789',
'+62-21-3456789': '+62-21-3456789',
'(021) 345 6789': '(021) 345 6789',
}
invalid = {
'0123456789': error_invalid,
'+62-021-3456789': error_invalid,
'+62-0812-3456789': error_invalid,
'0812345678901': error_invalid,
'foo': error_invalid,
}
self.assertFieldOutput(IDPhoneNumberField, valid, invalid)
def test_IDPostCodeField(self):
error_invalid = ['Enter a valid post code']
valid = {
'12340': '12340',
'25412': '25412',
' 12340 ': '12340',
}
invalid = {
'12 3 4 0': error_invalid,
'12345': error_invalid,
'10100': error_invalid,
'123456': error_invalid,
'foo': error_invalid,
}
self.assertFieldOutput(IDPostCodeField, valid, invalid)
def test_IDNationalIdentityNumberField(self):
error_invalid = ['Enter a valid NIK/KTP number']
valid = {
' 12.3456.010178 3456 ': '12.3456.010178.3456',
'1234560101783456': '12.3456.010178.3456',
'12.3456.010101.3456': '12.3456.010101.3456',
}
invalid = {
'12.3456.310278.3456': error_invalid,
'00.0000.010101.0000': error_invalid,
'1234567890123456': error_invalid,
'foo': error_invalid,
}
self.assertFieldOutput(IDNationalIdentityNumberField, valid, invalid)
def test_IDLicensePlateField(self):
error_invalid = ['Enter a valid vehicle license plate number']
valid = {
' b 1234 ab ': 'B 1234 AB',
'B 1234 ABC': 'B 1234 ABC',
'A 12': 'A 12',
'DK 12345 12': 'DK 12345 12',
'RI 10': 'RI 10',
'CD 12 12': 'CD 12 12',
}
invalid = {
'CD 10 12': error_invalid,
'CD 1234 12': error_invalid,
'RI 10 AB': error_invalid,
'B 12345 01': error_invalid,
'N 1234 12': error_invalid,
'A 12 XYZ': error_invalid,
'Q 1234 AB': error_invalid,
'foo': error_invalid,
}
self.assertFieldOutput(IDLicensePlateField, valid, invalid)
| bsd-3-clause |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/requests/packages/urllib3/packages/ordered_dict.py | 1093 | 8936 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
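    # For example, an empty OrderedDict's sentinel satisfies
    # root == [root, root, None]; after inserting 'a' then 'b' the links
    # form the circular chain root <-> link_a <-> link_b <-> root.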
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
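            # Splice the new link in just before the sentinel (the end of
            # the iteration order) and register it in self.__map, all in
            # one chained assignment.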
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
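# A minimal usage sketch (illustrative; not part of the upstream backport).
# It exercises the behaviour documented above: insertion order is kept and
# popitem() is LIFO by default, FIFO with last=False.
if __name__ == '__main__':
    od = OrderedDict([('a', 1), ('b', 2)])
    od['c'] = 3
    assert od.keys() == ['a', 'b', 'c']        # insertion order preserved
    assert od.popitem() == ('c', 3)            # LIFO by default
    assert od.popitem(last=False) == ('a', 1)  # FIFO when last=False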
| agpl-3.0 |
hoosteeno/mozillians | vendor-local/lib/python/tablib/packages/openpyxl/workbook.py | 116 | 6200 | # file openpyxl/workbook.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Workbook is the top-level container for all document information."""
__docformat__ = "restructuredtext en"
# Python stdlib imports
import datetime
import os
# package imports
from .worksheet import Worksheet
from .writer.dump_worksheet import DumpWorksheet, save_dump
from .writer.strings import StringTableBuilder
from .namedrange import NamedRange
from .style import Style
from .writer.excel import save_workbook
from .shared.exc import ReadOnlyWorkbookException
class DocumentProperties(object):
"""High-level properties of the document."""
def __init__(self):
self.creator = 'Unknown'
self.last_modified_by = self.creator
self.created = datetime.datetime.now()
self.modified = datetime.datetime.now()
self.title = 'Untitled'
self.subject = ''
self.description = ''
self.keywords = ''
self.category = ''
self.company = 'Microsoft Corporation'
class DocumentSecurity(object):
"""Security information about the document."""
def __init__(self):
self.lock_revision = False
self.lock_structure = False
self.lock_windows = False
self.revision_password = ''
self.workbook_password = ''
class Workbook(object):
"""Workbook is the container for all other parts of the document."""
def __init__(self, optimized_write = False):
self.worksheets = []
self._active_sheet_index = 0
self._named_ranges = []
self.properties = DocumentProperties()
self.style = Style()
self.security = DocumentSecurity()
self.__optimized_write = optimized_write
self.__optimized_read = False
self.strings_table_builder = StringTableBuilder()
if not optimized_write:
self.worksheets.append(Worksheet(self))
def _set_optimized_read(self):
self.__optimized_read = True
def get_active_sheet(self):
"""Returns the current active sheet."""
return self.worksheets[self._active_sheet_index]
def create_sheet(self, index = None):
"""Create a worksheet (at an optional index).
:param index: optional position at which the sheet will be inserted
:type index: int
"""
if self.__optimized_read:
raise ReadOnlyWorkbookException('Cannot create new sheet in a read-only workbook')
if self.__optimized_write :
new_ws = DumpWorksheet(parent_workbook = self)
else:
new_ws = Worksheet(parent_workbook = self)
self.add_sheet(worksheet = new_ws, index = index)
return new_ws
def add_sheet(self, worksheet, index = None):
"""Add an existing worksheet (at an optional index)."""
if index is None:
index = len(self.worksheets)
self.worksheets.insert(index, worksheet)
def remove_sheet(self, worksheet):
"""Remove a worksheet from this workbook."""
self.worksheets.remove(worksheet)
def get_sheet_by_name(self, name):
"""Returns a worksheet by its name.
Returns None if no worksheet has the name specified.
:param name: the name of the worksheet to look for
:type name: string
"""
requested_sheet = None
for sheet in self.worksheets:
if sheet.title == name:
requested_sheet = sheet
break
return requested_sheet
def get_index(self, worksheet):
"""Return the index of the worksheet."""
return self.worksheets.index(worksheet)
def get_sheet_names(self):
"""Returns the list of the names of worksheets in the workbook.
Names are returned in the worksheets order.
:rtype: list of strings
"""
return [s.title for s in self.worksheets]
def create_named_range(self, name, worksheet, range):
"""Create a new named_range on a worksheet"""
assert isinstance(worksheet, Worksheet)
named_range = NamedRange(name, [(worksheet, range)])
self.add_named_range(named_range)
def get_named_ranges(self):
"""Return all named ranges"""
return self._named_ranges
def add_named_range(self, named_range):
"""Add an existing named_range to the list of named_ranges."""
self._named_ranges.append(named_range)
def get_named_range(self, name):
"""Return the range specified by name."""
requested_range = None
for named_range in self._named_ranges:
if named_range.name == name:
requested_range = named_range
break
return requested_range
def remove_named_range(self, named_range):
"""Remove a named_range from this workbook."""
self._named_ranges.remove(named_range)
def save(self, filename):
""" shortcut """
if self.__optimized_write:
save_dump(self, filename)
else:
save_workbook(self, filename)
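# A short usage sketch (illustrative only). This module is normally imported
# as part of the openpyxl package -- the relative imports above require that
# context -- so the example is shown as a comment rather than runnable code:
#
#   from openpyxl.workbook import Workbook
#   wb = Workbook()
#   ws = wb.create_sheet()        # appended after the default first sheet
#   ws.title = 'Data'
#   print wb.get_sheet_names()    # e.g. ['Sheet', 'Data']
#   wb.save('example.xlsx')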
| bsd-3-clause |
wnesl/gnuradio-IA | gnuradio-core/src/examples/network/audio_source.py | 18 | 2624 | #!/usr/bin/env python
#
# Copyright 2006,2007,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
from gnuradio import audio
except ImportError:
sys.stderr.write("Failed to import gnuradio.audio. Make sure gr-audio component is installed.\n")
sys.exit(1)
class audio_source(gr.top_block):
def __init__(self, host, port, pkt_size, sample_rate, eof):
gr.top_block.__init__(self, "audio_source")
self.audio = audio.source(sample_rate)
self.sink = gr.udp_sink(gr.sizeof_float, host, port, pkt_size, eof=eof)
self.connect(self.audio, self.sink)
if __name__ == '__main__':
parser = OptionParser(option_class=eng_option)
parser.add_option("", "--host", type="string", default="localhost",
help="Remote host name (domain name or IP address")
parser.add_option("", "--port", type="int", default=65500,
help="port number to connect to")
parser.add_option("", "--packet-size", type="int", default=1472,
help="packet size.")
parser.add_option("-r", "--sample-rate", type="int", default=32000 ,
help="audio signal sample rate [default=%default]")
parser.add_option("", "--no-eof", action="store_true", default=False,
help="don't send EOF on disconnect")
(options, args) = parser.parse_args()
if len(args) != 0:
parser.print_help()
raise SystemExit, 1
# Create an instance of a hierarchical block
top_block = audio_source(options.host, options.port,
options.packet_size, options.sample_rate,
not options.no_eof)
try:
# Run forever
top_block.run()
except KeyboardInterrupt:
# Ctrl-C exits
pass
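# A matching receive-side sketch (illustrative; the exact gr.udp_source
# signature varies between GNU Radio releases, so this is shown as a comment):
#
#   class audio_sink(gr.top_block):
#       def __init__(self, port, pkt_size, sample_rate):
#           gr.top_block.__init__(self, "audio_sink")
#           self.src = gr.udp_source(gr.sizeof_float, "0.0.0.0", port, pkt_size)
#           self.audio = audio.sink(sample_rate)
#           self.connect(self.src, self.audio)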
| gpl-3.0 |
hynnet/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib/python2.7/imghdr.py | 259 | 3544 | """Recognize image file formats based on their first few bytes."""
__all__ = ["what"]
#-------------------------#
# Recognize image headers #
#-------------------------#
def what(file, h=None):
if h is None:
if isinstance(file, basestring):
f = open(file, 'rb')
h = f.read(32)
else:
location = file.tell()
h = file.read(32)
file.seek(location)
f = None
else:
f = None
try:
for tf in tests:
res = tf(h, f)
if res:
return res
finally:
if f: f.close()
return None
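# Usage sketch (illustrative): detect a format from a path, or from header
# bytes already in hand.
#
#   what('photo.png') # -> 'png'
#   what(None, h=open('photo.png', 'rb').read(32)) # -> 'png'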
#---------------------------------#
# Subroutines per image file type #
#---------------------------------#
tests = []
def test_jpeg(h, f):
"""JPEG data in JFIF format"""
if h[6:10] == 'JFIF':
return 'jpeg'
tests.append(test_jpeg)
def test_exif(h, f):
"""JPEG data in Exif format"""
if h[6:10] == 'Exif':
return 'jpeg'
tests.append(test_exif)
def test_png(h, f):
if h[:8] == "\211PNG\r\n\032\n":
return 'png'
tests.append(test_png)
def test_gif(h, f):
"""GIF ('87 and '89 variants)"""
if h[:6] in ('GIF87a', 'GIF89a'):
return 'gif'
tests.append(test_gif)
def test_tiff(h, f):
"""TIFF (can be in Motorola or Intel byte order)"""
if h[:2] in ('MM', 'II'):
return 'tiff'
tests.append(test_tiff)
def test_rgb(h, f):
"""SGI image library"""
if h[:2] == '\001\332':
return 'rgb'
tests.append(test_rgb)
def test_pbm(h, f):
"""PBM (portable bitmap)"""
if len(h) >= 3 and \
h[0] == 'P' and h[1] in '14' and h[2] in ' \t\n\r':
return 'pbm'
tests.append(test_pbm)
def test_pgm(h, f):
"""PGM (portable graymap)"""
if len(h) >= 3 and \
h[0] == 'P' and h[1] in '25' and h[2] in ' \t\n\r':
return 'pgm'
tests.append(test_pgm)
def test_ppm(h, f):
"""PPM (portable pixmap)"""
if len(h) >= 3 and \
h[0] == 'P' and h[1] in '36' and h[2] in ' \t\n\r':
return 'ppm'
tests.append(test_ppm)
def test_rast(h, f):
"""Sun raster file"""
if h[:4] == '\x59\xA6\x6A\x95':
return 'rast'
tests.append(test_rast)
def test_xbm(h, f):
"""X bitmap (X10 or X11)"""
s = '#define '
if h[:len(s)] == s:
return 'xbm'
tests.append(test_xbm)
def test_bmp(h, f):
if h[:2] == 'BM':
return 'bmp'
tests.append(test_bmp)
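def test_webp(h, f):
    """WebP (RIFF container) -- illustrative extension, not part of the
    original module. WebP files start with 'RIFF' and carry 'WEBP' at
    byte offset 8."""
    if h[:4] == 'RIFF' and h[8:12] == 'WEBP':
        return 'webp'
# Uncomment to register the extra detector alongside the built-in ones:
# tests.append(test_webp)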
#--------------------#
# Small test program #
#--------------------#
def test():
import sys
recursive = 0
if sys.argv[1:] and sys.argv[1] == '-r':
del sys.argv[1:2]
recursive = 1
try:
if sys.argv[1:]:
testall(sys.argv[1:], recursive, 1)
else:
testall(['.'], recursive, 1)
except KeyboardInterrupt:
sys.stderr.write('\n[Interrupted]\n')
sys.exit(1)
def testall(list, recursive, toplevel):
import sys
import os
for filename in list:
if os.path.isdir(filename):
print filename + '/:',
if recursive or toplevel:
print 'recursing down:'
import glob
names = glob.glob(os.path.join(filename, '*'))
testall(names, recursive, 0)
else:
print '*** directory (use -r) ***'
else:
print filename + ':',
sys.stdout.flush()
try:
print what(filename)
except IOError:
print '*** not found ***'
| gpl-2.0 |
igueths/ansible-modules-extras | network/nmcli.py | 61 | 40842 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Chris Long <alcamie@gmail.com> <chlong@redhat.com>
#
# This file is a module for Ansible that interacts with Network Manager
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION='''
---
module: nmcli
author: "Chris Long (@alcamie101)"
short_description: Manage Networking
requirements: [ nmcli, dbus ]
version_added: "2.0"
description:
  - Manage network devices. Create, modify, and manage ethernet, team, bond, and vlan connections.
options:
state:
required: True
choices: [ present, absent ]
description:
- Whether the device should exist or not, taking action if the state is different from what is stated.
autoconnect:
required: False
default: "yes"
choices: [ "yes", "no" ]
description:
- Whether the connection should start on boot.
- Whether the connection profile can be automatically activated
conn_name:
required: True
description:
      - 'The name used to call the connection. When not provided, a default name is generated: <type>[-<ifname>][-<num>]'
ifname:
required: False
default: conn_name
description:
      - The interface name for the connection.
- interface to bind the connection to. The connection will only be applicable to this interface name.
- A special value of "*" can be used for interface-independent connections.
- The ifname argument is mandatory for all connection types except bond, team, bridge and vlan.
type:
required: False
choices: [ ethernet, team, team-slave, bond, bond-slave, bridge, vlan ]
description:
- This is the type of device or network connection that you wish to create.
mode:
required: False
choices: [ "balance-rr", "active-backup", "balance-xor", "broadcast", "802.3ad", "balance-tlb", "balance-alb" ]
    default: balance-rr
description:
      - The mode to use when creating a bond, team or bridge connection (e.g. balance-rr or active-backup).
master:
required: False
default: None
description:
      - The master (ifname, connection UUID, or conn_name) of the bridge, team, or bond master connection profile.
ip4:
required: False
default: None
description:
      - 'The IPv4 address for this interface, e.g. "192.168.1.24/24"'
gw4:
required: False
description:
      - 'The IPv4 gateway for this interface, e.g. "192.168.100.1"'
dns4:
required: False
default: None
description:
      - 'A list of up to 3 DNS servers in IPv4 format, e.g. to add two IPv4 DNS server addresses: ["8.8.8.8 8.8.4.4"]'
ip6:
required: False
default: None
description:
      - 'The IPv6 address for this interface, e.g. "abbe::cafe"'
gw6:
required: False
default: None
description:
      - 'The IPv6 gateway for this interface, e.g. "2001:db8::1"'
dns6:
required: False
description:
      - 'A list of up to 3 DNS servers in IPv6 format, e.g. to add two IPv6 DNS server addresses: ["2001:4860:4860::8888 2001:4860:4860::8844"]'
mtu:
required: False
default: 1500
description:
- The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created.
- Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, pppoe, infiniband)
primary:
required: False
default: None
description:
      - This is only used with bond and is the primary interface name (for "active-backup" mode); this is usually the 'ifname'.
miimon:
required: False
default: 100
description:
- This is only used with bond - miimon
downdelay:
required: False
default: None
description:
- This is only used with bond - downdelay
updelay:
required: False
default: None
description:
- This is only used with bond - updelay
arp_interval:
required: False
default: None
description:
- This is only used with bond - ARP interval
arp_ip_target:
required: False
default: None
description:
- This is only used with bond - ARP IP target
stp:
required: False
default: None
description:
- This is only used with bridge and controls whether Spanning Tree Protocol (STP) is enabled for this bridge
priority:
required: False
default: 128
description:
- This is only used with 'bridge' - sets STP priority
forwarddelay:
required: False
default: 15
description:
- This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds
hellotime:
required: False
default: 2
description:
- This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds
maxage:
required: False
default: 20
description:
- This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds
ageingtime:
required: False
default: 300
description:
- This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds
mac:
required: False
default: None
description:
- 'This is only used with bridge - MAC address of the bridge (note: this requires a recent kernel feature, originally introduced in 3.15 upstream kernel)'
slavepriority:
required: False
default: 32
description:
- This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave
path_cost:
required: False
default: 100
description:
- This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave
hairpin:
required: False
default: yes
description:
- This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the frame was received on.
vlanid:
required: False
default: None
description:
- This is only used with VLAN - VLAN ID in range <0-4095>
vlandev:
required: False
default: None
description:
- This is only used with VLAN - parent device this VLAN is on, can use ifname
flags:
required: False
default: None
description:
- This is only used with VLAN - flags
ingress:
required: False
default: None
description:
- This is only used with VLAN - VLAN ingress priority mapping
egress:
required: False
default: None
description:
- This is only used with VLAN - VLAN egress priority mapping
'''
EXAMPLES='''
These are working examples that I have run in the field. They follow this structure:
```
|_/inventory/cloud-hosts
| /group_vars/openstack-stage.yml
| /host_vars/controller-01.openstack.host.com
| /host_vars/controller-02.openstack.host.com
|_/playbook/library/nmcli.py
| /playbook-add.yml
| /playbook-del.yml
```
## inventory examples
### groups_vars
```yml
---
#devops_os_define_network
storage_gw: "192.168.0.254"
external_gw: "10.10.0.254"
tenant_gw: "172.100.0.254"
#Team vars
nmcli_team:
- {conn_name: 'tenant', ip4: "{{tenant_ip}}", gw4: "{{tenant_gw}}"}
- {conn_name: 'external', ip4: "{{external_ip}}", gw4: "{{external_gw}}"}
- {conn_name: 'storage', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}"}
nmcli_team_slave:
- {conn_name: 'em1', ifname: 'em1', master: 'tenant'}
- {conn_name: 'em2', ifname: 'em2', master: 'tenant'}
- {conn_name: 'p2p1', ifname: 'p2p1', master: 'storage'}
- {conn_name: 'p2p2', ifname: 'p2p2', master: 'external'}
#bond vars
nmcli_bond:
- {conn_name: 'tenant', ip4: "{{tenant_ip}}", gw4: '', mode: 'balance-rr'}
- {conn_name: 'external', ip4: "{{external_ip}}", gw4: '', mode: 'balance-rr'}
- {conn_name: 'storage', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}", mode: 'balance-rr'}
nmcli_bond_slave:
- {conn_name: 'em1', ifname: 'em1', master: 'tenant'}
- {conn_name: 'em2', ifname: 'em2', master: 'tenant'}
- {conn_name: 'p2p1', ifname: 'p2p1', master: 'storage'}
- {conn_name: 'p2p2', ifname: 'p2p2', master: 'external'}
#ethernet vars
nmcli_ethernet:
- {conn_name: 'em1', ifname: 'em1', ip4: "{{tenant_ip}}", gw4: "{{tenant_gw}}"}
- {conn_name: 'em2', ifname: 'em2', ip4: "{{tenant_ip1}}", gw4: "{{tenant_gw}}"}
- {conn_name: 'p2p1', ifname: 'p2p1', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}"}
- {conn_name: 'p2p2', ifname: 'p2p2', ip4: "{{external_ip}}", gw4: "{{external_gw}}"}
```
### host_vars
```yml
---
storage_ip: "192.168.160.21/23"
external_ip: "10.10.152.21/21"
tenant_ip: "192.168.200.21/23"
```
## playbook-add.yml example
```yml
---
- hosts: openstack-stage
remote_user: root
tasks:
- name: install needed network manager libs
yum: name={{ item }} state=installed
with_items:
- libnm-qt-devel.x86_64
- nm-connection-editor.x86_64
- libsemanage-python
- policycoreutils-python
##### Working with all cloud nodes - Teaming
- name: try nmcli add team - conn_name only & ip4 gw4
nmcli: type=team conn_name={{item.conn_name}} ip4={{item.ip4}} gw4={{item.gw4}} state=present
with_items:
- "{{nmcli_team}}"
- name: try nmcli add teams-slave
nmcli: type=team-slave conn_name={{item.conn_name}} ifname={{item.ifname}} master={{item.master}} state=present
with_items:
- "{{nmcli_team_slave}}"
###### Working with all cloud nodes - Bonding
# - name: try nmcli add bond - conn_name only & ip4 gw4 mode
# nmcli: type=bond conn_name={{item.conn_name}} ip4={{item.ip4}} gw4={{item.gw4}} mode={{item.mode}} state=present
# with_items:
# - "{{nmcli_bond}}"
#
# - name: try nmcli add bond-slave
# nmcli: type=bond-slave conn_name={{item.conn_name}} ifname={{item.ifname}} master={{item.master}} state=present
# with_items:
# - "{{nmcli_bond_slave}}"
##### Working with all cloud nodes - Ethernet
# - name: nmcli add Ethernet - conn_name only & ip4 gw4
# nmcli: type=ethernet conn_name={{item.conn_name}} ip4={{item.ip4}} gw4={{item.gw4}} state=present
# with_items:
# - "{{nmcli_ethernet}}"
```
## playbook-del.yml example
```yml
---
- hosts: openstack-stage
remote_user: root
tasks:
- name: try nmcli del team - multiple
nmcli: conn_name={{item.conn_name}} state=absent
with_items:
- { conn_name: 'em1'}
- { conn_name: 'em2'}
- { conn_name: 'p1p1'}
- { conn_name: 'p1p2'}
- { conn_name: 'p2p1'}
- { conn_name: 'p2p2'}
- { conn_name: 'tenant'}
- { conn_name: 'storage'}
- { conn_name: 'external'}
- { conn_name: 'team-em1'}
- { conn_name: 'team-em2'}
- { conn_name: 'team-p1p1'}
- { conn_name: 'team-p1p2'}
- { conn_name: 'team-p2p1'}
- { conn_name: 'team-p2p2'}
```
# To add an Ethernet connection with static IP configuration, issue a command as follows
- nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present
# To add a Team connection with static IP configuration, issue a command as follows
- nmcli: conn_name=my-team1 ifname=my-team1 type=team ip4=192.168.100.100/24 gw4=192.168.100.1 state=present autoconnect=yes
# Optionally, at the same time specify IPv6 addresses for the device as follows:
- nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 ip6=abbe::cafe gw6=2001:db8::1 state=present
# To add two IPv4 DNS server addresses:
- nmcli: conn_name=my-eth1 dns4=["8.8.8.8", "8.8.4.4"] state=present
# To make a profile usable for all compatible Ethernet interfaces, issue a command as follows
- nmcli: type=ethernet conn_name=my-eth1 ifname="*" state=present
# To change the property of a setting e.g. MTU, issue a command as follows:
- nmcli: conn_name=my-eth1 mtu=9000 state=present
Exit statuses:
- nmcli exits with status 0 if it succeeds; a value greater than 0 is
  returned if an error occurs.
- 0 Success - indicates the operation succeeded
- 1 Unknown or unspecified error
- 2 Invalid user input, wrong nmcli invocation
- 3 Timeout expired (see --wait option)
- 4 Connection activation failed
- 5 Connection deactivation failed
- 6 Disconnecting device failed
- 7 Connection deletion failed
- 8 NetworkManager is not running
- 9 nmcli and NetworkManager versions mismatch
- 10 Connection, device, or access point does not exist.
'''
# import ansible.module_utils.basic
import os
import syslog
import sys
import dbus
from gi.repository import NetworkManager, NMClient
class Nmcli(object):
"""
This is the generic nmcli manipulation class that is subclassed based on platform.
    A subclass may wish to override the following action methods:
- create_connection()
- delete_connection()
- modify_connection()
- show_connection()
- up_connection()
- down_connection()
All subclasses MUST define platform and distribution (which may be None).
"""
platform='Generic'
distribution=None
bus=dbus.SystemBus()
    # The following constants are used in the dbus code.
DEVTYPES={1: "Ethernet",
2: "Wi-Fi",
5: "Bluetooth",
6: "OLPC",
7: "WiMAX",
8: "Modem",
9: "InfiniBand",
10: "Bond",
11: "VLAN",
12: "ADSL",
13: "Bridge",
14: "Generic",
15: "Team"
}
STATES={0: "Unknown",
10: "Unmanaged",
20: "Unavailable",
30: "Disconnected",
40: "Prepare",
50: "Config",
60: "Need Auth",
70: "IP Config",
80: "IP Check",
90: "Secondaries",
100: "Activated",
110: "Deactivating",
120: "Failed"
}
def __init__(self, module):
self.module=module
self.state=module.params['state']
self.autoconnect=module.params['autoconnect']
self.conn_name=module.params['conn_name']
self.master=module.params['master']
self.ifname=module.params['ifname']
self.type=module.params['type']
self.ip4=module.params['ip4']
self.gw4=module.params['gw4']
self.dns4=module.params['dns4']
self.ip6=module.params['ip6']
self.gw6=module.params['gw6']
self.dns6=module.params['dns6']
self.mtu=module.params['mtu']
self.stp=module.params['stp']
self.priority=module.params['priority']
self.mode=module.params['mode']
self.miimon=module.params['miimon']
self.downdelay=module.params['downdelay']
self.updelay=module.params['updelay']
self.arp_interval=module.params['arp_interval']
self.arp_ip_target=module.params['arp_ip_target']
self.slavepriority=module.params['slavepriority']
self.forwarddelay=module.params['forwarddelay']
self.hellotime=module.params['hellotime']
self.maxage=module.params['maxage']
self.ageingtime=module.params['ageingtime']
self.mac=module.params['mac']
self.vlanid=module.params['vlanid']
self.vlandev=module.params['vlandev']
self.flags=module.params['flags']
self.ingress=module.params['ingress']
self.egress=module.params['egress']
# select whether we dump additional debug info through syslog
self.syslogging=True
def execute_command(self, cmd, use_unsafe_shell=False, data=None):
if self.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))
return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
def merge_secrets(self, proxy, config, setting_name):
try:
# returns a dict of dicts mapping name::setting, where setting is a dict
# mapping key::value. Each member of the 'setting' dict is a secret
secrets=proxy.GetSecrets(setting_name)
# Copy the secrets into our connection config
for setting in secrets:
for key in secrets[setting]:
config[setting_name][key]=secrets[setting][key]
        except Exception:
pass
def dict_to_string(self, d):
# Try to trivially translate a dictionary's elements into nice string
# formatting.
dstr=""
for key in d:
val=d[key]
str_val=""
add_string=True
if type(val)==type(dbus.Array([])):
for elt in val:
if type(elt)==type(dbus.Byte(1)):
str_val+="%s " % int(elt)
elif type(elt)==type(dbus.String("")):
str_val+="%s" % elt
elif type(val)==type(dbus.Dictionary({})):
dstr+=self.dict_to_string(val)
add_string=False
else:
str_val=val
if add_string:
dstr+="%s: %s\n" % ( key, str_val)
return dstr
def connection_to_string(self, config):
# dump a connection configuration to use in list_connection_info
setting_list=[]
for setting_name in config:
setting_list.append(self.dict_to_string(config[setting_name]))
return setting_list
# print ""
def list_connection_info(self):
# Ask the settings service for the list of connections it provides
bus=dbus.SystemBus()
service_name="org.freedesktop.NetworkManager"
proxy=bus.get_object(service_name, "/org/freedesktop/NetworkManager/Settings")
settings=dbus.Interface(proxy, "org.freedesktop.NetworkManager.Settings")
connection_paths=settings.ListConnections()
connection_list=[]
# List each connection's name, UUID, and type
for path in connection_paths:
con_proxy=bus.get_object(service_name, path)
settings_connection=dbus.Interface(con_proxy, "org.freedesktop.NetworkManager.Settings.Connection")
config=settings_connection.GetSettings()
# Now get secrets too; we grab the secrets for each type of connection
# (since there isn't a "get all secrets" call because most of the time
# you only need 'wifi' secrets or '802.1x' secrets, not everything) and
# merge that into the configuration data - To use at a later stage
self.merge_secrets(settings_connection, config, '802-11-wireless')
self.merge_secrets(settings_connection, config, '802-11-wireless-security')
self.merge_secrets(settings_connection, config, '802-1x')
self.merge_secrets(settings_connection, config, 'gsm')
self.merge_secrets(settings_connection, config, 'cdma')
self.merge_secrets(settings_connection, config, 'ppp')
# Get the details of the 'connection' setting
s_con=config['connection']
connection_list.append(s_con['id'])
connection_list.append(s_con['uuid'])
connection_list.append(s_con['type'])
connection_list.append(self.connection_to_string(config))
return connection_list
def connection_exists(self):
# we are going to use name and type in this instance to find if that connection exists and is of type x
connections=self.list_connection_info()
for con_item in connections:
if self.conn_name==con_item:
return True
def down_connection(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# if self.connection_exists():
cmd.append('con')
cmd.append('down')
cmd.append(self.conn_name)
return self.execute_command(cmd)
def up_connection(self):
cmd=[self.module.get_bin_path('nmcli', True)]
cmd.append('con')
cmd.append('up')
cmd.append(self.conn_name)
return self.execute_command(cmd)
def create_connection_team(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating team interface
cmd.append('con')
cmd.append('add')
cmd.append('type')
cmd.append('team')
cmd.append('con-name')
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append('ifname')
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ip4')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('gw4')
cmd.append(self.gw4)
if self.ip6 is not None:
cmd.append('ip6')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('gw6')
cmd.append(self.gw6)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.autoconnect)
return cmd
def modify_connection_team(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying team interface
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ipv4.address')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('ipv4.gateway')
cmd.append(self.gw4)
if self.dns4 is not None:
cmd.append('ipv4.dns')
cmd.append(self.dns4)
if self.ip6 is not None:
cmd.append('ipv6.address')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('ipv6.gateway')
            cmd.append(self.gw6)
if self.dns6 is not None:
cmd.append('ipv6.dns')
cmd.append(self.dns6)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.autoconnect)
# Can't use MTU with team
return cmd
def create_connection_team_slave(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating team-slave interface
cmd.append('connection')
cmd.append('add')
cmd.append('type')
cmd.append(self.type)
cmd.append('con-name')
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append('ifname')
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
cmd.append('master')
if self.conn_name is not None:
cmd.append(self.master)
# if self.mtu is not None:
# cmd.append('802-3-ethernet.mtu')
# cmd.append(self.mtu)
return cmd
def modify_connection_team_slave(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying team-slave interface
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)
cmd.append('connection.master')
cmd.append(self.master)
if self.mtu is not None:
cmd.append('802-3-ethernet.mtu')
cmd.append(self.mtu)
return cmd
def create_connection_bond(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating bond interface
cmd.append('con')
cmd.append('add')
cmd.append('type')
cmd.append('bond')
cmd.append('con-name')
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append('ifname')
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ip4')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('gw4')
cmd.append(self.gw4)
if self.ip6 is not None:
cmd.append('ip6')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('gw6')
cmd.append(self.gw6)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.autoconnect)
if self.mode is not None:
cmd.append('mode')
cmd.append(self.mode)
if self.miimon is not None:
cmd.append('miimon')
cmd.append(self.miimon)
if self.downdelay is not None:
cmd.append('downdelay')
cmd.append(self.downdelay)
        if self.updelay is not None:
            cmd.append('updelay')
            cmd.append(self.updelay)
        if self.arp_interval is not None:
            cmd.append('arp-interval')
            cmd.append(self.arp_interval)
        if self.arp_ip_target is not None:
            cmd.append('arp-ip-target')
            cmd.append(self.arp_ip_target)
return cmd
def modify_connection_bond(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying bond interface
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ipv4.address')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('ipv4.gateway')
cmd.append(self.gw4)
if self.dns4 is not None:
cmd.append('ipv4.dns')
cmd.append(self.dns4)
if self.ip6 is not None:
cmd.append('ipv6.address')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('ipv6.gateway')
            cmd.append(self.gw6)
if self.dns6 is not None:
cmd.append('ipv6.dns')
cmd.append(self.dns6)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.autoconnect)
return cmd
def create_connection_bond_slave(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating bond-slave interface
cmd.append('connection')
cmd.append('add')
cmd.append('type')
cmd.append('bond-slave')
cmd.append('con-name')
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append('ifname')
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
cmd.append('master')
if self.conn_name is not None:
cmd.append(self.master)
return cmd
def modify_connection_bond_slave(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying bond-slave interface
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)
cmd.append('connection.master')
cmd.append(self.master)
return cmd
def create_connection_ethernet(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating ethernet interface
# To add an Ethernet connection with static IP configuration, issue a command as follows
# - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present
# nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.168.100.100/24 gw4 192.168.100.1
cmd.append('con')
cmd.append('add')
cmd.append('type')
cmd.append('ethernet')
cmd.append('con-name')
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append('ifname')
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ip4')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('gw4')
cmd.append(self.gw4)
if self.ip6 is not None:
cmd.append('ip6')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('gw6')
cmd.append(self.gw6)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.autoconnect)
return cmd
def modify_connection_ethernet(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying ethernet interface
# To add an Ethernet connection with static IP configuration, issue a command as follows
# - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present
# nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.168.100.100/24 gw4 192.168.100.1
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ipv4.address')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('ipv4.gateway')
cmd.append(self.gw4)
if self.dns4 is not None:
cmd.append('ipv4.dns')
cmd.append(self.dns4)
if self.ip6 is not None:
cmd.append('ipv6.address')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('ipv6.gateway')
            cmd.append(self.gw6)
if self.dns6 is not None:
cmd.append('ipv6.dns')
cmd.append(self.dns6)
if self.mtu is not None:
cmd.append('802-3-ethernet.mtu')
cmd.append(self.mtu)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.autoconnect)
return cmd
def create_connection_bridge(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating bridge interface
return cmd
def modify_connection_bridge(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying bridge interface
return cmd
def create_connection_vlan(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating ethernet interface
return cmd
def modify_connection_vlan(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying ethernet interface
return cmd
def create_connection(self):
cmd=[]
if self.type=='team':
# cmd=self.create_connection_team()
if (self.dns4 is not None) or (self.dns6 is not None):
cmd=self.create_connection_team()
self.execute_command(cmd)
cmd=self.modify_connection_team()
self.execute_command(cmd)
cmd=self.up_connection()
return self.execute_command(cmd)
            else:  # both dns4 and dns6 are None here
cmd=self.create_connection_team()
return self.execute_command(cmd)
elif self.type=='team-slave':
if self.mtu is not None:
cmd=self.create_connection_team_slave()
self.execute_command(cmd)
cmd=self.modify_connection_team_slave()
self.execute_command(cmd)
# cmd=self.up_connection()
return self.execute_command(cmd)
else:
cmd=self.create_connection_team_slave()
return self.execute_command(cmd)
elif self.type=='bond':
if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
cmd=self.create_connection_bond()
self.execute_command(cmd)
cmd=self.modify_connection_bond()
self.execute_command(cmd)
cmd=self.up_connection()
return self.execute_command(cmd)
else:
cmd=self.create_connection_bond()
return self.execute_command(cmd)
elif self.type=='bond-slave':
cmd=self.create_connection_bond_slave()
elif self.type=='ethernet':
if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
cmd=self.create_connection_ethernet()
self.execute_command(cmd)
cmd=self.modify_connection_ethernet()
self.execute_command(cmd)
cmd=self.up_connection()
return self.execute_command(cmd)
else:
cmd=self.create_connection_ethernet()
return self.execute_command(cmd)
elif self.type=='bridge':
cmd=self.create_connection_bridge()
elif self.type=='vlan':
cmd=self.create_connection_vlan()
return self.execute_command(cmd)
def remove_connection(self):
# self.down_connection()
cmd=[self.module.get_bin_path('nmcli', True)]
cmd.append('con')
cmd.append('del')
cmd.append(self.conn_name)
return self.execute_command(cmd)
def modify_connection(self):
cmd=[]
if self.type=='team':
cmd=self.modify_connection_team()
elif self.type=='team-slave':
cmd=self.modify_connection_team_slave()
elif self.type=='bond':
cmd=self.modify_connection_bond()
elif self.type=='bond-slave':
cmd=self.modify_connection_bond_slave()
elif self.type=='ethernet':
cmd=self.modify_connection_ethernet()
elif self.type=='bridge':
cmd=self.modify_connection_bridge()
elif self.type=='vlan':
cmd=self.modify_connection_vlan()
return self.execute_command(cmd)
def main():
# Parsing argument file
module=AnsibleModule(
argument_spec=dict(
autoconnect=dict(required=False, default=None, choices=['yes', 'no'], type='str'),
state=dict(required=True, choices=['present', 'absent'], type='str'),
conn_name=dict(required=True, type='str'),
master=dict(required=False, default=None, type='str'),
ifname=dict(required=False, default=None, type='str'),
type=dict(required=False, default=None, choices=['ethernet', 'team', 'team-slave', 'bond', 'bond-slave', 'bridge', 'vlan'], type='str'),
ip4=dict(required=False, default=None, type='str'),
gw4=dict(required=False, default=None, type='str'),
dns4=dict(required=False, default=None, type='str'),
ip6=dict(required=False, default=None, type='str'),
gw6=dict(required=False, default=None, type='str'),
dns6=dict(required=False, default=None, type='str'),
# Bond Specific vars
            mode=dict(required=False, default="balance-rr", choices=["balance-rr", "active-backup", "balance-xor", "broadcast", "802.3ad", "balance-tlb", "balance-alb"], type='str'),
miimon=dict(required=False, default=None, type='str'),
downdelay=dict(required=False, default=None, type='str'),
updelay=dict(required=False, default=None, type='str'),
arp_interval=dict(required=False, default=None, type='str'),
arp_ip_target=dict(required=False, default=None, type='str'),
# general usage
mtu=dict(required=False, default=None, type='str'),
mac=dict(required=False, default=None, type='str'),
# bridge specific vars
stp=dict(required=False, default='yes', choices=['yes', 'no'], type='str'),
priority=dict(required=False, default="128", type='str'),
slavepriority=dict(required=False, default="32", type='str'),
forwarddelay=dict(required=False, default="15", type='str'),
hellotime=dict(required=False, default="2", type='str'),
maxage=dict(required=False, default="20", type='str'),
ageingtime=dict(required=False, default="300", type='str'),
# vlan specific vars
vlanid=dict(required=False, default=None, type='str'),
vlandev=dict(required=False, default=None, type='str'),
flags=dict(required=False, default=None, type='str'),
ingress=dict(required=False, default=None, type='str'),
egress=dict(required=False, default=None, type='str'),
),
supports_check_mode=True
)
nmcli=Nmcli(module)
rc=None
out=''
err=''
result={}
result['conn_name']=nmcli.conn_name
result['state']=nmcli.state
# check for issues
if nmcli.conn_name is None:
nmcli.module.fail_json(msg="You haven't specified a name for the connection")
# team-slave checks
if nmcli.type=='team-slave' and nmcli.master is None:
nmcli.module.fail_json(msg="You haven't specified a name for the master so we're not changing a thing")
if nmcli.type=='team-slave' and nmcli.ifname is None:
nmcli.module.fail_json(msg="You haven't specified a name for the connection")
if nmcli.state=='absent':
if nmcli.connection_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err)=nmcli.down_connection()
(rc, out, err)=nmcli.remove_connection()
if rc!=0:
module.fail_json(name =('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc)
elif nmcli.state=='present':
if nmcli.connection_exists():
# modify connection (note: this function is check mode aware)
# result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.conn_name, nmcli.type))
result['Exists']='Connections do exist so we are modifying them'
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err)=nmcli.modify_connection()
if not nmcli.connection_exists():
result['Connection']=('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type))
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err)=nmcli.create_connection()
if rc is not None and rc!=0:
module.fail_json(name=nmcli.conn_name, msg=err, rc=rc)
if rc is None:
result['changed']=False
else:
result['changed']=True
if out:
result['stdout']=out
if err:
result['stderr']=err
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
yoer/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/gis/tests/geoapp/tests.py | 47 | 37664 | from __future__ import absolute_import
import re
from django.db import connection
from django.contrib.gis import gdal
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.tests.utils import (
HAS_SPATIAL_DB, no_mysql, no_oracle, no_spatialite,
mysql, oracle, postgis, spatialite)
from django.test import TestCase
from django.utils import six, unittest
from django.utils.unittest import skipUnless
if HAS_GEOS:
from django.contrib.gis.geos import (fromstr, GEOSGeometry,
Point, LineString, LinearRing, Polygon, GeometryCollection)
from .models import Country, City, PennsylvaniaCity, State, Track
if HAS_GEOS and not spatialite:
from .models import Feature, MinusOneSRID
def postgis_bug_version():
spatial_version = getattr(connection.ops, "spatial_version", (0,0,0))
return spatial_version and (2, 0, 0) <= spatial_version <= (2, 0, 1)
@skipUnless(HAS_GEOS and HAS_SPATIAL_DB, "Geos and spatial db are required.")
class GeoModelTest(TestCase):
def test_fixtures(self):
"Testing geographic model initialization from fixtures."
# Ensuring that data was loaded from initial data fixtures.
self.assertEqual(2, Country.objects.count())
self.assertEqual(8, City.objects.count())
self.assertEqual(2, State.objects.count())
def test_proxy(self):
"Testing Lazy-Geometry support (using the GeometryProxy)."
## Testing on a Point
pnt = Point(0, 0)
nullcity = City(name='NullCity', point=pnt)
nullcity.save()
# Making sure TypeError is thrown when trying to set with an
# incompatible type.
for bad in [5, 2.0, LineString((0, 0), (1, 1))]:
try:
nullcity.point = bad
except TypeError:
pass
else:
self.fail('Should throw a TypeError')
# Now setting with a compatible GEOS Geometry, saving, and ensuring
# the save took, notice no SRID is explicitly set.
new = Point(5, 23)
nullcity.point = new
# Ensuring that the SRID is automatically set to that of the
# field after assignment, but before saving.
self.assertEqual(4326, nullcity.point.srid)
nullcity.save()
# Ensuring the point was saved correctly after saving
self.assertEqual(new, City.objects.get(name='NullCity').point)
# Setting the X and Y of the Point
nullcity.point.x = 23
nullcity.point.y = 5
# Checking assignments pre & post-save.
self.assertNotEqual(Point(23, 5), City.objects.get(name='NullCity').point)
nullcity.save()
self.assertEqual(Point(23, 5), City.objects.get(name='NullCity').point)
nullcity.delete()
## Testing on a Polygon
shell = LinearRing((0, 0), (0, 100), (100, 100), (100, 0), (0, 0))
inner = LinearRing((40, 40), (40, 60), (60, 60), (60, 40), (40, 40))
# Creating a State object using a built Polygon
ply = Polygon(shell, inner)
nullstate = State(name='NullState', poly=ply)
self.assertEqual(4326, nullstate.poly.srid) # SRID auto-set from None
nullstate.save()
ns = State.objects.get(name='NullState')
self.assertEqual(ply, ns.poly)
# Testing the `ogr` and `srs` lazy-geometry properties.
if gdal.HAS_GDAL:
self.assertEqual(True, isinstance(ns.poly.ogr, gdal.OGRGeometry))
self.assertEqual(ns.poly.wkb, ns.poly.ogr.wkb)
self.assertEqual(True, isinstance(ns.poly.srs, gdal.SpatialReference))
self.assertEqual('WGS 84', ns.poly.srs.name)
# Changing the interior ring on the poly attribute.
new_inner = LinearRing((30, 30), (30, 70), (70, 70), (70, 30), (30, 30))
ns.poly[1] = new_inner
ply[1] = new_inner
self.assertEqual(4326, ns.poly.srid)
ns.save()
self.assertEqual(ply, State.objects.get(name='NullState').poly)
ns.delete()
@no_mysql
def test_lookup_insert_transform(self):
"Testing automatic transform for lookups and inserts."
# San Antonio in 'WGS84' (SRID 4326)
sa_4326 = 'POINT (-98.493183 29.424170)'
wgs_pnt = fromstr(sa_4326, srid=4326) # Our reference point in WGS84
# Oracle doesn't have SRID 3084, using 41157.
if oracle:
# San Antonio in 'Texas 4205, Southern Zone (1983, meters)' (SRID 41157)
# Used the following Oracle SQL to get this value:
# SELECT SDO_UTIL.TO_WKTGEOMETRY(SDO_CS.TRANSFORM(SDO_GEOMETRY('POINT (-98.493183 29.424170)', 4326), 41157)) FROM DUAL;
nad_wkt = 'POINT (300662.034646583 5416427.45974934)'
nad_srid = 41157
else:
# San Antonio in 'NAD83(HARN) / Texas Centric Lambert Conformal' (SRID 3084)
nad_wkt = 'POINT (1645978.362408288754523 6276356.025927528738976)' # Used ogr.py in gdal 1.4.1 for this transform
nad_srid = 3084
# Constructing & querying with a point from a different SRID. Oracle
# `SDO_OVERLAPBDYINTERSECT` operates differently from
# `ST_Intersects`, so contains is used instead.
nad_pnt = fromstr(nad_wkt, srid=nad_srid)
if oracle:
tx = Country.objects.get(mpoly__contains=nad_pnt)
else:
tx = Country.objects.get(mpoly__intersects=nad_pnt)
self.assertEqual('Texas', tx.name)
# Creating San Antonio. Remember the Alamo.
sa = City.objects.create(name='San Antonio', point=nad_pnt)
# Now verifying that San Antonio was transformed correctly
sa = City.objects.get(name='San Antonio')
self.assertAlmostEqual(wgs_pnt.x, sa.point.x, 6)
self.assertAlmostEqual(wgs_pnt.y, sa.point.y, 6)
# If the GeometryField SRID is -1, then we shouldn't perform any
# transformation if the SRID of the input geometry is different.
# SpatiaLite does not support missing SRID values.
if not spatialite:
m1 = MinusOneSRID(geom=Point(17, 23, srid=4326))
m1.save()
self.assertEqual(-1, m1.geom.srid)
def test_createnull(self):
"Testing creating a model instance and the geometry being None"
c = City()
self.assertEqual(c.point, None)
@no_spatialite # SpatiaLite does not support abstract geometry columns
def test_geometryfield(self):
"Testing the general GeometryField."
Feature(name='Point', geom=Point(1, 1)).save()
Feature(name='LineString', geom=LineString((0, 0), (1, 1), (5, 5))).save()
Feature(name='Polygon', geom=Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0)))).save()
Feature(name='GeometryCollection',
geom=GeometryCollection(Point(2, 2), LineString((0, 0), (2, 2)),
Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0))))).save()
f_1 = Feature.objects.get(name='Point')
self.assertEqual(True, isinstance(f_1.geom, Point))
self.assertEqual((1.0, 1.0), f_1.geom.tuple)
f_2 = Feature.objects.get(name='LineString')
self.assertEqual(True, isinstance(f_2.geom, LineString))
self.assertEqual(((0.0, 0.0), (1.0, 1.0), (5.0, 5.0)), f_2.geom.tuple)
f_3 = Feature.objects.get(name='Polygon')
self.assertEqual(True, isinstance(f_3.geom, Polygon))
f_4 = Feature.objects.get(name='GeometryCollection')
self.assertEqual(True, isinstance(f_4.geom, GeometryCollection))
self.assertEqual(f_3.geom, f_4.geom[2])
@no_mysql
def test_inherited_geofields(self):
"Test GeoQuerySet methods on inherited Geometry fields."
# Creating a Pennsylvanian city.
mansfield = PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)')
# All transformation SQL will need to be performed on the
# _parent_ table.
qs = PennsylvaniaCity.objects.transform(32128)
self.assertEqual(1, qs.count())
for pc in qs: self.assertEqual(32128, pc.point.srid)
def test_raw_sql_query(self):
"Testing raw SQL query."
cities1 = City.objects.all()
# Only PostGIS would support a 'select *' query because of its recognized
# HEXEWKB format for geometry fields
as_text = 'ST_AsText(%s)' if postgis else connection.ops.select
cities2 = City.objects.raw(
'select id, name, %s from geoapp_city' % as_text % 'point'
)
self.assertEqual(len(cities1), len(list(cities2)))
self.assertTrue(isinstance(cities2[0].point, Point))
@skipUnless(HAS_GEOS and HAS_SPATIAL_DB, "Geos and spatial db are required.")
class GeoLookupTest(TestCase):
@no_mysql
def test_disjoint_lookup(self):
"Testing the `disjoint` lookup type."
ptown = City.objects.get(name='Pueblo')
qs1 = City.objects.filter(point__disjoint=ptown.point)
self.assertEqual(7, qs1.count())
qs2 = State.objects.filter(poly__disjoint=ptown.point)
self.assertEqual(1, qs2.count())
self.assertEqual('Kansas', qs2[0].name)
def test_contains_contained_lookups(self):
"Testing the 'contained', 'contains', and 'bbcontains' lookup types."
# Getting Texas, yes we were a country -- once ;)
texas = Country.objects.get(name='Texas')
# Seeing what cities are in Texas, should get Houston and Dallas,
# and Oklahoma City because 'contained' only checks on the
# _bounding box_ of the Geometries.
if not oracle:
qs = City.objects.filter(point__contained=texas.mpoly)
self.assertEqual(3, qs.count())
cities = ['Houston', 'Dallas', 'Oklahoma City']
for c in qs: self.assertEqual(True, c.name in cities)
# Pulling out some cities.
houston = City.objects.get(name='Houston')
wellington = City.objects.get(name='Wellington')
pueblo = City.objects.get(name='Pueblo')
okcity = City.objects.get(name='Oklahoma City')
lawrence = City.objects.get(name='Lawrence')
# Now testing contains on the countries using the points for
# Houston and Wellington.
tx = Country.objects.get(mpoly__contains=houston.point) # Query w/GEOSGeometry
nz = Country.objects.get(mpoly__contains=wellington.point.hex) # Query w/EWKBHEX
self.assertEqual('Texas', tx.name)
self.assertEqual('New Zealand', nz.name)
# Spatialite 2.3 thinks that Lawrence is in Puerto Rico (a NULL geometry).
if not spatialite:
ks = State.objects.get(poly__contains=lawrence.point)
self.assertEqual('Kansas', ks.name)
# Pueblo and Oklahoma City (even though OK City is within the bounding box of Texas)
# are not contained in Texas or New Zealand.
self.assertEqual(0, len(Country.objects.filter(mpoly__contains=pueblo.point))) # Query w/GEOSGeometry object
self.assertEqual((mysql and 1) or 0,
len(Country.objects.filter(mpoly__contains=okcity.point.wkt))) # Query w/WKT
# OK City is contained w/in bounding box of Texas.
if not oracle:
qs = Country.objects.filter(mpoly__bbcontains=okcity.point)
self.assertEqual(1, len(qs))
self.assertEqual('Texas', qs[0].name)
# Only PostGIS has `left` and `right` lookup types.
@no_mysql
@no_oracle
@no_spatialite
def test_left_right_lookups(self):
"Testing the 'left' and 'right' lookup types."
# Left: A << B => true if xmax(A) < xmin(B)
# Right: A >> B => true if xmin(A) > xmax(B)
# See: BOX2D_left() and BOX2D_right() in lwgeom_box2dfloat4.c in PostGIS source.
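# Worked example (illustrative, approximate values): Colorado's bounding
# box spans roughly x = -109.05 to -102.04, so Houston (x = -95.363151)
# satisfies xmin(A) > xmax(B) and matches point__right=co_border, while
# Victoria (x = -123.305196) satisfies xmax(A) < xmin(B) and matches
# point__left=co_border.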
# Getting the borders for Colorado & Kansas
co_border = State.objects.get(name='Colorado').poly
ks_border = State.objects.get(name='Kansas').poly
# Note: Wellington has an 'X' value of 174, so it will not be considered
# to the left of CO.
# These cities should be strictly to the right of the CO border.
cities = ['Houston', 'Dallas', 'Oklahoma City',
'Lawrence', 'Chicago', 'Wellington']
qs = City.objects.filter(point__right=co_border)
self.assertEqual(6, len(qs))
for c in qs: self.assertEqual(True, c.name in cities)
# These cities should be strictly to the right of the KS border.
cities = ['Chicago', 'Wellington']
qs = City.objects.filter(point__right=ks_border)
self.assertEqual(2, len(qs))
for c in qs: self.assertEqual(True, c.name in cities)
# Note: Wellington has an 'X' value of 174, so it will not be considered
# to the left of CO.
vic = City.objects.get(point__left=co_border)
self.assertEqual('Victoria', vic.name)
cities = ['Pueblo', 'Victoria']
qs = City.objects.filter(point__left=ks_border)
self.assertEqual(2, len(qs))
for c in qs: self.assertEqual(True, c.name in cities)
# The left/right lookup tests are known failures on PostGIS 2.0/2.0.1
# http://trac.osgeo.org/postgis/ticket/2035
if postgis_bug_version():
test_left_right_lookups = unittest.expectedFailure(test_left_right_lookups)
def test_equals_lookups(self):
"Testing the 'same_as' and 'equals' lookup types."
pnt = fromstr('POINT (-95.363151 29.763374)', srid=4326)
c1 = City.objects.get(point=pnt)
c2 = City.objects.get(point__same_as=pnt)
c3 = City.objects.get(point__equals=pnt)
for c in [c1, c2, c3]: self.assertEqual('Houston', c.name)
@no_mysql
def test_null_geometries(self):
"Testing NULL geometry support, and the `isnull` lookup type."
# Creating a state with a NULL boundary.
State.objects.create(name='Puerto Rico')
# Querying for both NULL and Non-NULL values.
nullqs = State.objects.filter(poly__isnull=True)
validqs = State.objects.filter(poly__isnull=False)
# Puerto Rico should be NULL (it's a commonwealth unincorporated territory)
self.assertEqual(1, len(nullqs))
self.assertEqual('Puerto Rico', nullqs[0].name)
# The valid states should be Colorado & Kansas
self.assertEqual(2, len(validqs))
state_names = [s.name for s in validqs]
self.assertEqual(True, 'Colorado' in state_names)
self.assertEqual(True, 'Kansas' in state_names)
# Saving another commonwealth w/a NULL geometry.
nmi = State.objects.create(name='Northern Mariana Islands', poly=None)
self.assertEqual(nmi.poly, None)
# Assigning a geometry and saving -- then UPDATE back to NULL.
nmi.poly = 'POLYGON((0 0,1 0,1 1,1 0,0 0))'
nmi.save()
State.objects.filter(name='Northern Mariana Islands').update(poly=None)
self.assertEqual(None, State.objects.get(name='Northern Mariana Islands').poly)
@no_mysql
def test_relate_lookup(self):
"Testing the 'relate' lookup type."
# To make things more interesting, we will have our Texas reference point in
# different SRIDs.
pnt1 = fromstr('POINT (649287.0363174 4177429.4494686)', srid=2847)
pnt2 = fromstr('POINT(-98.4919715741052 29.4333344025053)', srid=4326)
# Not passing in a geometry as the first param should
# raise a ValueError when initializing the GeoQuerySet.
self.assertRaises(ValueError, Country.objects.filter, mpoly__relate=(23, 'foo'))
# Making sure the right exception is raised for the given
# bad arguments.
for bad_args, e in [((pnt1, 0), ValueError), ((pnt2, 'T*T***FF*', 0), ValueError)]:
qs = Country.objects.filter(mpoly__relate=bad_args)
self.assertRaises(e, qs.count)
# Relate works differently for the different backends.
if postgis or spatialite:
contains_mask = 'T*T***FF*'
within_mask = 'T*F**F***'
intersects_mask = 'T********'
elif oracle:
contains_mask = 'contains'
within_mask = 'inside'
# TODO: This is not quite the same as the PostGIS mask above
intersects_mask = 'overlapbdyintersect'
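# The PostGIS/SpatiaLite masks above are DE-9IM intersection-matrix
# patterns: each of the nine characters constrains one cell of the
# interior/boundary/exterior matrix ('T' = non-empty intersection,
# 'F' = empty, '*' = don't care). Oracle's SDO_RELATE takes named
# masks instead, hence 'contains'/'inside' above.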
# Testing contains relation mask.
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt1, contains_mask)).name)
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, contains_mask)).name)
# Testing within relation mask.
ks = State.objects.get(name='Kansas')
self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, within_mask)).name)
# Testing intersection relation mask.
if not oracle:
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt1, intersects_mask)).name)
self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, intersects_mask)).name)
self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, intersects_mask)).name)
@skipUnless(HAS_GEOS and HAS_SPATIAL_DB, "Geos and spatial db are required.")
class GeoQuerySetTest(TestCase):
# Please keep the tests in GeoQuerySet method's alphabetic order
@no_mysql
def test_centroid(self):
"Testing the `centroid` GeoQuerySet method."
qs = State.objects.exclude(poly__isnull=True).centroid()
if oracle:
tol = 0.1
elif spatialite:
tol = 0.000001
else:
tol = 0.000000001
for s in qs:
self.assertEqual(True, s.poly.centroid.equals_exact(s.centroid, tol))
@no_mysql
def test_diff_intersection_union(self):
"Testing the `difference`, `intersection`, `sym_difference`, and `union` GeoQuerySet methods."
geom = Point(5, 23)
tol = 1
qs = Country.objects.all().difference(geom).sym_difference(geom).union(geom)
# XXX For some reason SpatiaLite does something screwy with the Texas geometry here. Also,
# XXX it doesn't like the null intersection.
if spatialite:
qs = qs.exclude(name='Texas')
else:
qs = qs.intersection(geom)
for c in qs:
if oracle:
# Should be able to execute the queries; however, they won't be the same
# as GEOS (because Oracle doesn't use GEOS internally like PostGIS or
# SpatiaLite).
pass
else:
self.assertEqual(c.mpoly.difference(geom), c.difference)
if not spatialite:
self.assertEqual(c.mpoly.intersection(geom), c.intersection)
self.assertEqual(c.mpoly.sym_difference(geom), c.sym_difference)
self.assertEqual(c.mpoly.union(geom), c.union)
@no_mysql
@no_spatialite # SpatiaLite does not have an Extent function
def test_extent(self):
"Testing the `extent` GeoQuerySet method."
# Reference query:
# `SELECT ST_extent(point) FROM geoapp_city WHERE (name='Houston' or name='Dallas');`
# => BOX(-96.8016128540039 29.7633724212646,-95.3631439208984 32.7820587158203)
expected = (-96.8016128540039, 29.7633724212646, -95.3631439208984, 32.782058715820)
qs = City.objects.filter(name__in=('Houston', 'Dallas'))
extent = qs.extent()
for val, exp in zip(extent, expected):
self.assertAlmostEqual(exp, val, 4)
@no_mysql
@no_oracle
@no_spatialite
def test_force_rhr(self):
"Testing GeoQuerySet.force_rhr()."
rings = ( ( (0, 0), (5, 0), (0, 5), (0, 0) ),
( (1, 1), (1, 3), (3, 1), (1, 1) ),
)
rhr_rings = ( ( (0, 0), (0, 5), (5, 0), (0, 0) ),
( (1, 1), (3, 1), (1, 3), (1, 1) ),
)
State.objects.create(name='Foo', poly=Polygon(*rings))
s = State.objects.force_rhr().get(name='Foo')
self.assertEqual(rhr_rings, s.force_rhr.coords)
@no_mysql
@no_oracle
@no_spatialite
def test_geohash(self):
"Testing GeoQuerySet.geohash()."
if not connection.ops.geohash: return
# Reference query:
# SELECT ST_GeoHash(point) FROM geoapp_city WHERE name='Houston';
# SELECT ST_GeoHash(point, 5) FROM geoapp_city WHERE name='Houston';
ref_hash = '9vk1mfq8jx0c8e0386z6'
h1 = City.objects.geohash().get(name='Houston')
h2 = City.objects.geohash(precision=5).get(name='Houston')
self.assertEqual(ref_hash, h1.geohash)
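# Geohashes nest: truncating a hash to N characters yields the hash of
# the enclosing cell at precision N, so the precision-5 result is just
# the first five characters of the full hash.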
self.assertEqual(ref_hash[:5], h2.geohash)
def test_geojson(self):
"Testing GeoJSON output from the database using GeoQuerySet.geojson()."
# Only PostGIS 1.3.4+ and SpatiaLite 3.0+ support GeoJSON.
if not connection.ops.geojson:
self.assertRaises(NotImplementedError, Country.objects.all().geojson, field_name='mpoly')
return
pueblo_json = '{"type":"Point","coordinates":[-104.609252,38.255001]}'
houston_json = '{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4326"}},"coordinates":[-95.363151,29.763374]}'
victoria_json = '{"type":"Point","bbox":[-123.30519600,48.46261100,-123.30519600,48.46261100],"coordinates":[-123.305196,48.462611]}'
chicago_json = '{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4326"}},"bbox":[-87.65018,41.85039,-87.65018,41.85039],"coordinates":[-87.65018,41.85039]}'
if postgis and connection.ops.spatial_version < (1, 4, 0):
pueblo_json = '{"type":"Point","coordinates":[-104.60925200,38.25500100]}'
houston_json = '{"type":"Point","crs":{"type":"EPSG","properties":{"EPSG":4326}},"coordinates":[-95.36315100,29.76337400]}'
victoria_json = '{"type":"Point","bbox":[-123.30519600,48.46261100,-123.30519600,48.46261100],"coordinates":[-123.30519600,48.46261100]}'
elif spatialite:
victoria_json = '{"type":"Point","bbox":[-123.305196,48.462611,-123.305196,48.462611],"coordinates":[-123.305196,48.462611]}'
# Precision argument should only be an integer
self.assertRaises(TypeError, City.objects.geojson, precision='foo')
# Reference queries and values.
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 0) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Pueblo';
self.assertEqual(pueblo_json, City.objects.geojson().get(name='Pueblo').geojson)
# 1.3.x: SELECT ST_AsGeoJson("geoapp_city"."point", 8, 1) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Houston';
# 1.4.x: SELECT ST_AsGeoJson("geoapp_city"."point", 8, 2) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Houston';
# This time we want to include the CRS by using the `crs` keyword.
self.assertEqual(houston_json, City.objects.geojson(crs=True, model_att='json').get(name='Houston').json)
# 1.3.x: SELECT ST_AsGeoJson("geoapp_city"."point", 8, 2) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Victoria';
# 1.4.x: SELECT ST_AsGeoJson("geoapp_city"."point", 8, 1) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Houston';
# This time we include the bounding box by using the `bbox` keyword.
self.assertEqual(victoria_json, City.objects.geojson(bbox=True).get(name='Victoria').geojson)
# 1.(3|4).x: SELECT ST_AsGeoJson("geoapp_city"."point", 5, 3) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Chicago';
# Finally, we set every available keyword.
self.assertEqual(chicago_json, City.objects.geojson(bbox=True, crs=True, precision=5).get(name='Chicago').geojson)
def test_gml(self):
"Testing GML output from the database using GeoQuerySet.gml()."
if mysql or (spatialite and not connection.ops.gml):
self.assertRaises(NotImplementedError, Country.objects.all().gml, field_name='mpoly')
return
# Should throw a TypeError when trying to obtain GML from a
# non-geometry field.
qs = City.objects.all()
self.assertRaises(TypeError, qs.gml, field_name='name')
ptown1 = City.objects.gml(field_name='point', precision=9).get(name='Pueblo')
ptown2 = City.objects.gml(precision=9).get(name='Pueblo')
if oracle:
# No precision parameter for Oracle :-/
gml_regex = re.compile(r'^<gml:Point srsName="SDO:4326" xmlns:gml="http://www.opengis.net/gml"><gml:coordinates decimal="\." cs="," ts=" ">-104.60925\d+,38.25500\d+ </gml:coordinates></gml:Point>')
elif spatialite and connection.ops.spatial_version < (3, 0, 0):
# Spatialite before 3.0 has extra colon in SrsName
gml_regex = re.compile(r'^<gml:Point SrsName="EPSG::4326"><gml:coordinates decimal="\." cs="," ts=" ">-104.609251\d+,38.255001</gml:coordinates></gml:Point>')
else:
gml_regex = re.compile(r'^<gml:Point srsName="EPSG:4326"><gml:coordinates>-104\.60925\d+,38\.255001</gml:coordinates></gml:Point>')
for ptown in [ptown1, ptown2]:
self.assertTrue(gml_regex.match(ptown.gml))
# PostGIS < 1.5 doesn't include dimension in GMLv3 output.
if postgis and connection.ops.spatial_version >= (1, 5, 0):
self.assertIn('<gml:pos srsDimension="2">',
City.objects.gml(version=3).get(name='Pueblo').gml)
def test_kml(self):
"Testing KML output from the database using GeoQuerySet.kml()."
# Only PostGIS and Spatialite (>=2.4.0-RC4) support KML serialization
if not (postgis or (spatialite and connection.ops.kml)):
self.assertRaises(NotImplementedError, State.objects.all().kml, field_name='poly')
return
# Should throw a TypeError when trying to obtain KML from a
# non-geometry field.
qs = City.objects.all()
self.assertRaises(TypeError, qs.kml, 'name')
# The reference KML depends on the version of PostGIS used
# (the output stopped including altitude in 1.3.3).
if connection.ops.spatial_version >= (1, 3, 3):
ref_kml = '<Point><coordinates>-104.609252,38.255001</coordinates></Point>'
else:
ref_kml = '<Point><coordinates>-104.609252,38.255001,0</coordinates></Point>'
# Ensuring the KML is as expected.
ptown1 = City.objects.kml(field_name='point', precision=9).get(name='Pueblo')
ptown2 = City.objects.kml(precision=9).get(name='Pueblo')
for ptown in [ptown1, ptown2]:
self.assertEqual(ref_kml, ptown.kml)
# Only PostGIS has support for the MakeLine aggregate.
@no_mysql
@no_oracle
@no_spatialite
def test_make_line(self):
"Testing the `make_line` GeoQuerySet method."
# Ensuring that a `TypeError` is raised on models without PointFields.
self.assertRaises(TypeError, State.objects.make_line)
self.assertRaises(TypeError, Country.objects.make_line)
# Reference query:
# SELECT AsText(ST_MakeLine(geoapp_city.point)) FROM geoapp_city;
ref_line = GEOSGeometry('LINESTRING(-95.363151 29.763374,-96.801611 32.782057,-97.521157 34.464642,174.783117 -41.315268,-104.609252 38.255001,-95.23506 38.971823,-87.650175 41.850385,-123.305196 48.462611)', srid=4326)
self.assertEqual(ref_line, City.objects.make_line())
@no_mysql
def test_num_geom(self):
"Testing the `num_geom` GeoQuerySet method."
# Both 'countries' only have two geometries.
for c in Country.objects.num_geom():
self.assertEqual(2, c.num_geom)
for c in City.objects.filter(point__isnull=False).num_geom():
# Oracle and PostGIS 2.0+ will return 1 for the number of
# geometries on non-collections, whereas PostGIS < 2.0.0
# will return None.
if postgis and connection.ops.spatial_version < (2, 0, 0):
self.assertIsNone(c.num_geom)
else:
self.assertEqual(1, c.num_geom)
@no_mysql
@no_spatialite # SpatiaLite can only count vertices in LineStrings
def test_num_points(self):
"Testing the `num_points` GeoQuerySet method."
for c in Country.objects.num_points():
self.assertEqual(c.mpoly.num_points, c.num_points)
if not oracle:
# Oracle cannot count vertices in Point geometries.
for c in City.objects.num_points(): self.assertEqual(1, c.num_points)
@no_mysql
def test_point_on_surface(self):
"Testing the `point_on_surface` GeoQuerySet method."
# Reference values.
if oracle:
# SELECT SDO_UTIL.TO_WKTGEOMETRY(SDO_GEOM.SDO_POINTONSURFACE(GEOAPP_COUNTRY.MPOLY, 0.05)) FROM GEOAPP_COUNTRY;
ref = {'New Zealand' : fromstr('POINT (174.616364 -36.100861)', srid=4326),
'Texas' : fromstr('POINT (-103.002434 36.500397)', srid=4326),
}
elif postgis or spatialite:
# Using GEOSGeometry to compute the reference point on surface values
# -- since PostGIS also uses GEOS these should be the same.
ref = {'New Zealand' : Country.objects.get(name='New Zealand').mpoly.point_on_surface,
'Texas' : Country.objects.get(name='Texas').mpoly.point_on_surface
}
for c in Country.objects.point_on_surface():
if spatialite:
# XXX This seems to be a WKT-translation-related precision issue?
tol = 0.00001
else:
tol = 0.000000001
self.assertEqual(True, ref[c.name].equals_exact(c.point_on_surface, tol))
@no_mysql
@no_spatialite
def test_reverse_geom(self):
"Testing GeoQuerySet.reverse_geom()."
coords = [ (-95.363151, 29.763374), (-95.448601, 29.713803) ]
Track.objects.create(name='Foo', line=LineString(coords))
t = Track.objects.reverse_geom().get(name='Foo')
coords.reverse()
self.assertEqual(tuple(coords), t.reverse_geom.coords)
if oracle:
self.assertRaises(TypeError, State.objects.reverse_geom)
@no_mysql
@no_oracle
def test_scale(self):
"Testing the `scale` GeoQuerySet method."
xfac, yfac = 2, 3
tol = 5 # XXX The low precision tolerance is for SpatiaLite
qs = Country.objects.scale(xfac, yfac, model_att='scaled')
for c in qs:
for p1, p2 in zip(c.mpoly, c.scaled):
for r1, r2 in zip(p1, p2):
for c1, c2 in zip(r1.coords, r2.coords):
self.assertAlmostEqual(c1[0] * xfac, c2[0], tol)
self.assertAlmostEqual(c1[1] * yfac, c2[1], tol)
@no_mysql
@no_oracle
@no_spatialite
def test_snap_to_grid(self):
"Testing GeoQuerySet.snap_to_grid()."
# Let's try and break snap_to_grid() with bad combinations of arguments.
for bad_args in ((), range(3), range(5)):
self.assertRaises(ValueError, Country.objects.snap_to_grid, *bad_args)
for bad_args in (('1.0',), (1.0, None), tuple(map(six.text_type, range(4)))):
self.assertRaises(TypeError, Country.objects.snap_to_grid, *bad_args)
# Boundary for San Marino, courtesy of Bjorn Sandvik of thematicmapping.org
# from the world borders dataset he provides.
wkt = ('MULTIPOLYGON(((12.41580 43.95795,12.45055 43.97972,12.45389 43.98167,'
'12.46250 43.98472,12.47167 43.98694,12.49278 43.98917,'
'12.50555 43.98861,12.51000 43.98694,12.51028 43.98277,'
'12.51167 43.94333,12.51056 43.93916,12.49639 43.92333,'
'12.49500 43.91472,12.48778 43.90583,12.47444 43.89722,'
'12.46472 43.89555,12.45917 43.89611,12.41639 43.90472,'
'12.41222 43.90610,12.40782 43.91366,12.40389 43.92667,'
'12.40500 43.94833,12.40889 43.95499,12.41580 43.95795)))')
sm = Country.objects.create(name='San Marino', mpoly=fromstr(wkt))
# Because floating-point arithmetic isn't exact, we set a tolerance
# to pass into GEOS `equals_exact`.
tol = 0.000000001
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.1)) FROM "geoapp_country" WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr('MULTIPOLYGON(((12.4 44,12.5 44,12.5 43.9,12.4 43.9,12.4 44)))')
self.assertTrue(ref.equals_exact(Country.objects.snap_to_grid(0.1).get(name='San Marino').snap_to_grid, tol))
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.05, 0.23)) FROM "geoapp_country" WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr('MULTIPOLYGON(((12.4 43.93,12.45 43.93,12.5 43.93,12.45 43.93,12.4 43.93)))')
self.assertTrue(ref.equals_exact(Country.objects.snap_to_grid(0.05, 0.23).get(name='San Marino').snap_to_grid, tol))
# SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.5, 0.17, 0.05, 0.23)) FROM "geoapp_country" WHERE "geoapp_country"."name" = 'San Marino';
ref = fromstr('MULTIPOLYGON(((12.4 43.87,12.45 43.87,12.45 44.1,12.5 44.1,12.5 43.87,12.45 43.87,12.4 43.87)))')
self.assertTrue(ref.equals_exact(Country.objects.snap_to_grid(0.05, 0.23, 0.5, 0.17).get(name='San Marino').snap_to_grid, tol))
def test_svg(self):
"Testing SVG output using GeoQuerySet.svg()."
if mysql or oracle:
self.assertRaises(NotImplementedError, City.objects.svg)
return
self.assertRaises(TypeError, City.objects.svg, precision='foo')
# SELECT AsSVG(geoapp_city.point, 0, 8) FROM geoapp_city WHERE name = 'Pueblo';
svg1 = 'cx="-104.609252" cy="-38.255001"'
# Even though relative coordinates are requested, there is only one point,
# so the output is practically the same except for the 'c' letter prefix on the x,y values.
svg2 = svg1.replace('c', '')
self.assertEqual(svg1, City.objects.svg().get(name='Pueblo').svg)
self.assertEqual(svg2, City.objects.svg(relative=5).get(name='Pueblo').svg)
@no_mysql
def test_transform(self):
"Testing the transform() GeoQuerySet method."
# Pre-transformed points for Houston and Pueblo.
htown = fromstr('POINT(1947516.83115183 6322297.06040572)', srid=3084)
ptown = fromstr('POINT(992363.390841912 481455.395105533)', srid=2774)
prec = 3 # Precision is low due to version variations in PROJ and GDAL.
# Asserting the result of the transform operation with the values in
# the pre-transformed points. Oracle does not have the 3084 SRID.
if not oracle:
h = City.objects.transform(htown.srid).get(name='Houston')
self.assertEqual(3084, h.point.srid)
self.assertAlmostEqual(htown.x, h.point.x, prec)
self.assertAlmostEqual(htown.y, h.point.y, prec)
p1 = City.objects.transform(ptown.srid, field_name='point').get(name='Pueblo')
p2 = City.objects.transform(srid=ptown.srid).get(name='Pueblo')
for p in [p1, p2]:
self.assertEqual(2774, p.point.srid)
self.assertAlmostEqual(ptown.x, p.point.x, prec)
self.assertAlmostEqual(ptown.y, p.point.y, prec)
@no_mysql
@no_oracle
def test_translate(self):
"Testing the `translate` GeoQuerySet method."
xfac, yfac = 5, -23
qs = Country.objects.translate(xfac, yfac, model_att='translated')
for c in qs:
for p1, p2 in zip(c.mpoly, c.translated):
for r1, r2 in zip(p1, p2):
for c1, c2 in zip(r1.coords, r2.coords):
# XXX The low precision is for SpatiaLite
self.assertAlmostEqual(c1[0] + xfac, c2[0], 5)
self.assertAlmostEqual(c1[1] + yfac, c2[1], 5)
# TODO: Oracle can be made to pass if
# union1 = union2 = fromstr('POINT (-97.5211570000000023 34.4646419999999978)')
# but this seems unexpected and should be investigated to determine the cause.
@no_mysql
@no_oracle
def test_unionagg(self):
"Testing the `unionagg` (aggregate union) GeoQuerySet method."
tx = Country.objects.get(name='Texas').mpoly
# Houston, Dallas -- Oracle has different order.
union1 = fromstr('MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)')
union2 = fromstr('MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)')
qs = City.objects.filter(point__within=tx)
self.assertRaises(TypeError, qs.unionagg, 'name')
# Using `field_name` keyword argument in one query and specifying an
# order in the other (which should not be used because this is
# an aggregate method on a spatial column)
u1 = qs.unionagg(field_name='point')
u2 = qs.order_by('name').unionagg()
tol = 0.00001
if oracle:
union = union2
else:
union = union1
self.assertEqual(True, union.equals_exact(u1, tol))
self.assertEqual(True, union.equals_exact(u2, tol))
qs = City.objects.filter(name='NotACity')
self.assertEqual(None, qs.unionagg(field_name='point'))
def test_non_concrete_field(self):
pkfield = City._meta.get_field_by_name('id')[0]
orig_pkfield_col = pkfield.column
pkfield.column = None
try:
list(City.objects.all())
finally:
pkfield.column = orig_pkfield_col
| apache-2.0 |
katrid/django | tests/view_tests/generic_urls.py | 329 | 1356 | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.views.generic import RedirectView
from . import views
from .models import Article, DateArticle
date_based_info_dict = {
'queryset': Article.objects.all(),
'date_field': 'date_created',
'month_format': '%m',
}
object_list_dict = {
'queryset': Article.objects.all(),
'paginate_by': 2,
}
object_list_no_paginate_by = {
'queryset': Article.objects.all(),
}
numeric_days_info_dict = dict(date_based_info_dict, day_format='%d')
date_based_datefield_info_dict = dict(date_based_info_dict, queryset=DateArticle.objects.all())
urlpatterns = [
url(r'^accounts/login/$', auth_views.login, {'template_name': 'login.html'}),
url(r'^accounts/logout/$', auth_views.logout),
# Special URLs for particular regression cases.
url('^中文/target/$', views.index_page),
]
# redirects, both temporary and permanent, with non-ASCII targets
urlpatterns += [
url('^nonascii_redirect/$', RedirectView.as_view(
url='/中文/target/', permanent=False)),
url('^permanent_nonascii_redirect/$', RedirectView.as_view(
url='/中文/target/', permanent=True)),
]
# json response
urlpatterns += [
url(r'^json/response/$', views.json_response_view),
]
| bsd-3-clause |
pdellaert/ansible | test/units/modules/network/fortios/test_fortios_ips_rule.py | 21 | 11633 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_ips_rule
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_ips_rule.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_ips_rule_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'ips_rule': {
'action': 'pass',
'application': 'test_value_4',
'date': '5',
'group': 'test_value_6',
'location': 'test_value_7,',
'log': 'disable',
'log_packet': 'disable',
'name': 'default_name_10',
'os': 'test_value_11',
'rev': '12',
'rule_id': '13',
'service': 'test_value_14',
'severity': 'test_value_15,',
'status': 'disable'
},
'vdom': 'root'}
is_error, changed, response = fortios_ips_rule.fortios_ips(input_data, fos_instance)
expected_data = {
'action': 'pass',
'application': 'test_value_4',
'date': '5',
'group': 'test_value_6',
'location': 'test_value_7,',
'log': 'disable',
'log-packet': 'disable',
'name': 'default_name_10',
'os': 'test_value_11',
'rev': '12',
'rule-id': '13',
'service': 'test_value_14',
'severity': 'test_value_15,',
'status': 'disable'
}
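# Note how the module translates the playbook's underscore keys
# ('log_packet', 'rule_id') into the hyphenated field names the FortiOS
# API expects ('log-packet', 'rule-id'); the expected payload above is
# what should reach the mocked `set` call below.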
set_method_mock.assert_called_with('ips', 'rule', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_ips_rule_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'ips_rule': {
'action': 'pass',
'application': 'test_value_4',
'date': '5',
'group': 'test_value_6',
'location': 'test_value_7,',
'log': 'disable',
'log_packet': 'disable',
'name': 'default_name_10',
'os': 'test_value_11',
'rev': '12',
'rule_id': '13',
'service': 'test_value_14',
'severity': 'test_value_15,',
'status': 'disable'
},
'vdom': 'root'}
is_error, changed, response = fortios_ips_rule.fortios_ips(input_data, fos_instance)
expected_data = {
'action': 'pass',
'application': 'test_value_4',
'date': '5',
'group': 'test_value_6',
'location': 'test_value_7,',
'log': 'disable',
'log-packet': 'disable',
'name': 'default_name_10',
'os': 'test_value_11',
'rev': '12',
'rule-id': '13',
'service': 'test_value_14',
'severity': 'test_value_15,',
'status': 'disable'
}
set_method_mock.assert_called_with('ips', 'rule', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_ips_rule_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'ips_rule': {
'action': 'pass',
'application': 'test_value_4',
'date': '5',
'group': 'test_value_6',
'location': 'test_value_7,',
'log': 'disable',
'log_packet': 'disable',
'name': 'default_name_10',
'os': 'test_value_11',
'rev': '12',
'rule_id': '13',
'service': 'test_value_14',
'severity': 'test_value_15,',
'status': 'disable'
},
'vdom': 'root'}
is_error, changed, response = fortios_ips_rule.fortios_ips(input_data, fos_instance)
delete_method_mock.assert_called_with('ips', 'rule', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_ips_rule_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'ips_rule': {
'action': 'pass',
'application': 'test_value_4',
'date': '5',
'group': 'test_value_6',
'location': 'test_value_7,',
'log': 'disable',
'log_packet': 'disable',
'name': 'default_name_10',
'os': 'test_value_11',
'rev': '12',
'rule_id': '13',
'service': 'test_value_14',
'severity': 'test_value_15,',
'status': 'disable'
},
'vdom': 'root'}
is_error, changed, response = fortios_ips_rule.fortios_ips(input_data, fos_instance)
delete_method_mock.assert_called_with('ips', 'rule', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_ips_rule_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'ips_rule': {
'action': 'pass',
'application': 'test_value_4',
'date': '5',
'group': 'test_value_6',
'location': 'test_value_7,',
'log': 'disable',
'log_packet': 'disable',
'name': 'default_name_10',
'os': 'test_value_11',
'rev': '12',
'rule_id': '13',
'service': 'test_value_14',
'severity': 'test_value_15,',
'status': 'disable'
},
'vdom': 'root'}
is_error, changed, response = fortios_ips_rule.fortios_ips(input_data, fos_instance)
expected_data = {
'action': 'pass',
'application': 'test_value_4',
'date': '5',
'group': 'test_value_6',
'location': 'test_value_7,',
'log': 'disable',
'log-packet': 'disable',
'name': 'default_name_10',
'os': 'test_value_11',
'rev': '12',
'rule-id': '13',
'service': 'test_value_14',
'severity': 'test_value_15,',
'status': 'disable'
}
set_method_mock.assert_called_with('ips', 'rule', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_ips_rule_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'ips_rule': {
'random_attribute_not_valid': 'tag',
'action': 'pass',
'application': 'test_value_4',
'date': '5',
'group': 'test_value_6',
'location': 'test_value_7,',
'log': 'disable',
'log_packet': 'disable',
'name': 'default_name_10',
'os': 'test_value_11',
'rev': '12',
'rule_id': '13',
'service': 'test_value_14',
'severity': 'test_value_15,',
'status': 'disable'
},
'vdom': 'root'}
is_error, changed, response = fortios_ips_rule.fortios_ips(input_data, fos_instance)
expected_data = {
'action': 'pass',
'application': 'test_value_4',
'date': '5',
'group': 'test_value_6',
'location': 'test_value_7,',
'log': 'disable',
'log-packet': 'disable',
'name': 'default_name_10',
'os': 'test_value_11',
'rev': '12',
'rule-id': '13',
'service': 'test_value_14',
'severity': 'test_value_15,',
'status': 'disable'
}
set_method_mock.assert_called_with('ips', 'rule', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| gpl-3.0 |
madj4ck/ansible | v1/ansible/runner/connection_plugins/ssh.py | 104 | 19988 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import os
import re
import subprocess
import shlex
import pipes
import random
import select
import fcntl
import hmac
import pwd
import gettext
import pty
from hashlib import sha1
import ansible.constants as C
from ansible.callbacks import vvv
from ansible import errors
from ansible import utils
class Connection(object):
''' ssh based connections '''
def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
self.runner = runner
self.host = host
self.ipv6 = ':' in self.host
self.port = port
self.user = str(user)
self.password = password
self.private_key_file = private_key_file
self.HASHED_KEY_MAGIC = "|1|"
self.has_pipelining = True
# TODO: add pbrun, pfexec
self.become_methods_supported=['sudo', 'su', 'pbrun']
fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
self.cp_dir = utils.prepare_writeable_dir('$HOME/.ansible/cp',mode=0700)
fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
def connect(self):
''' connect to the remote host '''
vvv("ESTABLISH CONNECTION FOR USER: %s" % self.user, host=self.host)
self.common_args = []
extra_args = C.ANSIBLE_SSH_ARGS
if extra_args is not None:
# make sure there is no empty string added as this can produce weird errors
self.common_args += [x.strip() for x in shlex.split(extra_args) if x.strip()]
else:
self.common_args += ["-o", "ControlMaster=auto",
"-o", "ControlPersist=60s",
"-o", "ControlPath=\"%s\"" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))]
cp_in_use = False
cp_path_set = False
for arg in self.common_args:
if "ControlPersist" in arg:
cp_in_use = True
if "ControlPath" in arg:
cp_path_set = True
if cp_in_use and not cp_path_set:
self.common_args += ["-o", "ControlPath=\"%s\"" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))]
if not C.HOST_KEY_CHECKING:
self.common_args += ["-o", "StrictHostKeyChecking=no"]
if self.port is not None:
self.common_args += ["-o", "Port=%d" % (self.port)]
if self.private_key_file is not None:
self.common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.private_key_file)]
elif self.runner.private_key_file is not None:
self.common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.runner.private_key_file)]
if self.password:
self.common_args += ["-o", "GSSAPIAuthentication=no",
"-o", "PubkeyAuthentication=no"]
else:
self.common_args += ["-o", "KbdInteractiveAuthentication=no",
"-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
"-o", "PasswordAuthentication=no"]
if self.user != pwd.getpwuid(os.geteuid())[0]:
self.common_args += ["-o", "User="+self.user]
self.common_args += ["-o", "ConnectTimeout=%d" % self.runner.timeout]
return self
def _run(self, cmd, indata):
if indata:
# do not use pseudo-pty
p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdin = p.stdin
else:
# try to use a pseudo-pty
try:
# Make sure stdin is a proper (pseudo) pty to avoid: tcgetattr errors
master, slave = pty.openpty()
p = subprocess.Popen(cmd, stdin=slave,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdin = os.fdopen(master, 'w', 0)
os.close(slave)
except:
p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdin = p.stdin
return (p, stdin)
def _password_cmd(self):
if self.password:
try:
p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
except OSError:
raise errors.AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program")
(self.rfd, self.wfd) = os.pipe()
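# sshpass's -d option reads the password from the given file
# descriptor; _send_password() later writes the password into the
# pipe's write end, keeping it off the command line and out of the
# process list.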
return ["sshpass", "-d%d" % self.rfd]
return []
def _send_password(self):
if self.password:
os.close(self.rfd)
os.write(self.wfd, "%s\n" % self.password)
os.close(self.wfd)
def _communicate(self, p, stdin, indata, sudoable=False, prompt=None):
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
# We can't use p.communicate here because the ControlMaster may have stdout open as well
stdout = ''
stderr = ''
rpipes = [p.stdout, p.stderr]
if indata:
try:
stdin.write(indata)
stdin.close()
except:
raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
# Read stdout/stderr from process
while True:
rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
# fail early if the become password is wrong
if self.runner.become and sudoable:
incorrect_password = gettext.dgettext(self.runner.become_method, C.BECOME_ERROR_STRINGS[self.runner.become_method])
if prompt:
if self.runner.become_pass:
if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
raise errors.AnsibleError('Incorrect become password')
if stdout.endswith(prompt):
raise errors.AnsibleError('Missing become password')
elif stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
raise errors.AnsibleError('Incorrect become password')
if p.stdout in rfd:
dat = os.read(p.stdout.fileno(), 9000)
stdout += dat
if dat == '':
rpipes.remove(p.stdout)
if p.stderr in rfd:
dat = os.read(p.stderr.fileno(), 9000)
stderr += dat
if dat == '':
rpipes.remove(p.stderr)
# only break out if no pipes are left to read or
# the pipes are completely read and
# the process is terminated
if (not rpipes or not rfd) and p.poll() is not None:
break
# No pipes are left to read but process is not yet terminated
# Only then it is safe to wait for the process to be finished
# NOTE: Actually p.poll() is always None here if rpipes is empty
elif not rpipes and p.poll() is None:
p.wait()
# The process is terminated. Since no pipes to read from are
# left, there is no need to call select() again.
break
# close stdin after process is terminated and stdout/stderr are read
# completely (see also issue #848)
stdin.close()
return (p.returncode, stdout, stderr)
def not_in_host_file(self, host):
if 'USER' in os.environ:
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
else:
user_host_file = "~/.ssh/known_hosts"
user_host_file = os.path.expanduser(user_host_file)
host_file_list = []
host_file_list.append(user_host_file)
host_file_list.append("/etc/ssh/ssh_known_hosts")
host_file_list.append("/etc/ssh/ssh_known_hosts2")
hfiles_not_found = 0
for hf in host_file_list:
if not os.path.exists(hf):
hfiles_not_found += 1
continue
try:
host_fh = open(hf)
except IOError, e:
hfiles_not_found += 1
continue
else:
data = host_fh.read()
host_fh.close()
for line in data.split("\n"):
line = line.strip()
if line is None or " " not in line:
continue
tokens = line.split()
if not tokens:
continue
if tokens[0].find(self.HASHED_KEY_MAGIC) == 0:
# this is a hashed known host entry
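# Hashed entries have the form "|1|<salt>|<hmac>", where the stored
# value is base64(HMAC-SHA1(key=base64-decoded salt, msg=hostname)),
# so hosts can be matched without recording hostnames in plaintext.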
try:
(kn_salt,kn_host) = tokens[0][len(self.HASHED_KEY_MAGIC):].split("|",2)
hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
hash.update(host)
if hash.digest() == kn_host.decode('base64'):
return False
except:
# invalid hashed host key, skip it
continue
else:
# standard host file entry
if host in tokens[0]:
return False
if (hfiles_not_found == len(host_file_list)):
vvv("EXEC previous known host file not found for %s" % host)
return True
def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
''' run a command on the remote host '''
if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
ssh_cmd = self._password_cmd()
ssh_cmd += ["ssh", "-C"]
if not in_data:
# we can only use tty when we are not pipelining the modules. piping data into /usr/bin/python
# inside a tty automatically invokes the python interactive-mode but the modules are not
# compatible with the interactive-mode ("unexpected indent" mainly because of empty lines)
ssh_cmd += ["-tt"]
if utils.VERBOSITY > 3:
ssh_cmd += ["-vvv"]
else:
if self.runner.module_name == 'raw':
ssh_cmd += ["-q"]
else:
ssh_cmd += ["-v"]
ssh_cmd += self.common_args
if self.ipv6:
ssh_cmd += ['-6']
ssh_cmd += [self.host]
if self.runner.become and sudoable:
becomecmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe)
ssh_cmd.append(becomecmd)
else:
prompt = None
if executable:
ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd))
else:
ssh_cmd.append(cmd)
vvv("EXEC %s" % ' '.join(ssh_cmd), host=self.host)
not_in_host_file = self.not_in_host_file(self.host)
if C.HOST_KEY_CHECKING and not_in_host_file:
# lock around the initial SSH connectivity so the user prompt about whether to add
# the host to known hosts is not intermingled with multiprocess output.
fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
# create process
(p, stdin) = self._run(ssh_cmd, in_data)
self._send_password()
no_prompt_out = ''
no_prompt_err = ''
if sudoable and self.runner.become and self.runner.become_pass:
# several cases are handled for escalated privileges with password
# * NOPASSWD (tty & no-tty): detect success_key on stdout
# * without NOPASSWD:
# * detect prompt on stdout (tty)
# * detect prompt on stderr (no-tty)
fcntl.fcntl(p.stdout, fcntl.F_SETFL,
fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL,
fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
become_output = ''
become_errput = ''
while True:
if success_key in become_output or \
(prompt and become_output.endswith(prompt)) or \
utils.su_prompts.check_su_prompt(become_output):
break
rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
[p.stdout], self.runner.timeout)
if p.stderr in rfd:
chunk = p.stderr.read()
if not chunk:
raise errors.AnsibleError('ssh connection closed waiting for a privilege escalation password prompt')
become_errput += chunk
incorrect_password = gettext.dgettext(
"become", "Sorry, try again.")
if become_errput.strip().endswith("%s%s" % (prompt, incorrect_password)):
raise errors.AnsibleError('Incorrect become password')
elif prompt and become_errput.endswith(prompt):
stdin.write(self.runner.become_pass + '\n')
if p.stdout in rfd:
chunk = p.stdout.read()
if not chunk:
raise errors.AnsibleError('ssh connection closed waiting for %s password prompt' % self.runner.become_method)
become_output += chunk
if not rfd:
# timeout. wrap up process communication
stdout = p.communicate()
raise errors.AnsibleError('ssh connection error while waiting for %s password prompt' % self.runner.become_method)
if success_key in become_output:
no_prompt_out += become_output
no_prompt_err += become_errput
elif sudoable:
stdin.write(self.runner.become_pass + '\n')
(returncode, stdout, stderr) = self._communicate(p, stdin, in_data, sudoable=sudoable, prompt=prompt)
if C.HOST_KEY_CHECKING and not_in_host_file:
# lock around the initial SSH connectivity so the user prompt about whether to add
# the host to known hosts is not intermingled with multiprocess output.
fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or \
'unknown configuration option: ControlPersist' in stderr
if C.HOST_KEY_CHECKING:
if ssh_cmd[0] == "sshpass" and p.returncode == 6:
raise errors.AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
if p.returncode != 0 and controlpersisterror:
raise errors.AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')
if p.returncode == 255 and (in_data or self.runner.module_name == 'raw'):
raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
if p.returncode == 255:
ip = None
port = None
for line in stderr.splitlines():
match = re.search(
'Connecting to .*\[(\d+\.\d+\.\d+\.\d+)\] port (\d+)',
line)
if match:
ip = match.group(1)
port = match.group(2)
if 'UNPROTECTED PRIVATE KEY FILE' in stderr:
lines = [line for line in stderr.splitlines()
if 'ignore key:' in line]
else:
lines = stderr.splitlines()[-1:]
if ip and port:
lines.append(' while connecting to %s:%s' % (ip, port))
lines.append(
'It is sometimes useful to re-run the command using -vvvv, '
'which prints SSH debug output to help diagnose the issue.')
raise errors.AnsibleError('SSH Error: %s' % '\n'.join(lines))
return (p.returncode, '', no_prompt_out + stdout, no_prompt_err + stderr)
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
if not os.path.exists(in_path):
raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
cmd = self._password_cmd()
host = self.host
if self.ipv6:
host = '[%s]' % host
if C.DEFAULT_SCP_IF_SSH:
cmd += ["scp"] + self.common_args
cmd += [in_path,host + ":" + pipes.quote(out_path)]
indata = None
else:
cmd += ["sftp"] + self.common_args + [host]
indata = "put %s %s\n" % (pipes.quote(in_path), pipes.quote(out_path))
(p, stdin) = self._run(cmd, indata)
self._send_password()
(returncode, stdout, stderr) = self._communicate(p, stdin, indata)
if returncode != 0:
raise errors.AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr))
def fetch_file(self, in_path, out_path):
''' fetch a file from remote to local '''
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
cmd = self._password_cmd()
host = self.host
if self.ipv6:
host = '[%s]' % host
if C.DEFAULT_SCP_IF_SSH:
cmd += ["scp"] + self.common_args
cmd += [host + ":" + in_path, out_path]
indata = None
else:
cmd += ["sftp"] + self.common_args + [host]
indata = "get %s %s\n" % (in_path, out_path)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self._send_password()
stdout, stderr = p.communicate(indata)
if p.returncode != 0:
raise errors.AnsibleError("failed to transfer file from %s:\n%s\n%s" % (in_path, stdout, stderr))
def close(self):
''' not applicable since we're executing openssh binaries '''
pass
| gpl-3.0 |
AccelAI/accel.ai | flask-aws/lib/python2.7/site-packages/boto/dynamodb/__init__.py | 145 | 1697 | # Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo, get_regions
def regions():
"""
Get all available regions for the Amazon DynamoDB service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
import boto.dynamodb.layer2
return get_regions('dynamodb', connection_cls=boto.dynamodb.layer2.Layer2)
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
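# Illustrative usage sketch (not part of the original module; credentials
# are resolved through the usual boto config/environment lookup):
#
# import boto.dynamodb
# conn = boto.dynamodb.connect_to_region('us-east-1')
# if conn is None:
#     raise ValueError('unknown DynamoDB region')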
| mit |
vitorio/kivy | kivy/input/postproc/ignorelist.py | 59 | 1365 | '''
Ignore list
===========
Ignore touch on some areas of the screen
'''
__all__ = ('InputPostprocIgnoreList', )
from kivy.config import Config
from kivy.utils import strtotuple
class InputPostprocIgnoreList(object):
'''
InputPostprocIgnoreList is a post-processor which removes touches in the
Ignore list. The Ignore list can be configured in the Kivy config file::
[postproc]
# Format: [(xmin, ymin, xmax, ymax), ...]
ignore = [(0.1, 0.1, 0.15, 0.15)]
The Ignore list coordinates are in the range 0-1, not in screen pixels.
'''
def __init__(self):
self.ignore_list = strtotuple(Config.get('postproc', 'ignore'))
def collide_ignore(self, touch):
x, y = touch.sx, touch.sy
for l in self.ignore_list:
xmin, ymin, xmax, ymax = l
if x > xmin and x < xmax and y > ymin and y < ymax:
return True
def process(self, events):
if not len(self.ignore_list):
return events
for etype, touch in events:
if not touch.is_touch:
continue
if etype != 'begin':
continue
if self.collide_ignore(touch):
touch.ud.__pp_ignore__ = True
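# Only 'begin' events are checked; flagging the touch in its user
# dictionary lets the comprehension below drop the matching 'update'
# and 'end' events for that touch as well.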
return [(etype, touch) for etype, touch in events
if not '__pp_ignore__' in touch.ud]
| mit |
robin-lai/scikit-learn | doc/tutorial/machine_learning_map/pyparsing.py | 258 | 137838 | # module pyparsing.py
#
# Copyright (c) 2003-2008 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#from __future__ import generators
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form "<salutation>, <addressee>!")::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
print hello, "->", greet.parseString( hello )
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The parsed results returned from parseString() can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "1.4.11"
__versionTime__ = "10 February 2008 17:28"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy,sys
import warnings
import re
import sre_constants
import xml.sax.saxutils
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
"""
Detect if we are running version 3.X and make appropriate changes
Robert A. Clark
"""
if sys.version_info[0] > 2:
__MAX_INT__ = sys.maxsize
__BASE_STRING__ = str
else:
__MAX_INT__ = sys.maxint
__BASE_STRING__ = basestring
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
then < returns the unicode object | encodes it with the default encoding | ... >.
"""
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# The Python docs (http://docs.python.org/ref/customization.html#l2h-182)
# state that "The return value must be a string object". However, does a
# unicode object (being a subclass of basestring) count as a "string
# object"?
# If so, then return a unicode object:
return unicode(obj)
# Else encode it... but how? There are many choices... :)
# Replace unprintables with escape codes?
#return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors')
# Replace unprintables with question marks?
#return unicode(obj).encode(sys.getdefaultencoding(), 'replace')
# ...
def _str2dict(strg):
return dict( [(c,0) for c in strg] )
#~ return set( [c for c in strg] )
class _Constants(object):
pass
alphas = string.lowercase + string.uppercase
nums = string.digits
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = "\\"
printables = "".join( [ c for c in string.printable if c not in string.whitespace ] )
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
__slots__ = ( "loc","msg","pstr","parserElement" )
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, pstr, loc=0, msg=None, elem=None ):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parserElement = elem
def __getattr__( self, aname ):
"""supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
if( aname == "lineno" ):
return lineno( self.loc, self.pstr )
elif( aname in ("col", "column") ):
return col( self.loc, self.pstr )
elif( aname == "line" ):
return line( self.loc, self.pstr )
else:
raise AttributeError, aname
def __str__( self ):
return "%s (at char %d), (line:%d, col:%d)" % \
( self.msg, self.loc, self.lineno, self.column )
def __repr__( self ):
return _ustr(self)
def markInputline( self, markerString = ">!<" ):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
"""
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join( [line_str[:line_column],
markerString, line_str[line_column:]])
return line_str.strip()
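# Example (an illustrative sketch): a failed parse can be reported using the
# exception's line/column attributes and markInputline():
#~ try:
#~ Word(nums).parseString("abc")
#~ except ParseException, pe:
#~ print pe.line, "(line %d, col %d)" % (pe.lineno, pe.col)
#~ print pe.markInputline() # -> '>!<abc'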
class ParseException(ParseBaseException):
"""exception thrown when parse expressions don't match class;
supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
pass
class ParseFatalException(ParseBaseException):
"""user-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately"""
pass
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
"""exception thrown by validate() if the grammar could be improperly recursive"""
def __init__( self, parseElementList ):
self.parseElementTrace = parseElementList
def __str__( self ):
return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup)
class ParseResults(object):
"""Structured parse results, to provide multiple means of access to the parsed data:
- as a list (len(results))
- by list index (results[0], results[1], etc.)
- by attribute (results.<resultsName>)
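For example (an illustrative sketch; the grammar and results names are
hypothetical)::
date = Word(nums)("year") + "/" + Word(nums)("month") + "/" + Word(nums)("day")
result = date.parseString("2008/02/10")
result[0] # -> '2008' (by list index)
result.month # -> '02' (by attribute)
result.asList() # -> ['2008', '/', '02', '/', '10']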
"""
__slots__ = ( "__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__" )
def __new__(cls, toklist, name=None, asList=True, modal=True ):
if isinstance(toklist, cls):
return toklist
retobj = object.__new__(cls)
retobj.__doinit = True
return retobj
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, toklist, name=None, asList=True, modal=True ):
if self.__doinit:
self.__doinit = False
self.__name = None
self.__parent = None
self.__accumNames = {}
if isinstance(toklist, list):
self.__toklist = toklist[:]
else:
self.__toklist = [toklist]
self.__tokdict = dict()
# this line is related to debugging the asXML bug
#~ asList = False
if name:
if not modal:
self.__accumNames[name] = 0
if isinstance(name,int):
name = _ustr(name) # will always return a str, but use _ustr for consistency
self.__name = name
if toklist not in (None,'',[]):
if isinstance(toklist,__BASE_STRING__):
toklist = [ toklist ]
if asList:
if isinstance(toklist,ParseResults):
self[name] = _ParseResultsWithOffset(toklist.copy(),-1)
else:
self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),-1)
self[name].__name = name
else:
try:
self[name] = toklist[0]
except (KeyError,TypeError):
self[name] = toklist
def __getitem__( self, i ):
if isinstance( i, (int,slice) ):
return self.__toklist[i]
else:
if i not in self.__accumNames:
return self.__tokdict[i][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[i] ])
def __setitem__( self, k, v ):
if isinstance(v,_ParseResultsWithOffset):
self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
sub = v[0]
elif isinstance(k,int):
self.__toklist[k] = v
sub = v
else:
self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
sub = v
if isinstance(sub,ParseResults):
sub.__parent = wkref(self)
def __delitem__( self, i ):
if isinstance(i,(int,slice)):
mylen = len( self.__toklist )
del self.__toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i+1)
# get removed indices
removed = range(*i.indices(mylen))
removed.reverse()
# fixup indices in token dictionary
for name in self.__tokdict.keys():
occurrences = self.__tokdict[name]
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
else:
del self.__tokdict[i]
def __contains__( self, k ):
return k in self.__tokdict
def __len__( self ): return len( self.__toklist )
def __bool__(self): return len( self.__toklist ) > 0
def __nonzero__( self ): return self.__bool__()
def __iter__( self ): return iter( self.__toklist )
def __reversed__( self ): return iter( reversed(self.__toklist) )
def keys( self ):
"""Returns all named result keys."""
return self.__tokdict.keys()
def pop( self, index=-1 ):
"""Removes and returns item at specified index (default=last).
Will work with either numeric indices or dict-key indices."""
ret = self[index]
del self[index]
return ret
def get(self, key, defaultValue=None):
"""Returns named result matching the given key, or if there is no
such name, then returns the given defaultValue or None if no
defaultValue is specified."""
if key in self:
return self[key]
else:
return defaultValue
def insert( self, index, insStr ):
self.__toklist.insert(index, insStr)
# fixup indices in token dictionary
for name in self.__tokdict.keys():
occurrences = self.__tokdict[name]
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
def items( self ):
"""Returns all named result keys and values as a list of tuples."""
return [(k,self[k]) for k in self.__tokdict.keys()]
def values( self ):
"""Returns all named result values."""
return [ v[-1][0] for v in self.__tokdict.values() ]
def __getattr__( self, name ):
if name not in self.__slots__:
if name in self.__tokdict:
if name not in self.__accumNames:
return self.__tokdict[name][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[name] ])
else:
return ""
return None
def __add__( self, other ):
ret = self.copy()
ret += other
return ret
def __iadd__( self, other ):
if other.__tokdict:
offset = len(self.__toklist)
addoffset = ( lambda a: (a<0 and offset) or (a+offset) )
otheritems = other.__tokdict.items()
otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
for (k,vlist) in otheritems for v in vlist]
for k,v in otherdictitems:
self[k] = v
if isinstance(v[0],ParseResults):
v[0].__parent = wkref(self)
self.__toklist += other.__toklist
self.__accumNames.update( other.__accumNames )
del other
return self
def __repr__( self ):
return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
def __str__( self ):
out = "["
sep = ""
for i in self.__toklist:
if isinstance(i, ParseResults):
out += sep + _ustr(i)
else:
out += sep + repr(i)
sep = ", "
out += "]"
return out
def _asStringList( self, sep='' ):
out = []
for item in self.__toklist:
if out and sep:
out.append(sep)
if isinstance( item, ParseResults ):
out += item._asStringList()
else:
out.append( _ustr(item) )
return out
def asList( self ):
"""Returns the parse results as a nested list of matching tokens, all converted to strings."""
out = []
for res in self.__toklist:
if isinstance(res,ParseResults):
out.append( res.asList() )
else:
out.append( res )
return out
def asDict( self ):
"""Returns the named parse results as dictionary."""
return dict( self.items() )
def copy( self ):
"""Returns a new copy of a ParseResults object."""
ret = ParseResults( self.__toklist )
ret.__tokdict = self.__tokdict.copy()
ret.__parent = self.__parent
ret.__accumNames.update( self.__accumNames )
ret.__name = self.__name
return ret
def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
"""Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
nl = "\n"
out = []
namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items()
for v in vlist ] )
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [ nl, indent, "<", selfTag, ">" ]
worklist = self.__toklist
for i,res in enumerate(worklist):
if isinstance(res,ParseResults):
if i in namedItems:
out += [ res.asXML(namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
out += [ res.asXML(None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = xml.sax.saxutils.escape(_ustr(res))
out += [ nl, nextLevelIndent, "<", resTag, ">",
xmlBodyText,
"</", resTag, ">" ]
out += [ nl, indent, "</", selfTag, ">" ]
return "".join(out)
def __lookup(self,sub):
for k,vlist in self.__tokdict.items():
for v,loc in vlist:
if sub is v:
return k
return None
def getName(self):
"""Returns the results name for this token expression."""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
elif (len(self) == 1 and
len(self.__tokdict) == 1 and
self.__tokdict.values()[0][0][1] in (0,-1)):
return self.__tokdict.keys()[0]
else:
return None
def dump(self,indent='',depth=0):
"""Diagnostic method for listing out the contents of a ParseResults.
Accepts an optional indent argument so that this string can be embedded
in a nested display of other data."""
out = []
out.append( indent+_ustr(self.asList()) )
keys = self.items()
keys.sort()
for k,v in keys:
if out:
out.append('\n')
out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
if isinstance(v,ParseResults):
if v.keys():
#~ out.append('\n')
out.append( v.dump(indent,depth+1) )
#~ out.append('\n')
else:
out.append(_ustr(v))
else:
out.append(_ustr(v))
#~ out.append('\n')
return "".join(out)
# add support for pickle protocol
def __getstate__(self):
return ( self.__toklist,
( self.__tokdict.copy(),
self.__parent is not None and self.__parent() or None,
self.__accumNames,
self.__name ) )
def __setstate__(self,state):
self.__toklist = state[0]
self.__tokdict, \
par, \
inAccumNames, \
self.__name = state[1]
self.__accumNames = {}
self.__accumNames.update(inAccumNames)
if par is not None:
self.__parent = wkref(par)
else:
self.__parent = None
def col (loc,strg):
"""Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing <TAB>s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return (loc<len(strg) and strg[loc] == '\n') and 1 or loc - strg.rfind("\n", 0, loc)
def lineno(loc,strg):
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing <TAB>s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return strg.count("\n",0,loc) + 1
def line( loc, strg ):
"""Returns the line of text containing loc within a string, counting newlines as line separators.
"""
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR > 0:
return strg[lastCR+1:nextCR]
else:
return strg[lastCR+1:]
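# Example (illustrative): for instring = "ab\ncd", loc 4 points at 'd', so:
#~ lineno(4, "ab\ncd") # -> 2
#~ col(4, "ab\ncd") # -> 2
#~ line(4, "ab\ncd") # -> 'cd'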
def _defaultStartDebugAction( instring, loc, expr ):
print ("Match " + _ustr(expr) + " at loc " + _ustr(loc) + " (%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
print ("Matched " + _ustr(expr) + " -> " + _ustr(toks.asList()))
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
print ("Exception raised: " + _ustr(exc))
def nullDebugAction(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
pass
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
def setDefaultWhitespaceChars( chars ):
"""Overrides the default whitespace chars
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars)
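# Example (illustrative): to keep newlines significant for all expressions
# created afterwards, restrict the default whitespace set:
#~ ParserElement.setDefaultWhitespaceChars(" \t")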
def __init__( self, savelist=False ):
self.parseAction = list()
self.failAction = None
#~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
self.errmsg = ""
self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = ( None, None, None ) #custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy( self ):
"""Make a copy of this ParserElement. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element."""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""Define name for this expression, for use in debugging."""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original ParserElement object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
"""
newself = self.copy()
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def setBreak(self,breakFlag = True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set breakFlag to True to enable, False to
disable.
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
return _parseMethod( instring, loc, doActions, callPreParse )
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse,"_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def _normalizeParseActionArgs( f ):
"""Internal method used to decorate parse actions that take fewer than 3 arguments,
so that all parse actions can be called as f(s,l,t)."""
STAR_ARGS = 4
try:
restore = None
if isinstance(f,type):
restore = f
f = f.__init__
if f.func_code.co_flags & STAR_ARGS:
return f
numargs = f.func_code.co_argcount
if hasattr(f,"im_self"):
numargs -= 1
if restore:
f = restore
except AttributeError:
try:
# not a function, must be a callable object, get info from the
# im_func binding of its bound __call__ method
if f.__call__.im_func.func_code.co_flags & STAR_ARGS:
return f
numargs = f.__call__.im_func.func_code.co_argcount
if hasattr(f.__call__,"im_self"):
numargs -= 1
except AttributeError:
# not a bound method, get info directly from __call__ method
if f.__call__.func_code.co_flags & STAR_ARGS:
return f
numargs = f.__call__.func_code.co_argcount
if hasattr(f.__call__,"im_self"):
numargs -= 1
#~ print ("adding function %s with %d args" % (f.func_name,numargs))
if numargs == 3:
return f
else:
if numargs == 2:
def tmp(s,l,t):
return f(l,t)
elif numargs == 1:
def tmp(s,l,t):
return f(t)
else: #~ numargs == 0:
def tmp(s,l,t):
return f()
try:
tmp.__name__ = f.__name__
except AttributeError:
# no need for special handling if attribute doesnt exist
pass
try:
tmp.__doc__ = f.__doc__
except AttributeError:
# no need for special handling if attribute doesnt exist
pass
try:
tmp.__dict__.update(f.__dict__)
except AttributeError:
# no need for special handling if attribute doesnt exist
pass
return tmp
_normalizeParseActionArgs = staticmethod(_normalizeParseActionArgs)
def setParseAction( self, *fns, **kwargs ):
"""Define action to perform when successfully matching parse element definition.
Parse action fn is a callable with 0-3 arguments, called as fn(s,loc,toks),
fn(loc,toks), fn(toks), or just fn(), where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a ParseResults object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing <TAB>s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
self.parseAction = list(map(self._normalizeParseActionArgs, list(fns)))
self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
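# Example (an illustrative sketch): a parse action that converts matched
# numeric strings to ints; returning a value replaces the matched tokens:
#~ def convertToInt(s, loc, toks):
#~ return int(toks[0])
#~ integer = Word(nums).setParseAction(convertToInt)
#~ integer.parseString("42")[0] # -> 42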
def addParseAction( self, *fns, **kwargs ):
"""Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}."""
self.parseAction += list(map(self._normalizeParseActionArgs, list(fns)))
self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
Fail action fn is a callable function that takes the arguments
fn(s,loc,expr,err) where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw ParseFatalException
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
def _skipIgnorables( self, instring, loc ):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc,dummy = e._parse( instring, loc )
exprsFound = True
except ParseException:
pass
return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
return loc, []
def postParse( self, instring, loc, tokenlist ):
return tokenlist
#~ @profile
def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
debugging = ( self.debug ) #and doActions )
if debugging or self.failAction:
#~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
if (self.debugActions[0] ):
self.debugActions[0]( instring, loc, self )
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = loc
try:
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
except ParseException, err:
#~ print ("Exception raised:", err)
if self.debugActions[2]:
self.debugActions[2]( instring, tokensStart, self, err )
if self.failAction:
self.failAction( instring, tokensStart, self, err )
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = loc
if self.mayIndexError or loc >= len(instring):
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
else:
loc,tokens = self.parseImpl( instring, preloc, doActions )
tokens = self.postParse( instring, loc, tokens )
retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
except ParseException, err:
#~ print "Exception raised in user parse action:", err
if (self.debugActions[2] ):
self.debugActions[2]( instring, tokensStart, self, err )
raise
else:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
if debugging:
#~ print ("Matched",self,"->",retTokens.asList())
if (self.debugActions[1] ):
self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
return loc, retTokens
def tryParse( self, instring, loc ):
return self._parse( instring, loc, doActions=False )[0]
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
lookup = (self,instring,loc,callPreParse,doActions)
if lookup in ParserElement._exprArgCache:
value = ParserElement._exprArgCache[ lookup ]
if isinstance(value,Exception):
if isinstance(value,ParseBaseException):
value.loc = loc
raise value
return (value[0],value[1].copy())
else:
try:
value = self._parseNoCache( instring, loc, doActions, callPreParse )
ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy())
return value
except ParseBaseException, pe:
ParserElement._exprArgCache[ lookup ] = pe
raise
_parse = _parseNoCache
# argument cache for optimizing repeated calls when backtracking through recursive expressions
_exprArgCache = {}
def resetCache():
ParserElement._exprArgCache.clear()
resetCache = staticmethod(resetCache)
_packratEnabled = False
def enablePackrat():
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing is done for
both valid results and parsing exceptions.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method ParserElement.enablePackrat(). If
your program uses psyco to "compile as you go", you must call
enablePackrat before calling psyco.full(). If you do not do this,
Python will crash. For best results, call enablePackrat() immediately
after importing pyparsing.
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
ParserElement._parse = ParserElement._parseCache
enablePackrat = staticmethod(enablePackrat)
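# Example (illustrative): opt in to packrat memoizing immediately after
# importing the module, before any grammar is constructed:
#~ import pyparsing
#~ pyparsing.ParserElement.enablePackrat()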
def parseString( self, instring ):
"""Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
Note: parseString implicitly calls expandtabs() on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the loc argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling parseWithTabs on your grammar before calling parseString
(see L{I{parseWithTabs}<parseWithTabs>})
- defining your parse action using the full (s,loc,toks) signature, and
referencing the input string using the parse action's s argument
- explicitly expanding the tabs in your input string before calling
parseString
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
#~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if self.keepTabs:
loc, tokens = self._parse( instring, 0 )
else:
loc, tokens = self._parse( instring.expandtabs(), 0 )
return tokens
def scanString( self, instring, maxMatches=__MAX_INT__ ):
"""Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
maxMatches argument, to clip scanning after 'n' matches are found.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs."""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn( instring, loc )
nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
except ParseException:
loc = preloc+1
else:
matches += 1
yield tokens, preloc, nextLoc
loc = nextLoc
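# Example (illustrative): scanString is a generator of (tokens, start, end):
#~ for tokens, start, end in Word(nums).scanString("12 drummers, 11 pipers"):
#~ print tokens[0], "at", start # -> 12 at 0, then 11 at 13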
def transformString( self, instring ):
"""Extension to scanString, to modify matching text with modified tokens that may
be returned from a parse action. To use transformString, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking transformString() on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. transformString() returns the resulting transformed string."""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
return "".join(map(_ustr,out))
def searchString( self, instring, maxMatches=__MAX_INT__ ):
"""Another extension to scanString, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
maxMatches argument, to clip searching after 'n' matches are found.
"""
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
def __add__(self, other ):
"""Implementation of + operator - returns And"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""Implementation of + operator when left operand is not a ParserElement"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __mul__(self,other):
if isinstance(other,int):
minElements, optElements = other,0
elif isinstance(other,tuple):
if len(other)==2:
if isinstance(other[0],int) and isinstance(other[1],int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
else:
raise TypeError("can only multiply 'ParserElement' and int or (int,int) objects")
else:
raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError("second tuple value must be greater or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if (optElements):
def makeOptionalList(n):
if n>1:
return Optional(self + makeOptionalList(n-1))
else:
return Optional(self)
if minElements:
ret = And([self]*minElements)+ makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
ret = And([self]*minElements)
return ret
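# Example (illustrative): multiplication expresses repetition:
#~ Word(nums) * 3 # same as And([Word(nums)]*3) - exactly 3 numeric words
#~ Word(nums) * (2,4) # matches 2, 3, or 4 numeric words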
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other ):
"""Implementation of | operator - returns MatchFirst"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""Implementation of | operator when left operand is not a ParserElement"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other ):
"""Implementation of ^ operator - returns Or"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or( [ self, other ] )
def __rxor__(self, other ):
"""Implementation of ^ operator when left operand is not a ParserElement"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other ):
"""Implementation of & operator - returns Each"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each( [ self, other ] )
def __rand__(self, other ):
"""Implementation of & operator when left operand is not a ParserElement"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
def __invert__( self ):
"""Implementation of ~ operator - returns NotAny"""
return NotAny( self )
def __call__(self, name):
"""Shortcut for setResultsName, with listAllMatches=default::
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
could be written as::
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
"""
return self.setResultsName(name)
def suppress( self ):
"""Suppresses the output of this ParserElement; useful to keep punctuation from
cluttering up returned output.
"""
return Suppress( self )
def leaveWhitespace( self ):
"""Disables the skipping of whitespace before matching the characters in the
ParserElement's defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars( self, chars ):
"""Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
def parseWithTabs( self ):
"""Overrides default behavior to expand <TAB>s to spaces before parsing the input string.
Must be called before parseString when the input grammar contains elements that
match <TAB> characters."""
self.keepTabs = True
return self
def ignore( self, other ):
"""Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
"""
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append( other )
else:
self.ignoreExprs.append( Suppress( other ) )
return self
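# Example (an illustrative sketch, using the cppStyleComment helper defined
# later in this module): ignored expressions may appear anywhere between tokens:
#~ expr = Word(alphanums)
#~ expr.ignore( cppStyleComment )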
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""Enable display of debugging messages while doing pattern matching."""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug( self, flag=True ):
"""Enable display of debugging messages while doing pattern matching.
Set flag to True to enable, False to disable."""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
def __str__( self ):
return self.name
def __repr__( self ):
return _ustr(self)
def streamline( self ):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion( self, parseElementList ):
pass
def validate( self, validateTrace=[] ):
"""Check defined expressions for valid structure, check for infinite recursive definitions."""
self.checkRecursion( [] )
def parseFile( self, file_or_filename ):
"""Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
f = open(file_or_filename, "rb")
file_contents = f.read()
f.close()
return self.parseString(file_contents)
def getException(self):
return ParseException("",0,self.errmsg,self)
def __getattr__(self,aname):
if aname == "myException":
self.myException = ret = self.getException()
return ret
else:
raise AttributeError, "no such attribute " + aname
def __eq__(self,other):
if isinstance(other, __BASE_STRING__):
try:
(self + StringEnd()).parseString(_ustr(other))
return True
except ParseException:
return False
else:
# default comparison for non-string operands is identity
return self is other
def __req__(self,other):
return self == other
class Token(ParserElement):
"""Abstract ParserElement subclass, for defining atomic matching patterns."""
def __init__( self ):
super(Token,self).__init__( savelist=False )
#self.myException = ParseException("",0,"",self)
def setName(self, name):
s = super(Token,self).setName(name)
self.errmsg = "Expected " + self.name
#s.myException.msg = self.errmsg
return s
class Empty(Token):
"""An empty token, will always match."""
def __init__( self ):
super(Empty,self).__init__()
self.name = "Empty"
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""A token that will never match."""
def __init__( self ):
super(NoMatch,self).__init__()
self.name = "NoMatch"
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
#self.myException.msg = self.errmsg
def parseImpl( self, instring, loc, doActions=True ):
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class Literal(Token):
"""Token to exactly match a specified string."""
def __init__( self, matchString ):
super(Literal,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Literal; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.__class__ = Empty
self.name = '"%s"' % _ustr(self.match)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
#self.myException.msg = self.errmsg
self.mayIndexError = False
# Performance tuning: this routine gets called a *lot*
# if this is a single character match string and the first character matches,
# short-circuit as quickly as possible, and avoid calling startswith
#~ @profile
def parseImpl( self, instring, loc, doActions=True ):
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) ):
return loc+self.matchLen, self.match
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
_L = Literal
class Keyword(Token):
"""Token to exactly match a specified string as a keyword, that is, it must be
immediately followed by a non-keyword character. Compare with Literal::
Literal("if") will match the leading 'if' in 'ifAndOnlyIf'.
Keyword("if") will not; it will only match the leading 'if in 'if x=1', or 'if(y==2)'
Accepts two optional constructor arguments in addition to the keyword string:
identChars is a string of characters that would be valid identifier characters,
defaulting to all alphanumerics + "_" and "$"; caseless allows case-insensitive
matching, default is False.
"""
DEFAULT_KEYWORD_CHARS = alphanums+"_$"
def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):
super(Keyword,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Keyword; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.name = '"%s"' % self.match
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
#self.myException.msg = self.errmsg
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = matchString.upper()
identChars = identChars.upper()
self.identChars = _str2dict(identChars)
def parseImpl( self, instring, loc, doActions=True ):
if self.caseless:
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
(loc == 0 or instring[loc-1].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
else:
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
(loc == 0 or instring[loc-1] not in self.identChars) ):
return loc+self.matchLen, self.match
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
def copy(self):
c = super(Keyword,self).copy()
c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
return c
def setDefaultKeywordChars( chars ):
"""Overrides the default Keyword chars
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
setDefaultKeywordChars = staticmethod(setDefaultKeywordChars)
class CaselessLiteral(Literal):
"""Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
"""
def __init__( self, matchString ):
super(CaselessLiteral,self).__init__( matchString.upper() )
# Preserve the defining literal.
self.returnString = matchString
self.name = "'%s'" % self.returnString
self.errmsg = "Expected " + self.name
#self.myException.msg = self.errmsg
def parseImpl( self, instring, loc, doActions=True ):
if instring[ loc:loc+self.matchLen ].upper() == self.match:
return loc+self.matchLen, self.returnString
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class CaselessKeyword(Keyword):
def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ):
super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
def parseImpl( self, instring, loc, doActions=True ):
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class Word(Token):
"""Token for matching words composed of allowed character sets.
Defined with string containing all allowed initial characters,
an optional string containing allowed body characters (if omitted,
defaults to the initial character set), and an optional minimum,
maximum, and/or exact length. The default value for min is 1 (a
minimum value < 1 is not valid); the default values for max and exact
are 0, meaning no maximum or exact length restriction.
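For example (illustrative)::
identifier = Word( alphas+"_", alphanums+"_" )
hexByte = Word( hexnums, exact=2 )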
"""
def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False ):
super(Word,self).__init__()
self.initCharsOrig = initChars
self.initChars = _str2dict(initChars)
if bodyChars :
self.bodyCharsOrig = bodyChars
self.bodyChars = _str2dict(bodyChars)
else:
self.bodyCharsOrig = initChars
self.bodyChars = _str2dict(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError, "cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted"
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = __MAX_INT__
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
#self.myException.msg = self.errmsg
self.mayIndexError = False
self.asKeyword = asKeyword
if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
if self.bodyCharsOrig == self.initCharsOrig:
self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
elif len(self.bodyCharsOrig) == 1:
self.reString = "%s[%s]*" % \
(re.escape(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
else:
self.reString = "[%s][%s]*" % \
(_escapeRegexRangeChars(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
if self.asKeyword:
self.reString = r"\b"+self.reString+r"\b"
try:
self.re = re.compile( self.reString )
except:
self.re = None
def parseImpl( self, instring, loc, doActions=True ):
if self.re:
result = self.re.match(instring,loc)
if not result:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
loc = result.end()
return loc,result.group()
if not(instring[ loc ] in self.initChars):
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min( maxloc, instrlen )
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
if self.asKeyword:
if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
throwException = True
if throwException:
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, instring[start:loc]
def __str__( self ):
try:
return super(Word,self).__str__()
except:
pass
if self.strRepr is None:
def charsAsStr(s):
if len(s)>4:
return s[:4]+"..."
else:
return s
if ( self.initCharsOrig != self.bodyCharsOrig ):
self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
else:
self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
return self.strRepr
class Regex(Token):
"""Token for matching strings that match a given regular expression.
Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
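For example (illustrative; named groups become results names)::
dateExpr = Regex(r"(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})")
dateExpr.parseString("2008-02-10").year # -> '2008'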
"""
def __init__( self, pattern, flags=0):
"""The parameters pattern and flags are passed to the re.compile() function as-is. See the Python re module for an explanation of the acceptable patterns and flags."""
super(Regex,self).__init__()
if len(pattern) == 0:
warnings.warn("null string passed to Regex; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error,e:
warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
SyntaxWarning, stacklevel=2)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
#self.myException.msg = self.errmsg
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = self.re.match(instring,loc)
if not result:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
loc = result.end()
d = result.groupdict()
ret = ParseResults(result.group())
if d:
for k in d.keys():
ret[k] = d[k]
return loc,ret
def __str__( self ):
try:
return super(Regex,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "Re:(%s)" % repr(self.pattern)
return self.strRepr
class QuotedString(Token):
"""Token for matching strings that are delimited by quoting characters.
"""
def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
"""
Defined with the following parameters:
- quoteChar - string of one or more characters defining the quote delimiting string
- escChar - character to escape quotes, typically backslash (default=None)
- escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
- multiline - boolean indicating whether quotes can span multiple lines (default=False)
- unquoteResults - boolean indicating whether the matched text should be unquoted (default=True)
- endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar)
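For example (illustrative)::
sqlString = QuotedString("'", escQuote="''")
sqlString.parseString("'Paul''s house'")[0] # -> "Paul's house"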
"""
super(QuotedString,self).__init__()
# remove white space from quote chars - won't work anyway
quoteChar = quoteChar.strip()
if len(quoteChar) == 0:
warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
if endQuoteChar is None:
endQuoteChar = quoteChar
else:
endQuoteChar = endQuoteChar.strip()
if len(endQuoteChar) == 0:
warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
self.quoteChar = quoteChar
self.quoteCharLen = len(quoteChar)
self.firstQuoteChar = quoteChar[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
if multiline:
self.flags = re.MULTILINE | re.DOTALL
self.pattern = r'%s(?:[^%s%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
else:
self.flags = 0
self.pattern = r'%s(?:[^%s\n\r%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
if len(self.endQuoteChar) > 1:
self.pattern += (
'|(?:' + ')|(?:'.join(["%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
_escapeRegexRangeChars(self.endQuoteChar[i]))
for i in range(len(self.endQuoteChar)-1,0,-1)]) + ')'
)
if escQuote:
self.pattern += (r'|(?:%s)' % re.escape(escQuote))
if escChar:
self.pattern += (r'|(?:%s.)' % re.escape(escChar))
self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error,e:
warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
SyntaxWarning, stacklevel=2)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
#self.myException.msg = self.errmsg
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
if not result:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
if isinstance(ret,__BASE_STRING__):
# replace escaped characters
if self.escChar:
ret = re.sub(self.escCharReplacePattern,r"\g<1>",ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
def __str__( self ):
try:
return super(QuotedString,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
return self.strRepr
class CharsNotIn(Token):
"""Token for matching words composed of characters *not* in a given set.
Defined with string containing all disallowed characters, and an optional
minimum, maximum, and/or exact length. The default value for min is 1 (a
minimum value < 1 is not valid); the default values for max and exact
are 0, meaning no maximum or exact length restriction.
"""
def __init__( self, notChars, min=1, max=0, exact=0 ):
super(CharsNotIn,self).__init__()
self.skipWhitespace = False
self.notChars = notChars
if min < 1:
raise ValueError, "cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted"
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = __MAX_INT__
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = ( self.minLen == 0 )
#self.myException.msg = self.errmsg
self.mayIndexError = False
def parseImpl( self, instring, loc, doActions=True ):
if instring[loc] in self.notChars:
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
start = loc
loc += 1
notchars = self.notChars
maxlen = min( start+self.maxLen, len(instring) )
while loc < maxlen and \
(instring[loc] not in notchars):
loc += 1
if loc - start < self.minLen:
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, instring[start:loc]
def __str__( self ):
try:
return super(CharsNotIn, self).__str__()
except:
pass
if self.strRepr is None:
if len(self.notChars) > 4:
self.strRepr = "!W:(%s...)" % self.notChars[:4]
else:
self.strRepr = "!W:(%s)" % self.notChars
return self.strRepr
class White(Token):
"""Special matching class for matching whitespace. Normally, whitespace is ignored
by pyparsing grammars. This class is included when some whitespace structures
are significant. Define with a string containing the whitespace characters to be
matched; default is " \\t\\n". Also takes optional min, max, and exact arguments,
as defined for the Word class."""
whiteStrs = {
" " : "<SPC>",
"\t": "<TAB>",
"\n": "<LF>",
"\r": "<CR>",
"\f": "<FF>",
}
def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
super(White,self).__init__()
self.matchWhite = ws
self.setWhitespaceChars( "".join([c for c in self.whiteChars if c not in self.matchWhite]) )
#~ self.leaveWhitespace()
self.name = ("".join([White.whiteStrs[c] for c in self.matchWhite]))
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
#self.myException.msg = self.errmsg
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = __MAX_INT__
if exact > 0:
self.maxLen = exact
self.minLen = exact
def parseImpl( self, instring, loc, doActions=True ):
if not(instring[ loc ] in self.matchWhite):
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min( maxloc, len(instring) )
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, instring[start:loc]
class _PositionToken(Token):
def __init__( self ):
super(_PositionToken,self).__init__()
self.name=self.__class__.__name__
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(_PositionToken):
"""Token to advance to a specific column of input text; useful for tabular report scraping."""
def __init__( self, colno ):
super(GoToColumn,self).__init__()
self.col = colno
def preParse( self, instring, loc ):
if col(loc,instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
thiscol = col( loc, instring )
if thiscol > self.col:
raise ParseException( instring, loc, "Text not in expected column", self )
newloc = loc + self.col - thiscol
ret = instring[ loc: newloc ]
return newloc, ret
class LineStart(_PositionToken):
"""Matches if current position is at the beginning of a line within the parse string"""
def __init__( self ):
super(LineStart,self).__init__()
self.setWhitespaceChars( " \t" )
self.errmsg = "Expected start of line"
#self.myException.msg = self.errmsg
def preParse( self, instring, loc ):
preloc = super(LineStart,self).preParse(instring,loc)
if instring[preloc] == "\n":
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
if not( loc==0 or
(loc == self.preParse( instring, 0 )) or
(instring[loc-1] == "\n") ): #col(loc, instring) != 1:
#~ raise ParseException( instring, loc, "Expected start of line" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
class LineEnd(_PositionToken):
"""Matches if current position is at the end of a line within the parse string"""
def __init__( self ):
super(LineEnd,self).__init__()
self.setWhitespaceChars( " \t" )
self.errmsg = "Expected end of line"
#self.myException.msg = self.errmsg
def parseImpl( self, instring, loc, doActions=True ):
if loc<len(instring):
if instring[loc] == "\n":
return loc+1, "\n"
else:
#~ raise ParseException( instring, loc, "Expected end of line" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
elif loc == len(instring):
return loc+1, []
else:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class StringStart(_PositionToken):
"""Matches if current position is at the beginning of the parse string"""
def __init__( self ):
super(StringStart,self).__init__()
self.errmsg = "Expected start of text"
#self.myException.msg = self.errmsg
def parseImpl( self, instring, loc, doActions=True ):
if loc != 0:
# see if entire string up to here is just whitespace and ignoreables
if loc != self.preParse( instring, 0 ):
#~ raise ParseException( instring, loc, "Expected start of text" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
class StringEnd(_PositionToken):
"""Matches if current position is at the end of the parse string"""
def __init__( self ):
super(StringEnd,self).__init__()
self.errmsg = "Expected end of text"
#self.myException.msg = self.errmsg
def parseImpl( self, instring, loc, doActions=True ):
if loc < len(instring):
#~ raise ParseException( instring, loc, "Expected end of text" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
elif loc == len(instring):
return loc+1, []
elif loc > len(instring):
return loc, []
else:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class WordStart(_PositionToken):
"""Matches if the current position is at the beginning of a Word, and
is not preceded by any character in a given set of wordChars
(default=printables). To emulate the \\b behavior of regular expressions,
use WordStart(alphanums). WordStart will also match at the beginning of
the string being parsed, or at the beginning of a line.
"""
def __init__(self, wordChars = printables):
super(WordStart,self).__init__()
self.wordChars = _str2dict(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True ):
if loc != 0:
if (instring[loc-1] in self.wordChars or
instring[loc] not in self.wordChars):
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
class WordEnd(_PositionToken):
"""Matches if the current position is at the end of a Word, and
is not followed by any character in a given set of wordChars
(default=printables). To emulate the \b behavior of regular expressions,
use WordEnd(alphanums). WordEnd will also match at the end of
the string being parsed, or at the end of a line.
"""
def __init__(self, wordChars = printables):
super(WordEnd,self).__init__()
self.wordChars = _str2dict(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True ):
instrlen = len(instring)
if instrlen>0 and loc<instrlen:
if (instring[loc] in self.wordChars or
instring[loc-1] not in self.wordChars):
#~ raise ParseException( instring, loc, "Expected end of word" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
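# --- Editor's illustrative sketch (not part of the original module) ---
# WordStart/WordEnd emulate regex-style \b word boundaries; the grammar and
# sample input are assumptions for the demo:
#
#   kw = WordStart(alphanums) + Literal("if") + WordEnd(alphanums)
#   print [t[0] for t,s,e in kw.scanString("if endif ifdef if")]
#   # -> ['if', 'if']   ('if' inside 'endif'/'ifdef' is rejected)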
class ParseExpression(ParserElement):
"""Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
def __init__( self, exprs, savelist = False ):
super(ParseExpression,self).__init__(savelist)
if isinstance( exprs, list ):
self.exprs = exprs
        elif isinstance( exprs, basestring ):
self.exprs = [ Literal( exprs ) ]
else:
self.exprs = [ exprs ]
self.callPreparse = False
def __getitem__( self, i ):
return self.exprs[i]
def append( self, other ):
self.exprs.append( other )
self.strRepr = None
return self
def leaveWhitespace( self ):
"""Extends leaveWhitespace defined in base class, and also invokes leaveWhitespace on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [ e.copy() for e in self.exprs ]
for e in self.exprs:
e.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
else:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
return self
def __str__( self ):
try:
return super(ParseExpression,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
return self.strRepr
def streamline( self ):
super(ParseExpression,self).streamline()
for e in self.exprs:
e.streamline()
# collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for Or's and MatchFirst's)
if ( len(self.exprs) == 2 ):
other = self.exprs[0]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = other.exprs[:] + [ self.exprs[1] ]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = self.exprs[:-1] + other.exprs[:]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
return self
def setResultsName( self, name, listAllMatches=False ):
ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
return ret
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
for e in self.exprs:
e.validate(tmp)
self.checkRecursion( [] )
class And(ParseExpression):
"""Requires all given ParseExpressions to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the '+' operator.
"""
def __init__( self, exprs, savelist = True ):
super(And,self).__init__(exprs, savelist)
self.mayReturnEmpty = True
for e in self.exprs:
if not e.mayReturnEmpty:
self.mayReturnEmpty = False
break
self.setWhitespaceChars( exprs[0].whiteChars )
self.skipWhitespace = exprs[0].skipWhitespace
self.callPreparse = True
def parseImpl( self, instring, loc, doActions=True ):
# pass False as last arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
for e in self.exprs[1:]:
loc, exprtokens = e._parse( instring, loc, doActions )
if exprtokens or exprtokens.keys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other ):
        if isinstance( other, basestring ):
other = Literal( other )
return self.append( other ) #And( [ self, other ] )
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
if not e.mayReturnEmpty:
break
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
class Or(ParseExpression):
"""Requires that at least one ParseExpression is found.
If two expressions match, the expression that matches the longest string will be used.
May be constructed using the '^' operator.
"""
def __init__( self, exprs, savelist = False ):
super(Or,self).__init__(exprs, savelist)
self.mayReturnEmpty = False
for e in self.exprs:
if e.mayReturnEmpty:
self.mayReturnEmpty = True
break
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxMatchLoc = -1
for e in self.exprs:
try:
loc2 = e.tryParse( instring, loc )
except ParseException, err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
else:
if loc2 > maxMatchLoc:
maxMatchLoc = loc2
maxMatchExp = e
if maxMatchLoc < 0:
if self.exprs:
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
return maxMatchExp._parse( instring, loc, doActions )
def __ixor__(self, other ):
        if isinstance( other, basestring ):
other = Literal( other )
return self.append( other ) #Or( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ^ ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
"""Requires that at least one ParseExpression is found.
If two expressions match, the first one listed is the one that will match.
May be constructed using the '|' operator.
"""
def __init__( self, exprs, savelist = False ):
super(MatchFirst,self).__init__(exprs, savelist)
if exprs:
self.mayReturnEmpty = False
for e in self.exprs:
if e.mayReturnEmpty:
self.mayReturnEmpty = True
break
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
for e in self.exprs:
try:
ret = e._parse( instring, loc, doActions )
return ret
except ParseException, err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
# only got here if no expression matched, raise exception for match that made it the furthest
else:
if self.exprs:
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ior__(self, other ):
        if isinstance( other, basestring ):
other = Literal( other )
return self.append( other ) #MatchFirst( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " | ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
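# --- Editor's illustrative sketch (not part of the original module) ---
# Or ('^') picks the longest match, MatchFirst ('|') picks the first listed
# alternative; the expressions below are assumptions for the demo:
#
#   shortw = Word(alphas, max=3)
#   longw  = Word(alphas)
#   print (shortw | longw).parseString("alphabet").asList()   # -> ['alp']
#   print (shortw ^ longw).parseString("alphabet").asList()   # -> ['alphabet']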
class Each(ParseExpression):
"""Requires all given ParseExpressions to be found, but in any order.
Expressions may be separated by whitespace.
May be constructed using the '&' operator.
"""
def __init__( self, exprs, savelist = True ):
super(Each,self).__init__(exprs, savelist)
self.mayReturnEmpty = True
for e in self.exprs:
if not e.mayReturnEmpty:
self.mayReturnEmpty = False
break
self.skipWhitespace = True
self.optionals = [ e.expr for e in exprs if isinstance(e,Optional) ]
self.multioptionals = [ e.expr for e in exprs if isinstance(e,ZeroOrMore) ]
self.multirequired = [ e.expr for e in exprs if isinstance(e,OneOrMore) ]
self.required = [ e for e in exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
self.required += self.multirequired
def parseImpl( self, instring, loc, doActions=True ):
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse( instring, tmpLoc )
except ParseException:
failed.append(e)
else:
matchOrder.append(e)
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
if tmpReqd:
missing = ", ".join( [ _ustr(e) for e in tmpReqd ] )
raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
resultlist = []
for e in matchOrder:
loc,results = e._parse(instring,loc,doActions)
resultlist.append(results)
finalResults = ParseResults([])
for r in resultlist:
dups = {}
for k in r.keys():
if k in finalResults.keys():
tmp = ParseResults(finalResults[k])
tmp += ParseResults(r[k])
dups[k] = tmp
finalResults += ParseResults(r)
for k,v in dups.items():
finalResults[k] = v
return loc, finalResults
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " & ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
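# --- Editor's illustrative sketch (not part of the original module) ---
# Each ('&') requires all pieces, but in any order; names below are assumed:
#
#   color = Keyword("red") | Keyword("blue")
#   size  = Keyword("small") | Keyword("large")
#   spec  = color & size
#   print spec.parseString("large red").asList()   # -> ['large', 'red']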
class ParseElementEnhance(ParserElement):
"""Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
def __init__( self, expr, savelist=False ):
super(ParseElementEnhance,self).__init__(savelist)
        if isinstance( expr, basestring ):
expr = Literal(expr)
self.expr = expr
self.strRepr = None
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.setWhitespaceChars( expr.whiteChars )
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr is not None:
return self.expr._parse( instring, loc, doActions, callPreParse=False )
else:
raise ParseException("",loc,self.errmsg,self)
def leaveWhitespace( self ):
self.skipWhitespace = False
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
else:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
return self
def streamline( self ):
super(ParseElementEnhance,self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
def checkRecursion( self, parseElementList ):
if self in parseElementList:
raise RecursiveGrammarException( parseElementList+[self] )
subRecCheckList = parseElementList[:] + [ self ]
if self.expr is not None:
self.expr.checkRecursion( subRecCheckList )
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion( [] )
def __str__( self ):
try:
return super(ParseElementEnhance,self).__str__()
except:
pass
if self.strRepr is None and self.expr is not None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
return self.strRepr
class FollowedBy(ParseElementEnhance):
"""Lookahead matching of the given parse expression. FollowedBy
does *not* advance the parsing position within the input string, it only
verifies that the specified parse expression matches at the current
position. FollowedBy always returns a null token list."""
def __init__( self, expr ):
super(FollowedBy,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
self.expr.tryParse( instring, loc )
return loc, []
class NotAny(ParseElementEnhance):
"""Lookahead to disallow matching with the given parse expression. NotAny
does *not* advance the parsing position within the input string, it only
verifies that the specified parse expression does *not* match at the current
position. Also, NotAny does *not* skip over leading whitespace. NotAny
always returns a null token list. May be constructed using the '~' operator."""
def __init__( self, expr ):
super(NotAny,self).__init__(expr)
#~ self.leaveWhitespace()
self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, "+_ustr(self.expr)
#self.myException = ParseException("",0,self.errmsg,self)
def parseImpl( self, instring, loc, doActions=True ):
try:
self.expr.tryParse( instring, loc )
except (ParseException,IndexError):
pass
else:
#~ raise ParseException(instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "~{" + _ustr(self.expr) + "}"
return self.strRepr
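# --- Editor's illustrative sketch (not part of the original module) ---
# FollowedBy and NotAny are zero-width lookaheads; the grammar is assumed:
#
#   ident = ~Keyword("end") + Word(alphas)     # any word except the keyword 'end'
#   print ident.parseString("begin").asList()  # -> ['begin']
#   # ident.parseString("end") would raise ParseException
#   peek = FollowedBy(Word(nums))              # asserts a number follows, consumes nothing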
class ZeroOrMore(ParseElementEnhance):
"""Optional repetition of zero or more of the given expression."""
def __init__( self, expr ):
super(ZeroOrMore,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
tokens = []
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
while 1:
if hasIgnoreExprs:
preloc = self._skipIgnorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self.expr._parse( instring, preloc, doActions )
if tmptokens or tmptokens.keys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]..."
return self.strRepr
def setResultsName( self, name, listAllMatches=False ):
ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches)
ret.saveAsList = True
return ret
class OneOrMore(ParseElementEnhance):
"""Repetition of one or more of the given expression."""
def parseImpl( self, instring, loc, doActions=True ):
# must be at least one
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
try:
hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
while 1:
if hasIgnoreExprs:
preloc = self._skipIgnorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self.expr._parse( instring, preloc, doActions )
if tmptokens or tmptokens.keys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + _ustr(self.expr) + "}..."
return self.strRepr
def setResultsName( self, name, listAllMatches=False ):
ret = super(OneOrMore,self).setResultsName(name,listAllMatches)
ret.saveAsList = True
return ret
class _NullToken(object):
def __bool__(self):
return False
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
"""Optional matching of the given expression.
A default return string can also be specified, if the optional expression
is not found.
"""
def __init__( self, exprs, default=_optionalNotMatched ):
super(Optional,self).__init__( exprs, savelist=False )
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
except (ParseException,IndexError):
if self.defaultValue is not _optionalNotMatched:
tokens = [ self.defaultValue ]
else:
tokens = []
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]"
return self.strRepr
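# --- Editor's illustrative sketch (not part of the original module) ---
# Optional with a default value; grammar and inputs are assumptions:
#
#   greeting = Literal("hello") + Optional(Literal("world"), default="there")
#   print greeting.parseString("hello").asList()        # -> ['hello', 'there']
#   print greeting.parseString("hello world").asList()  # -> ['hello', 'world']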
class SkipTo(ParseElementEnhance):
"""Token for skipping over all undefined text until the matched expression is found.
    If include is set to True, the matched expression is also consumed. The ignore
argument is used to define grammars (typically quoted strings and comments) that
might contain false matches.
"""
def __init__( self, other, include=False, ignore=None ):
super( SkipTo, self ).__init__( other )
if ignore is not None:
self.expr = self.expr.copy()
self.expr.ignore(ignore)
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.asList = False
self.errmsg = "No match found for "+_ustr(self.expr)
#self.myException = ParseException("",0,self.errmsg,self)
def parseImpl( self, instring, loc, doActions=True ):
startLoc = loc
instrlen = len(instring)
expr = self.expr
while loc <= instrlen:
try:
loc = expr._skipIgnorables( instring, loc )
expr._parse( instring, loc, doActions=False, callPreParse=False )
if self.includeMatch:
skipText = instring[startLoc:loc]
loc,mat = expr._parse(instring,loc,doActions,callPreParse=False)
if mat:
skipRes = ParseResults( skipText )
skipRes += mat
return loc, [ skipRes ]
else:
return loc, [ skipText ]
else:
return loc, [ instring[startLoc:loc] ]
except (ParseException,IndexError):
loc += 1
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
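# --- Editor's illustrative sketch (not part of the original module) ---
# SkipTo gathers undefined text up to a target; the sample input is assumed.
# With include=True the skipped text and the match come back grouped:
#
#   stmt = SkipTo(";", include=True)
#   print stmt.parseString("anything at all;").asList()
#   # -> [['anything at all', ';']]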
class Forward(ParseElementEnhance):
"""Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the Forward variable using the '<<' operator.
Note: take care when assigning to Forward not to overlook precedence of operators.
Specifically, '|' has a lower precedence than '<<', so that::
fwdExpr << a | b | c
will actually be evaluated as::
(fwdExpr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the Forward::
fwdExpr << (a | b | c)
"""
def __init__( self, other=None ):
super(Forward,self).__init__( other, savelist=False )
def __lshift__( self, other ):
        if isinstance( other, basestring ):
other = Literal(other)
self.expr = other
self.mayReturnEmpty = other.mayReturnEmpty
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.setWhitespaceChars( self.expr.whiteChars )
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return None
def leaveWhitespace( self ):
self.skipWhitespace = False
return self
def streamline( self ):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate( self, validateTrace=[] ):
if self not in validateTrace:
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__( self ):
if hasattr(self,"name"):
return self.name
self.__class__ = _ForwardNoRecurse
try:
if self.expr is not None:
retString = _ustr(self.expr)
else:
retString = "None"
finally:
self.__class__ = Forward
return "Forward: "+retString
def copy(self):
if self.expr is not None:
return super(Forward,self).copy()
else:
ret = Forward()
ret << self
return ret
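# --- Editor's illustrative sketch (not part of the original module) ---
# A classic Forward use: a recursive, parenthesized arithmetic grammar
# (names assumed for the demo; note the parentheses around the '<<' operand):
#
#   expr = Forward()
#   atom = Word(nums) | (Suppress("(") + expr + Suppress(")"))
#   expr << ( atom + ZeroOrMore( (Literal("+") | Literal("-")) + atom ) )
#   print expr.parseString("(1+2)-3").asList()   # -> ['1', '+', '2', '-', '3']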
class _ForwardNoRecurse(Forward):
def __str__( self ):
return "..."
class TokenConverter(ParseElementEnhance):
"""Abstract subclass of ParseExpression, for converting parsed results."""
def __init__( self, expr, savelist=False ):
super(TokenConverter,self).__init__( expr )#, savelist )
self.saveAsList = False
class Upcase(TokenConverter):
"""Converter to upper case all matching tokens."""
def __init__(self, *args):
super(Upcase,self).__init__(*args)
warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead",
DeprecationWarning,stacklevel=2)
def postParse( self, instring, loc, tokenlist ):
return map( string.upper, tokenlist )
class Combine(TokenConverter):
"""Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the input string;
this can be disabled by specifying 'adjacent=False' in the constructor.
"""
def __init__( self, expr, joinString="", adjacent=True ):
super(Combine,self).__init__( expr )
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leaveWhitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
def ignore( self, other ):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super( Combine, self).ignore( other )
return self
def postParse( self, instring, loc, tokenlist ):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
if self.resultsName and len(retToks.keys())>0:
return [ retToks ]
else:
return retToks
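# --- Editor's illustrative sketch (not part of the original module) ---
# Combine fuses adjacent tokens into a single string token; grammar assumed:
#
#   ipAddress = Combine(Word(nums) + "." + Word(nums) + "." + Word(nums) + "." + Word(nums))
#   print ipAddress.parseString("192.168.0.1").asList()   # -> ['192.168.0.1']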
class Group(TokenConverter):
"""Converter to return the matched tokens as a list - useful for returning tokens of ZeroOrMore and OneOrMore expressions."""
def __init__( self, expr ):
super(Group,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
return [ tokenlist ]
class Dict(TokenConverter):
"""Converter to return a repetitive expression as a list, but also as a dictionary.
Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as an item key.
"""
def __init__( self, exprs ):
super(Dict,self).__init__( exprs )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
for i,tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey,int):
ikey = _ustr(tok[0]).strip()
if len(tok)==1:
tokenlist[ikey] = _ParseResultsWithOffset("",i)
elif len(tok)==2 and not isinstance(tok[1],ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
else:
dictvalue = tok.copy() #ParseResults(i)
del dictvalue[0]
if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.keys()):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
if self.resultsName:
return [ tokenlist ]
else:
return tokenlist
class Suppress(TokenConverter):
"""Converter for ignoring the results of a parsed expression."""
def postParse( self, instring, loc, tokenlist ):
return []
def suppress( self ):
return self
class OnlyOnce(object):
"""Wrapper for parse actions, to ensure they are only called once."""
def __init__(self, methodCall):
self.callable = ParserElement._normalizeParseActionArgs(methodCall)
self.called = False
def __call__(self,s,l,t):
if not self.called:
results = self.callable(s,l,t)
self.called = True
return results
raise ParseException(s,l,"")
def reset(self):
self.called = False
def traceParseAction(f):
"""Decorator for debugging parse actions."""
f = ParserElement._normalizeParseActionArgs(f)
def z(*paArgs):
thisFunc = f.func_name
s,l,t = paArgs[-3:]
if len(paArgs)>3:
thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) )
try:
ret = f(*paArgs)
except Exception, exc:
sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
raise
sys.stderr.write( "<<leaving %s (ret: %s)\n" % (thisFunc,ret) )
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
"""Helper to define a delimited list of expressions - the delimiter defaults to ','.
    By default, the list elements and delimiters can have intervening whitespace
    and comments, but this can be overridden by passing 'combine=True' in the constructor.
If combine is set to True, the matching tokens are returned as a single token
string, with the delimiters included; otherwise, the matching tokens are returned
as a list of tokens, with the delimiters suppressed.
"""
dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
if combine:
return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
else:
return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
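# --- Editor's illustrative sketch (not part of the original module) ---
# delimitedList with and without combine; inputs assumed. Note that
# combine=True uses Combine, so the items must be adjacent (no spaces):
#
#   print delimitedList(Word(alphas)).parseString("aa, bb, cc").asList()
#   # -> ['aa', 'bb', 'cc']
#   print delimitedList(Word(alphas), combine=True).parseString("aa,bb,cc").asList()
#   # -> ['aa,bb,cc']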
def countedArray( expr ):
"""Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
    The matched tokens are returned as a list of expr tokens - the leading count token is suppressed.
"""
arrayExpr = Forward()
def countFieldParseAction(s,l,t):
n = int(t[0])
arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
return []
return ( Word(nums).setName("arrayLen").setParseAction(countFieldParseAction, callDuringTry=True) + arrayExpr )
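# --- Editor's illustrative sketch (not part of the original module) ---
# countedArray demo; input assumed. The leading count is consumed and
# suppressed, and the counted items come back grouped:
#
#   print countedArray(Word(alphas)).parseString("3 ab cd ef").asList()
#   # -> [['ab', 'cd', 'ef']]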
def _flatten(L):
if type(L) is not list: return [L]
if L == []: return L
return _flatten(L[0]) + _flatten(L[1:])
def matchPreviousLiteral(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match "1:1", but not "1:2". Because this matches a
previous literal, will also match the leading "1:1" in "1:10".
If this is not desired, use matchPreviousExpr.
Do *not* use with packrat parsing enabled.
"""
rep = Forward()
def copyTokenToRepeater(s,l,t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.asList())
rep << And( [ Literal(tt) for tt in tflat ] )
else:
rep << Empty()
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
return rep
def matchPreviousExpr(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second
will match "1:1", but not "1:2". Because this matches by
expressions, will *not* match the leading "1:1" in "1:10";
the expressions are evaluated first, and then compared, so
"1" is compared with "10".
Do *not* use with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep << e2
def copyTokenToRepeater(s,l,t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s,l,t):
theseTokens = _flatten(t.asList())
if theseTokens != matchTokens:
raise ParseException("",0,"")
rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
return rep
def _escapeRegexRangeChars(s):
#~ escape these chars: ^-]
for c in r"\^-]":
s = s.replace(c,"\\"+c)
s = s.replace("\n",r"\n")
s = s.replace("\t",r"\t")
return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
"""Helper to quickly define a set of alternative Literals, and makes sure to do
longest-first testing when there is a conflict, regardless of the input order,
but returns a MatchFirst for best performance.
Parameters:
- strs - a string of space-delimited literals, or a list of string literals
- caseless - (default=False) - treat all literals as caseless
- useRegex - (default=True) - as an optimization, will generate a Regex
object; otherwise, will generate a MatchFirst object (if caseless=True, or
if creating a Regex raises an exception)
"""
if caseless:
isequal = ( lambda a,b: a.upper() == b.upper() )
masks = ( lambda a,b: b.upper().startswith(a.upper()) )
parseElementClass = CaselessLiteral
else:
isequal = ( lambda a,b: a == b )
masks = ( lambda a,b: b.startswith(a) )
parseElementClass = Literal
if isinstance(strs,(list,tuple)):
symbols = strs[:]
    elif isinstance(strs,basestring):
symbols = strs.split()
else:
warnings.warn("Invalid argument to oneOf, expected string or list",
SyntaxWarning, stacklevel=2)
i = 0
while i < len(symbols)-1:
cur = symbols[i]
for j,other in enumerate(symbols[i+1:]):
if ( isequal(other, cur) ):
del symbols[i+j+1]
break
elif ( masks(cur, other) ):
del symbols[i+j+1]
symbols.insert(i,other)
cur = other
break
else:
i += 1
if not caseless and useRegex:
#~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
try:
if len(symbols)==len("".join(symbols)):
return Regex( "[%s]" % "".join( [ _escapeRegexRangeChars(sym) for sym in symbols] ) )
else:
return Regex( "|".join( [ re.escape(sym) for sym in symbols] ) )
except:
warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning, stacklevel=2)
# last resort, just use MatchFirst
return MatchFirst( [ parseElementClass(sym) for sym in symbols ] )
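# --- Editor's illustrative sketch (not part of the original module) ---
# oneOf orders alternatives longest-first regardless of how they are listed:
#
#   op = oneOf("< > = <= >= !=")
#   print op.parseString("<=").asList()   # -> ['<='], not ['<']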
def dictOf( key, value ):
"""Helper to easily and clearly define a dictionary by specifying the respective patterns
for the key and value. Takes care of defining the Dict, ZeroOrMore, and Group tokens
in the proper order. The key pattern can include delimiting markers or punctuation,
as long as they are suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the Dict results can include named token
fields.
"""
return Dict( ZeroOrMore( Group ( key + value ) ) )
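# --- Editor's illustrative sketch (not part of the original module) ---
# dictOf demo; key/value patterns and input are assumptions. The suppressed
# ':' keeps only the significant key text:
#
#   attrs = dictOf(Word(alphas) + Suppress(":"), Word(nums))
#   result = attrs.parseString("width: 100 height: 50")
#   print result["width"], result["height"]   # -> 100 50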
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_printables_less_backslash = "".join([ c for c in printables if c not in r"\]" ])
_escapedHexChar = Combine( Suppress(_bslash + "0x") + Word(hexnums) ).setParseAction(lambda s,l,t:unichr(int(t[0],16)))
_escapedOctChar = Combine( Suppress(_bslash) + Word("0","01234567") ).setParseAction(lambda s,l,t:unichr(int(t[0],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(_printables_less_backslash,exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
_expanded = lambda p: (isinstance(p,ParseResults) and ''.join([ unichr(c) for c in range(ord(p[0]),ord(p[1])+1) ]) or p)
def srange(s):
r"""Helper to easily define string ranges for use in Word construction. Borrows
syntax from regexp '[]' string range definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string is the expanded
character set joined into a single string.
The values enclosed in the []'s may be::
a single character
an escaped character with a leading backslash (such as \- or \])
an escaped hex character with a leading '\0x' (\0x21, which is a '!' character)
an escaped octal character with a leading '\0' (\041, which is a '!' character)
a range of any of the above, separated by a dash ('a-z', etc.)
any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
"""
try:
return "".join([_expanded(part) for part in _reBracketExpr.parseString(s).body])
except:
return ""
def matchOnlyAtCol(n):
"""Helper method for defining parse actions that require matching at a specific
column in the input text.
"""
def verifyCol(strg,locn,toks):
if col(locn,strg) != n:
raise ParseException(strg,locn,"matched token not at column %d" % n)
return verifyCol
def replaceWith(replStr):
"""Helper method for common parse actions that simply return a literal value. Especially
useful when used with transformString().
"""
def _replFunc(*args):
return [replStr]
return _replFunc
def removeQuotes(s,l,t):
"""Helper parse action for removing quotation marks from parsed quoted strings.
To use, add this parse action to quoted string using::
quotedString.setParseAction( removeQuotes )
"""
return t[0][1:-1]
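# --- Editor's illustrative sketch (not part of the original module) ---
# The small parse-action helpers above plug straight into setParseAction;
# quotedString is defined further down in this module:
#
#   quoted = quotedString.copy().setParseAction(removeQuotes)
#   print quoted.parseString('"some text"').asList()   # -> ['some text']
#   oui   = Keyword("oui").setParseAction(replaceWith("yes"))
#   col8  = Word(nums).setParseAction(matchOnlyAtCol(8))  # only succeeds at column 8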
def upcaseTokens(s,l,t):
"""Helper parse action to convert tokens to upper case."""
return [ tt.upper() for tt in map(_ustr,t) ]
def downcaseTokens(s,l,t):
"""Helper parse action to convert tokens to lower case."""
return [ tt.lower() for tt in map(_ustr,t) ]
def keepOriginalText(s,startLoc,t):
"""Helper parse action to preserve original parsed text,
overriding any nested parse actions."""
try:
endloc = getTokensEndLoc()
except ParseException:
raise ParseFatalException, "incorrect usage of keepOriginalText - may only be called as a parse action"
del t[:]
t += ParseResults(s[startLoc:endloc])
return t
def getTokensEndLoc():
"""Method to be called from within a parse action to determine the end
location of the parsed tokens."""
import inspect
fstack = inspect.stack()
try:
# search up the stack (through intervening argument normalizers) for correct calling routine
for f in fstack[2:]:
if f[3] == "_parseNoCache":
endloc = f[0].f_locals["loc"]
return endloc
else:
raise ParseFatalException, "incorrect usage of getTokensEndLoc - may only be called from within a parse action"
finally:
del fstack
def _makeTags(tagStr, xml):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
    if isinstance(tagStr,basestring):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas,alphanums+"_-:")
if (xml):
tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
openTag = Suppress("<") + tagStr + \
Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
else:
printablesLessRAbrack = "".join( [ c for c in printables if c not in ">" ] )
tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
openTag = Suppress("<") + tagStr + \
Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
Optional( Suppress("=") + tagAttrValue ) ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
closeTag = Combine(_L("</") + tagStr + ">")
openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr)
closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % tagStr)
return openTag, closeTag
def makeHTMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for HTML, given a tag name"""
return _makeTags( tagStr, False )
def makeXMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for XML, given a tag name"""
return _makeTags( tagStr, True )
def withAttribute(*args,**attrDict):
"""Helper to create a validating parse action to be used with start tags created
with makeXMLTags or makeHTMLTags. Use withAttribute to qualify a starting tag
with a required attribute value, to avoid false matches on common tags such as
<TD> or <DIV>.
Call withAttribute with a series of attribute names and values. Specify the list
of filter attributes names and values as:
    - keyword arguments, as in (align="right"), or
    - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
    For attribute names with a namespace prefix, or names that are Python
    reserved words (such as class), you must use the second form. Attribute
    names are matched case-insensitively.
To verify that the attribute exists, but without specifying a value, pass
withAttribute.ANY_VALUE as the value.
"""
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k,v) for k,v in attrs]
def pa(s,l,tokens):
for attrName,attrValue in attrs:
if attrName not in tokens:
raise ParseException(s,l,"no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
(attrName, tokens[attrName], attrValue))
return pa
withAttribute.ANY_VALUE = object()
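# --- Editor's illustrative sketch (not part of the original module) ---
# withAttribute paired with makeHTMLTags; tag and attribute are assumed:
#
#   td, tdEnd = makeHTMLTags("td")
#   dataCell = td.copy().setParseAction(withAttribute(align="right"))
#   # dataCell now matches '<td align="right">' but rejects a plain '<td>'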
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def operatorPrecedence( baseExpr, opList ):
"""Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary or
binary, left- or right-associative. Parse actions can also be attached
to operator expressions.
Parameters:
    - baseExpr - expression representing the most basic element of the nested grammar
- opList - list of tuples, one for each operator precedence level in the
expression grammar; each tuple is of the form
(opExpr, numTerms, rightLeftAssoc, parseAction), where:
- opExpr is the pyparsing expression for the operator;
may also be a string, which will be converted to a Literal
- numTerms is the number of terms for this operator (must
be 1 or 2)
- rightLeftAssoc is the indicator whether the operator is
right or left associative, using the pyparsing-defined
constants opAssoc.RIGHT and opAssoc.LEFT.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the
parse action tuple member may be omitted)
"""
ret = Forward()
lastExpr = baseExpr | ( Suppress('(') + ret + Suppress(')') )
for i,operDef in enumerate(opList):
opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
thisExpr = Forward()#.setName("expr%d" % i)
if rightLeftAssoc == opAssoc.LEFT:
if arity == 1:
matchExpr = Group( FollowedBy(lastExpr + opExpr) + lastExpr + OneOrMore( opExpr ) )
elif arity == 2:
matchExpr = Group( FollowedBy(lastExpr + opExpr + lastExpr) + lastExpr + OneOrMore( opExpr + lastExpr ) )
else:
raise ValueError, "operator must be unary (1) or binary (2)"
elif rightLeftAssoc == opAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Optional):
opExpr = Optional(opExpr)
matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
elif arity == 2:
matchExpr = Group( FollowedBy(lastExpr + opExpr + thisExpr) + lastExpr + OneOrMore( opExpr + thisExpr ) )
else:
raise ValueError, "operator must be unary (1) or binary (2)"
else:
raise ValueError, "operator must indicate right or left associativity"
if pa:
matchExpr.setParseAction( pa )
thisExpr << ( matchExpr | lastExpr )
lastExpr = thisExpr
ret << lastExpr
return ret
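# --- Editor's illustrative sketch (not part of the original module) ---
# An operatorPrecedence arithmetic grammar; names and input assumed:
#
#   integer = Word(nums).setParseAction(lambda t: int(t[0]))
#   arith = operatorPrecedence(integer, [
#       (oneOf("* /"), 2, opAssoc.LEFT),
#       (oneOf("+ -"), 2, opAssoc.LEFT),
#   ])
#   print arith.parseString("1+2*3").asList()   # -> [[1, '+', [2, '*', 3]]]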
dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes")
sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes")
quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy())
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString):
"""Helper method for defining nested lists enclosed in opening and closing
delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list (default="("); can also be a pyparsing expression
- closer - closing character for a nested list (default=")"); can also be a pyparsing expression
- content - expression for items within the nested lists (default=None)
- ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)
If an expression is not provided for the content argument, the nested
expression will capture all whitespace-delimited content between delimiters
as a list of separate values.
Use the ignoreExpr argument to define expressions that may contain
opening or closing characters that should not be treated as opening
or closing characters for nesting, such as quotedString or a comment
expression. Specify multiple expressions using an Or or MatchFirst.
The default is quotedString, but if no expressions are to be ignored,
then pass None for this argument.
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
        if isinstance(opener,basestring) and isinstance(closer,basestring):
content = (empty+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS).setParseAction(lambda t:t[0].strip()))
else:
raise ValueError("opening and closing arguments must be strings if no content expression is given")
ret = Forward()
if ignoreExpr is not None:
ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
else:
ret << Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
return ret
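# --- Editor's illustrative sketch (not part of the original module) ---
# nestedExpr with the default '(' ')' delimiters; input assumed:
#
#   print nestedExpr().parseString("(a (b c) d)").asList()
#   # -> [['a', ['b', 'c'], 'd']]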
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:"))
commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";")
_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),"><& '"))
replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment")
htmlComment = Regex(r"<!--[\s\S]*?-->")
restOfLine = Regex(r".*").leaveWhitespace()
dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment")
cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))").setName("C++ style comment")
javaStyleComment = cppStyleComment
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
_noncomma = "".join( [ c for c in printables if c != "," ] )
_commasepitem = Combine(OneOrMore(Word(_noncomma) +
Optional( Word(" \t") +
~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString | _commasepitem, default="") ).setName("commaSeparatedList")
if __name__ == "__main__":
def test( teststring ):
print (teststring,"->",)
try:
tokens = simpleSQL.parseString( teststring )
tokenlist = tokens.asList()
print (tokenlist)
print ("tokens = ", tokens)
print ("tokens.columns =", tokens.columns)
print ("tokens.tables =", tokens.tables)
print (tokens.asXML("SQL",True))
except ParseException,err:
print (err.line)
print (" "*(err.column-1) + "^")
print (err)
print()
selectToken = CaselessLiteral( "select" )
fromToken = CaselessLiteral( "from" )
ident = Word( alphas, alphanums + "_$" )
columnName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
columnNameList = Group( delimitedList( columnName ) )#.setName("columns")
tableName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
tableNameList = Group( delimitedList( tableName ) )#.setName("tables")
simpleSQL = ( selectToken + \
( '*' | columnNameList ).setResultsName( "columns" ) + \
fromToken + \
tableNameList.setResultsName( "tables" ) )
test( "SELECT * from XYZZY, ABC" )
test( "select * from SYS.XYZZY" )
test( "Select A from Sys.dual" )
test( "Select AA,BB,CC from Sys.dual" )
test( "Select A, B, C from Sys.dual" )
test( "Select A, B, C from Sys.dual" )
test( "Xelect A, B, C from Sys.dual" )
test( "Select A, B, C frox Sys.dual" )
test( "Select" )
test( "Select ^^^ frox Sys.dual" )
test( "Select A, B, C from Sys.dual, Table2 " )
| bsd-3-clause |
jaumemarti/l10n-spain-txerpa | l10n_es_payment_order/wizard/csb32.py | 1 | 10991 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2006 ACYSOS S.L. (http://acysos.com)
# Pedro Tarrafeta <pedro@acysos.com>
# Copyright (c) 2008 Pablo Rocandio. All Rights Reserved.
# Copyright (c) 2009 Zikzakmedia S.L. (http://zikzakmedia.com)
# Jordi Esteve <jesteve@zikzakmedia.com>
# $Id$
#
# Fixed for a standard TinyERP 4.2.0 installation: Zikzakmedia S.L. 2008
# Jordi Esteve <jesteve@zikzakmedia.com>
#
# Added remittance accounts and payment types. 2008
# Pablo Rocandio <salbet@gmail.com>
#
# Rewritten from scratch for an OpenERP 5.0.0 installation on top of
# account_payment_extension: Zikzakmedia S.L. 2009
# Jordi Esteve <jesteve@zikzakmedia.com>
#
# Refactoring. Acysos S.L. (http://www.acysos.com) 2012
# Ignacio Ibeas <ignacio@acysos.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
from datetime import datetime
from openerp.tools.translate import _
from .log import Log
class Csb32(orm.Model):
_name = 'csb.32'
_auto = False
def _cabecera_fichero_32(self, cr, uid):
converter = self.pool['payment.converter.spain']
texto = '0265'
texto += ' '
texto += datetime.today().strftime('%d%m%y')
texto += converter.digits_only(cr, uid, self.order.reference)[-4:]
texto += ' '*35
texto += converter.digits_only(
cr, uid, self.order.mode.bank_id.acc_number)[:8]
texto += ' '*6
texto += ' '*61
texto += ' '*24
texto += '\r\n'
if len(texto) != 152:
raise Log(_('Configuration error:\n\nThe line "%s" is not 150 '
'characters long:\n%s') %
('Cabecera fichero 32', texto), True)
return texto
def _cabecera_remesa_32(self, cr, uid, context):
converter = self.pool['payment.converter.spain']
# A:
texto = '1165'
texto += ' '
# B
texto += datetime.today().strftime('%d%m%y')
texto += '0001'
texto += ' '*12
# C
        # TODO: assignor ("cedente") identifier. What is it exactly?
texto += converter.convert(
cr, uid, self.order.mode.cedente, 15, context)
        texto += '1'  # Truncated-bills ("efectos truncados") indicator
texto += ' '*21
# D
texto += converter.digits_only(cr, uid,
self.order.mode.bank_id.acc_number)
texto += converter.digits_only(cr, uid,
self.order.mode.bank_id.acc_number)
texto += converter.digits_only(cr, uid,
self.order.mode.bank_id.acc_number)
texto += ' ' + ' '*24
texto += '\r\n'
if len(texto) != 152:
raise Log(_('Configuration error:\n\nThe line "%s" is not 150 '
'characters long:\n%s') %
('Cabecera remesa 32', texto), True)
return texto
def _registro_individual_i_32(self, cr, uid, recibo, context):
converter = self.pool['payment.converter.spain']
# A
texto = '2565'
texto += ' '
# B
texto += converter.convert(cr, uid, self.num_recibos+1, 15, context)
texto += datetime.today().strftime('%d%m%y')
texto += '0001'
# C
state = (self.order.mode.bank_id.state_id and
self.order.mode.bank_id.state_id.code or False)
texto += converter.convert(cr, uid, state, 2, context)
texto += ' '*7
texto += ' '
# D
texto += converter.convert(
cr, uid, self.order.mode.bank_id.city, 20, context)
texto += ' '
# E
texto += ' '*24
texto += converter.convert(cr, uid, abs(recibo['amount']), 9, context)
texto += ' '*15
texto += datetime.strptime(
recibo['ml_maturity_date'], '%Y-%m-%d').strftime('%d%m%y')
texto += ' '*(6+6+1+4+16)
texto += '\r\n'
if len(texto) != 152:
raise Log(_('Configuration error:\n\nThe line "%s" is not 150 '
'characters long:\n%s') %
('Registro individual I 32', texto), True)
return texto
def _registro_individual_ii_32(self, cr, uid, recibo, context):
converter = self.pool['payment.converter.spain']
        # A: Operation identification
texto = '2665'
texto += ' '
        # B: Bill data
texto += converter.convert(cr, uid, self.num_recibos+1, 15, context)
texto += ' '
        texto += '2'  # Receipt
texto += '000000'
texto += '1'
        # 0 = no charges, 1 = with charges, 9 = express order of notarial protest
texto += '0'
        # C: Bill data
ccc = recibo['bank_id'] and recibo['bank_id'].acc_number or ''
if ccc:
texto += ccc[:20].zfill(20)
else:
texto += ' '*20
        # D: Bill data
texto += converter.convert(cr, uid, self.order.mode.partner_id.name,
34, context)
texto += converter.convert(
cr, uid, recibo['partner_id'].name, 34, context)
texto += ' '*30
texto += '\r\n'
if len(texto) != 152:
raise Log(_('Configuration error:\n\nThe line "%s" is not 150 '
'characters long:\n%s') %
('Registro individual II 32', texto), True)
return texto
def _registro_individual_iii_32(self, cr, uid, recibo, context):
converter = self.pool['payment.converter.spain']
        # A: Operation identification
texto = '2765'
texto += ' '
        # B: Bill data
texto += converter.convert(cr, uid, self.num_recibos+1, 15, context)
texto += ' '
addresses = self.pool['res.partner'].address_get(
cr, uid, [recibo['partner_id'].id])
address = self.pool['res.partner'].browse(cr, uid,
addresses['default'],
context)
texto += converter.convert(cr, uid, address.street, 34, context)
texto += converter.convert(cr, uid, address.zip, 5, context)
texto += converter.convert(cr, uid, address.city, 20, context)
texto += converter.convert(cr, uid, address.state_id and
address.state_id.code or False, 2, context)
texto += '0'*7
        # C: Bill data
vat = (recibo['partner_id'].vat and recibo['partner_id'].vat[2:] or
False)
texto += converter.convert(cr, uid, vat, 9, context)
texto += ' '*50
texto += '\r\n'
if len(texto) != 152:
raise Log(_('Configuration error:\n\nThe line "%s" is not 150 '
'characters long:\n%s') %
('Registro individual III 32', texto), True)
return texto
def _registro_fin_remesa_32(self, cr, uid, context):
converter = self.pool['payment.converter.spain']
        # A: Operation identification
texto = '7165'
texto += ' '
        # B: Duplicate control
texto += datetime.today().strftime('%d%m%y')
texto += '0001'
texto += ' '*(6+6)
        # C: Free (unused)
texto += ' '*37
        # D: Amount accumulators
texto += ' '*10
texto += converter.convert(cr, uid, abs(self.order.total), 10, context)
texto += ' '*(10+6+7+6+6+6)
        # E: File read controls
texto += ' '*5
texto += converter.convert(cr, uid, (self.num_recibos*3) + 2, 7,
context)
texto += converter.convert(cr, uid, self.num_recibos, 6, context)
texto += ' '*6
texto += '\r\n'
if len(texto) != 152:
raise Log(_('Configuration error:\n\nThe line "%s" is not 150 '
'characters long:\n%s') %
('Fin remesa 32', texto), True)
return texto
def _registro_fin_fichero_32(self, cr, uid, context):
converter = self.pool['payment.converter.spain']
        # A: Operation identification
texto = '9865'
texto += ' '
        # B: Free (unused)
texto += ' '*22
        # C: Free (unused)
texto += ' '*37
        # D: Amount accumulators
texto += ' '*10
texto += converter.convert(cr, uid, abs(self.order.total), 10, context)
texto += ' '*(10+6+7+6+6+6)
        # E: File read controls
texto += '00001'
texto += converter.convert(cr, uid, (self.num_recibos*3) + 3, 7,
context)
texto += converter.convert(cr, uid, self.num_recibos, 6, context)
texto += ' '*6
texto += '\r\n'
if len(texto) != 152:
raise Log(_('Configuration error:\n\nThe line "%s" is not 150 '
'characters long:\n%s') % ('Fin fichero 32', texto),
True)
return texto
def create_file(self, cr, uid, order, lines, context):
self.order = order
txt_remesa = ''
self.num_recibos = 0
self.num_lineas_opc = 0
txt_remesa += self._cabecera_fichero_32(cr, uid)
txt_remesa += self._cabecera_remesa_32(cr, uid, context)
for recibo in lines:
txt_remesa += self._registro_individual_i_32(cr, uid, recibo,
context)
txt_remesa += self._registro_individual_ii_32(cr, uid, recibo,
context)
txt_remesa += self._registro_individual_iii_32(cr, uid, recibo,
context)
self.num_recibos = self.num_recibos + 1
txt_remesa += self._registro_fin_remesa_32(cr, uid, context)
txt_remesa += self._registro_fin_fichero_32(cr, uid, context)
return txt_remesa
| agpl-3.0 |
NCI-Cloud/horizon | openstack_dashboard/dashboards/project/database_backups/tables.py | 33 | 6058 | # Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.template import defaultfilters as d_filters
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import tables
from horizon.utils import filters
from openstack_dashboard import api
STATUS_CHOICES = (
("BUILDING", None),
("COMPLETED", True),
("DELETE_FAILED", False),
("FAILED", False),
("NEW", None),
("SAVING", None),
)
STATUS_DISPLAY_CHOICES = (
("BUILDING", pgettext_lazy("Current status of a Database Backup",
u"Building")),
("COMPLETED", pgettext_lazy("Current status of a Database Backup",
u"Completed")),
("DELETE_FAILED", pgettext_lazy("Current status of a Database Backup",
u"Delete Failed")),
("FAILED", pgettext_lazy("Current status of a Database Backup",
u"Failed")),
("NEW", pgettext_lazy("Current status of a Database Backup",
u"New")),
("SAVING", pgettext_lazy("Current status of a Database Backup",
u"Saving")),
)
class LaunchLink(tables.LinkAction):
name = "create"
verbose_name = _("Create Backup")
url = "horizon:project:database_backups:create"
classes = ("ajax-modal", "btn-create")
icon = "camera"
class RestoreLink(tables.LinkAction):
name = "restore"
verbose_name = _("Restore Backup")
url = "horizon:project:databases:launch"
classes = ("ajax-modal",)
icon = "cloud-upload"
def allowed(self, request, backup=None):
return backup.status == 'COMPLETED'
def get_link_url(self, datum):
url = reverse(self.url)
return url + '?backup=%s' % datum.id
class DownloadBackup(tables.LinkAction):
name = "download"
verbose_name = _("Download Backup")
url = 'horizon:project:containers:object_download'
classes = ("btn-download",)
def get_link_url(self, datum):
ref = datum.locationRef.split('/')
container_name = ref[5]
object_path = '/'.join(ref[6:])
return reverse(self.url,
kwargs={'container_name': container_name,
'object_path': object_path})
def allowed(self, request, datum):
return datum.status == 'COMPLETED'
class DeleteBackup(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Backup",
u"Delete Backups",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Backup",
u"Deleted Backups",
count
)
def delete(self, request, obj_id):
api.trove.backup_delete(request, obj_id)
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, backup_id):
backup = api.trove.backup_get(request, backup_id)
try:
backup.instance = api.trove.instance_get(request,
backup.instance_id)
except Exception:
pass
return backup
def db_link(obj):
if not hasattr(obj, 'instance'):
return
if hasattr(obj.instance, 'name'):
return reverse(
'horizon:project:databases:detail',
kwargs={'instance_id': obj.instance_id})
def db_name(obj):
if not hasattr(obj, 'instance') or not hasattr(obj.instance, 'name'):
return obj.instance_id
return obj.instance.name
def get_datastore(obj):
if hasattr(obj, "datastore"):
return obj.datastore["type"]
return _("Not available")
def get_datastore_version(obj):
if hasattr(obj, "datastore"):
return obj.datastore["version"]
return _("Not available")
def is_incremental(obj):
return hasattr(obj, 'parent_id') and obj.parent_id is not None
class BackupsTable(tables.DataTable):
name = tables.Column("name",
link="horizon:project:database_backups:detail",
verbose_name=_("Name"))
datastore = tables.Column(get_datastore,
verbose_name=_("Datastore"))
datastore_version = tables.Column(get_datastore_version,
verbose_name=_("Datastore Version"))
created = tables.Column("created", verbose_name=_("Created"),
filters=[filters.parse_isotime])
instance = tables.Column(db_name, link=db_link,
verbose_name=_("Database"))
incremental = tables.Column(is_incremental,
verbose_name=_("Incremental"),
filters=(d_filters.yesno,
d_filters.capfirst))
status = tables.Column("status",
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
class Meta(object):
name = "backups"
verbose_name = _("Backups")
status_columns = ["status"]
row_class = UpdateRow
table_actions = (LaunchLink, DeleteBackup)
row_actions = (RestoreLink, DownloadBackup, DeleteBackup)
| apache-2.0 |
sorenk/ansible | lib/ansible/module_utils/facts/system/caps.py | 232 | 2153 | # Collect facts related to systems 'capabilities' via capsh
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.collector import BaseFactCollector
class SystemCapabilitiesFactCollector(BaseFactCollector):
name = 'caps'
_fact_ids = set(['system_capabilities',
'system_capabilities_enforced'])
def collect(self, module=None, collected_facts=None):
facts_dict = {}
if not module:
return facts_dict
capsh_path = module.get_bin_path('capsh')
        # NOTE: early exit 'if not capsh_path' and unindent rest of method -akl
if capsh_path:
# NOTE: -> get_caps_data()/parse_caps_data() for easier mocking -akl
rc, out, err = module.run_command([capsh_path, "--print"], errors='surrogate_then_replace')
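            # A 'Current:' line in capsh --print output typically looks like
            # (illustrative): Current: = cap_chown,cap_dac_override+ep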
enforced_caps = []
enforced = 'NA'
for line in out.splitlines():
if len(line) < 1:
continue
if line.startswith('Current:'):
if line.split(':')[1].strip() == '=ep':
enforced = 'False'
else:
enforced = 'True'
enforced_caps = [i.strip() for i in line.split('=')[1].split(',')]
facts_dict['system_capabilities_enforced'] = enforced
facts_dict['system_capabilities'] = enforced_caps
return facts_dict
| gpl-3.0 |
itstriz/chicago_bus_equity | download_stops.py | 1 | 2860 | #!env/bin/python
import csv, os, urllib2
import xml.etree.ElementTree as ET
from app import db
from app.models import Route, Stop
from bs4 import BeautifulSoup
from config_local import STOPS_FILE
from zipfile import ZipFile
# Define URLs and file name
url = 'https://data.cityofchicago.org/download/84eu-buny/application/zip'
def download_stops(url):
# Download KMZ file from Data Portal
print "Downloading %s" % STOPS_FILE
f = urllib2.urlopen(url)
data = f.read()
with open(STOPS_FILE, 'wb') as stops_file:
stops_file.write(data)
def parse_kml():
# Open as KML and parse
kmz = ZipFile(STOPS_FILE, 'r')
kml = kmz.open('doc.kml', 'r')
# Define document structure for ElementTree
# <Document
# <Folder>
# <Placemark>
root = ET.fromstring(kml.read())
namespace = '{http://www.opengis.net/kml/2.2}'
document = root.find('%sDocument' % namespace)
folder = document.find('%sFolder' % namespace)
# Loop through placemarks
for placemark in folder.findall('%sPlacemark' % namespace):
stop_name = placemark.find('%sname' % namespace).text
description = placemark.find('%sdescription' % namespace).text
soup = BeautifulSoup(description)
all_tds = soup.findAll('td')
system_stop_id = all_tds[3].text
street = all_tds[5].text
cross_street = all_tds[7].text
direction = all_tds[9].text
lat = all_tds[23].text
lon = all_tds[25].text
routes = all_tds[13].text.split(',')
data = {'stop_name': stop_name,
'stop_id': system_stop_id,
'street': street,
'cross_street': cross_street,
'direction': direction,
'latitude': lat,
'longitude': lon,
'routes': routes}
if update_database(data) is None:
print 'Error updating database.'
return None
return None
def update_database(data):
stop = Stop.query.filter_by(stop_id = data['stop_id']).first()
if stop is None:
print 'Stop %s not found.' % data['stop_id']
s = Stop(name = data['stop_name'],
stop_id = data['stop_id'],
street = data['street'],
cross_street = data['cross_street'],
direction = data['direction'],
latitude = data['latitude'],
longitude = data['longitude'])
db.session.add(s)
db.session.commit()
for route in data['routes']:
r = Route.query.filter_by(route_num=route).first()
if r is not None:
s.add_route(r)
db.session.add(s)
db.session.commit()
return s.id
else:
print 'Stop %s was found. Skipping.' % data['stop_id']
return stop.id
parse_kml()
| gpl-2.0 |
simonwydooghe/ansible | contrib/inventory/vagrant.py | 30 | 4064 | #!/usr/bin/env python
"""
Vagrant external inventory script. Automatically finds the IP of the booted vagrant vm(s), and
returns it under the host group 'vagrant'
Example Vagrant configuration using this script:
config.vm.provision :ansible do |ansible|
ansible.playbook = "./provision/your_playbook.yml"
ansible.inventory_path = "./provision/inventory/vagrant.py"
ansible.verbose = true
end
"""
# Copyright (C) 2013 Mark Mandel <mark@compoundtheory.com>
# 2015 Igor Khomyakov <homyakov@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Thanks to the spacewalk.py inventory script for giving me the basic structure
# of this.
#
import sys
import os.path
import subprocess
import re
from paramiko import SSHConfig
from optparse import OptionParser
from collections import defaultdict
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves import StringIO
_group = 'vagrant' # a default group
_ssh_to_ansible = [('user', 'ansible_user'),
('hostname', 'ansible_host'),
('identityfile', 'ansible_ssh_private_key_file'),
('port', 'ansible_port')]
# Options
# ------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
help="Produce a JSON consumable grouping of Vagrant servers for Ansible")
parser.add_option('--host', default=None, dest="host",
help="Generate additional host specific details for given host for Ansible")
(options, args) = parser.parse_args()
#
# helper functions
#
# get all the ssh configs for all boxes in an array of dictionaries.
def get_ssh_config():
return dict((k, get_a_ssh_config(k)) for k in list_running_boxes())
# list all the running boxes
def list_running_boxes():
output = to_text(subprocess.check_output(["vagrant", "status"]), errors='surrogate_or_strict').split('\n')
boxes = []
for line in output:
matcher = re.search(r"([^\s]+)[\s]+running \(.+", line)
if matcher:
boxes.append(matcher.group(1))
return boxes
# get the ssh config for a single box
def get_a_ssh_config(box_name):
"""Gives back a map of all the machine's ssh configurations"""
output = to_text(subprocess.check_output(["vagrant", "ssh-config", box_name]), errors='surrogate_or_strict')
config = SSHConfig()
config.parse(StringIO(output))
host_config = config.lookup(box_name)
# man 5 ssh_config:
# > It is possible to have multiple identity files ...
# > all these identities will be tried in sequence.
for id in host_config['identityfile']:
if os.path.isfile(id):
host_config['identityfile'] = id
return dict((v, host_config[k]) for k, v in _ssh_to_ansible)
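# The returned mapping looks like (illustrative):
#   {'ansible_user': 'vagrant', 'ansible_host': '127.0.0.1',
#    'ansible_ssh_private_key_file': '/path/to/private_key', 'ansible_port': '2222'}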
# List out servers that vagrant has running
# ------------------------------
if options.list:
ssh_config = get_ssh_config()
meta = defaultdict(dict)
for host in ssh_config:
meta['hostvars'][host] = ssh_config[host]
print(json.dumps({_group: list(ssh_config.keys()), '_meta': meta}))
sys.exit(0)
# Get out the host details
# ------------------------------
elif options.host:
print(json.dumps(get_a_ssh_config(options.host)))
sys.exit(0)
# Print out help
# ------------------------------
else:
parser.print_help()
sys.exit(0)
| gpl-3.0 |
ross128/stg | ships/migrations/0005_add_ship_cargo.py | 1 | 1089 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from goods.models import Stock
def insertDefaultCargoIDs(apps, schema_editor):
"""inserts the default stock IDs for the newly created OneToOneField"""
Ship = apps.get_model("ships", "Ship")
for ship in Ship.objects.all():
cargo = Stock.objects.create()
cargo.save()
ship.cargo_id = cargo.pk
ship.save()
class Migration(migrations.Migration):
dependencies = [
('goods', '0001_initial'),
('ships', '0004_module_values'),
]
operations = [
migrations.AddField(
model_name='ship',
name='cargo',
field=models.OneToOneField(to='goods.Stock', null=True),
preserve_default=False,
),
migrations.RunPython(insertDefaultCargoIDs, reverse_code=migrations.RunPython.noop),
migrations.AlterField(
model_name='ship',
name='cargo',
field=models.OneToOneField(to='goods.Stock', null=False),
),
]
| gpl-3.0 |
Spike96/Python_Training_ | fixture/application.py | 1 | 1045 | from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.users import UsersHelper
class Application:
def __init__(self, browser, base_url):
if browser == "firefox":
self.wd = webdriver.Firefox()
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
else:
raise ValueError("Unrecognized browser %s" % browser)
        # The next call can be removed if the page and its elements load quickly
self.wd.implicitly_wait(5)
self.session = SessionHelper(self)
self.group = GroupHelper(self)
self.users = UsersHelper(self)
self.base_url = base_url
def is_valid(self):
try:
self.wd.current_url
return True
except:
return False
def open_home_page(self):
wd = self.wd
wd.get(self.base_url)
def destroy(self):
self.wd.quit()
| apache-2.0 |
Airphrame/ardupilot | mk/PX4/Tools/genmsg/src/genmsg/template_tools.py | 215 | 9443 | # Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
## ROS Message generation
##
##
import sys
import os
import em
import genmsg.command_line
import genmsg.msgs
import genmsg.msg_loader
import genmsg.gentools
# generate msg or srv files from a template file
# template_map is of the form {'template_file': 'output_file'}; output_file may contain @NAME@, which will be replaced by the message/service name
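# e.g. a (hypothetical) template_map {'msg.h.template': '@NAME@.h'} would
# render Foo.h when generating code for a message named Foo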
def _generate_from_spec(input_file, output_dir, template_dir, msg_context, spec, template_map, search_path):
md5sum = genmsg.gentools.compute_md5(msg_context, spec)
# precompute msg definition once
if isinstance(spec, genmsg.msgs.MsgSpec):
msg_definition = genmsg.gentools.compute_full_text(msg_context, spec)
# Loop over all files to generate
for template_file_name, output_file_name in template_map.items():
template_file = os.path.join(template_dir, template_file_name)
output_file = os.path.join(output_dir, output_file_name.replace("@NAME@", spec.short_name))
#print "generate_from_template %s %s %s" % (input_file, template_file, output_file)
ofile = open(output_file, 'w') #todo try
# Set dictionary for the generator interpreter
g = {
"file_name_in": input_file,
"spec": spec,
"md5sum": md5sum,
"search_path": search_path,
"msg_context": msg_context
}
if isinstance(spec, genmsg.msgs.MsgSpec):
g['msg_definition'] = msg_definition
# todo, reuse interpreter
interpreter = em.Interpreter(output=ofile, globals=g, options={em.RAW_OPT:True,em.BUFFERED_OPT:True})
if not os.path.isfile(template_file):
ofile.close()
os.remove(output_file)
raise RuntimeError("Template file %s not found in template dir %s" % (template_file_name, template_dir))
interpreter.file(open(template_file)) #todo try
interpreter.shutdown()
def _generate_msg_from_file(input_file, output_dir, template_dir, search_path, package_name, msg_template_dict):
# Read MsgSpec from .msg file
msg_context = genmsg.msg_loader.MsgContext.create_default()
full_type_name = genmsg.gentools.compute_full_type_name(package_name, os.path.basename(input_file))
spec = genmsg.msg_loader.load_msg_from_file(msg_context, input_file, full_type_name)
# Load the dependencies
genmsg.msg_loader.load_depends(msg_context, spec, search_path)
# Generate the language dependent msg file
_generate_from_spec(input_file,
output_dir,
template_dir,
msg_context,
spec,
msg_template_dict,
search_path)
def _generate_srv_from_file(input_file, output_dir, template_dir, search_path, package_name, srv_template_dict, msg_template_dict):
# Read MsgSpec from .srv.file
msg_context = genmsg.msg_loader.MsgContext.create_default()
full_type_name = genmsg.gentools.compute_full_type_name(package_name, os.path.basename(input_file))
spec = genmsg.msg_loader.load_srv_from_file(msg_context, input_file, full_type_name)
# Load the dependencies
genmsg.msg_loader.load_depends(msg_context, spec, search_path)
# Generate the language dependent srv file
_generate_from_spec(input_file,
output_dir,
template_dir,
msg_context,
spec,
srv_template_dict,
search_path)
# Generate the language dependent msg file for the srv request
_generate_from_spec(input_file,
output_dir,
template_dir,
msg_context,
spec.request,
msg_template_dict,
search_path)
# Generate the language dependent msg file for the srv response
_generate_from_spec(input_file,
output_dir,
template_dir,
msg_context,
spec.response,
msg_template_dict,
search_path)
# uniform interface for generating either srv or msg files
def generate_from_file(input_file, package_name, output_dir, template_dir, include_path, msg_template_dict, srv_template_dict):
# Normalize paths
input_file = os.path.abspath(input_file)
output_dir = os.path.abspath(output_dir)
# Create output dir
try:
os.makedirs(output_dir)
except OSError as e:
if e.errno != 17: # ignore file exists error
raise
# Parse include path dictionary
if( include_path ):
search_path = genmsg.command_line.includepath_to_dict(include_path)
else:
search_path = {}
# Generate the file(s)
if input_file.endswith(".msg"):
_generate_msg_from_file(input_file, output_dir, template_dir, search_path, package_name, msg_template_dict)
elif input_file.endswith(".srv"):
_generate_srv_from_file(input_file, output_dir, template_dir, search_path, package_name, srv_template_dict, msg_template_dict)
else:
assert False, "Uknown file extension for %s"%input_file
def generate_module(package_name, output_dir, template_dir, template_dict):
# Locate generate msg files
files = os.listdir(output_dir)
# Loop over all files to generate
for template_file_name, output_file_name in template_dict.items():
template_file = os.path.join(template_dir, template_file_name)
output_file = os.path.join(output_dir, output_file_name)
ofile = open(output_file, 'w') #todo try
        # Set dictionary for the generator interpreter
g = dict(files=files,
package=package_name)
# todo, reuse interpreter
interpreter = em.Interpreter(output=ofile, options={em.RAW_OPT:True,em.BUFFERED_OPT:True})
interpreter.updateGlobals(g)
if not os.path.isfile(template_file):
ofile.close()
os.remove(output_file)
raise RuntimeError("Template file %s not found in template dir %s" % (template_file_name, template_dir))
interpreter.file(open(template_file)) #todo try
interpreter.shutdown()
# Uniform interface to support the standard command line options
def generate_from_command_line_options(argv, msg_template_dict, srv_template_dict, module_template_dict = {}):
from optparse import OptionParser
parser = OptionParser("[options] <srv file>")
parser.add_option("-p", dest='package',
help="ros package the generated msg/srv files belongs to")
parser.add_option("-o", dest='outdir',
help="directory in which to place output files")
parser.add_option("-I", dest='includepath',
help="include path to search for messages",
action="append")
parser.add_option("-m", dest='module',
help="write the module file",
action='store_true', default=False)
parser.add_option("-e", dest='emdir',
help="directory containing template files",
default=sys.path[0])
(options, argv) = parser.parse_args(argv)
if( not options.package or not options.outdir or not options.emdir):
parser.print_help()
exit(-1)
if( options.module ):
generate_module(options.package, options.outdir, options.emdir, module_template_dict)
else:
if len(argv) > 1:
generate_from_file(argv[1], options.package, options.outdir, options.emdir, options.includepath, msg_template_dict, srv_template_dict)
else:
parser.print_help()
exit(-1)
| gpl-3.0 |
holachek/ecosense | app/tastypie/management/commands/backfill_api_keys.py | 5 | 1060 | from django.core.management.base import NoArgsCommand
from tastypie.compat import User
from tastypie.models import ApiKey
class Command(NoArgsCommand):
help = "Goes through all users and adds API keys for any that don't have one."
def handle_noargs(self, **options):
"""Goes through all users and adds API keys for any that don't have one."""
self.verbosity = int(options.get('verbosity', 1))
for user in User.objects.all().iterator():
try:
api_key = ApiKey.objects.get(user=user)
if not api_key.key:
# Autogenerate the key.
api_key.save()
if self.verbosity >= 1:
print u"Generated a new key for '%s'" % user.username
except ApiKey.DoesNotExist:
api_key = ApiKey.objects.create(user=user)
if self.verbosity >= 1:
print u"Created a new key for '%s'" % user.username
| mit |
waxmanr/moose | framework/contrib/nsiqcppstyle/rules/RULE_4_2_A_B_space_around_word.py | 43 | 3213 | """
Provide space around word.
This rule checks whether spaces are provided before and after the 'if', 'else', and 'for' keywords
within function scope.
It doesn't check 'switch' and 'while' because they are commonly written attached to the following '('.
== Violation ==
void function()
{
for(k;j;c) { <== Violation. it should be 'for (k;j;c)'
}
if(k) { <== Violation. it should be 'if (k)'
}else { <== Violation. it should be '} else'
}
}
== Good ==
#define KK for(a;b;c) <== Don't care. It's not function scope.
void function() {
for (k;j;c) { <== OK
}
if (k) { <== OK
} else { <== OK
}
}
"""
from nsiqcppstyle_rulehelper import *
from nsiqcppstyle_reporter import *
from nsiqcppstyle_rulemanager import *
words = (
"FOR",
"ELSE",
"IF",
)
def RunRule(lexer, contextStack) :
t = lexer.GetCurToken()
if t.type in words :
t2 = lexer.PeekNextToken()
t3 = lexer.PeekPrevToken()
if t2 != None and t3 != None:
if t2.type not in ["SPACE", "LINEFEED", "PREPROCESSORNEXT"] or t3.type not in ["SPACE", "LINEFEED"] :
if not Search("^[ ]*#[ ]*include", t.line) :
nsiqcppstyle_reporter.Error(t, __name__,
"Put space before/after word '%s'." % t.value)
ruleManager.AddFunctionScopeRule(RunRule)
ruleManager.AddPreprocessRule(RunRule)
###########################################################################################
# Unit Test
###########################################################################################
from nsiqunittest.nsiqcppstyle_unittestbase import *
class testRule(nct):
def setUpRule(self):
ruleManager.AddFunctionScopeRule(RunRule)
ruleManager.AddPreprocessRule(RunRule)
def test1(self):
self.Analyze("test/thisFile.c",
"""
void function(int k, int j, int pp)
{
for(a;b;c) {
}
}
""")
assert CheckErrorContent(__name__)
def test2(self):
self.Analyze("test/thisFile.c",
"""
void function(int k, int j, int pp)
{
if(k==3)
{
}
}
""")
assert CheckErrorContent(__name__)
def test3(self):
self.Analyze("test/thisFile.c",
"""
void function(int k, int j, int pp)
{
if (k==3)
{
}else {
}
}
""")
assert CheckErrorContent(__name__)
def test4(self):
self.Analyze("test/thisFile.c",
"""
if(k==3)
{
}
void function(int k, int j, int pp)
{
if (k==3)
{
} else {
}
while(True) {
}
for (k;j; c) {
}
}
""")
assert not CheckErrorContent(__name__)
def test5(self):
self.Analyze("test/thisFile.c",
"""
#define AA do {\\
} while(0)
""")
assert not CheckErrorContent(__name__)
def test6(self):
self.Analyze("test/thisFile.c",
"""
#define AA if\\
{} while(0)
""")
assert not CheckErrorContent(__name__)
def test7(self):
self.Analyze("test/thisFile.c",
"""
#define AA if(\\
{} while(0)
""")
assert CheckErrorContent(__name__)
def test8(self):
self.Analyze("test/thisFile.c",
"""
# include <boost/preprocessor/repetition/for.hpp>
""")
assert not CheckErrorContent(__name__)
| lgpl-2.1 |
havard024/prego | venv/lib/python2.7/site-packages/unidecode/x0b0.py | 253 | 4853 | data = (
'ggwem', # 0x00
'ggweb', # 0x01
'ggwebs', # 0x02
'ggwes', # 0x03
'ggwess', # 0x04
'ggweng', # 0x05
'ggwej', # 0x06
'ggwec', # 0x07
'ggwek', # 0x08
'ggwet', # 0x09
'ggwep', # 0x0a
'ggweh', # 0x0b
'ggwi', # 0x0c
'ggwig', # 0x0d
'ggwigg', # 0x0e
'ggwigs', # 0x0f
'ggwin', # 0x10
'ggwinj', # 0x11
'ggwinh', # 0x12
'ggwid', # 0x13
'ggwil', # 0x14
'ggwilg', # 0x15
'ggwilm', # 0x16
'ggwilb', # 0x17
'ggwils', # 0x18
'ggwilt', # 0x19
'ggwilp', # 0x1a
'ggwilh', # 0x1b
'ggwim', # 0x1c
'ggwib', # 0x1d
'ggwibs', # 0x1e
'ggwis', # 0x1f
'ggwiss', # 0x20
'ggwing', # 0x21
'ggwij', # 0x22
'ggwic', # 0x23
'ggwik', # 0x24
'ggwit', # 0x25
'ggwip', # 0x26
'ggwih', # 0x27
'ggyu', # 0x28
'ggyug', # 0x29
'ggyugg', # 0x2a
'ggyugs', # 0x2b
'ggyun', # 0x2c
'ggyunj', # 0x2d
'ggyunh', # 0x2e
'ggyud', # 0x2f
'ggyul', # 0x30
'ggyulg', # 0x31
'ggyulm', # 0x32
'ggyulb', # 0x33
'ggyuls', # 0x34
'ggyult', # 0x35
'ggyulp', # 0x36
'ggyulh', # 0x37
'ggyum', # 0x38
'ggyub', # 0x39
'ggyubs', # 0x3a
'ggyus', # 0x3b
'ggyuss', # 0x3c
'ggyung', # 0x3d
'ggyuj', # 0x3e
'ggyuc', # 0x3f
'ggyuk', # 0x40
'ggyut', # 0x41
'ggyup', # 0x42
'ggyuh', # 0x43
'ggeu', # 0x44
'ggeug', # 0x45
'ggeugg', # 0x46
'ggeugs', # 0x47
'ggeun', # 0x48
'ggeunj', # 0x49
'ggeunh', # 0x4a
'ggeud', # 0x4b
'ggeul', # 0x4c
'ggeulg', # 0x4d
'ggeulm', # 0x4e
'ggeulb', # 0x4f
'ggeuls', # 0x50
'ggeult', # 0x51
'ggeulp', # 0x52
'ggeulh', # 0x53
'ggeum', # 0x54
'ggeub', # 0x55
'ggeubs', # 0x56
'ggeus', # 0x57
'ggeuss', # 0x58
'ggeung', # 0x59
'ggeuj', # 0x5a
'ggeuc', # 0x5b
'ggeuk', # 0x5c
'ggeut', # 0x5d
'ggeup', # 0x5e
'ggeuh', # 0x5f
'ggyi', # 0x60
'ggyig', # 0x61
'ggyigg', # 0x62
'ggyigs', # 0x63
'ggyin', # 0x64
'ggyinj', # 0x65
'ggyinh', # 0x66
'ggyid', # 0x67
'ggyil', # 0x68
'ggyilg', # 0x69
'ggyilm', # 0x6a
'ggyilb', # 0x6b
'ggyils', # 0x6c
'ggyilt', # 0x6d
'ggyilp', # 0x6e
'ggyilh', # 0x6f
'ggyim', # 0x70
'ggyib', # 0x71
'ggyibs', # 0x72
'ggyis', # 0x73
'ggyiss', # 0x74
'ggying', # 0x75
'ggyij', # 0x76
'ggyic', # 0x77
'ggyik', # 0x78
'ggyit', # 0x79
'ggyip', # 0x7a
'ggyih', # 0x7b
'ggi', # 0x7c
'ggig', # 0x7d
'ggigg', # 0x7e
'ggigs', # 0x7f
'ggin', # 0x80
'gginj', # 0x81
'gginh', # 0x82
'ggid', # 0x83
'ggil', # 0x84
'ggilg', # 0x85
'ggilm', # 0x86
'ggilb', # 0x87
'ggils', # 0x88
'ggilt', # 0x89
'ggilp', # 0x8a
'ggilh', # 0x8b
'ggim', # 0x8c
'ggib', # 0x8d
'ggibs', # 0x8e
'ggis', # 0x8f
'ggiss', # 0x90
'gging', # 0x91
'ggij', # 0x92
'ggic', # 0x93
'ggik', # 0x94
'ggit', # 0x95
'ggip', # 0x96
'ggih', # 0x97
'na', # 0x98
'nag', # 0x99
'nagg', # 0x9a
'nags', # 0x9b
'nan', # 0x9c
'nanj', # 0x9d
'nanh', # 0x9e
'nad', # 0x9f
'nal', # 0xa0
'nalg', # 0xa1
'nalm', # 0xa2
'nalb', # 0xa3
'nals', # 0xa4
'nalt', # 0xa5
'nalp', # 0xa6
'nalh', # 0xa7
'nam', # 0xa8
'nab', # 0xa9
'nabs', # 0xaa
'nas', # 0xab
'nass', # 0xac
'nang', # 0xad
'naj', # 0xae
'nac', # 0xaf
'nak', # 0xb0
'nat', # 0xb1
'nap', # 0xb2
'nah', # 0xb3
'nae', # 0xb4
'naeg', # 0xb5
'naegg', # 0xb6
'naegs', # 0xb7
'naen', # 0xb8
'naenj', # 0xb9
'naenh', # 0xba
'naed', # 0xbb
'nael', # 0xbc
'naelg', # 0xbd
'naelm', # 0xbe
'naelb', # 0xbf
'naels', # 0xc0
'naelt', # 0xc1
'naelp', # 0xc2
'naelh', # 0xc3
'naem', # 0xc4
'naeb', # 0xc5
'naebs', # 0xc6
'naes', # 0xc7
'naess', # 0xc8
'naeng', # 0xc9
'naej', # 0xca
'naec', # 0xcb
'naek', # 0xcc
'naet', # 0xcd
'naep', # 0xce
'naeh', # 0xcf
'nya', # 0xd0
'nyag', # 0xd1
'nyagg', # 0xd2
'nyags', # 0xd3
'nyan', # 0xd4
'nyanj', # 0xd5
'nyanh', # 0xd6
'nyad', # 0xd7
'nyal', # 0xd8
'nyalg', # 0xd9
'nyalm', # 0xda
'nyalb', # 0xdb
'nyals', # 0xdc
'nyalt', # 0xdd
'nyalp', # 0xde
'nyalh', # 0xdf
'nyam', # 0xe0
'nyab', # 0xe1
'nyabs', # 0xe2
'nyas', # 0xe3
'nyass', # 0xe4
'nyang', # 0xe5
'nyaj', # 0xe6
'nyac', # 0xe7
'nyak', # 0xe8
'nyat', # 0xe9
'nyap', # 0xea
'nyah', # 0xeb
'nyae', # 0xec
'nyaeg', # 0xed
'nyaegg', # 0xee
'nyaegs', # 0xef
'nyaen', # 0xf0
'nyaenj', # 0xf1
'nyaenh', # 0xf2
'nyaed', # 0xf3
'nyael', # 0xf4
'nyaelg', # 0xf5
'nyaelm', # 0xf6
'nyaelb', # 0xf7
'nyaels', # 0xf8
'nyaelt', # 0xf9
'nyaelp', # 0xfa
'nyaelh', # 0xfb
'nyaem', # 0xfc
'nyaeb', # 0xfd
'nyaebs', # 0xfe
'nyaes', # 0xff
)
| mit |
Nateowami/flex-languagedepot-metadata | src/createDb.py | 1 | 3005 | #!/usr/bin/python3
from LanguageDepotAnalyze import getListOfCapabilities
from importlib import import_module
# in this file, we write a SQL script which will create the database.
# the script will take this format:
#
# -- create schemas
# CREATE SCHEMA project;
#
# -- set search path
# SET search_path TO project,public;
#
# -- create tables
# CREATE TABLE project.metadata (
# name varchar(80),
# id serial,
# projectCode varchar(80), -- same as name
# projectSizeInMB int,
# numberOfRevisions int,
# createdDate date, -- Date of first commit
# modifiedDate date -- Date of last commit
# );
def main():
# Here, we make the list of elements. The first three added here are
    # non-capabilities and MUST NOT be removed. Removing the element 'name' will
# break the program entirely.
listOfElements = []
listOfElements.append(['name', 'varchar(80)'])
listOfElements.append(['id', 'serial'])
listOfElements.append(['projectCode', 'varchar(80)'])
    # Using the list of capabilities, we import each capability and use its
    # getColumns() function to add its columns to the list of elements.
    # If getColumns() returns a list of lists, each inner list is appended as
    # its own column definition; otherwise the flat list is appended as a
    # single column. Every column definition must pair a name with a type.
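    # A capability's getColumns() is expected to return either a single
    # ['name', 'type'] pair or a list of such pairs, e.g. (hypothetical):
    #   [['numberOfLexicalEntries', 'int'], ['numberOfTexts', 'int']]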
listOfCapabilities = getListOfCapabilities()
for capability in listOfCapabilities:
capabilityModule = import_module(capability)
if isinstance(capabilityModule.tasks.getColumns()[0], list):
for element in capabilityModule.tasks.getColumns():
if len(element) % 2 == 0:
listOfElements.append(element)
else:
raise ValueError('please insert a type for each element.')
elif len(capabilityModule.tasks.getColumns()) % 2 == 0:
listOfElements.append(capabilityModule.tasks.getColumns())
else:
raise ValueError('please insert a type for each element.')
    # This element must be last; it tells the program whether the file is done
    # being scanned.
listOfElements.append(['scanDone', 'boolean'])
# SQL string variables
schema = ['-- create schemas\n',
'CREATE SCHEMA project;\n',
'-- set search path\n',
'SET search_path TO project,public;\n'
]
tableStart = ['-- create tables\n',
'CREATE TABLE project.metadata (\n'
]
tableEnd = ');\n'
# write!
sqlFile = open('languagedepot-metadata.sql', 'w')
sqlFile.writelines(schema)
sqlFile.writelines(tableStart)
for element in listOfElements:
# If it's the very last element
if element == listOfElements[-1]:
sqlFile.write(element[0] + ' ' + element[1] + '\n')
else:
sqlFile.write(element[0] + ' ' + element[1] + ',' + '\n')
sqlFile.write(tableEnd)
sqlFile.close()
# end of main()
if __name__ == "__main__":
main()
| mit |
tucksaun/status.tucksaun.net | stashboard/contrib/httplib2/__init__.py | 23 | 53960 | from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "$Rev$"
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import base64
import os
import copy
import calendar
import time
import random
import errno
# remove depracated warning in python2.6
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
from httplib2 import socks
except ImportError:
socks = None
# Build the appropriate socket wrapper for ssl
try:
import ssl # python 2.6
_ssl_wrap_socket = ssl.wrap_socket
except (AttributeError, ImportError):
def _ssl_wrap_socket(sock, key_file, cert_file):
ssl_sock = socket.ssl(sock, key_file, cert_file)
return httplib.FakeSocket(sock, ssl_sock)
if sys.version_info >= (2,3):
from iri2uri import iri2uri
else:
def iri2uri(uri):
return uri
def has_timeout(timeout): # python 2.6
if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'):
return (timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT)
return (timeout is not None)
__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error',
'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent',
'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError',
'debuglevel', 'ProxiesUnavailableError']
# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0
# Python 2.3 support
if sys.version_info < (2,4):
def sorted(seq):
seq.sort()
return seq
# Python 2.3 support
def HTTPResponse__getheaders(self):
"""Return list of (header, value) tuples."""
if self.msg is None:
raise httplib.ResponseNotReady()
return self.msg.items()
if not hasattr(httplib.HTTPResponse, 'getheaders'):
httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass
# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
def __init__(self, desc, response, content):
self.response = response
self.content = content
HttpLib2Error.__init__(self, desc)
class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
class RedirectLimit(HttpLib2ErrorWithResponse): pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class MalformedHeader(HttpLib2Error): pass
class RelativeURIError(HttpLib2Error): pass
class ServerNotFoundError(HttpLib2Error): pass
class ProxiesUnavailableError(HttpLib2Error): pass
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
hopbyhop = list(HOP_BY_HOP)
hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')])
return [header for header in response.keys() if header not in hopbyhop]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
"""Parses a URI using the regex given in Appendix B of RFC 3986.
(scheme, authority, path, query, fragment) = parse_uri(uri)
"""
groups = URI.match(uri).groups()
return (groups[1], groups[3], groups[4], groups[6], groups[8])
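# Example (illustrative):
#   parse_uri("http://example.com/a?b=1#c")
#   -> ('http', 'example.com', '/a', 'b=1', 'c')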
def urlnorm(uri):
(scheme, authority, path, query, fragment) = parse_uri(uri)
if not scheme or not authority:
raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
authority = authority.lower()
scheme = scheme.lower()
if not path:
path = "/"
# Could do syntax based normalization of the URI before
# computing the digest. See Section 6.2.2 of Std 66.
request_uri = query and "?".join([path, query]) or path
scheme = scheme.lower()
defrag_uri = scheme + "://" + authority + request_uri
return scheme, authority, request_uri, defrag_uri
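# Example (illustrative):
#   urlnorm("HTTP://Example.COM") -> ('http', 'example.com', '/', 'http://example.com/')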
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')
re_slash = re.compile(r'[?/:|]+')
def safename(filename):
"""Return a filename suitable for the cache.
Strips dangerous and common characters to create a filename we
can use to store the cache in.
"""
try:
if re_url_scheme.match(filename):
if isinstance(filename,str):
filename = filename.decode('utf-8')
filename = filename.encode('idna')
else:
filename = filename.encode('idna')
except UnicodeError:
pass
if isinstance(filename,unicode):
filename=filename.encode('utf-8')
filemd5 = _md5(filename).hexdigest()
filename = re_url_scheme.sub("", filename)
filename = re_slash.sub(",", filename)
# limit length of filename
if len(filename)>200:
filename=filename[:200]
return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
    return dict([ (key.lower(), NORMALIZE_SPACE.sub(' ', value).strip()) for (key, value) in headers.iteritems()])  # note: sub() takes (repl, string)
def _parse_cache_control(headers):
retval = {}
if headers.has_key('cache-control'):
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
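# Example (illustrative):
#   _parse_cache_control({'cache-control': 'max-age=3600, no-cache'})
#   -> {'max-age': '3600', 'no-cache': 1}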
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on, useful for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
"""Returns a dictionary of dictionaries, one dict
per auth_scheme."""
retval = {}
if headers.has_key(headername):
try:
authenticate = headers[headername].strip()
www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
while authenticate:
# Break off the scheme at the beginning of the line
if headername == 'authentication-info':
(auth_scheme, the_rest) = ('digest', authenticate)
else:
(auth_scheme, the_rest) = authenticate.split(" ", 1)
# Now loop over all the key value pairs that come after the scheme,
# being careful not to roll into the next scheme
match = www_auth.search(the_rest)
auth_params = {}
while match:
if match and len(match.groups()) == 3:
(key, value, the_rest) = match.groups()
auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
match = www_auth.search(the_rest)
retval[auth_scheme.lower()] = auth_params
authenticate = the_rest.strip()
except ValueError:
raise MalformedHeader("WWW-Authenticate")
return retval
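# Example (illustrative):
#   _parse_www_authenticate({'www-authenticate': 'Basic realm="test"'})
#   -> {'basic': {'realm': 'test'}}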
def _entry_disposition(response_headers, request_headers):
"""Determine freshness from the Date, Expires and Cache-Control headers.
We don't handle the following:
1. Cache-Control: max-stale
2. Age: headers are not used in the calculations.
    Note that this algorithm is simpler than you might think
because we are operating as a private (non-shared) cache.
This lets us ignore 's-maxage'. We can also ignore
'proxy-invalidate' since we aren't a proxy.
    As a design decision we will never return a stale document as
    fresh, hence the non-implementation of 'max-stale'. This also
    lets us safely ignore 'must-revalidate'
since we operate as if every server has sent 'must-revalidate'.
Since we are private we get to ignore both 'public' and
'private' parameters. We also ignore 'no-transform' since
we don't do any transformations.
The 'no-store' parameter is handled at a higher level.
So the only Cache-Control parameters we look at are:
no-cache
only-if-cached
max-age
min-fresh
"""
retval = "STALE"
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1:
retval = "TRANSPARENT"
if 'cache-control' not in request_headers:
request_headers['cache-control'] = 'no-cache'
elif cc.has_key('no-cache'):
retval = "TRANSPARENT"
elif cc_response.has_key('no-cache'):
retval = "STALE"
elif cc.has_key('only-if-cached'):
retval = "FRESH"
elif response_headers.has_key('date'):
date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
now = time.time()
current_age = max(0, now - date)
if cc_response.has_key('max-age'):
try:
freshness_lifetime = int(cc_response['max-age'])
except ValueError:
freshness_lifetime = 0
elif response_headers.has_key('expires'):
expires = email.Utils.parsedate_tz(response_headers['expires'])
if None == expires:
freshness_lifetime = 0
else:
freshness_lifetime = max(0, calendar.timegm(expires) - date)
else:
freshness_lifetime = 0
if cc.has_key('max-age'):
try:
freshness_lifetime = int(cc['max-age'])
except ValueError:
freshness_lifetime = 0
if cc.has_key('min-fresh'):
try:
min_fresh = int(cc['min-fresh'])
except ValueError:
min_fresh = 0
current_age += min_fresh
if freshness_lifetime > current_age:
retval = "FRESH"
return retval
def _decompressContent(response, new_content):
content = new_content
try:
encoding = response.get('content-encoding', None)
if encoding in ['gzip', 'deflate']:
if encoding == 'gzip':
content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
if encoding == 'deflate':
content = zlib.decompress(content)
response['content-length'] = str(len(content))
            # Record the historical presence of the encoding in a way that won't interfere.
response['-content-encoding'] = response['content-encoding']
del response['content-encoding']
except IOError:
content = ""
raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
if cachekey:
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if cc.has_key('no-store') or cc_response.has_key('no-store'):
cache.delete(cachekey)
else:
info = email.Message.Message()
for key, value in response_headers.iteritems():
if key not in ['status','content-encoding','transfer-encoding']:
info[key] = value
# Add annotations to the cache to indicate what headers
# are variant for this request.
vary = response_headers.get('vary', None)
if vary:
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
try:
info[key] = request_headers[header]
except KeyError:
pass
status = response_headers.status
if status == 304:
status = 200
status_header = 'status: %d\r\n' % status
header_str = info.as_string()
header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
text = "".join([status_header, header_str, content])
cache.set(cachekey, text)
def _cnonce():
dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest()
return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
return base64.b64encode(_sha("%s%s%s" % (cnonce, iso_now, password)).digest()).strip()
# For credentials we need two things, first
# a pool of credentials to try (not necessarily tied to Basic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
class Authentication(object):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
self.path = path
self.host = host
self.credentials = credentials
self.http = http
def depth(self, request_uri):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return request_uri[len(self.path):].count("/")
def inscope(self, host, request_uri):
# XXX Should we normalize the request_uri?
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return (host == self.host) and path.startswith(self.path)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
        Authorization header. Override this in sub-classes."""
pass
def response(self, response, content):
"""Gives us a chance to update with new nonces
or such returned from the last authorized response.
        Override this in sub-classes if necessary.
        Return TRUE if the request is to be retried, for
example Digest may return stale=true.
"""
return False
class BasicAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'Basic ' + base64.b64encode("%s:%s" % self.credentials).strip()
class DigestAuthentication(Authentication):
"""Only do qop='auth' and MD5, since that
is all Apache currently implements"""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['digest']
qop = self.challenge.get('qop', 'auth')
self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
if self.challenge['qop'] is None:
raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
if self.challenge['algorithm'] != 'MD5':
raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
self.challenge['nc'] = 1
def request(self, method, request_uri, headers, content, cnonce = None):
"""Modify the request headers"""
H = lambda x: _md5(x).hexdigest()
KD = lambda s, d: H("%s:%s" % (s, d))
A2 = "".join([method, ":", request_uri])
self.challenge['cnonce'] = cnonce or _cnonce()
request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'],
'%08x' % self.challenge['nc'],
self.challenge['cnonce'],
self.challenge['qop'], H(A2)
))
headers['Authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['nonce'],
request_uri,
self.challenge['algorithm'],
request_digest,
self.challenge['qop'],
self.challenge['nc'],
self.challenge['cnonce'],
)
self.challenge['nc'] += 1
def response(self, response, content):
if not response.has_key('authentication-info'):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
if 'true' == challenge.get('stale'):
self.challenge['nonce'] = challenge['nonce']
self.challenge['nc'] = 1
return True
else:
updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})
if updated_challenge.has_key('nextnonce'):
self.challenge['nonce'] = updated_challenge['nextnonce']
self.challenge['nc'] = 1
return False
class HmacDigestAuthentication(Authentication):
"""Adapted from Robert Sayre's code and DigestAuthentication above."""
__author__ = "Thomas Broyer (t.broyer@ltgt.net)"
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['hmacdigest']
# TODO: self.challenge['domain']
self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
if self.challenge['reason'] not in ['unauthorized', 'integrity']:
self.challenge['reason'] = 'unauthorized'
self.challenge['salt'] = self.challenge.get('salt', '')
if not self.challenge.get('snonce'):
raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
if self.challenge['algorithm'] == 'HMAC-MD5':
self.hashmod = _md5
else:
self.hashmod = _sha
if self.challenge['pw-algorithm'] == 'MD5':
self.pwhashmod = _md5
else:
self.pwhashmod = _sha
self.key = "".join([self.credentials[0], ":",
self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
":", self.challenge['realm']
])
self.key = self.pwhashmod.new(self.key).hexdigest().lower()
def request(self, method, request_uri, headers, content):
"""Modify the request headers"""
keys = _get_end2end_headers(headers)
keylist = "".join(["%s " % k for k in keys])
headers_val = "".join([headers[k] for k in keys])
created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
cnonce = _cnonce()
request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
headers['Authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['snonce'],
cnonce,
request_uri,
created,
request_digest,
keylist,
)
def response(self, response, content):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
if challenge.get('reason') in ['integrity', 'stale']:
return True
return False
class WsseAuthentication(Authentication):
"""This is thinly tested and should not be relied upon.
At this time there isn't any third party server to test against.
Blogger and TypePad implemented this algorithm at one point
but Blogger has since switched to Basic over HTTPS and
TypePad has implemented it wrong, by never issuing a 401
challenge but instead requiring your client to telepathically know that
their endpoint is expecting WSSE profile="UsernameToken"."""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['Authorization'] = 'WSSE profile="UsernameToken"'
iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
cnonce = _cnonce()
password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
self.credentials[0],
password_digest,
cnonce,
iso_now)
class GoogleLoginAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
from urllib import urlencode
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
service = challenge['googlelogin'].get('service', 'xapi')
        # Blogger actually returns the service in the challenge
# For the rest we guess based on the URI
if service == 'xapi' and request_uri.find("calendar") > 0:
service = "cl"
# No point in guessing Base or Spreadsheet
#elif request_uri.find("spreadsheets") > 0:
# service = "wise"
auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
lines = content.split('\n')
d = dict([tuple(line.split("=", 1)) for line in lines if line])
if resp.status == 403:
self.Auth = ""
else:
self.Auth = d['Auth']
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
AUTH_SCHEME_CLASSES = {
"basic": BasicAuthentication,
"wsse": WsseAuthentication,
"digest": DigestAuthentication,
"hmacdigest": HmacDigestAuthentication,
"googlelogin": GoogleLoginAuthentication
}
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
"""Uses a local directory as a store for cached files.
Not really safe to use if multiple threads or processes are going to
be running on the same cache.
"""
def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
self.cache = cache
self.safe = safe
if not os.path.exists(cache):
os.makedirs(self.cache)
def get(self, key):
retval = None
cacheFullPath = os.path.join(self.cache, self.safe(key))
try:
f = file(cacheFullPath, "rb")
retval = f.read()
f.close()
except IOError:
pass
return retval
def set(self, key, value):
cacheFullPath = os.path.join(self.cache, self.safe(key))
f = file(cacheFullPath, "wb")
f.write(value)
f.close()
def delete(self, key):
cacheFullPath = os.path.join(self.cache, self.safe(key))
if os.path.exists(cacheFullPath):
os.remove(cacheFullPath)
class Credentials(object):
def __init__(self):
self.credentials = []
def add(self, name, password, domain=""):
self.credentials.append((domain.lower(), name, password))
def clear(self):
self.credentials = []
def iter(self, domain):
for (cdomain, name, password) in self.credentials:
if cdomain == "" or domain == cdomain:
yield (name, password)
class KeyCerts(Credentials):
"""Identical to Credentials except that
name/password are mapped to key/cert."""
pass
class ProxyInfo(object):
"""Collect information required to use a proxy."""
def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None):
"""The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX
constants. For example:
p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000)
"""
self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, self.proxy_user, self.proxy_pass = proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass
def astuple(self):
return (self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns,
self.proxy_user, self.proxy_pass)
def isgood(self):
return (self.proxy_host != None) and (self.proxy_port != None)
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
"""
HTTPConnection subclass that supports timeouts
All timeouts are in seconds. If None is passed for timeout then
Python's default timeout for sockets will be used. See for example
the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
"""
def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
httplib.HTTPConnection.__init__(self, host, port, strict)
self.timeout = timeout
self.proxy_info = proxy_info
def connect(self):
"""Connect to the host and port specified in __init__."""
# Mostly verbatim from httplib.py.
if self.proxy_info and socks is None:
raise ProxiesUnavailableError(
'Proxy support missing but proxy use was requested!')
msg = "getaddrinfo returns an empty list"
for res in socket.getaddrinfo(self.host, self.port, 0,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
if self.proxy_info and self.proxy_info.isgood():
self.sock = socks.socksocket(af, socktype, proto)
self.sock.setproxy(*self.proxy_info.astuple())
else:
self.sock = socket.socket(af, socktype, proto)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# Different from httplib: support timeouts.
if has_timeout(self.timeout):
self.sock.settimeout(self.timeout)
# End of difference from httplib.
if self.debuglevel > 0:
print "connect: (%s, %s)" % (self.host, self.port)
self.sock.connect(sa)
except socket.error, msg:
if self.debuglevel > 0:
print 'connect fail:', (self.host, self.port)
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
"""
This class allows communication via SSL.
All timeouts are in seconds. If None is passed for timeout then
Python's default timeout for sockets will be used. See for example
the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
"""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None):
httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file,
cert_file=cert_file, strict=strict)
self.timeout = timeout
self.proxy_info = proxy_info
def connect(self):
"Connect to a host on a given (SSL) port."
msg = "getaddrinfo returns an empty list"
for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo(
self.host, self.port, 0, socket.SOCK_STREAM):
try:
if self.proxy_info and self.proxy_info.isgood():
sock = socks.socksocket(family, socktype, proto)
sock.setproxy(*self.proxy_info.astuple())
else:
sock = socket.socket(family, socktype, proto)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if has_timeout(self.timeout):
sock.settimeout(self.timeout)
sock.connect((self.host, self.port))
self.sock = _ssl_wrap_socket(sock, self.key_file, self.cert_file)
if self.debuglevel > 0:
print "connect: (%s, %s)" % (self.host, self.port)
except socket.error, msg:
if self.debuglevel > 0:
print 'connect fail:', (self.host, self.port)
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
class Http(object):
"""An HTTP client that handles:
- all methods
- caching
- ETags
- compression
- HTTPS
- Basic
- Digest
- WSSE
and more.
"""
def __init__(self, cache=None, timeout=None, proxy_info=None):
"""
The value of proxy_info is a ProxyInfo instance.
If 'cache' is a string then it is used as a directory name for
a disk cache. Otherwise it must be an object that supports the
same interface as FileCache.
All timeouts are in seconds. If None is passed for timeout
then Python's default timeout for sockets will be used. See
for example the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
"""
self.proxy_info = proxy_info
# Map domain name to an httplib connection
self.connections = {}
# The location of the cache, for now a directory
# where cached responses are held.
if cache and isinstance(cache, str):
self.cache = FileCache(cache)
else:
self.cache = cache
# Name/password
self.credentials = Credentials()
# Key/cert
self.certificates = KeyCerts()
# authorization objects
self.authorizations = []
# If set to False then no redirects are followed, even safe ones.
self.follow_redirects = True
# Which HTTP methods do we apply optimistic concurrency to, i.e.
# which methods get an "if-match:" etag header added to them.
self.optimistic_concurrency_methods = ["PUT"]
# If 'follow_redirects' is True, and this is set to True then
# all redirects are followed, including unsafe ones.
self.follow_all_redirects = False
self.ignore_etag = False
self.force_exception_to_status_code = False
self.timeout = timeout
def _auth_from_challenge(self, host, request_uri, headers, response, content):
"""A generator that creates Authorization objects
that can be applied to requests.
"""
challenges = _parse_www_authenticate(response, 'www-authenticate')
for cred in self.credentials.iter(host):
for scheme in AUTH_SCHEME_ORDER:
if challenges.has_key(scheme):
yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
def add_credentials(self, name, password, domain=""):
"""Add a name and password that will be used
any time a request requires authentication."""
self.credentials.add(name, password, domain)
def add_certificate(self, key, cert, domain):
"""Add a key and cert that will be used
any time a request requires authentication."""
self.certificates.add(key, cert, domain)
def clear_credentials(self):
"""Remove all the names and passwords
that are used for authentication"""
self.credentials.clear()
self.authorizations = []
def _conn_request(self, conn, request_uri, method, body, headers):
for i in range(2):
try:
conn.request(method, request_uri, body, headers)
except socket.timeout:
raise
except socket.gaierror:
conn.close()
raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
except socket.error, e:
err = 0
if hasattr(e, 'args'):
err = getattr(e, 'args')[0]
else:
err = e.errno
if err == errno.ECONNREFUSED: # Connection refused
raise
except httplib.HTTPException:
# Just because the server closed the connection doesn't apparently mean
# that the server didn't send a response.
if conn.sock is None:
if i == 0:
conn.close()
conn.connect()
continue
else:
conn.close()
raise
if i == 0:
conn.close()
conn.connect()
continue
pass
try:
response = conn.getresponse()
except (socket.error, httplib.HTTPException):
if i == 0:
conn.close()
conn.connect()
continue
else:
raise
else:
content = ""
if method == "HEAD":
response.close()
else:
content = response.read()
response = Response(response)
if method != "HEAD":
content = _decompressContent(response, content)
break
return (response, content)
def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
"""Do the actual request using the connection object
and also follow one level of redirects if necessary"""
auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
auth = auths and sorted(auths)[0][1] or None
if auth:
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers)
if auth:
if auth.response(response, body):
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers)
response._stale_digest = 1
if response.status == 401:
for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
authorization.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers)
if response.status != 401:
self.authorizations.append(authorization)
authorization.response(response, body)
break
if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
# Pick out the location header and basically start from the beginning
# remembering first to strip the ETag header and decrement our 'depth'
if redirections:
if not response.has_key('location') and response.status != 300:
raise RedirectMissingLocation(_("Redirected but the response is missing a Location: header."), response, content)
# Fix-up relative redirects (which violate an RFC 2616 MUST)
if response.has_key('location'):
location = response['location']
(scheme, authority, path, query, fragment) = parse_uri(location)
if authority is None:
response['location'] = urlparse.urljoin(absolute_uri, location)
if response.status == 301 and method in ["GET", "HEAD"]:
response['-x-permanent-redirect-url'] = response['location']
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
if headers.has_key('if-none-match'):
del headers['if-none-match']
if headers.has_key('if-modified-since'):
del headers['if-modified-since']
if response.has_key('location'):
location = response['location']
old_response = copy.deepcopy(response)
if not old_response.has_key('content-location'):
old_response['content-location'] = absolute_uri
redirect_method = method
if response.status == 303:
redirect_method = "GET"
(response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
response.previous = old_response
else:
raise RedirectLimit("Redirected more times than redirection_limit allows.", response, content)
elif response.status in [200, 203] and method == "GET":
# Don't cache 206's since we aren't going to handle byte range requests
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
return (response, content)
def _normalize_headers(self, headers):
return _normalize_headers(headers)
# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.
def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
""" Performs a single HTTP request.
The 'uri' is the URI of the HTTP resource and can begin
with either 'http' or 'https'. The value of 'uri' must be an absolute URI.
The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc.
There is no restriction on the methods allowed.
The 'body' is the entity body to be sent with the request. It is a string
object.
Any extra headers that are to be sent with the request should be provided in the
'headers' dictionary.
The maximum number of redirects to follow before raising an
exception is 'redirections'. The default is 5.
The return value is a tuple of (response, content), the first
being an instance of the 'Response' class, the second being
a string that contains the response entity body.
"""
try:
if headers is None:
headers = {}
else:
headers = self._normalize_headers(headers)
if not headers.has_key('user-agent'):
headers['user-agent'] = "Python-httplib2/%s" % __version__
uri = iri2uri(uri)
(scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
domain_port = authority.split(":")[0:2]
if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
scheme = 'https'
authority = domain_port[0]
conn_key = scheme+":"+authority
if conn_key in self.connections:
conn = self.connections[conn_key]
else:
if not connection_type:
connection_type = (scheme == 'https') and HTTPSConnectionWithTimeout or HTTPConnectionWithTimeout
certs = list(self.certificates.iter(authority))
if scheme == 'https' and certs:
conn = self.connections[conn_key] = connection_type(authority, key_file=certs[0][0],
cert_file=certs[0][1], timeout=self.timeout, proxy_info=self.proxy_info)
else:
conn = self.connections[conn_key] = connection_type(authority, timeout=self.timeout, proxy_info=self.proxy_info)
conn.set_debuglevel(debuglevel)
if 'range' not in headers and 'accept-encoding' not in headers:
headers['accept-encoding'] = 'gzip, deflate'
info = email.Message.Message()
cached_value = None
if self.cache:
cachekey = defrag_uri
cached_value = self.cache.get(cachekey)
if cached_value:
# info = email.message_from_string(cached_value)
#
# Need to replace the line above with the kludge below
# to fix the non-existent bug not fixed in this
# bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
try:
info, content = cached_value.split('\r\n\r\n', 1)
feedparser = email.FeedParser.FeedParser()
feedparser.feed(info)
info = feedparser.close()
feedparser._parse = None
except IndexError:
self.cache.delete(cachekey)
cachekey = None
cached_value = None
else:
cachekey = None
if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
# http://www.w3.org/1999/04/Editing/
headers['if-match'] = info['etag']
if method not in ["GET", "HEAD"] and self.cache and cachekey:
# RFC 2616 Section 13.10
self.cache.delete(cachekey)
# Check the vary header in the cache to see if this request
# matches what varies in the cache.
if method in ['GET', 'HEAD'] and 'vary' in info:
vary = info['vary']
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
value = info[key]
if headers.get(header, None) != value:
cached_value = None
break
if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
if info.has_key('-x-permanent-redirect-url'):
# Should cached permanent redirects be counted in our redirection count? For now, yes.
if redirections <= 0:
raise RedirectLimit("Redirected more times than redirection_limit allows.", {}, "")
(response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
response.previous = Response(info)
response.previous.fromcache = True
else:
# Determine our course of action:
# Is the cached entry fresh or stale?
# Has the client requested a non-cached response?
#
# There seem to be three possible answers:
# 1. [FRESH] Return the cache entry w/o doing a GET
# 2. [STALE] Do the GET (but add in cache validators if available)
# 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
entry_disposition = _entry_disposition(info, headers)
if entry_disposition == "FRESH":
if not cached_value:
info['status'] = '504'
content = ""
response = Response(info)
if cached_value:
response.fromcache = True
return (response, content)
if entry_disposition == "STALE":
if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
headers['if-none-match'] = info['etag']
if info.has_key('last-modified') and not 'last-modified' in headers:
headers['if-modified-since'] = info['last-modified']
elif entry_disposition == "TRANSPARENT":
pass
(response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
if response.status == 304 and method == "GET":
# Rewrite the cache entry with the new end-to-end headers
# Take all headers that are in response
# and overwrite their values in info.
# unless they are hop-by-hop, or are listed in the connection header.
for key in _get_end2end_headers(response):
info[key] = response[key]
merged_response = Response(info)
if hasattr(response, "_stale_digest"):
merged_response._stale_digest = response._stale_digest
_updateCache(headers, merged_response, content, self.cache, cachekey)
response = merged_response
response.status = 200
response.fromcache = True
elif response.status == 200:
content = new_content
else:
self.cache.delete(cachekey)
content = new_content
else:
cc = _parse_cache_control(headers)
if cc.has_key('only-if-cached'):
info['status'] = '504'
response = Response(info)
content = ""
else:
(response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
except Exception, e:
if self.force_exception_to_status_code:
if isinstance(e, HttpLib2ErrorWithResponse):
response = e.response
content = e.content
response.status = 500
response.reason = str(e)
elif isinstance(e, socket.timeout):
content = "Request Timeout"
response = Response( {
"content-type": "text/plain",
"status": "408",
"content-length": len(content)
})
response.reason = "Request Timeout"
else:
content = str(e)
response = Response( {
"content-type": "text/plain",
"status": "400",
"content-length": len(content)
})
response.reason = "Bad Request"
else:
raise
return (response, content)
class Response(dict):
"""An object more like email.Message than httplib.HTTPResponse."""
"""Is this response from our local cache"""
fromcache = False
"""HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """
version = 11
"Status code returned by server. "
status = 200
"""Reason phrase returned by server."""
reason = "Ok"
previous = None
def __init__(self, info):
# info is either an email.Message or
# an httplib.HTTPResponse object.
if isinstance(info, httplib.HTTPResponse):
for key, value in info.getheaders():
self[key.lower()] = value
self.status = info.status
self['status'] = str(self.status)
self.reason = info.reason
self.version = info.version
elif isinstance(info, email.Message.Message):
for key, value in info.items():
self[key] = value
self.status = int(self['status'])
else:
for key, value in info.iteritems():
self[key] = value
self.status = int(self.get('status', self.status))
def __getattr__(self, name):
if name == 'dict':
return self
else:
raise AttributeError, name
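# Illustrative usage sketch (added for this listing; not part of the original
# httplib2 module). It drives the Http client defined above; the URL and the
# '.cache' directory name are hypothetical placeholders, and FileCache is
# assumed to be defined earlier in the full module.
if __name__ == '__main__':
    h = Http('.cache')
    (resp, content) = h.request('http://example.org/', 'GET')
    print resp.status, resp['content-type']
    print len(content)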
| mit |
espadrine/opera | chromium/src/third_party/python_26/Lib/distutils/tests/test_sdist.py | 47 | 4905 | """Tests for distutils.command.sdist."""
import os
import unittest
import shutil
import zipfile
from os.path import join
import sys
from distutils.command.sdist import sdist
from distutils.core import Distribution
from distutils.tests.test_config import PyPIRCCommandTestCase
from distutils.errors import DistutilsExecError
from distutils.spawn import find_executable
CURDIR = os.path.dirname(__file__)
TEMP_PKG = join(CURDIR, 'temppkg')
SETUP_PY = """
from distutils.core import setup
import somecode
setup(name='fake')
"""
MANIFEST_IN = """
recursive-include somecode *
"""
class sdistTestCase(PyPIRCCommandTestCase):
def setUp(self):
PyPIRCCommandTestCase.setUp(self)
self.old_path = os.getcwd()
def tearDown(self):
os.chdir(self.old_path)
if os.path.exists(TEMP_PKG):
shutil.rmtree(TEMP_PKG)
PyPIRCCommandTestCase.tearDown(self)
def _init_tmp_pkg(self):
if os.path.exists(TEMP_PKG):
shutil.rmtree(TEMP_PKG)
os.mkdir(TEMP_PKG)
os.mkdir(join(TEMP_PKG, 'somecode'))
os.mkdir(join(TEMP_PKG, 'dist'))
# creating a MANIFEST.in, a package, and a README
self._write(join(TEMP_PKG, 'MANIFEST.in'), MANIFEST_IN)
self._write(join(TEMP_PKG, 'README'), 'xxx')
self._write(join(TEMP_PKG, 'somecode', '__init__.py'), '#')
self._write(join(TEMP_PKG, 'setup.py'), SETUP_PY)
os.chdir(TEMP_PKG)
def _write(self, path, content):
f = open(path, 'w')
try:
f.write(content)
finally:
f.close()
def test_prune_file_list(self):
# this test creates a package with some vcs dirs in it
# and launches sdist to make sure they get pruned
# on all systems
self._init_tmp_pkg()
# creating VCS directories with some files in them
os.mkdir(join(TEMP_PKG, 'somecode', '.svn'))
self._write(join(TEMP_PKG, 'somecode', '.svn', 'ok.py'), 'xxx')
os.mkdir(join(TEMP_PKG, 'somecode', '.hg'))
self._write(join(TEMP_PKG, 'somecode', '.hg',
'ok'), 'xxx')
os.mkdir(join(TEMP_PKG, 'somecode', '.git'))
self._write(join(TEMP_PKG, 'somecode', '.git',
'ok'), 'xxx')
# now building a sdist
dist = Distribution()
dist.script_name = 'setup.py'
dist.metadata.name = 'fake'
dist.metadata.version = '1.0'
dist.metadata.url = 'http://xxx'
dist.metadata.author = dist.metadata.author_email = 'xxx'
dist.packages = ['somecode']
dist.include_package_data = True
cmd = sdist(dist)
cmd.manifest = 'MANIFEST'
cmd.template = 'MANIFEST.in'
cmd.dist_dir = 'dist'
# zip is available universally
# (tar might not be installed under win32)
cmd.formats = ['zip']
cmd.run()
# now let's check what we have
dist_folder = join(TEMP_PKG, 'dist')
files = os.listdir(dist_folder)
self.assertEquals(files, ['fake-1.0.zip'])
zip_file = zipfile.ZipFile(join(dist_folder, 'fake-1.0.zip'))
try:
content = zip_file.namelist()
finally:
zip_file.close()
# making sure everything has been pruned correctly
self.assertEquals(len(content), 4)
def test_make_distribution(self):
# check if tar and gzip are installed
if (find_executable('tar') is None or
find_executable('gzip') is None):
return
self._init_tmp_pkg()
# now building a sdist
dist = Distribution()
dist.script_name = 'setup.py'
dist.metadata.name = 'fake'
dist.metadata.version = '1.0'
dist.metadata.url = 'http://xxx'
dist.metadata.author = dist.metadata.author_email = 'xxx'
dist.packages = ['somecode']
dist.include_package_data = True
cmd = sdist(dist)
cmd.manifest = 'MANIFEST'
cmd.template = 'MANIFEST.in'
cmd.dist_dir = 'dist'
# creating a gztar then a tar
cmd.formats = ['gztar', 'tar']
cmd.run()
# making sure we have two files
dist_folder = join(TEMP_PKG, 'dist')
result = os.listdir(dist_folder)
result.sort()
self.assertEquals(result,
['fake-1.0.tar', 'fake-1.0.tar.gz'] )
os.remove(join(dist_folder, 'fake-1.0.tar'))
os.remove(join(dist_folder, 'fake-1.0.tar.gz'))
# now trying a tar then a gztar
cmd.formats = ['tar', 'gztar']
cmd.run()
result = os.listdir(dist_folder)
result.sort()
self.assertEquals(result,
['fake-1.0.tar', 'fake-1.0.tar.gz'])
def test_suite():
return unittest.makeSuite(sdistTestCase)
if __name__ == "__main__":
unittest.main(defaultTest="test_suite")
| bsd-3-clause |
rhertzog/django | tests/force_insert_update/tests.py | 381 | 2213 | from __future__ import unicode_literals
from django.db import DatabaseError, IntegrityError, transaction
from django.test import TestCase
from .models import (
Counter, InheritedCounter, ProxyCounter, SubCounter, WithCustomPK,
)
class ForceTests(TestCase):
def test_force_update(self):
c = Counter.objects.create(name="one", value=1)
# The normal case
c.value = 2
c.save()
# Same thing, via an update
c.value = 3
c.save(force_update=True)
# Won't work because force_update and force_insert are mutually
# exclusive
c.value = 4
with self.assertRaises(ValueError):
c.save(force_insert=True, force_update=True)
# Try to update something that doesn't have a primary key in the first
# place.
c1 = Counter(name="two", value=2)
with self.assertRaises(ValueError):
with transaction.atomic():
c1.save(force_update=True)
c1.save(force_insert=True)
# Won't work because we can't insert a pk of the same value.
c.value = 5
with self.assertRaises(IntegrityError):
with transaction.atomic():
c.save(force_insert=True)
# Trying to update should still fail, even with manual primary keys, if
# the data isn't in the database already.
obj = WithCustomPK(name=1, value=1)
with self.assertRaises(DatabaseError):
with transaction.atomic():
obj.save(force_update=True)
class InheritanceTests(TestCase):
def test_force_update_on_inherited_model(self):
a = InheritedCounter(name="count", value=1, tag="spam")
a.save()
a.save(force_update=True)
def test_force_update_on_proxy_model(self):
a = ProxyCounter(name="count", value=1)
a.save()
a.save(force_update=True)
def test_force_update_on_inherited_model_without_fields(self):
'''
Issue 13864: force_update fails on subclassed models, if they don't
specify custom fields.
'''
a = SubCounter(name="count", value=1)
a.save()
a.value = 2
a.save(force_update=True)
| bsd-3-clause |
vnsofthe/odoo | addons/auth_ldap/__openerp__.py | 260 | 1516 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Authentication via LDAP',
'version' : '1.0',
'depends' : ['base'],
'author' : 'OpenERP SA',
#'description': < auto-loaded from README file
'website' : 'https://www.odoo.com',
'category' : 'Authentication',
'data' : [
'users_ldap_view.xml',
'user_ldap_installer.xml',
'security/ir.model.access.csv',
],
'auto_install': False,
'installable': True,
'external_dependencies' : {
'python' : ['ldap'],
}
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
shepdelacreme/ansible | lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork_facts.py | 10 | 5144 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_virtualnetwork_facts
version_added: "2.1"
short_description: Get virtual network facts.
description:
- Get facts for a specific virtual network or all virtual networks within a resource group.
options:
name:
description:
- Only show results for a specific virtual network.
resource_group:
description:
- Limit results by resource group. Required when filtering by name.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht house@redhat.com"
- "Matt Davis mdavis@redhat.com"
'''
EXAMPLES = '''
- name: Get facts for one virtual network
azure_rm_virtualnetwork_facts:
resource_group: Testing
name: vnet001
- name: Get facts for all virtual networks
azure_rm_virtualnetwork_facts:
resource_group: Testing
- name: Get facts by tags
azure_rm_virtualnetwork_facts:
tags:
- testing
'''
RETURN = '''
azure_virtualnetworks:
description: List of virtual network dicts.
returned: always
type: list
example: [{
"etag": 'W/"532ba1be-ae71-40f2-9232-3b1d9cf5e37e"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/vnet2001",
"location": "eastus2",
"name": "vnet2001",
"properties": {
"addressSpace": {
"addressPrefixes": [
"10.10.0.0/16"
]
},
"provisioningState": "Succeeded",
"resourceGuid": "a7ba285f-f7e7-4e17-992a-de4d39f28612",
"subnets": []
},
"type": "Microsoft.Network/virtualNetworks"
}]
'''
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
AZURE_OBJECT_CLASS = 'VirtualNetwork'
class AzureRMNetworkInterfaceFacts(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str'),
resource_group=dict(type='str'),
tags=dict(type='list'),
)
self.results = dict(
changed=False,
ansible_facts=dict(azure_virtualnetworks=[])
)
self.name = None
self.resource_group = None
self.tags = None
super(AzureRMNetworkInterfaceFacts, self).__init__(self.module_arg_spec,
supports_tags=False,
facts_module=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if self.name is not None:
self.results['ansible_facts']['azure_virtualnetworks'] = self.get_item()
else:
self.results['ansible_facts']['azure_virtualnetworks'] = self.list_items()
return self.results
def get_item(self):
self.log('Get properties for {0}'.format(self.name))
item = None
results = []
try:
item = self.network_client.virtual_networks.get(self.resource_group, self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
results = [self.serialize_obj(item, AZURE_OBJECT_CLASS)]
return results
def list_resource_group(self):
self.log('List items for resource group')
try:
response = self.network_client.virtual_networks.list(self.resource_group)
except CloudError as exc:
self.fail("Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
return results
def list_items(self):
self.log('List all for items')
try:
response = self.network_client.virtual_networks.list_all()
except CloudError as exc:
self.fail("Failed to list all items - {0}".format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
return results
def main():
AzureRMNetworkInterfaceFacts()
if __name__ == '__main__':
main()
| gpl-3.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/M2Crypto/util.py | 8 | 1588 | """
M2Crypto utility routines.
Copyright (c) 1999-2004 Ng Pheng Siong. All rights reserved.
Portions created by Open Source Applications Foundation (OSAF) are
Copyright (C) 2004 OSAF. All Rights Reserved.
"""
import sys
import m2
class UtilError(Exception): pass
m2.util_init(UtilError)
def h2b(s):
import array, string
ar=array.array('c')
start=0
if s[:2]=='0x':
start=2
for i in range(start, len(s), 2):
num=string.atoi("%s"%(s[i:i+2],), 16)
ar.append(chr(num))
return ar.tostring()
def pkcs5_pad(data, blklen=8):
pad = (blklen - (len(data) % blklen))
return data + chr(pad) * pad
def pkcs7_pad(data, blklen):
if blklen>255:
raise ValueError, 'illegal block size'
pad=(blklen-(len(data)%blklen))
return data+chr(pad)*pad
def octx_to_num(x):
v = 0L
lx = len(x)
for i in range(lx):
v = v + ord(x[i]) * (256L ** (lx-i-1))
return v
def genparam_callback(p, n, out=sys.stdout):
ch = ['.','+','*','\n']
out.write(ch[p])
out.flush()
def quiet_genparam_callback(p, n, out):
pass
def passphrase_callback(v, prompt1='Enter passphrase:',
prompt2='Verify passphrase:'):
from getpass import getpass
while 1:
try:
p1=getpass(prompt1)
if v:
p2=getpass(prompt2)
if p1==p2:
break
else:
break
except KeyboardInterrupt:
return None
return p1
def no_passphrase_callback(*args):
return ''
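# Quick illustration (added for this listing; not part of the original
# module): PKCS#7-pad a short string to an 8-byte block and convert an
# octet string to its integer value with the helpers defined above.
if __name__ == '__main__':
    padded = pkcs7_pad('abc', 8)
    print len(padded), repr(padded[-1])   # 8 '\x05'
    print octx_to_num('\x01\x00')         # 256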
| agpl-3.0 |
ktnyt/chainer | tests/chainer_tests/links_tests/connection_tests/test_scale.py | 9 | 4443 | import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(
{'learn_W': True, 'bias_term': False, 'bias_shape': None},
{'learn_W': True, 'bias_term': True, 'bias_shape': None},
{'learn_W': False, 'bias_term': False, 'bias_shape': None},
{'learn_W': False, 'bias_term': True, 'bias_shape': (2,)}
)
class TestScale(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
self.W = numpy.random.uniform(-1, 1, (2)).astype(numpy.float32)
self.b = numpy.random.uniform(-1, 1, (2)).astype(numpy.float32)
self.y_expected = numpy.copy(self.x)
for i, j, k in numpy.ndindex(self.y_expected.shape):
self.y_expected[i, j, k] *= self.W[j]
if self.bias_term:
self.y_expected[i, j, k] += self.b[j]
self.gy = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
bias_term = self.bias_term
bias_shape = self.bias_shape
axis = 1
if self.learn_W:
self.link = links.Scale(
axis, self.W.shape, bias_term, bias_shape)
self.link.W.data = self.W
if bias_term:
self.link.bias.b.data = self.b
else:
self.link = links.Scale(
axis, None, bias_term, bias_shape)
if bias_term:
self.link.bias.b.data = self.b
self.link.cleargrads()
def test_attribute_presence(self):
self.assertEqual(self.learn_W, hasattr(self.link, 'W'))
self.assertEqual(self.bias_term, hasattr(self.link, 'bias'))
def check_forward(self, x_data, W_data, y_expected):
x = chainer.Variable(x_data)
if W_data is None:
y = self.link(x)
testing.assert_allclose(y_expected, y.data)
else:
W = chainer.Variable(W_data)
y = self.link(x, W)
testing.assert_allclose(y_expected, y.data)
def test_forward_cpu(self):
if self.learn_W:
W = None
else:
W = self.W
self.check_forward(self.x, W, self.y_expected)
@attr.gpu
def test_forward_gpu(self):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
if self.learn_W:
W = None
else:
W = cuda.to_gpu(self.W)
self.check_forward(x, W, self.y_expected)
def check_backward(self, x_data, W_data, y_grad):
if W_data is None:
params = [self.link.W]
gradient_check.check_backward(
self.link, x_data, y_grad, params, atol=1e-2)
else:
gradient_check.check_backward(
self.link, (x_data, W_data), y_grad, atol=1e-2)
@condition.retry(3)
def test_backward_cpu(self):
if self.learn_W:
W = None
else:
W = self.W
self.check_backward(self.x, W, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
if self.learn_W:
W = None
else:
W = cuda.to_gpu(self.W)
gy = cuda.to_gpu(self.gy)
self.check_backward(x, W, gy)
class TestScaleInvalidArgc(unittest.TestCase):
def setUp(self):
x_data = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
W_data = numpy.random.uniform(-1, 1, (2)).astype(numpy.float32)
self.axis = 1
self.x = chainer.Variable(x_data)
self.W = chainer.Variable(W_data)
def test_scale_invalid_argc1(self):
func = links.Scale(self.axis, self.W.data.shape)
with chainer.using_config('debug', True):
with self.assertRaises(AssertionError):
func(self.x, self.W)
def test_scale_invalid_argc2(self):
func = links.Scale(self.axis, None)
with chainer.using_config('debug', True):
with self.assertRaises(AssertionError):
func(self.x)
class TestScaleNoBiasShape(unittest.TestCase):
def test_scale_no_bias_shape(self):
axis = 1
with self.assertRaises(ValueError):
links.Scale(axis, None, True, None)
testing.run_module(__name__, __file__)
| mit |
lukeiwanski/tensorflow-opencl | tensorflow/contrib/layers/__init__.py | 11 | 2919 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for building neural network layers, regularizers, summaries, etc.
See the @{$python/contrib.layers} guide.
@@avg_pool2d
@@batch_norm
@@convolution2d
@@conv2d_in_plane
@@convolution2d_in_plane
@@conv2d_transpose
@@convolution2d_transpose
@@dropout
@@embedding_lookup_unique
@@flatten
@@fully_connected
@@layer_norm
@@linear
@@max_pool2d
@@one_hot_encoding
@@relu
@@relu6
@@repeat
@@safe_embedding_lookup_sparse
@@separable_conv2d
@@separable_convolution2d
@@softmax
@@stack
@@unit_norm
@@embed_sequence
@@apply_regularization
@@l1_regularizer
@@l2_regularizer
@@sum_regularizer
@@xavier_initializer
@@xavier_initializer_conv2d
@@variance_scaling_initializer
@@optimize_loss
@@summarize_activation
@@summarize_tensor
@@summarize_tensors
@@summarize_collection
@@summarize_activations
@@bucketized_column
@@check_feature_columns
@@create_feature_spec_for_parsing
@@crossed_column
@@embedding_column
@@scattered_embedding_column
@@input_from_feature_columns
@@joint_weighted_sum_from_feature_columns
@@make_place_holder_tensors_for_base_features
@@multi_class_target
@@one_hot_column
@@parse_feature_columns_from_examples
@@parse_feature_columns_from_sequence_examples
@@real_valued_column
@@shared_embedding_columns
@@sparse_column_with_hash_bucket
@@sparse_column_with_integerized_feature
@@sparse_column_with_keys
@@weighted_sparse_column
@@weighted_sum_from_feature_columns
@@infer_real_valued_columns
@@sequence_input_from_feature_columns
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.layers.python.layers import *
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['bias_add',
'conv2d',
'feature_column',
'legacy_fully_connected',
'legacy_linear',
'legacy_relu',
'OPTIMIZER_CLS_NAMES',
'regression_target',
'SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY',
'summaries']
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
maheshakya/NearPy | nearpy/filters/uniquefilter.py | 2 | 1946 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 Ole Krause-Sparmann
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy
from nearpy.filters.vectorfilter import VectorFilter
class UniqueFilter(VectorFilter):
"""
Makes sure that each vector appears only once in the vector list. Works on
both types of vector lists - (vector, data, distance) and
(vector, data).
This filter uses the 'data' as key for uniqueness. If you need some
other feature for uniqueness, you can implement your own filter.
You only need a uniqueness filter if your hash-configuration makes it
possible that one vector is saved in many buckets.
"""
def __init__(self):
pass
def filter_vectors(self, input_list):
"""
Returns subset of specified input list.
"""
unique_dict = {}
for v in input_list:
unique_dict[v[1]] = v
return unique_dict.values()
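# Small usage sketch (added for this listing; not part of the original
# module): vectors sharing the same 'data' key collapse to one entry.
if __name__ == '__main__':
    f = UniqueFilter()
    vectors = [(numpy.array([1.0, 0.0]), 'a', 0.1),
               (numpy.array([1.0, 0.0]), 'a', 0.2),
               (numpy.array([0.0, 1.0]), 'b', 0.3)]
    print(len(f.filter_vectors(vectors)))  # -> 2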
| mit |
SlimKat-U8950/chil360-kernel | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
kutenai/django | django/urls/utils.py | 133 | 2229 | from __future__ import unicode_literals
from importlib import import_module
from django.core.exceptions import ViewDoesNotExist
from django.utils import lru_cache, six
from django.utils.module_loading import module_has_submodule
@lru_cache.lru_cache(maxsize=None)
def get_callable(lookup_view):
"""
Return a callable corresponding to lookup_view.
* If lookup_view is already a callable, return it.
* If lookup_view is a string import path that can be resolved to a callable,
import that callable and return it, otherwise raise an exception
(ImportError or ViewDoesNotExist).
"""
if callable(lookup_view):
return lookup_view
if not isinstance(lookup_view, six.string_types):
raise ViewDoesNotExist("'%s' is not a callable or a dot-notation path" % lookup_view)
mod_name, func_name = get_mod_func(lookup_view)
if not func_name: # No '.' in lookup_view
raise ImportError("Could not import '%s'. The path must be fully qualified." % lookup_view)
try:
mod = import_module(mod_name)
except ImportError:
parentmod, submod = get_mod_func(mod_name)
if submod and not module_has_submodule(import_module(parentmod), submod):
raise ViewDoesNotExist(
"Could not import '%s'. Parent module %s does not exist." %
(lookup_view, mod_name)
)
else:
raise
else:
try:
view_func = getattr(mod, func_name)
except AttributeError:
raise ViewDoesNotExist(
"Could not import '%s'. View does not exist in module %s." %
(lookup_view, mod_name)
)
else:
if not callable(view_func):
raise ViewDoesNotExist(
"Could not import '%s.%s'. View is not callable." %
(mod_name, func_name)
)
return view_func
def get_mod_func(callback):
# Convert 'django.views.news.stories.story_detail' to
# ['django.views.news.stories', 'story_detail']
try:
dot = callback.rindex('.')
except ValueError:
return callback, ''
return callback[:dot], callback[dot + 1:]
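# Quick illustration (added for this listing; not part of the original
# module) of how get_mod_func() splits a dotted path:
#
#     get_mod_func('django.views.news.stories.story_detail')
#     -> ('django.views.news.stories', 'story_detail')
#     get_mod_func('story_detail')
#     -> ('story_detail', '')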
| bsd-3-clause |
mbiciunas/nix | src/config/config_scripts.py | 1 | 3091 | # Nix
# Copyright (c) 2017 Mark Biciunas.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import typing
from config.config_script import ConfigScript
from utility.nix_error import NixError
class ConfigScripts:
def __init__(self) -> None:
self._scripts = []
def exist(self, name: str) -> bool:
try:
self.find_by_name(name)
return True
except NixError:
return False
def insert(self) -> ConfigScript:
_script = ConfigScript()
self._scripts.append(_script)
return _script
def delete(self, name: str) -> None:
_delete = False
for _script in self._scripts:
if _script.get_name() == name:
self._scripts.remove(_script)
_delete = True
break
if not _delete:
raise NixError("Unable to delete, script not found: {}".format(name))
def list(self, tags: typing.Optional[typing.List[str]] = None) -> typing.List[str]:
if tags is None:
tags = []
_result = []
for _script in self._scripts:
if all(tag in _script.get_tags() for tag in tags):
_result.append(_script.get_name())
return _result
def find_by_name(self, name: str) -> ConfigScript:
for _script in self._scripts:
if _script.get_name() == name:
return _script
raise NixError("Unable to find script: {}".format(name))
def find_by_tag(self, tag: str) -> typing.List[ConfigScript]:
_scripts = []
for _script in self._scripts:
if tag in _script.get_tags():
_scripts.append(_script)
if len(_scripts) > 0:
return _scripts
raise NixError("Unable to find script by tag: {}".format(tag))
def find_by_tags(self, tags: typing.List[str]) -> typing.List[ConfigScript]:
_scripts = []
for _script in self._scripts:
if all(tag in _script.get_tags() for tag in tags):
_scripts.append(_script)
return _scripts
def export_data(self) -> typing.List[str]:
_export = []
for _script in self._scripts:
_export.append(_script.export_data())
return _export
def import_data(self, _data: typing.List[dict]) -> None:
for _script_data in _data:
_script = ConfigScript()
_script.import_data(_script_data)
self._scripts.append(_script)
| gpl-3.0 |
kurikuri99/xen_study | tools/python/xen/util/xmlrpcclient.py | 41 | 4533 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2006 Anthony Liguori <aliguori@us.ibm.com>
# Copyright (C) 2007 XenSource Inc.
#============================================================================
from httplib import FakeSocket, HTTPConnection, HTTP
import socket
import string
import xmlrpclib
from types import StringTypes
from sys import hexversion
try:
import SSHTransport
ssh_enabled = True
except ImportError:
# SSHTransport is disabled on Python <2.4, because it uses the subprocess
# package.
ssh_enabled = False
# A new ServerProxy that also supports httpu urls. An http URL comes in the
# form:
#
# httpu:///absolute/path/to/socket.sock
#
# It assumes that the RPC handler is /RPC2. This probably needs to be improved
class HTTPUnixConnection(HTTPConnection):
def connect(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(self.host)
class HTTPUnix(HTTP):
_connection_class = HTTPUnixConnection
class UnixTransport(xmlrpclib.Transport):
def request(self, host, handler, request_body, verbose=0):
self.__handler = handler
return xmlrpclib.Transport.request(self, host, '/RPC2',
request_body, verbose)
def make_connection(self, host):
if hexversion < 0x02070000:
# python 2.6 or earlier
return HTTPUnix(self.__handler)
else:
# xmlrpclib.Transport changed in python 2.7
return HTTPUnixConnection(self.__handler)
# We need our own transport for HTTPS, because xmlrpclib.SafeTransport is
# broken -- it does not handle ERROR_ZERO_RETURN properly.
class HTTPSTransport(xmlrpclib.SafeTransport):
def _parse_response(self, file, sock):
p, u = self.getparser()
while 1:
try:
if sock:
response = sock.recv(1024)
else:
response = file.read(1024)
except socket.sslerror, exn:
if exn[0] == socket.SSL_ERROR_ZERO_RETURN:
break
raise
if not response:
break
if self.verbose:
print 'body:', repr(response)
p.feed(response)
file.close()
p.close()
return u.close()
# See xmlrpclib2.TCPXMLRPCServer._marshalled_dispatch.
def conv_string(x):
if isinstance(x, StringTypes):
s = string.replace(x, "'", r"\047")
exec "s = '" + s + "'"
return s
else:
return x
class ServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, transport=None, encoding=None, verbose=0,
allow_none=1):
if transport == None:
(protocol, rest) = uri.split(':', 1)
if protocol == 'httpu':
uri = 'http:' + rest
transport = UnixTransport()
elif protocol == 'https':
transport = HTTPSTransport()
elif protocol == 'ssh':
global ssh_enabled
if ssh_enabled:
(transport, uri) = SSHTransport.getHTTPURI(uri)
else:
raise ValueError(
"SSH transport not supported on Python <2.4.")
xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding,
verbose, allow_none)
def __request(self, methodname, params):
response = xmlrpclib.ServerProxy.__request(self, methodname, params)
if isinstance(response, tuple):
return tuple([conv_string(x) for x in response])
else:
return conv_string(response)
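# Illustrative sketch (added for this listing; not part of the original
# module): connect over a unix-domain socket using the httpu scheme handled
# above. The socket path is a hypothetical placeholder, and the call assumes
# the server exposes standard XML-RPC introspection.
if __name__ == '__main__':
    server = ServerProxy('httpu:///var/run/xend/xmlrpc.sock')
    print server.system.listMethods()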
| gpl-2.0 |
sudovijay/youtube-dl | youtube_dl/downloader/fragment.py | 11 | 3696 | from __future__ import division, unicode_literals
import os
import time
from .common import FileDownloader
from .http import HttpFD
from ..utils import (
encodeFilename,
sanitize_open,
)
class HttpQuietDownloader(HttpFD):
def to_screen(self, *args, **kwargs):
pass
class FragmentFD(FileDownloader):
"""
A base file downloader class for fragmented media (e.g. f4m/m3u8 manifests).
"""
def _prepare_and_start_frag_download(self, ctx):
self._prepare_frag_download(ctx)
self._start_frag_download(ctx)
def _prepare_frag_download(self, ctx):
self.to_screen('[%s] Total fragments: %d' % (self.FD_NAME, ctx['total_frags']))
self.report_destination(ctx['filename'])
dl = HttpQuietDownloader(
self.ydl,
{
'continuedl': True,
'quiet': True,
'noprogress': True,
'ratelimit': self.params.get('ratelimit', None),
'test': self.params.get('test', False),
}
)
tmpfilename = self.temp_name(ctx['filename'])
dest_stream, tmpfilename = sanitize_open(tmpfilename, 'wb')
ctx.update({
'dl': dl,
'dest_stream': dest_stream,
'tmpfilename': tmpfilename,
})
def _start_frag_download(self, ctx):
total_frags = ctx['total_frags']
# This dict stores the download progress, it's updated by the progress
# hook
state = {
'status': 'downloading',
'downloaded_bytes': 0,
'frag_index': 0,
'frag_count': total_frags,
'filename': ctx['filename'],
'tmpfilename': ctx['tmpfilename'],
}
start = time.time()
ctx['started'] = start
def frag_progress_hook(s):
if s['status'] not in ('downloading', 'finished'):
return
frag_total_bytes = s.get('total_bytes', 0)
if s['status'] == 'finished':
state['downloaded_bytes'] += frag_total_bytes
state['frag_index'] += 1
estimated_size = (
(state['downloaded_bytes'] + frag_total_bytes) /
(state['frag_index'] + 1) * total_frags)
time_now = time.time()
state['total_bytes_estimate'] = estimated_size
state['elapsed'] = time_now - start
if s['status'] == 'finished':
progress = self.calc_percent(state['frag_index'], total_frags)
else:
frag_downloaded_bytes = s['downloaded_bytes']
frag_progress = self.calc_percent(frag_downloaded_bytes,
frag_total_bytes)
progress = self.calc_percent(state['frag_index'], total_frags)
progress += frag_progress / float(total_frags)
state['eta'] = self.calc_eta(
start, time_now, estimated_size, state['downloaded_bytes'] + frag_downloaded_bytes)
state['speed'] = s.get('speed')
self._hook_progress(state)
ctx['dl'].add_progress_hook(frag_progress_hook)
return start
def _finish_frag_download(self, ctx):
ctx['dest_stream'].close()
elapsed = time.time() - ctx['started']
self.try_rename(ctx['tmpfilename'], ctx['filename'])
fsize = os.path.getsize(encodeFilename(ctx['filename']))
self._hook_progress({
'downloaded_bytes': fsize,
'total_bytes': fsize,
'filename': ctx['filename'],
'status': 'finished',
'elapsed': elapsed,
})
| unlicense |
partofthething/home-assistant | homeassistant/components/mobile_app/http_api.py | 1 | 3847 | """Provides an HTTP API for mobile_app."""
import secrets
from typing import Dict
from aiohttp.web import Request, Response
import emoji
from nacl.secret import SecretBox
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.const import ATTR_DEVICE_ID, CONF_WEBHOOK_ID, HTTP_CREATED
from homeassistant.helpers import config_validation as cv
from homeassistant.util import slugify
from .const import (
ATTR_APP_DATA,
ATTR_APP_ID,
ATTR_APP_NAME,
ATTR_APP_VERSION,
ATTR_DEVICE_NAME,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_OS_NAME,
ATTR_OS_VERSION,
ATTR_SUPPORTS_ENCRYPTION,
CONF_CLOUDHOOK_URL,
CONF_REMOTE_UI_URL,
CONF_SECRET,
CONF_USER_ID,
DOMAIN,
)
from .helpers import supports_encryption
class RegistrationsView(HomeAssistantView):
"""A view that accepts registration requests."""
url = "/api/mobile_app/registrations"
name = "api:mobile_app:register"
@RequestDataValidator(
vol.Schema(
{
vol.Optional(ATTR_APP_DATA, default={}): dict,
vol.Required(ATTR_APP_ID): cv.string,
vol.Required(ATTR_APP_NAME): cv.string,
vol.Required(ATTR_APP_VERSION): cv.string,
vol.Required(ATTR_DEVICE_NAME): cv.string,
vol.Required(ATTR_MANUFACTURER): cv.string,
vol.Required(ATTR_MODEL): cv.string,
vol.Optional(ATTR_DEVICE_ID): cv.string, # Added in 0.104
vol.Required(ATTR_OS_NAME): cv.string,
vol.Optional(ATTR_OS_VERSION): cv.string,
vol.Required(ATTR_SUPPORTS_ENCRYPTION, default=False): cv.boolean,
},
# To allow future apps to send more data
extra=vol.REMOVE_EXTRA,
)
)
async def post(self, request: Request, data: Dict) -> Response:
"""Handle the POST request for registration."""
hass = request.app["hass"]
webhook_id = secrets.token_hex()
if hass.components.cloud.async_active_subscription():
data[
CONF_CLOUDHOOK_URL
] = await hass.components.cloud.async_create_cloudhook(webhook_id)
data[CONF_WEBHOOK_ID] = webhook_id
if data[ATTR_SUPPORTS_ENCRYPTION] and supports_encryption():
data[CONF_SECRET] = secrets.token_hex(SecretBox.KEY_SIZE)
data[CONF_USER_ID] = request["hass_user"].id
if slugify(data[ATTR_DEVICE_NAME], separator=""):
# if slug is not empty and would not only be underscores
# use DEVICE_NAME
pass
elif emoji.emoji_count(data[ATTR_DEVICE_NAME]):
# If otherwise empty string contains emoji
# use descriptive name of the first emoji
data[ATTR_DEVICE_NAME] = emoji.demojize(
emoji.emoji_lis(data[ATTR_DEVICE_NAME])[0]["emoji"]
).replace(":", "")
else:
# Fallback to DEVICE_ID
data[ATTR_DEVICE_NAME] = data[ATTR_DEVICE_ID]
await hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, data=data, context={"source": "registration"}
)
)
remote_ui_url = None
try:
remote_ui_url = hass.components.cloud.async_remote_ui_url()
except hass.components.cloud.CloudNotAvailable:
pass
return self.json(
{
CONF_CLOUDHOOK_URL: data.get(CONF_CLOUDHOOK_URL),
CONF_REMOTE_UI_URL: remote_ui_url,
CONF_SECRET: data.get(CONF_SECRET),
CONF_WEBHOOK_ID: data[CONF_WEBHOOK_ID],
},
status_code=HTTP_CREATED,
)
| mit |
udacity/gae-discourse-sso | example.py | 1 | 1068 | """
Example usage for using DiscourseSSOHandler:
- set SSO_SECRET to the same value as sso_secret from the Discourse settings
- setup DISCOURSE_URL
- set enable_sso in Discourse and set sso_url to http://localhost:8080/
"""
import sys
sys.path.insert(0, '..')
import discourse_sso
from google.appengine.api import users
import hashlib
import hmac
import webapp2
class SSOHandler(discourse_sso.DiscourseSSOHandler):
USER_ID_HMAC_KEY = 'superkalifragilistikexpialigetisch'
SSO_SECRET = 'secret-secret-secret'
DISCOURSE_URL = 'https://discourse_host/session/sso_login'
def getUser(self):
user = users.get_current_user()
return {
# hash the user id to avoid leaking it to the external system
'id': hmac.new(self.USER_ID_HMAC_KEY, user.user_id(), hashlib.sha256).hexdigest(),
'username': user.nickname(),
'email': user.email(),
}
def redirectToLogin(self):
# taken care by app.yaml
pass
APP = webapp2.WSGIApplication([
webapp2.Route('/', SSOHandler)
])
| apache-2.0 |
dakcarto/QGIS | python/ext-libs/pygments/scanner.py | 365 | 3114 | # -*- coding: utf-8 -*-
"""
pygments.scanner
~~~~~~~~~~~~~~~~
This library implements a regex based scanner. Some languages
like Pascal are easy to parse but have some keywords that
depend on the context. Because of this it's impossible to lex
that just by using a regular expression lexer like the
`RegexLexer`.
Have a look at the `DelphiLexer` to get an idea of how to use
this scanner.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
class EndOfText(RuntimeError):
"""
Raise if end of text is reached and the user
tried to call a match function.
"""
class Scanner(object):
"""
Simple scanner
All method patterns are regular expression strings (not
compiled expressions!)
"""
def __init__(self, text, flags=0):
"""
:param text: The text which should be scanned
:param flags: default regular expression flags
"""
self.data = text
self.data_length = len(text)
self.start_pos = 0
self.pos = 0
self.flags = flags
self.last = None
self.match = None
self._re_cache = {}
@property
def eos(self):
"""`True` if the scanner reached the end of text."""
return self.pos >= self.data_length
def check(self, pattern):
"""
Apply `pattern` on the current position and return
the match object. (Doesn't touch pos). Use this for
lookahead.
"""
if self.eos:
raise EndOfText()
if pattern not in self._re_cache:
self._re_cache[pattern] = re.compile(pattern, self.flags)
return self._re_cache[pattern].match(self.data, self.pos)
def test(self, pattern):
"""Apply a pattern on the current position and check
if it matches. Doesn't touch pos."""
return self.check(pattern) is not None
def scan(self, pattern):
"""
Scan the text for the given pattern and update pos/match
and related fields. The return value is a boolean that
indicates if the pattern matched. The matched value is
stored on the instance as ``match``, the last value is
stored as ``last``. ``start_pos`` is the position of the
pointer before the pattern was matched, ``pos`` is the
end position.
"""
if self.eos:
raise EndOfText()
if pattern not in self._re_cache:
self._re_cache[pattern] = re.compile(pattern, self.flags)
self.last = self.match
m = self._re_cache[pattern].match(self.data, self.pos)
if m is None:
return False
self.start_pos = m.start()
self.pos = m.end()
self.match = m.group()
return True
def get_char(self):
"""Scan exactly one char."""
self.scan('.')
def __repr__(self):
return '<%s %d/%d>' % (
self.__class__.__name__,
self.pos,
self.data_length
)
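# Minimal usage sketch (added for this listing; not part of the original
# module): walk a string word by word with the Scanner defined above.
if __name__ == '__main__':
    s = Scanner('foo bar baz')
    while not s.eos:
        if s.scan(r'\w+'):
            print(s.match)   # foo, bar, baz
        else:
            s.get_char()     # skip the separator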
| gpl-2.0 |
e0ne/python-brickagentclient | brickclient/utils.py | 1 | 9615 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import pkg_resources
import re
import sys
import uuid
import six
import prettytable
from brickclient import exceptions
from brickclient.openstack.common import strutils
def arg(*args, **kwargs):
"""Decorator for CLI args."""
def _decorator(func):
add_arg(func, *args, **kwargs)
return func
return _decorator
def env(*vars, **kwargs):
"""
returns the first environment variable set
if none are non-empty, defaults to '' or keyword arg default
"""
for v in vars:
value = os.environ.get(v, None)
if value:
return value
return kwargs.get('default', '')
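# Usage sketch (the variable names are illustrative):
#
#   user = env('OS_USERNAME', 'BRICK_USERNAME', default='demo')
#
# returns the first of the listed variables that is set and non-empty,
# falling back to 'demo'.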
def add_arg(f, *args, **kwargs):
"""Bind CLI arguments to a shell.py `do_foo` function."""
if not hasattr(f, 'arguments'):
f.arguments = []
# NOTE(sirp): avoid dups that can occur when the module is shared across
# tests.
if (args, kwargs) not in f.arguments:
# Because of the semantics of decorator composition if we just append
# to the options list positional options will appear to be backwards.
f.arguments.insert(0, (args, kwargs))
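# Usage sketch -- binding an option to a hypothetical shell command (the
# command and option are illustrative):
#
#   @arg('--volume', metavar='<volume>', help='Name or ID of the volume.')
#   def do_show(cs, args):
#       ...
#
# shell.py can then read do_show.arguments to build its argparse parser.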
def add_resource_manager_extra_kwargs_hook(f, hook):
"""Adds hook to bind CLI arguments to ResourceManager calls.
The `do_foo` calls in shell.py will receive CLI args and then in turn pass
them through to the ResourceManager. Before passing through the args, the
hooks registered here will be called, giving us a chance to add extra
kwargs (taken from the command-line) to what's passed to the
ResourceManager.
"""
if not hasattr(f, 'resource_manager_kwargs_hooks'):
f.resource_manager_kwargs_hooks = []
names = [h.__name__ for h in f.resource_manager_kwargs_hooks]
if hook.__name__ not in names:
f.resource_manager_kwargs_hooks.append(hook)
def get_resource_manager_extra_kwargs(f, args, allow_conflicts=False):
"""Return extra_kwargs by calling resource manager kwargs hooks."""
hooks = getattr(f, "resource_manager_kwargs_hooks", [])
extra_kwargs = {}
for hook in hooks:
hook_name = hook.__name__
hook_kwargs = hook(args)
conflicting_keys = set(hook_kwargs.keys()) & set(extra_kwargs.keys())
if conflicting_keys and not allow_conflicts:
msg = ("Hook '%(hook_name)s' is attempting to redefine attributes "
"'%(conflicting_keys)s'" % {
'hook_name': hook_name,
'conflicting_keys': conflicting_keys
})
raise Exception(msg)
extra_kwargs.update(hook_kwargs)
return extra_kwargs
def unauthenticated(f):
"""
Adds 'unauthenticated' attribute to decorated function.
Usage:
@unauthenticated
def mymethod(f):
...
"""
f.unauthenticated = True
return f
def isunauthenticated(f):
"""
Checks to see if the function is marked as not requiring authentication
    with the @unauthenticated decorator. Returns True if the decorator
    is present, False otherwise.
"""
return getattr(f, 'unauthenticated', False)
def service_type(stype):
"""
Adds 'service_type' attribute to decorated function.
Usage:
@service_type('volume')
def mymethod(f):
...
"""
def inner(f):
f.service_type = stype
return f
return inner
def get_service_type(f):
"""
Retrieves service type from function
"""
return getattr(f, 'service_type', None)
def pretty_choice_list(l):
return ', '.join("'%s'" % i for i in l)
def _print(pt, order):
if sys.version_info >= (3, 0):
print(pt.get_string(sortby=order))
else:
print(strutils.safe_encode(pt.get_string(sortby=order)))
def print_list(objs, fields, formatters={}, order_by=None):
mixed_case_fields = ['serverId']
pt = prettytable.PrettyTable([f for f in fields], caching=False)
pt.aligns = ['l' for f in fields]
for o in objs:
row = []
for field in fields:
if field in formatters:
row.append(formatters[field](o))
else:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
else:
field_name = field.lower().replace(' ', '_')
if type(o) == dict and field in o:
data = o[field]
else:
data = getattr(o, field_name, '')
row.append(data)
pt.add_row(row)
if order_by is None:
order_by = fields[0]
_print(pt, order_by)
def print_dict(d, property="Property"):
pt = prettytable.PrettyTable([property, 'Value'], caching=False)
pt.aligns = ['l', 'l']
[pt.add_row(list(r)) for r in six.iteritems(d)]
_print(pt, property)
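# Usage sketch (the data is illustrative):
#
#   print_dict({'id': 'abc123', 'status': 'available'})
#
# renders a two-column Property/Value table; print_list() renders one row
# per object for the given fields.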
def find_resource(manager, name_or_id):
"""Helper for the _find_* methods."""
# first try to get entity as integer id
try:
if isinstance(name_or_id, int) or name_or_id.isdigit():
return manager.get(int(name_or_id))
except exceptions.NotFound:
pass
if sys.version_info <= (3, 0):
name_or_id = strutils.safe_decode(name_or_id)
# now try to get entity as uuid
try:
uuid.UUID(name_or_id)
return manager.get(name_or_id)
except (ValueError, exceptions.NotFound):
pass
try:
try:
return manager.find(human_id=name_or_id)
except exceptions.NotFound:
pass
# finally try to find entity by name
try:
return manager.find(name=name_or_id)
except exceptions.NotFound:
try:
return manager.find(display_name=name_or_id)
except (UnicodeDecodeError, exceptions.NotFound):
try:
# Volumes does not have name, but display_name
return manager.find(display_name=name_or_id)
except exceptions.NotFound:
msg = "No %s with a name or ID of '%s' exists." % \
(manager.resource_class.__name__.lower(), name_or_id)
raise exceptions.CommandError(msg)
except exceptions.NoUniqueMatch:
msg = ("Multiple %s matches found for '%s', use an ID to be more"
" specific." % (manager.resource_class.__name__.lower(),
name_or_id))
raise exceptions.CommandError(msg)
def find_volume(cs, volume):
"""Get a volume by name or ID."""
return find_resource(cs.volumes, volume)
def _format_servers_list_networks(server):
output = []
for (network, addresses) in list(server.networks.items()):
if len(addresses) == 0:
continue
addresses_csv = ', '.join(addresses)
group = "%s=%s" % (network, addresses_csv)
output.append(group)
return '; '.join(output)
class HookableMixin(object):
"""Mixin so classes can register and run hooks."""
_hooks_map = {}
@classmethod
def add_hook(cls, hook_type, hook_func):
if hook_type not in cls._hooks_map:
cls._hooks_map[hook_type] = []
cls._hooks_map[hook_type].append(hook_func)
@classmethod
def run_hooks(cls, hook_type, *args, **kwargs):
hook_funcs = cls._hooks_map.get(hook_type) or []
for hook_func in hook_funcs:
hook_func(*args, **kwargs)
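# Usage sketch (the hook type and function are illustrative):
#
#   class VolumeManager(HookableMixin):
#       pass
#
#   VolumeManager.add_hook('modify_body_for_update', lambda body: None)
#   VolumeManager.run_hooks('modify_body_for_update', {'volume': {}})
#
# Note that _hooks_map is a class attribute of HookableMixin, so hooks
# registered through one subclass are shared by all subclasses.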
def safe_issubclass(*args):
"""Like issubclass, but will just return False if not a class."""
try:
if issubclass(*args):
return True
except TypeError:
pass
return False
def import_class(import_str):
"""Returns a class from a string including module and class."""
mod_str, _sep, class_str = import_str.rpartition('.')
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
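# Usage sketch: the dotted path must end in a class name, e.g.
#
#   cls = import_class('brickclient.utils.HookableMixin')
#
# rpartition() splits on the last '.', so everything before it has to be
# an importable module path.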
def _load_entry_point(ep_name, name=None):
"""Try to load the entry point ep_name that matches name."""
for ep in pkg_resources.iter_entry_points(ep_name, name=name):
try:
return ep.load()
except (ImportError, pkg_resources.UnknownExtra, AttributeError):
continue
_slugify_strip_re = re.compile(r'[^\w\s-]')
_slugify_hyphenate_re = re.compile(r'[-\s]+')
# http://code.activestate.com/recipes/
# 577257-slugify-make-a-string-usable-in-a-url-or-filename/
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
From Django's "django/template/defaultfilters.py".
"""
import unicodedata
if not isinstance(value, six.text_type):
value = six.text_type(value)
    value = unicodedata.normalize('NFKD', value)
    value = value.encode('ascii', 'ignore').decode('ascii')
value = six.text_type(_slugify_strip_re.sub('', value).strip().lower())
return _slugify_hyphenate_re.sub('-', value)
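# Usage sketch:
#
#   slugify('Hello, World!')  # -> 'hello-world'
#   slugify(u'Caf\xe9 menu')  # -> 'cafe-menu' (NFKD strips the accent)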
| apache-2.0 |
chicagopython/CodingWorkshops | problems/webdev/django_pn_tracker/django_pn_tracker/apps/challenges/models.py | 1 | 1614 | from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
class DateModelBase(models.Model):
# Learn about model fields:
# https://docs.djangoproject.com/en/2.1/ref/models/fields/
created_date = models.DateTimeField(auto_now_add=True)
modified_date = models.DateTimeField(auto_now=True)
class Meta:
# Learn about abstract base classes:
# https://docs.djangoproject.com/en/2.1/topics/db/models/#abstract-base-classes
abstract = True
class ChallengeTools(DateModelBase, models.Model):
name = models.CharField(max_length=128)
description = models.TextField(blank=True, null=True)
def __str__(self):
return f"{self.name}"
class Meta:
# learn about django model Meta options:
# https://docs.djangoproject.com/en/2.1/ref/models/options/
verbose_name = "Challenge Tool"
verbose_name_plural = "Challenge Tools"
class Challenge(DateModelBase, models.Model):
name = models.CharField(max_length=128)
description = models.TextField(blank=True, null=True)
tools = models.ManyToManyField("challenges.ChallengeTools", blank=True)
def __str__(self):
return f"{self.name}"
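# Usage sketch (e.g. from `python manage.py shell`; the names are
# illustrative):
#
#   tool = ChallengeTools.objects.create(name="pytest")
#   challenge = Challenge.objects.create(name="Testing 101")
#   challenge.tools.add(tool)
#   challenge.tools.all()  # -> <QuerySet [<ChallengeTools: pytest>]>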
########################################
# Fill in all ###s in the model below. #
# Uncomment the class when complete. #
########################################
# class AttendeeInfo(###, ###):
# date = ###
# name = ###
# challenge = ###(###, on_delete=models.CASCADE)
# skills = ###
#
# def __str__(self):
# return f"{self.challenge} {self.name}"
| gpl-3.0 |