| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
turbokongen/home-assistant | homeassistant/components/netgear_lte/__init__.py | 16 | 11560 | """Support for Netgear LTE modems."""
import asyncio
from datetime import timedelta
import logging
import aiohttp
import attr
import eternalegypt
import voluptuous as vol
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.notify import DOMAIN as NOTIFY_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import (
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PASSWORD,
CONF_RECIPIENT,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from . import sensor_types
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=10)
DISPATCHER_NETGEAR_LTE = "netgear_lte_update"
DOMAIN = "netgear_lte"
DATA_KEY = "netgear_lte"
EVENT_SMS = "netgear_lte_sms"
SERVICE_DELETE_SMS = "delete_sms"
SERVICE_SET_OPTION = "set_option"
SERVICE_CONNECT_LTE = "connect_lte"
SERVICE_DISCONNECT_LTE = "disconnect_lte"
ATTR_HOST = "host"
ATTR_SMS_ID = "sms_id"
ATTR_FROM = "from"
ATTR_MESSAGE = "message"
ATTR_FAILOVER = "failover"
ATTR_AUTOCONNECT = "autoconnect"
FAILOVER_MODES = ["auto", "wire", "mobile"]
AUTOCONNECT_MODES = ["never", "home", "always"]
NOTIFY_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DOMAIN): cv.string,
vol.Optional(CONF_RECIPIENT, default=[]): vol.All(cv.ensure_list, [cv.string]),
}
)
SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(
CONF_MONITORED_CONDITIONS, default=sensor_types.DEFAULT_SENSORS
): vol.All(cv.ensure_list, [vol.In(sensor_types.ALL_SENSORS)])
}
)
BINARY_SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(
CONF_MONITORED_CONDITIONS, default=sensor_types.DEFAULT_BINARY_SENSORS
): vol.All(cv.ensure_list, [vol.In(sensor_types.ALL_BINARY_SENSORS)])
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(NOTIFY_DOMAIN, default={}): vol.All(
cv.ensure_list, [NOTIFY_SCHEMA]
),
vol.Optional(SENSOR_DOMAIN, default={}): SENSOR_SCHEMA,
vol.Optional(
BINARY_SENSOR_DOMAIN, default={}
): BINARY_SENSOR_SCHEMA,
}
)
],
)
},
extra=vol.ALLOW_EXTRA,
)
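# Illustrative sketch, not part of the upstream component: a minimal mapping
# that CONFIG_SCHEMA accepts. The host and password values are made-up
# placeholders; CONFIG_SCHEMA(_EXAMPLE_CONFIG) returns the same structure with
# the notify/sensor/binary_sensor defaults filled in.
_EXAMPLE_CONFIG = {
    DOMAIN: [
        {
            CONF_HOST: "192.168.5.1",
            CONF_PASSWORD: "secret",
        }
    ]
}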
DELETE_SMS_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_HOST): cv.string,
vol.Required(ATTR_SMS_ID): vol.All(cv.ensure_list, [cv.positive_int]),
}
)
SET_OPTION_SCHEMA = vol.Schema(
vol.All(
cv.has_at_least_one_key(ATTR_FAILOVER, ATTR_AUTOCONNECT),
{
vol.Optional(ATTR_HOST): cv.string,
vol.Optional(ATTR_FAILOVER): vol.In(FAILOVER_MODES),
vol.Optional(ATTR_AUTOCONNECT): vol.In(AUTOCONNECT_MODES),
},
)
)
CONNECT_LTE_SCHEMA = vol.Schema({vol.Optional(ATTR_HOST): cv.string})
DISCONNECT_LTE_SCHEMA = vol.Schema({vol.Optional(ATTR_HOST): cv.string})
@attr.s
class ModemData:
"""Class for modem state."""
hass = attr.ib()
host = attr.ib()
modem = attr.ib()
data = attr.ib(init=False, default=None)
connected = attr.ib(init=False, default=True)
async def async_update(self):
"""Call the API to update the data."""
try:
self.data = await self.modem.information()
if not self.connected:
_LOGGER.warning("Connected to %s", self.host)
self.connected = True
except eternalegypt.Error:
if self.connected:
_LOGGER.warning("Lost connection to %s", self.host)
self.connected = False
self.data = None
async_dispatcher_send(self.hass, DISPATCHER_NETGEAR_LTE)
@attr.s
class LTEData:
"""Shared state."""
websession = attr.ib()
modem_data = attr.ib(init=False, factory=dict)
def get_modem_data(self, config):
"""Get modem_data for the host in config."""
if config[CONF_HOST] is not None:
return self.modem_data.get(config[CONF_HOST])
if len(self.modem_data) != 1:
return None
return next(iter(self.modem_data.values()))
async def async_setup(hass, config):
"""Set up Netgear LTE component."""
if DATA_KEY not in hass.data:
websession = async_create_clientsession(
hass, cookie_jar=aiohttp.CookieJar(unsafe=True)
)
hass.data[DATA_KEY] = LTEData(websession)
async def service_handler(service):
"""Apply a service."""
host = service.data.get(ATTR_HOST)
conf = {CONF_HOST: host}
modem_data = hass.data[DATA_KEY].get_modem_data(conf)
if not modem_data:
_LOGGER.error("%s: host %s unavailable", service.service, host)
return
if service.service == SERVICE_DELETE_SMS:
for sms_id in service.data[ATTR_SMS_ID]:
await modem_data.modem.delete_sms(sms_id)
elif service.service == SERVICE_SET_OPTION:
failover = service.data.get(ATTR_FAILOVER)
if failover:
await modem_data.modem.set_failover_mode(failover)
autoconnect = service.data.get(ATTR_AUTOCONNECT)
if autoconnect:
await modem_data.modem.set_autoconnect_mode(autoconnect)
elif service.service == SERVICE_CONNECT_LTE:
await modem_data.modem.connect_lte()
elif service.service == SERVICE_DISCONNECT_LTE:
await modem_data.modem.disconnect_lte()
service_schemas = {
SERVICE_DELETE_SMS: DELETE_SMS_SCHEMA,
SERVICE_SET_OPTION: SET_OPTION_SCHEMA,
SERVICE_CONNECT_LTE: CONNECT_LTE_SCHEMA,
SERVICE_DISCONNECT_LTE: DISCONNECT_LTE_SCHEMA,
}
for service, schema in service_schemas.items():
hass.services.async_register(
DOMAIN, service, service_handler, schema=schema
)
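    # Illustrative service call (values are made-up, not from the source):
    #
    #   service: netgear_lte.delete_sms
    #   data:
    #     host: 192.168.5.1
    #     sms_id: [3, 4]
    #
    # DELETE_SMS_SCHEMA coerces sms_id to a list of positive ints, and the
    # handler above resolves the modem by host, falling back to the only
    # configured modem when host is omitted.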
netgear_lte_config = config[DOMAIN]
# Set up each modem
tasks = [_setup_lte(hass, lte_conf) for lte_conf in netgear_lte_config]
await asyncio.wait(tasks)
# Load platforms for each modem
for lte_conf in netgear_lte_config:
# Notify
for notify_conf in lte_conf[NOTIFY_DOMAIN]:
discovery_info = {
CONF_HOST: lte_conf[CONF_HOST],
CONF_NAME: notify_conf.get(CONF_NAME),
NOTIFY_DOMAIN: notify_conf,
}
hass.async_create_task(
discovery.async_load_platform(
hass, NOTIFY_DOMAIN, DOMAIN, discovery_info, config
)
)
# Sensor
sensor_conf = lte_conf.get(SENSOR_DOMAIN)
discovery_info = {CONF_HOST: lte_conf[CONF_HOST], SENSOR_DOMAIN: sensor_conf}
hass.async_create_task(
discovery.async_load_platform(
hass, SENSOR_DOMAIN, DOMAIN, discovery_info, config
)
)
# Binary Sensor
binary_sensor_conf = lte_conf.get(BINARY_SENSOR_DOMAIN)
discovery_info = {
CONF_HOST: lte_conf[CONF_HOST],
BINARY_SENSOR_DOMAIN: binary_sensor_conf,
}
hass.async_create_task(
discovery.async_load_platform(
hass, BINARY_SENSOR_DOMAIN, DOMAIN, discovery_info, config
)
)
return True
async def _setup_lte(hass, lte_config):
"""Set up a Netgear LTE modem."""
host = lte_config[CONF_HOST]
password = lte_config[CONF_PASSWORD]
websession = hass.data[DATA_KEY].websession
modem = eternalegypt.Modem(hostname=host, websession=websession)
modem_data = ModemData(hass, host, modem)
try:
await _login(hass, modem_data, password)
except eternalegypt.Error:
retry_task = hass.loop.create_task(_retry_login(hass, modem_data, password))
@callback
def cleanup_retry(event):
"""Clean up retry task resources."""
if not retry_task.done():
retry_task.cancel()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, cleanup_retry)
async def _login(hass, modem_data, password):
"""Log in and complete setup."""
await modem_data.modem.login(password=password)
def fire_sms_event(sms):
"""Send an SMS event."""
data = {
ATTR_HOST: modem_data.host,
ATTR_SMS_ID: sms.id,
ATTR_FROM: sms.sender,
ATTR_MESSAGE: sms.message,
}
hass.bus.async_fire(EVENT_SMS, data)
await modem_data.modem.add_sms_listener(fire_sms_event)
await modem_data.async_update()
hass.data[DATA_KEY].modem_data[modem_data.host] = modem_data
async def _update(now):
"""Periodic update."""
await modem_data.async_update()
update_unsub = async_track_time_interval(hass, _update, SCAN_INTERVAL)
async def cleanup(event):
"""Clean up resources."""
update_unsub()
await modem_data.modem.logout()
del hass.data[DATA_KEY].modem_data[modem_data.host]
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, cleanup)
async def _retry_login(hass, modem_data, password):
"""Sleep and retry setup."""
_LOGGER.warning("Could not connect to %s. Will keep trying", modem_data.host)
modem_data.connected = False
delay = 15
while not modem_data.connected:
await asyncio.sleep(delay)
try:
await _login(hass, modem_data, password)
except eternalegypt.Error:
delay = min(2 * delay, 300)
@attr.s
class LTEEntity(Entity):
"""Base LTE entity."""
modem_data = attr.ib()
sensor_type = attr.ib()
_unique_id = attr.ib(init=False)
@_unique_id.default
def _init_unique_id(self):
"""Register unique_id while we know data is valid."""
return f"{self.sensor_type}_{self.modem_data.data.serial_number}"
async def async_added_to_hass(self):
"""Register callback."""
self.async_on_remove(
async_dispatcher_connect(
self.hass, DISPATCHER_NETGEAR_LTE, self.async_write_ha_state
)
)
async def async_update(self):
"""Force update of state."""
await self.modem_data.async_update()
@property
def should_poll(self):
"""Return that the sensor should not be polled."""
return False
@property
def available(self):
"""Return the availability of the sensor."""
return self.modem_data.data is not None
@property
def unique_id(self):
"""Return a unique ID like 'usage_5TG365AB0078V'."""
return self._unique_id
@property
def name(self):
"""Return the name of the sensor."""
return f"Netgear LTE {self.sensor_type}"
| apache-2.0 |
gnperumal/exscript | src/Exscript/protocols/drivers/shell.py | 7 | 1299 | # Copyright (C) 2007-2010 Samuel Abels.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
A generic shell driver that handles unknown unix shells.
"""
import re
from Exscript.protocols.drivers.driver import Driver
_user_re = [re.compile(r'(user|login): $', re.I)]
_password_re = [re.compile(r'Password: ?$')]
_linux_re = re.compile(r'\blinux\b', re.I)
class ShellDriver(Driver):
def __init__(self):
Driver.__init__(self, 'shell')
self.user_re = _user_re
self.password_re = _password_re
def check_head_for_os(self, string):
if _linux_re.search(string):
return 70
if _user_re[0].search(string):
return 20
return 0
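# Illustrative usage sketch, not part of the original driver: the scores
# returned by check_head_for_os rank how confident the driver is that the
# remote end is a generic unix shell. The banner strings below are invented.
if __name__ == '__main__':
    driver = ShellDriver()
    assert driver.check_head_for_os('Linux fs01 2.6.32 #1 SMP x86_64') == 70
    assert driver.check_head_for_os('login: ') == 20
    assert driver.check_head_for_os('Cisco IOS Software') == 0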
| gpl-2.0 |
lin-credible/scikit-learn | benchmarks/bench_sgd_regression.py | 283 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
| bsd-3-clause |
nhicher/ansible | lib/ansible/modules/cloud/openstack/os_server_group.py | 34 | 4713 | #!/usr/bin/python
# Copyright (c) 2016 Catalyst IT Limited
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_server_group
short_description: Manage OpenStack server groups
extends_documentation_fragment: openstack
version_added: "2.2"
author: "Lingxian Kong (@kong)"
description:
- Add or remove server groups from OpenStack.
options:
state:
description:
- Indicate desired state of the resource. When I(state) is 'present',
then I(policies) is required.
choices: ['present', 'absent']
required: false
default: present
name:
description:
- Server group name.
required: true
policies:
description:
- A list of one or more policy names to associate with the server
group. The list must contain at least one policy name. The current
valid policy names are anti-affinity, affinity, soft-anti-affinity
and soft-affinity.
required: false
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
EXAMPLES = '''
# Create a server group with 'affinity' policy.
- os_server_group:
state: present
auth:
auth_url: https://identity.example.com
username: admin
password: admin
project_name: admin
name: my_server_group
policies:
- affinity
# Delete 'my_server_group' server group.
- os_server_group:
state: absent
auth:
auth_url: https://identity.example.com
username: admin
password: admin
project_name: admin
name: my_server_group
'''
RETURN = '''
id:
description: Unique UUID.
returned: success
type: string
name:
description: The name of the server group.
returned: success
type: string
policies:
description: A list of one or more policy names of the server group.
returned: success
type: list
members:
description: A list of members in the server group.
returned: success
type: list
metadata:
description: Metadata key and value pairs.
returned: success
type: dict
project_id:
description: The project ID who owns the server group.
returned: success
type: string
user_id:
description: The user ID who owns the server group.
returned: success
type: string
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _system_state_change(state, server_group):
if state == 'present' and not server_group:
return True
if state == 'absent' and server_group:
return True
return False
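# A change is needed exactly when desired state and reality disagree:
# state='present' with no existing group means "create", state='absent' with
# an existing group means "delete"; every other combination is a no-op.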
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
policies=dict(required=False, type='list'),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(
argument_spec,
supports_check_mode=True,
**module_kwargs
)
name = module.params['name']
policies = module.params['policies']
state = module.params['state']
sdk, cloud = openstack_cloud_from_module(module)
try:
server_group = cloud.get_server_group(name)
if module.check_mode:
module.exit_json(
changed=_system_state_change(state, server_group)
)
changed = False
if state == 'present':
if not server_group:
if not policies:
module.fail_json(
msg="Parameter 'policies' is required in Server Group "
"Create"
)
server_group = cloud.create_server_group(name, policies)
changed = True
module.exit_json(
changed=changed,
id=server_group['id'],
server_group=server_group
)
if state == 'absent':
if server_group:
cloud.delete_server_group(server_group['id'])
changed = True
module.exit_json(changed=changed)
except sdk.exceptions.OpenStackCloudException as e:
module.fail_json(msg=str(e), extra_data=e.extra_data)
if __name__ == '__main__':
main()
| gpl-3.0 |
charleswhchan/ansible | lib/ansible/module_utils/redhat.py | 78 | 10206 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), James Laska
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import types
import ConfigParser
class RegistrationBase(object):
def __init__(self, module, username=None, password=None):
self.module = module
self.username = username
self.password = password
def configure(self):
raise NotImplementedError("Must be implemented by a sub-class")
def enable(self):
# Remove any existing redhat.repo
redhat_repo = '/etc/yum.repos.d/redhat.repo'
if os.path.isfile(redhat_repo):
os.unlink(redhat_repo)
def register(self):
raise NotImplementedError("Must be implemented by a sub-class")
def unregister(self):
raise NotImplementedError("Must be implemented by a sub-class")
def unsubscribe(self):
raise NotImplementedError("Must be implemented by a sub-class")
def update_plugin_conf(self, plugin, enabled=True):
plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
if os.path.isfile(plugin_conf):
cfg = ConfigParser.ConfigParser()
cfg.read([plugin_conf])
if enabled:
cfg.set('main', 'enabled', 1)
else:
cfg.set('main', 'enabled', 0)
            # 'rwa+' is not a valid file mode; open for (truncating) write so
            # the rewritten config fully replaces the old contents.
            fd = open(plugin_conf, 'w')
cfg.write(fd)
fd.close()
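    # Illustrative effect: update_plugin_conf('rhnplugin', False) rewrites
    # /etc/yum/pluginconf.d/rhnplugin.conf so its [main] section reads
    # enabled=0, while update_plugin_conf('subscription-manager', True)
    # sets enabled=1 in the corresponding file.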
def subscribe(self, **kwargs):
raise NotImplementedError("Must be implemented by a sub-class")
class Rhsm(RegistrationBase):
def __init__(self, module, username=None, password=None):
RegistrationBase.__init__(self, module, username, password)
self.config = self._read_config()
self.module = module
def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'):
'''
Load RHSM configuration from /etc/rhsm/rhsm.conf.
Returns:
* ConfigParser object
'''
# Read RHSM defaults ...
cp = ConfigParser.ConfigParser()
cp.read(rhsm_conf)
        # Add support for specifying a default value without having to stand up extra configuration
# Yeah, I know this should be subclassed ... but, oh well
def get_option_default(self, key, default=''):
sect, opt = key.split('.', 1)
if self.has_section(sect) and self.has_option(sect, opt):
return self.get(sect, opt)
else:
return default
cp.get_option = types.MethodType(get_option_default, cp, ConfigParser.ConfigParser)
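        # With this shim, lookups take dotted "section.option" keys and fall
        # back to a default instead of raising, e.g. cp.get_option('rhsm.baseurl', '')
        # returns the configured value or '' when the option is absent.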
return cp
def enable(self):
'''
Enable the system to receive updates from subscription-manager.
This involves updating affected yum plugins and removing any
conflicting yum repositories.
'''
RegistrationBase.enable(self)
self.update_plugin_conf('rhnplugin', False)
self.update_plugin_conf('subscription-manager', True)
def configure(self, **kwargs):
'''
Configure the system as directed for registration with RHN
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'config']
# Pass supplied **kwargs as parameters to subscription-manager. Ignore
# non-configuration parameters and replace '_' with '.'. For example,
        # 'server_hostname' becomes '--server.hostname'.
for k,v in kwargs.items():
if re.search(r'^(system|rhsm)_', k):
args.append('--%s=%s' % (k.replace('_','.'), v))
self.module.run_command(args, check_rc=True)
@property
def is_registered(self):
'''
        Determine whether the current system is registered with RHN.
Returns:
* Boolean - whether the current system is currently registered to
RHN.
'''
        # Quick version (disabled): checking for consumer certificates is
        # cheaper, but 'subscription-manager identity' is authoritative.
if False:
return os.path.isfile('/etc/pki/consumer/cert.pem') and \
os.path.isfile('/etc/pki/consumer/key.pem')
args = ['subscription-manager', 'identity']
rc, stdout, stderr = self.module.run_command(args, check_rc=False)
if rc == 0:
return True
else:
return False
def register(self, username, password, autosubscribe, activationkey):
'''
Register the current system to the provided RHN server
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'register']
# Generate command arguments
if activationkey:
args.append('--activationkey "%s"' % activationkey)
else:
if autosubscribe:
args.append('--autosubscribe')
if username:
args.extend(['--username', username])
if password:
args.extend(['--password', password])
# Do the needful...
rc, stderr, stdout = self.module.run_command(args, check_rc=True)
def unsubscribe(self):
'''
Unsubscribe a system from all subscribed channels
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'unsubscribe', '--all']
rc, stderr, stdout = self.module.run_command(args, check_rc=True)
def unregister(self):
'''
Unregister a currently registered system
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'unregister']
rc, stderr, stdout = self.module.run_command(args, check_rc=True)
def subscribe(self, regexp):
'''
Subscribe current system to available pools matching the specified
regular expression
Raises:
* Exception - if error occurs while running command
'''
# Available pools ready for subscription
available_pools = RhsmPools(self.module)
for pool in available_pools.filter(regexp):
pool.subscribe()
class RhsmPool(object):
'''
Convenience class for housing subscription information
'''
def __init__(self, module, **kwargs):
self.module = module
for k,v in kwargs.items():
setattr(self, k, v)
def __str__(self):
return str(self.__getattribute__('_name'))
def subscribe(self):
args = "subscription-manager subscribe --pool %s" % self.PoolId
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
if rc == 0:
return True
else:
return False
class RhsmPools(object):
"""
    This class is used for manipulating pool subscriptions with RHSM
"""
def __init__(self, module):
self.module = module
self.products = self._load_product_list()
def __iter__(self):
return self.products.__iter__()
def _load_product_list(self):
"""
Loads list of all available pools for system in data structure
"""
args = "subscription-manager list --available"
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
products = []
for line in stdout.split('\n'):
# Remove leading+trailing whitespace
line = line.strip()
# An empty line implies the end of an output group
if len(line) == 0:
continue
# If a colon ':' is found, parse
elif ':' in line:
(key, value) = line.split(':',1)
                key = key.strip().replace(" ", "")  # strip spaces so the key works as an attribute name
value = value.strip()
if key in ['ProductName', 'SubscriptionName']:
# Remember the name for later processing
products.append(RhsmPool(self.module, _name=value, key=value))
elif products:
# Associate value with most recently recorded product
products[-1].__setattr__(key, value)
# FIXME - log some warning?
#else:
# warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
return products
def filter(self, regexp='^$'):
'''
Return a list of RhsmPools whose name matches the provided regular expression
'''
r = re.compile(regexp)
for product in self.products:
if r.search(product._name):
yield product
| gpl-3.0 |
bstark2/AggieAir_MESA | sw/ground_segment/python/settings_app/settingsframe.py | 14 | 6094 | #Boa:Frame:PlotFrame
import wx
import sys
import os
sys.path.append(os.getenv("PAPARAZZI_HOME") + "/sw/lib/python")
from settings_tool import IvySettingsInterface
def create(parent, ac_ids):
return SettingsFrame(parent, ac_ids)
SLIDER_ID_OFFSET = 250000
BUTTON_ID_OFFSET = 2 * 250000
SLIDER_FACTOR = 100
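# wx sliders only hold integers, so float-valued settings are multiplied by
# SLIDER_FACTOR when written to a slider and divided back out when read,
# giving two decimal places of resolution.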
# Wraps TextCtrl to provide added functionality
class TextCtrlSetting(wx.TextCtrl):
update_callback = None
def __init__(self, parent, setting):
self.setting = setting
wx.TextCtrl.__init__(self, parent=parent, name=setting.shortname, id=setting.index)
self.Bind(wx.EVT_TEXT, self.onEvtText, self)
def RegisterUpdateCallback(self, cb):
self.update_callback = cb
def onEvtText(self, event):
index = int(self.GetId())
try:
value = float(self.GetValue())
self.update_callback(index, value)
except:
return
# helper function to toggle edit box boldness (bold = user-set, normal=downlink-received)
def setBold(self, bold):
font = self.GetFont()
if (bold):
font.SetWeight(wx.FONTWEIGHT_BOLD)
else:
font.SetWeight(wx.FONTWEIGHT_NORMAL)
self.SetFont(font)
def SetSettingValue(self, value):
if (self.setting.step < 1):
self.SetValue("%.2f" % float(value))
else:
self.SetValue("%i" % int(float(value)))
# Wraps slider
class SettingCtrl(wx.Slider):
update_callback = None
def __init__(self, parent, setting):
self.setting = setting
max_v = int(setting.max_value) * SLIDER_FACTOR
min_v = int(setting.min_value) * SLIDER_FACTOR
if (min_v >= max_v):
max_v = max_v + 1
wx.Slider.__init__(self, parent=parent, minValue=min_v, maxValue=max_v, style=wx.SL_HORIZONTAL | wx.SL_AUTOTICKS, size=(200, 30), id=setting.index + SLIDER_ID_OFFSET)
self.SetLineSize(setting.step * SLIDER_FACTOR)
self.Bind(wx.EVT_MOUSEWHEEL, self.sliderWheel, self)
self.Bind(wx.EVT_SLIDER, self.OnEvtSlider, self)
def RegisterUpdateCallback(self, cb):
self.update_callback = cb
def OnEvtSlider(self, event):
slider = event.GetEventObject()
self.update_callback(slider.GetSettingIndex(), slider.GetSettingValue())
# Called on mouse wheel events (default handler seems backwards?)
def sliderWheel(self, event):
slider = event.GetEventObject()
if (event.GetWheelRotation() > 0):
slider.SetValue(slider.GetValue() + slider.GetLineSize())
else:
slider.SetValue(slider.GetValue() - slider.GetLineSize())
self.update_callback(slider.GetSettingIndex(), slider.GetSettingValue())
def GetSettingIndex(self):
index = int(self.GetId())
if index >= SLIDER_ID_OFFSET:
index = index - SLIDER_ID_OFFSET
return index
    def SetSettingValue(self, value):
        # Scale before truncating so settings with step < 1 keep their
        # fractional part at slider resolution.
        self.SetValue(int(float(value) * SLIDER_FACTOR))
def GetSettingValue(self):
if (self.setting.step < 1):
return float(self.GetValue()) / SLIDER_FACTOR
else:
return int(self.GetValue()) / SLIDER_FACTOR
class SettingsFrame(wx.Frame):
edits = []
sliders = []
def __init__(self, parent, ac_ids):
self.settings = IvySettingsInterface(ac_ids)
title = "Settings %s (%s)" % (ac_ids, self.settings.GetACName())
wx.Frame.__init__(self, name=u'SettingsFrame', parent=parent, title=title, size=(480, 320))
self.book = wx.Notebook(self)
self.updates = []
self.Bind( wx.EVT_CLOSE, self.OnClose)
for setting_group in self.settings.groups:
page = wx.Panel(self.book)
vert_box = wx.BoxSizer(orient=wx.VERTICAL)
for setting in setting_group.member_list:
horz_box = wx.BoxSizer(orient=wx.HORIZONTAL)
text = wx.StaticText(page, label=setting.shortname, size=(100,30))
# Edit
edit = TextCtrlSetting(page, setting)
edit.RegisterUpdateCallback(self.editUpdate)
self.edits.append(edit)
# Slider
slider = SettingCtrl(page, setting)
slider.RegisterUpdateCallback(self.updateEditFromSlider)
self.sliders.append(slider)
# Button
button = wx.Button(page, id=setting.index + BUTTON_ID_OFFSET, label="Apply")
self.Bind(wx.EVT_BUTTON, self.onButton)
horz_box.AddWindow(text)
horz_box.AddWindow(edit)
horz_box.AddWindow(slider)
horz_box.AddWindow(button)
vert_box.AddWindow(horz_box)
page.SetSizer(vert_box)
self.book.AddPage(page, setting_group.name)
self.settings.RegisterCallback(self.onUpdate)
# Copy slider value into associated edit box
def updateEditFromSlider(self, index, value):
self.edits[index].ChangeValue(str(value))
self.edits[index].setBold(True)
# Called on edit box update
def editUpdate(self, index, value):
self.sliders[index].SetSettingValue(value)
self.edits[index].setBold(True)
# Called on button push
def onButton(self, event):
button = event.GetEventObject()
index = int(button.GetId())
if index >= BUTTON_ID_OFFSET:
index = index - BUTTON_ID_OFFSET
self.settings.lookup[index].value = self.sliders[index].GetSettingValue()
self.settings.SendSetting(index)
# Called for remote settings updates
def onUpdate(self, index, value, fromRemote):
# Schedule the call for later via wx (run after events)
# to prevent crashy crashy
wx.CallAfter(self.update_value, index, value, fromRemote)
# Called to update GUI with new values
def update_value(self, index, value, fromRemote):
editCtrl = self.edits[index]
if fromRemote and editCtrl.FindFocus() == editCtrl:
# don't process remote updates if the control is focused
return
editCtrl.SetSettingValue(value)
editCtrl.setBold(not fromRemote)
self.sliders[index].SetSettingValue(value)
def OnClose(self, event):
# need to forward close to canvas so that ivy is shut down, otherwise ivy hangs the shutdown
self.settings.OnClose()
self.Destroy()
| gpl-2.0 |
idlesign/srptools | tests/test_client_server.py | 1 | 4336 | from __future__ import unicode_literals
import pytest
from srptools import SRPContext, SRPClientSession, SRPServerSession, SRPException
from srptools.utils import int_from_hex, value_encode
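# Rough outline of the SRP-6a flow these tests walk through (shared context:
# prime N and generator g; the server stores (username, verifier, salt)):
#   1. server -> client: server public B and the user's salt
#   2. client -> server: client public A plus proof M1 of the session key
#   3. server verifies M1 and replies with proof M2, which the client verifies
# Both ends then hold the same session key without the password crossing the wire.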
def test_extended():
# Preliminary steps.
context = SRPContext('alice', 'password123')
# Generate basic user auth data usually stored on server.
username, password_verifier, salt = context.get_user_data_triplet()
# And gather basic numbers for client and server to agree upon.
prime = context.prime
gen = context.generator
salt_b64 = value_encode(int_from_hex(salt), base64=True)
# Actual negotiation
# Receive username from client and generate server public.
server_session = SRPServerSession(SRPContext(username, prime=prime, generator=gen), password_verifier)
server_public = server_session.public
server_public_b64 = server_session.public_b64
server_private = server_session.private
assert server_session.private_b64
# Receive server public and salt and process them.
client_session = SRPClientSession(SRPContext(username, 'password123', prime=prime, generator=gen))
client_session.process(server_public, salt)
# Generate client public and session key proof.
client_public = client_session.public
client_public_b64 = client_session.public_b64
client_session_key_proof = client_session.key_proof
client_private = client_session.private
assert client_session.private_b64
# Process client public and verify session key proof.
server_session.process(client_public, salt)
assert server_session.verify_proof(client_session_key_proof)
# Generate session key proof hash.
server_session_key_proof_hash = client_session.key_proof_hash
    # Verify session key proof hash received from server.
assert client_session.verify_proof(server_session_key_proof_hash)
assert client_session.key_b64
assert client_session.key_proof_b64
assert client_session.key_proof_hash_b64
# Restore sessions from privates.
server_session = SRPServerSession(
SRPContext(username, prime=prime, generator=gen), password_verifier,
private=server_private)
client_session = SRPClientSession(
SRPContext(username, 'password123', prime=prime, generator=gen),
private=client_private)
skey_cl, skey_proof_cl, skey_prove_hash_cl = client_session.process(server_public, salt)
skey_srv, skey_proof_srv, skey_prove_hash_srv = server_session.process(client_public, salt)
assert skey_cl == skey_srv
assert skey_proof_cl == skey_proof_srv
# Base 64 test
skey_cl, skey_proof_cl, skey_prove_hash_cl = client_session.process(server_public_b64, salt_b64, base64=True)
skey_srv, skey_proof_srv, skey_prove_hash_srv = server_session.process(client_public_b64, salt_b64, base64=True)
assert skey_cl == skey_srv
assert skey_proof_cl == skey_proof_srv
def test_simple():
# Agree on communication details.
context = SRPContext('alice', 'password123')
username, password_verifier, salt = context.get_user_data_triplet()
prime = context.prime
gen = context.generator
# Receive username from client and generate server public.
server_session = SRPServerSession(SRPContext(username, prime=prime, generator=gen), password_verifier)
server_public = server_session.public
# Receive server public and salt and process them.
client_session = SRPClientSession(SRPContext(username, 'password123', prime=prime, generator=gen))
client_session.process(server_public, salt)
# Generate client public and session key.
client_public = client_session.public
client_session_key = client_session.key
# Process client public and compare session keys.
server_session.process(client_public, salt)
server_session_key = server_session.key
assert server_session_key == client_session_key
def test_raises():
server_session = SRPServerSession(SRPContext('1', '2'), '1')
server_session._context._prime = 1 # to trigger error
with pytest.raises(SRPException):
server_session.init_common_secret('1')
client_session = SRPClientSession(SRPContext('1', '2'))
client_session._context._prime = 1 # to trigger error
with pytest.raises(SRPException):
client_session.init_common_secret('1')
| bsd-3-clause |
mcollins12321/anita | venv/lib/python2.7/site-packages/sqlalchemy/testing/plugin/noseplugin.py | 21 | 2813 | # plugin/noseplugin.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Enhance nose with extra options and behaviors for running SQLAlchemy tests.
Must be run via ./sqla_nose.py so that it is imported in the expected
way (e.g. as a package-less import).
"""
import os
import sys
from nose.plugins import Plugin
fixtures = None
# no package imports yet! this prevents us from tripping coverage
# too soon.
path = os.path.join(os.path.dirname(__file__), "plugin_base.py")
if sys.version_info >= (3, 3):
from importlib import machinery
plugin_base = machinery.SourceFileLoader(
"plugin_base", path).load_module()
else:
import imp
plugin_base = imp.load_source("plugin_base", path)
class NoseSQLAlchemy(Plugin):
enabled = True
name = 'sqla_testing'
score = 100
def options(self, parser, env=os.environ):
Plugin.options(self, parser, env)
opt = parser.add_option
def make_option(name, **kw):
callback_ = kw.pop("callback", None)
if callback_:
def wrap_(option, opt_str, value, parser):
callback_(opt_str, value, parser)
kw["callback"] = wrap_
opt(name, **kw)
plugin_base.setup_options(make_option)
plugin_base.read_config()
def configure(self, options, conf):
super(NoseSQLAlchemy, self).configure(options, conf)
plugin_base.pre_begin(options)
plugin_base.set_coverage_flag(options.enable_plugin_coverage)
global fixtures
from sqlalchemy.testing import fixtures # noqa
def begin(self):
plugin_base.post_begin()
def describeTest(self, test):
return ""
def wantFunction(self, fn):
if fn.__module__ is None:
return False
if fn.__module__.startswith('sqlalchemy.testing'):
return False
def wantClass(self, cls):
return plugin_base.want_class(cls)
def beforeTest(self, test):
if not hasattr(test.test, 'cls'):
return
plugin_base.before_test(
test,
test.test.cls.__module__,
test.test.cls, test.test.method.__name__)
def afterTest(self, test):
plugin_base.after_test(test)
def startContext(self, ctx):
if not isinstance(ctx, type) \
or not issubclass(ctx, fixtures.TestBase):
return
plugin_base.start_test_class(ctx)
def stopContext(self, ctx):
if not isinstance(ctx, type) \
or not issubclass(ctx, fixtures.TestBase):
return
plugin_base.stop_test_class(ctx)
| mit |
xq262144/hue | apps/oozie/src/oozie/migrations/0005_auto__add_shell.py | 40 | 20977 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Shell'
db.create_table('oozie_shell', (
('files', self.gf('django.db.models.fields.CharField')(default='[]', max_length=512)),
('job_xml', self.gf('django.db.models.fields.CharField')(default='', max_length=512, blank=True)),
('job_properties', self.gf('django.db.models.fields.TextField')(default='[]')),
('capture_output', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('params', self.gf('django.db.models.fields.TextField')(default='[]')),
('archives', self.gf('django.db.models.fields.CharField')(default='[]', max_length=512)),
('node_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['oozie.Node'], unique=True, primary_key=True)),
('prepares', self.gf('django.db.models.fields.TextField')(default='[]')),
('command', self.gf('django.db.models.fields.CharField')(max_length=256)),
))
db.send_create_signal('oozie', ['Shell'])
def backwards(self, orm):
# Deleting model 'Shell'
db.delete_table('oozie_shell')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'oozie.coordinator': {
'Meta': {'object_name': 'Coordinator', '_ormbases': ['oozie.Job']},
'concurrency': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 9, 22, 18, 58, 40, 375811)'}),
'execution': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 9, 19, 18, 58, 40, 375778)'}),
'throttle': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timeout': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']", 'null': 'True'})
},
'oozie.datainput': {
'Meta': {'object_name': 'DataInput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'oozie.dataoutput': {
'Meta': {'object_name': 'DataOutput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'oozie.dataset': {
'Meta': {'object_name': 'Dataset'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'done_flag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 9, 19, 18, 58, 40, 377077)'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'uri': ('django.db.models.fields.CharField', [], {'default': "'/data/${YEAR}${MONTH}${DAY}'", 'max_length': '1024'})
},
'oozie.end': {
'Meta': {'object_name': 'End'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.fork': {
'Meta': {'object_name': 'Fork'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.history': {
'Meta': {'object_name': 'History'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Job']"}),
'oozie_job_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'properties': ('django.db.models.fields.TextField', [], {}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'submitter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'oozie.hive': {
'Meta': {'object_name': 'Hive'},
'archives': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '512'}),
'files': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '512'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"},{"name":"oozie.hive.defaults","value":"${hive.default.xml}"}]\''}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'oozie.java': {
'Meta': {'object_name': 'Java'},
'archives': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '512'}),
'args': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'blank': 'True'}),
'files': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '512'}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'java_opts': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'main_class': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.job': {
'Meta': {'object_name': 'Job'},
'deployment_dir': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_shared': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'parameters': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'schema_version': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'oozie.join': {
'Meta': {'object_name': 'Join'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.kill': {
'Meta': {'object_name': 'Kill'},
'message': ('django.db.models.fields.CharField', [], {'default': "'Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]'", 'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.link': {
'Meta': {'object_name': 'Link'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_node'", 'to': "orm['oozie.Node']"}),
'comment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child_node'", 'to': "orm['oozie.Node']"})
},
'oozie.mapreduce': {
'Meta': {'object_name': 'Mapreduce'},
'archives': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '512'}),
'files': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '512'}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.node': {
'Meta': {'object_name': 'Node'},
'children': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'parents'", 'symmetrical': 'False', 'through': "orm['oozie.Link']", 'to': "orm['oozie.Node']"}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'node_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
},
'oozie.pig': {
'Meta': {'object_name': 'Pig'},
'archives': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '512'}),
'files': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '512'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'oozie.shell': {
'Meta': {'object_name': 'Shell'},
'archives': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '512'}),
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'files': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '512'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.sqoop': {
'Meta': {'object_name': 'Sqoop'},
'archives': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '512'}),
'files': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '512'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'blank': 'True'})
},
'oozie.ssh': {
'Meta': {'object_name': 'Ssh'},
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'oozie.start': {
'Meta': {'object_name': 'Start'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True'})
},
'oozie.streaming': {
'Meta': {'object_name': 'Streaming'},
'archives': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '512'}),
'files': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '512'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''}),
'mapper': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'reducer': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'oozie.workflow': {
'Meta': {'object_name': 'Workflow', '_ormbases': ['oozie.Job']},
'end': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'end_workflow'", 'null': 'True', 'to': "orm['oozie.End']"}),
'is_single': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'start': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'start_workflow'", 'null': 'True', 'to': "orm['oozie.Start']"})
}
}
complete_apps = ['oozie']
| apache-2.0 |
Francis-Liu/animated-broccoli | nova/tests/unit/scheduler/filters/test_disk_filters.py | 58 | 4487 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.scheduler.filters import disk_filter
from nova import test
from nova.tests.unit.scheduler import fakes
class TestDiskFilter(test.NoDBTestCase):
def setUp(self):
super(TestDiskFilter, self).setUp()
def test_disk_filter_passes(self):
self.flags(disk_allocation_ratio=1.0)
filt_cls = disk_filter.DiskFilter()
filter_properties = {'instance_type': {'root_gb': 1,
'ephemeral_gb': 1, 'swap': 512}}
host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_disk_filter_fails(self):
self.flags(disk_allocation_ratio=1.0)
filt_cls = disk_filter.DiskFilter()
filter_properties = {'instance_type': {'root_gb': 10,
'ephemeral_gb': 1, 'swap': 1024}}
host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_disk_filter_oversubscribe(self):
self.flags(disk_allocation_ratio=10.0)
filt_cls = disk_filter.DiskFilter()
filter_properties = {'instance_type': {'root_gb': 100,
'ephemeral_gb': 18, 'swap': 1024}}
# 1GB used... so 119GB allowed...
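# (free_disk_mb is 11 GiB of a 12 GiB disk, so 1 GiB is used; a 10.0
# ratio allows 120 GiB, leaving 119 GiB. The requested
# 100 + 18 + 1 = 119 GiB just fits.)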
host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
self.assertEqual(12 * 10.0, host.limits['disk_gb'])
def test_disk_filter_oversubscribe_fail(self):
self.flags(disk_allocation_ratio=10.0)
filt_cls = disk_filter.DiskFilter()
filter_properties = {'instance_type': {'root_gb': 100,
'ephemeral_gb': 19, 'swap': 1024}}
# 1GB used... so 119GB allowed...
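# (Same disk as above, so 119 GiB remain allowed; the requested
# 100 + 19 + 1 = 120 GiB exceeds that, so the filter rejects the host.)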
host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
def test_aggregate_disk_filter_value_error(self, agg_mock):
filt_cls = disk_filter.AggregateDiskFilter()
self.flags(disk_allocation_ratio=1.0)
filter_properties = {
'context': mock.sentinel.ctx,
'instance_type': {'root_gb': 1,
'ephemeral_gb': 1,
'swap': 1024}}
host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 3 * 1024,
'total_usable_disk_gb': 1})
agg_mock.return_value = set(['XXX'])
self.assertTrue(filt_cls.host_passes(host, filter_properties))
agg_mock.assert_called_once_with(host, 'disk_allocation_ratio')
@mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
def test_aggregate_disk_filter_default_value(self, agg_mock):
filt_cls = disk_filter.AggregateDiskFilter()
self.flags(disk_allocation_ratio=1.0)
filter_properties = {
'context': mock.sentinel.ctx,
'instance_type': {'root_gb': 2,
'ephemeral_gb': 1,
'swap': 1024}}
host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 3 * 1024,
'total_usable_disk_gb': 1})
# Uses global conf.
agg_mock.return_value = set([])
self.assertFalse(filt_cls.host_passes(host, filter_properties))
agg_mock.assert_called_once_with(host, 'disk_allocation_ratio')
agg_mock.return_value = set(['2'])
self.assertTrue(filt_cls.host_passes(host, filter_properties))
| apache-2.0 |
rrrene/django | tests/template_tests/syntax_tests/test_cache.py | 299 | 6777 | from django.core.cache import cache
from django.template import Context, Engine, TemplateSyntaxError
from django.test import SimpleTestCase, override_settings
from ..utils import setup
class CacheTagTests(SimpleTestCase):
libraries = {
'cache': 'django.templatetags.cache',
'custom': 'template_tests.templatetags.custom',
}
def tearDown(self):
cache.clear()
@setup({'cache03': '{% load cache %}{% cache 2 test %}cache03{% endcache %}'})
def test_cache03(self):
output = self.engine.render_to_string('cache03')
self.assertEqual(output, 'cache03')
@setup({
'cache03': '{% load cache %}{% cache 2 test %}cache03{% endcache %}',
'cache04': '{% load cache %}{% cache 2 test %}cache04{% endcache %}',
})
def test_cache04(self):
self.engine.render_to_string('cache03')
output = self.engine.render_to_string('cache04')
self.assertEqual(output, 'cache03')
@setup({'cache05': '{% load cache %}{% cache 2 test foo %}cache05{% endcache %}'})
def test_cache05(self):
output = self.engine.render_to_string('cache05', {'foo': 1})
self.assertEqual(output, 'cache05')
@setup({'cache06': '{% load cache %}{% cache 2 test foo %}cache06{% endcache %}'})
def test_cache06(self):
output = self.engine.render_to_string('cache06', {'foo': 2})
self.assertEqual(output, 'cache06')
@setup({
'cache05': '{% load cache %}{% cache 2 test foo %}cache05{% endcache %}',
'cache07': '{% load cache %}{% cache 2 test foo %}cache07{% endcache %}',
})
def test_cache07(self):
context = {'foo': 1}
self.engine.render_to_string('cache05', context)
output = self.engine.render_to_string('cache07', context)
self.assertEqual(output, 'cache05')
@setup({
'cache06': '{% load cache %}{% cache 2 test foo %}cache06{% endcache %}',
'cache08': '{% load cache %}{% cache time test foo %}cache08{% endcache %}',
})
def test_cache08(self):
"""
Allow first argument to be a variable.
"""
context = {'foo': 2, 'time': 2}
self.engine.render_to_string('cache06', context)
output = self.engine.render_to_string('cache08', context)
self.assertEqual(output, 'cache06')
# Raise exception if we don't have at least 2 args, first one integer.
@setup({'cache11': '{% load cache %}{% cache %}{% endcache %}'})
def test_cache11(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('cache11')
@setup({'cache12': '{% load cache %}{% cache 1 %}{% endcache %}'})
def test_cache12(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('cache12')
@setup({'cache13': '{% load cache %}{% cache foo bar %}{% endcache %}'})
def test_cache13(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('cache13')
@setup({'cache14': '{% load cache %}{% cache foo bar %}{% endcache %}'})
def test_cache14(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('cache14', {'foo': 'fail'})
@setup({'cache15': '{% load cache %}{% cache foo bar %}{% endcache %}'})
def test_cache15(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('cache15', {'foo': []})
@setup({'cache16': '{% load cache %}{% cache 1 foo bar %}{% endcache %}'})
def test_cache16(self):
"""
Regression test for #7460.
"""
output = self.engine.render_to_string('cache16', {'foo': 'foo', 'bar': 'with spaces'})
self.assertEqual(output, '')
@setup({'cache17': '{% load cache %}{% cache 10 long_cache_key poem %}Some Content{% endcache %}'})
def test_cache17(self):
"""
Regression test for #11270.
"""
output = self.engine.render_to_string('cache17', {'poem': 'Oh freddled gruntbuggly/'
'Thy micturations are to me/'
'As plurdled gabbleblotchits/'
'On a lurgid bee/'
'That mordiously hath bitled out/'
'Its earted jurtles/'
'Into a rancid festering/'
'Or else I shall rend thee in the gobberwarts'
'with my blurglecruncheon/'
'See if I dont.'})
self.assertEqual(output, 'Some Content')
@setup({'cache18': '{% load cache custom %}{% cache 2|noop:"x y" cache18 %}cache18{% endcache %}'})
def test_cache18(self):
"""
Test whitespace in filter arguments
"""
output = self.engine.render_to_string('cache18')
self.assertEqual(output, 'cache18')
class CacheTests(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.engine = Engine(libraries={'cache': 'django.templatetags.cache'})
super(CacheTests, cls).setUpClass()
def test_cache_regression_20130(self):
t = self.engine.from_string('{% load cache %}{% cache 1 regression_20130 %}foo{% endcache %}')
cachenode = t.nodelist[1]
self.assertEqual(cachenode.fragment_name, 'regression_20130')
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'default',
},
'template_fragments': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'fragments',
},
})
def test_cache_fragment_cache(self):
"""
When a cache called "template_fragments" is present, the cache tag
will use it in preference to 'default'
"""
t1 = self.engine.from_string('{% load cache %}{% cache 1 fragment %}foo{% endcache %}')
t2 = self.engine.from_string('{% load cache %}{% cache 1 fragment using="default" %}bar{% endcache %}')
ctx = Context()
o1 = t1.render(ctx)
o2 = t2.render(ctx)
self.assertEqual(o1, 'foo')
self.assertEqual(o2, 'bar')
def test_cache_missing_backend(self):
"""
When a cache that doesn't exist is specified, the cache tag will
raise a TemplateSyntaxError
'"""
t = self.engine.from_string('{% load cache %}{% cache 1 backend using="unknown" %}bar{% endcache %}')
ctx = Context()
with self.assertRaises(TemplateSyntaxError):
t.render(ctx)
| bsd-3-clause |
TEAM-Gummy/platform_external_chromium_org | third_party/tlslite/tlslite/utils/OpenSSL_AES.py | 359 | 1822 | """OpenSSL/M2Crypto AES implementation."""
from cryptomath import *
from AES import *
if m2cryptoLoaded:
def new(key, mode, IV):
return OpenSSL_AES(key, mode, IV)
class OpenSSL_AES(AES):
def __init__(self, key, mode, IV):
AES.__init__(self, key, mode, IV, "openssl")
self.key = key
self.IV = IV
def _createContext(self, encrypt):
context = m2.cipher_ctx_new()
if len(self.key)==16:
cipherType = m2.aes_128_cbc()
if len(self.key)==24:
cipherType = m2.aes_192_cbc()
if len(self.key)==32:
cipherType = m2.aes_256_cbc()
m2.cipher_init(context, cipherType, self.key, self.IV, encrypt)
return context
def encrypt(self, plaintext):
AES.encrypt(self, plaintext)
context = self._createContext(1)
ciphertext = m2.cipher_update(context, plaintext)
m2.cipher_ctx_free(context)
self.IV = ciphertext[-self.block_size:]
return ciphertext
def decrypt(self, ciphertext):
AES.decrypt(self, ciphertext)
context = self._createContext(0)
#I think M2Crypto has a bug - it fails to decrypt and return the last block passed in.
#To work around this, we append sixteen zeros to the string, below:
plaintext = m2.cipher_update(context, ciphertext+('\0'*16))
#If this bug is ever fixed, then plaintext will end up having a garbage
#plaintext block on the end. That's okay - the below code will discard it.
plaintext = plaintext[:len(ciphertext)]
m2.cipher_ctx_free(context)
self.IV = ciphertext[-self.block_size:]
return plaintext
| bsd-3-clause |
spcui/autotest | server/frontend.py | 3 | 34387 | # Copyright Martin J. Bligh, Google Inc 2008
# Released under the GPL v2
"""
This module allows you to communicate with the frontend to submit jobs etc.
It is designed for writing more sophisticated server-side control files that
can recursively add and manage other jobs.
We turn the JSON dictionaries into real objects that are more idiomatic
For docs, see:
http://autotest/afe/server/rpc_doc/
http://autotest/new_tko/server/rpc_doc/
http://docs.djangoproject.com/en/dev/ref/models/querysets/#queryset-api
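Example (an illustrative sketch only; the label and test names below are
placeholders, not entities the frontend is guaranteed to know about):
afe = AFE()
hosts = afe.get_hostnames(status='Ready', label='my-label')
job = afe.create_job_by_test(name='example', tests=['sleeptest'],
hosts=hosts)
result = afe.poll_all_jobs(TKO(), [job])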
"""
import getpass, os, time, traceback, re
try:
import autotest.common as common
except ImportError:
import common
from autotest.frontend.afe import rpc_client_lib
from autotest.client.shared.settings import settings
from autotest.client.shared import utils
try:
# Here we are importing site utils only if it exists
# pylint: disable=E0611
from autotest.server.site_common import site_utils as server_utils
except ImportError:
from autotest.server import utils as server_utils
form_ntuples_from_machines = server_utils.form_ntuples_from_machines
DEFAULT_SERVER = 'autotest'
def dump_object(header, obj):
"""
Standard way to print out the frontend objects (eg job, host, acl, label)
in a human-readable fashion for debugging
"""
result = header + '\n'
for key in obj.hash:
if key == 'afe' or key == 'hash':
continue
result += '%20s: %s\n' % (key, obj.hash[key])
return result
class RpcClient(object):
"""
Abstract RPC class for communicating with the autotest frontend
Inherited for both TKO and AFE uses.
All the constructors go in the afe / tko class.
Manipulating methods go in the object classes themselves
"""
def __init__(self, path, user, server, print_log, debug, reply_debug):
"""
Create a cached instance of a connection to the frontend
user: username to connect as
server: frontend server to connect to
print_log: print a logging message to stdout on every operation
debug: print out all RPC traffic
"""
if not user:
user = getpass.getuser()
if not server:
if 'AUTOTEST_WEB' in os.environ:
server = os.environ['AUTOTEST_WEB']
else:
server = settings.get_value('SERVER', 'hostname',
default=DEFAULT_SERVER)
self.server = server
self.user = user
self.print_log = print_log
self.debug = debug
self.reply_debug = reply_debug
http_server = 'http://' + server
headers = rpc_client_lib.authorization_headers(user, http_server)
rpc_server = http_server + path
if debug:
print 'SERVER: %s' % rpc_server
print 'HEADERS: %s' % headers
self.proxy = rpc_client_lib.get_proxy(rpc_server, headers=headers)
def run(self, call, **dargs):
"""
Make a RPC call to the AFE server
"""
rpc_call = getattr(self.proxy, call)
if self.debug:
print 'DEBUG: %s %s' % (call, dargs)
try:
result = utils.strip_unicode(rpc_call(**dargs))
if self.reply_debug:
print result
return result
except Exception:
print 'FAILED RPC CALL: %s %s' % (call, dargs)
raise
def log(self, message):
if self.print_log:
print message
class Planner(RpcClient):
def __init__(self, user=None, server=None, print_log=True, debug=False,
reply_debug=False):
super(Planner, self).__init__(path='/planner/server/rpc/',
user=user,
server=server,
print_log=print_log,
debug=debug,
reply_debug=reply_debug)
class TKO(RpcClient):
def __init__(self, user=None, server=None, print_log=True, debug=False,
reply_debug=False):
super(TKO, self).__init__(path='/new_tko/server/rpc/',
user=user,
server=server,
print_log=print_log,
debug=debug,
reply_debug=reply_debug)
def get_status_counts(self, job, **data):
entries = self.run('get_status_counts',
group_by=['hostname', 'test_name', 'reason'],
job_tag__startswith='%s-' % job, **data)
return [TestStatus(self, e) for e in entries['groups']]
class AFE(RpcClient):
def __init__(self, user=None, server=None, print_log=True, debug=False,
reply_debug=False, job=None):
self.job = job
super(AFE, self).__init__(path='/afe/server/rpc/',
user=user,
server=server,
print_log=print_log,
debug=debug,
reply_debug=reply_debug)
def host_statuses(self, live=None):
dead_statuses = ['Repair Failed', 'Repairing']
statuses = self.run('get_static_data')['host_statuses']
if live == True:
return list(set(statuses) - set(dead_statuses))
if live == False:
return dead_statuses
else:
return statuses
@staticmethod
def _dict_for_host_query(hostnames=(), status=None, label=None):
query_args = {}
if hostnames:
query_args['hostname__in'] = hostnames
if status:
query_args['status'] = status
if label:
query_args['labels__name'] = label
return query_args
def get_hosts(self, hostnames=(), status=None, label=None, **dargs):
query_args = dict(dargs)
query_args.update(self._dict_for_host_query(hostnames=hostnames,
status=status,
label=label))
hosts = self.run('get_hosts', **query_args)
return [Host(self, h) for h in hosts]
def get_profiles(self):
return self.run('get_profiles')
def get_hostnames(self, status=None, label=None, **dargs):
"""Like get_hosts() but returns hostnames instead of Host objects."""
# This implementation can be replaced with a more efficient one
# that does not query for entire host objects in the future.
return [host_obj.hostname for host_obj in
self.get_hosts(status=status, label=label, **dargs)]
def reverify_hosts(self, hostnames=(), status=None, label=None):
query_args = dict(locked=False,
aclgroup__users__login=self.user)
query_args.update(self._dict_for_host_query(hostnames=hostnames,
status=status,
label=label))
return self.run('reverify_hosts', **query_args)
def create_host(self, hostname, **dargs):
id = self.run('add_host', hostname=hostname, **dargs)
return self.get_hosts(id=id)[0]
def get_labels(self, **dargs):
labels = self.run('get_labels', **dargs)
return [Label(self, l) for l in labels]
def create_label(self, name, **dargs):
id = self.run('add_label', name=name, **dargs)
return self.get_labels(id=id)[0]
def get_acls(self, **dargs):
acls = self.run('get_acl_groups', **dargs)
return [Acl(self, a) for a in acls]
def create_acl(self, name, **dargs):
id = self.run('add_acl_group', name=name, **dargs)
return self.get_acls(id=id)[0]
def get_users(self, **dargs):
users = self.run('get_users', **dargs)
return [User(self, u) for u in users]
def generate_control_file(self, tests, **dargs):
ret = self.run('generate_control_file', tests=tests, **dargs)
return ControlFile(self, ret)
def get_jobs(self, summary=False, **dargs):
if summary:
jobs_data = self.run('get_jobs_summary', **dargs)
else:
jobs_data = self.run('get_jobs', **dargs)
jobs = []
for j in jobs_data:
job = Job(self, j)
# Set up some extra information defaults
job.testname = re.sub('\s.*', '', job.name) # arbitrary default
job.platform_results = {}
job.platform_reasons = {}
jobs.append(job)
return jobs
def get_host_queue_entries(self, **data):
entries = self.run('get_host_queue_entries', **data)
job_statuses = [JobStatus(self, e) for e in entries]
# Sadly, get_host_queue_entries doesn't return platforms, we have
# to get those back from an explicit get_hosts query, then patch
# the new host objects back into the host list.
hostnames = [s.host.hostname for s in job_statuses if s.host]
host_hash = {}
for host in self.get_hosts(hostname__in=hostnames):
host_hash[host.hostname] = host
for status in job_statuses:
if status.host:
status.host = host_hash[status.host.hostname]
# filter job statuses that have either host or meta_host
return [status for status in job_statuses if (status.host or
status.meta_host)]
def create_job_by_test(self, tests, kernel=None, use_container=False,
kernel_cmdline=None, **dargs):
"""
Given a test name, fetch the appropriate control file from the server
and submit it.
@param kernel: A comma separated list of kernel versions to boot.
@param kernel_cmdline: The command line used to boot all kernels listed
in the kernel parameter.
Returns a list of job objects
"""
assert ('hosts' in dargs or
'atomic_group_name' in dargs and 'synch_count' in dargs)
if kernel:
kernel_list = re.split('[\s,]+', kernel.strip())
kernel_info = []
for version in kernel_list:
kernel_dict = {'version': version}
if kernel_cmdline is not None:
kernel_dict['cmdline'] = kernel_cmdline
kernel_info.append(kernel_dict)
else:
kernel_info = None
control_file = self.generate_control_file(
tests=tests, kernel=kernel_info, use_container=use_container)
if control_file.is_server:
dargs['control_type'] = 'Server'
else:
dargs['control_type'] = 'Client'
dargs['dependencies'] = dargs.get('dependencies', []) + \
control_file.dependencies
dargs['control_file'] = control_file.control_file
if not dargs.get('synch_count', None):
dargs['synch_count'] = control_file.synch_count
if 'hosts' in dargs and len(dargs['hosts']) < dargs['synch_count']:
# will not be able to satisfy this request
return None
return self.create_job(**dargs)
def create_job(self, control_file, name=' ', priority='Medium',
control_type='Client', **dargs):
id = self.run('create_job', name=name, priority=priority,
control_file=control_file, control_type=control_type, **dargs)
return self.get_jobs(id=id)[0]
def run_test_suites(self, pairings, kernel, kernel_label=None,
priority='Medium', wait=True, poll_interval=10,
email_from=None, email_to=None, timeout=168,
max_runtime_hrs=168, kernel_cmdline=None):
"""
Run a list of test suites on a particular kernel.
Poll for them to complete, and return whether they worked or not.
@param pairings: List of MachineTestPairing objects to invoke.
@param kernel: Name of the kernel to run.
@param kernel_label: Label (string) of the kernel to run such as
'<kernel-version> : <config> : <date>'
If any pairing object has its job_label attribute set it
will override this value for that particular job.
@param kernel_cmdline: The command line to boot the kernel(s) with.
@param wait: boolean - Wait for the results to come back?
@param poll_interval: Interval between polling for job results (in mins)
@param email_from: Send notification email upon completion from here.
@param email_to: Send notification email upon completion to here.
"""
jobs = []
for pairing in pairings:
try:
new_job = self.invoke_test(pairing, kernel, kernel_label,
priority, timeout=timeout,
kernel_cmdline=kernel_cmdline,
max_runtime_hrs=max_runtime_hrs)
if not new_job:
continue
jobs.append(new_job)
except Exception, e:
traceback.print_exc()
if not wait or not jobs:
return
tko = TKO()
while True:
time.sleep(60 * poll_interval)
result = self.poll_all_jobs(tko, jobs, email_from, email_to)
if result is not None:
return result
def result_notify(self, job, email_from, email_to):
"""
Notify about the result of a job. Will always print; if email data
is provided, will send an email as well.
job: job object to notify about
email_from: send notification email upon completion from here
email_to: send notification email upon completion to here
"""
if job.result == True:
subject = 'Testing PASSED: '
else:
subject = 'Testing FAILED: '
subject += '%s : %s\n' % (job.name, job.id)
text = []
for platform in job.results_platform_map:
for status in job.results_platform_map[platform]:
if status == 'Total':
continue
for host in job.results_platform_map[platform][status]:
text.append('%20s %10s %10s' % (platform, status, host))
if status == 'Failed':
for test_status in job.test_status[host].fail:
text.append('(%s, %s) : %s' % \
(host, test_status.test_name,
test_status.reason))
text.append('')
base_url = 'http://' + self.server
params = ('columns=test',
'rows=machine_group',
"condition=tag~'%s-%%25'" % job.id,
'title=Report')
query_string = '&'.join(params)
url = '%s/tko/compose_query.cgi?%s' % (base_url, query_string)
text.append(url + '\n')
url = '%s/afe/#tab_id=view_job&object_id=%s' % (base_url, job.id)
text.append(url + '\n')
smtp_info = {}
smtp_info['server'] = settings.get_value('SERVER', 'smtp_server',
default='localhost')
smtp_info['port'] = settings.get_value('SERVER', 'smtp_port',
default='')
smtp_info['user'] = settings.get_value('SERVER', 'smtp_user',
default='')
smtp_info['password'] = settings.get_value('SERVER', 'smtp_password',
default='')
body = '\n'.join(text)
print '---------------------------------------------------'
print 'Subject: ', subject
print body
print '---------------------------------------------------'
if email_from and email_to:
print 'Sending email ...'
utils.send_email(email_from, email_to, subject, body, smtp_info)
print
def print_job_result(self, job):
"""
Print the result of a single job.
job: a job object
"""
if job.result is None:
print 'PENDING',
elif job.result == True:
print 'PASSED',
elif job.result == False:
print 'FAILED',
elif job.result == "Abort":
print 'ABORT',
print ' %s : %s' % (job.id, job.name)
def poll_all_jobs(self, tko, jobs, email_from=None, email_to=None):
"""
Poll all jobs in a list.
jobs: list of job objects to poll
email_from: send notification email upon completion from here
email_to: send notification email upon completion to here
Returns:
a) All complete successfully (return True)
b) One or more has failed (return False)
c) Cannot tell yet (return None)
"""
results = []
for job in jobs:
if getattr(job, 'result', None) is None:
job.result = self.poll_job_results(tko, job)
if job.result is not None:
self.result_notify(job, email_from, email_to)
results.append(job.result)
self.print_job_result(job)
if None in results:
return None
elif False in results or "Abort" in results:
return False
else:
return True
def _included_platform(self, host, platforms):
"""
See if the host's platform matches any of the patterns in the included
platforms list.
"""
if not platforms:
return True # No filtering of platforms
for platform in platforms:
if re.search(platform, host.platform):
return True
return False
def invoke_test(self, pairing, kernel, kernel_label, priority='Medium',
kernel_cmdline=None, **dargs):
"""
Given a pairing of a control file to a machine label, find all machines
with that label, and submit that control file to them.
@param kernel_label: Label (string) of the kernel to run such as
'<kernel-version> : <config> : <date>'
If any pairing object has its job_label attribute set it
will override this value for that particular job.
@returns A list of job objects.
"""
# The pairing can override the job label.
if pairing.job_label:
kernel_label = pairing.job_label
job_name = '%s : %s' % (pairing.machine_label, kernel_label)
hosts = self.get_hosts(multiple_labels=[pairing.machine_label])
platforms = pairing.platforms
hosts = [h for h in hosts if self._included_platform(h, platforms)]
dead_statuses = self.host_statuses(live=False)
host_list = [h.hostname for h in hosts if h.status not in dead_statuses]
print 'HOSTS: %s' % host_list
if pairing.atomic_group_sched:
dargs['synch_count'] = pairing.synch_count
dargs['atomic_group_name'] = pairing.machine_label
else:
dargs['hosts'] = host_list
new_job = self.create_job_by_test(name=job_name,
dependencies=[pairing.machine_label],
tests=[pairing.control_file],
priority=priority,
kernel=kernel,
kernel_cmdline=kernel_cmdline,
use_container=pairing.container,
**dargs)
if new_job:
if pairing.testname:
new_job.testname = pairing.testname
print 'Invoked test %s : %s' % (new_job.id, job_name)
return new_job
def _job_test_results(self, tko, job, debug, tests=[]):
"""
Retrieve test results for a job
"""
job.test_status = {}
try:
test_statuses = tko.get_status_counts(job=job.id)
except Exception:
print "Ignoring exception on poll job; RPC interface is flaky"
traceback.print_exc()
return
for test_status in test_statuses:
# SERVER_JOB is buggy, and often gives false failures. Ignore it.
if test_status.test_name == 'SERVER_JOB':
continue
# if tests is not empty, restrict list of test_statuses to tests
if tests and test_status.test_name not in tests:
continue
if debug:
print test_status
hostname = test_status.hostname
if hostname not in job.test_status:
job.test_status[hostname] = TestResults()
job.test_status[hostname].add(test_status)
def _job_results_platform_map(self, job, debug):
# Figure out which hosts passed / failed / aborted in a job
# Creates a 2-dimensional hash, stored as job.results_platform_map
# 1st index - platform type (string)
# 2nd index - Status (string)
# 'Completed' / 'Failed' / 'Aborted'
# Data indexed by this hash is a list of hostnames (text strings)
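# e.g. {'netbook': {'Total': ['h1', 'h2'],
# 'Completed': ['h1'], 'Failed': ['h2']}}
# ('netbook' and the hostnames are illustrative placeholders.)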
job.results_platform_map = {}
try:
job_statuses = self.get_host_queue_entries(job=job.id)
except Exception:
print "Ignoring exception on poll job; RPC interface is flaky"
traceback.print_exc()
return None
platform_map = {}
job.job_status = {}
job.metahost_index = {}
for job_status in job_statuses:
# This is basically "for each host / metahost in the job"
if job_status.host:
hostname = job_status.host.hostname
else: # This is a metahost
metahost = job_status.meta_host
index = job.metahost_index.get(metahost, 1)
job.metahost_index[metahost] = index + 1
hostname = '%s.%s' % (metahost, index)
job.job_status[hostname] = job_status.status
status = job_status.status
# Skip hosts that failed verify or repair:
# that's a machine failure, not a job failure
if hostname in job.test_status:
verify_failed = False
for failure in job.test_status[hostname].fail:
if (failure.test_name == 'verify' or
failure.test_name == 'repair'):
verify_failed = True
break
if verify_failed:
continue
if hostname in job.test_status and job.test_status[hostname].fail:
# If any tests failed in the job, we want to mark the
# job result as failed, overriding the default job status.
if status != "Aborted": # except if it's an aborted job
status = 'Failed'
if job_status.host:
platform = job_status.host.platform
else: # This is a metahost
platform = job_status.meta_host
if platform not in platform_map:
platform_map[platform] = {'Total' : [hostname]}
else:
platform_map[platform]['Total'].append(hostname)
new_host_list = platform_map[platform].get(status, []) + [hostname]
platform_map[platform][status] = new_host_list
job.results_platform_map = platform_map
def set_platform_results(self, test_job, platform, result):
"""
Result must be None, 'FAIL', 'WARN' or 'GOOD'
"""
if test_job.platform_results[platform] is not None:
# We're already done, and results recorded. This can't change later.
return
test_job.platform_results[platform] = result
# Note that self.job refers to the metajob we're IN, not the job
# that we're executing from here.
testname = '%s.%s' % (test_job.testname, platform)
if self.job:
self.job.record(result, None, testname, status='')
def poll_job_results(self, tko, job, debug=False):
"""
Analyse all job results by platform, return:
False: if any platform has more than one failure
None: if any platform has more than one machine not yet Good.
True: if all platforms have at least all-but-one machines Good.
"""
self._job_test_results(tko, job, debug)
if job.test_status == {}:
return None
self._job_results_platform_map(job, debug)
good_platforms = []
failed_platforms = []
aborted_platforms = []
unknown_platforms = []
platform_map = job.results_platform_map
for platform in platform_map:
if not job.platform_results.has_key(platform):
# record test start, but there's no way to do this right now
job.platform_results[platform] = None
total = len(platform_map[platform]['Total'])
completed = len(platform_map[platform].get('Completed', []))
failed = len(platform_map[platform].get('Failed', []))
aborted = len(platform_map[platform].get('Aborted', []))
# We set up what we want to record here, but don't actually do
# it yet, until we have a decisive answer for this platform
if aborted or failed:
bad = aborted + failed
if (bad > 1) or (bad * 2 >= total):
platform_test_result = 'FAIL'
else:
platform_test_result = 'WARN'
if aborted > 1:
aborted_platforms.append(platform)
self.set_platform_results(job, platform, platform_test_result)
elif (failed * 2 >= total) or (failed > 1):
failed_platforms.append(platform)
self.set_platform_results(job, platform, platform_test_result)
elif (completed >= 1) and (completed + 1 >= total):
# if all or all but one are good, call the job good.
good_platforms.append(platform)
self.set_platform_results(job, platform, 'GOOD')
else:
unknown_platforms.append(platform)
detail = []
for status in platform_map[platform]:
if status == 'Total':
continue
detail.append('%s=%s' % (status,platform_map[platform][status]))
if debug:
print '%20s %d/%d %s' % (platform, completed, total,
' '.join(detail))
print
if len(aborted_platforms) > 0:
if debug:
print 'Result aborted - platforms: ',
print ' '.join(aborted_platforms)
return "Abort"
if len(failed_platforms) > 0:
if debug:
print 'Result bad - platforms: ' + ' '.join(failed_platforms)
return False
if len(unknown_platforms) > 0:
if debug:
platform_list = ' '.join(unknown_platforms)
print 'Result unknown - platforms: ', platform_list
return None
if debug:
platform_list = ' '.join(good_platforms)
print 'Result good - all platforms passed: ', platform_list
return True
class TestResults(object):
"""
Container class used to hold the results of the tests for a job
"""
def __init__(self):
self.good = []
self.fail = []
self.pending = []
def add(self, result):
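# A result with more completions than passes contains failures; one
# with incomplete runs is still pending; anything else is good.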
if result.complete_count > result.pass_count:
self.fail.append(result)
elif result.incomplete_count > 0:
self.pending.append(result)
else:
self.good.append(result)
class RpcObject(object):
"""
Generic object used to construct python objects from rpc calls
"""
def __init__(self, afe, hash):
self.afe = afe
self.hash = hash
self.__dict__.update(hash)
def __str__(self):
return dump_object(self.__repr__(), self)
class ControlFile(RpcObject):
"""
AFE control file object
Fields: synch_count, dependencies, control_file, is_server
"""
def __repr__(self):
return 'CONTROL FILE: %s' % self.control_file
class Label(RpcObject):
"""
AFE label object
Fields:
name, invalid, platform, kernel_config, id, only_if_needed
"""
def __repr__(self):
return 'LABEL: %s' % self.name
def add_hosts(self, hosts):
return self.afe.run('label_add_hosts', self.id, hosts)
def remove_hosts(self, hosts):
return self.afe.run('label_remove_hosts', self.id, hosts)
class Acl(RpcObject):
"""
AFE acl object
Fields:
users, hosts, description, name, id
"""
def __repr__(self):
return 'ACL: %s' % self.name
def add_hosts(self, hosts):
self.afe.log('Adding hosts %s to ACL %s' % (hosts, self.name))
return self.afe.run('acl_group_add_hosts', self.id, hosts)
def remove_hosts(self, hosts):
self.afe.log('Removing hosts %s from ACL %s' % (hosts, self.name))
return self.afe.run('acl_group_remove_hosts', self.id, hosts)
def add_users(self, users):
self.afe.log('Adding users %s to ACL %s' % (users, self.name))
return self.afe.run('acl_group_add_users', id=self.name, users=users)
class Job(RpcObject):
"""
AFE job object
Fields:
name, control_file, control_type, synch_count, reboot_before,
run_verify, priority, email_list, created_on, dependencies,
timeout, owner, reboot_after, id
"""
def __repr__(self):
return 'JOB: %s' % self.id
class JobStatus(RpcObject):
"""
AFE job_status object
Fields:
status, complete, deleted, meta_host, host, active, execution_subdir, id
"""
def __init__(self, afe, hash):
# This should call super
self.afe = afe
self.hash = hash
self.__dict__.update(hash)
self.job = Job(afe, self.job)
if getattr(self, 'host'):
self.host = Host(afe, self.host)
def __repr__(self):
if self.host and self.host.hostname:
hostname = self.host.hostname
else:
hostname = 'None'
return 'JOB STATUS: %s-%s' % (self.job.id, hostname)
class Host(RpcObject):
"""
AFE host object
Fields:
status, lock_time, locked_by, locked, hostname, invalid,
synch_id, labels, platform, protection, dirty, id
"""
def __repr__(self):
return 'HOST OBJECT: %s' % self.hostname
def show(self):
labels = list(set(self.labels) - set([self.platform]))
print '%-6s %-7s %-7s %-16s %s' % (self.hostname, self.status,
self.locked, self.platform,
', '.join(labels))
def delete(self):
return self.afe.run('delete_host', id=self.id)
def modify(self, **dargs):
return self.afe.run('modify_host', id=self.id, **dargs)
def get_acls(self):
return self.afe.get_acls(hosts__hostname=self.hostname)
def add_acl(self, acl_name):
self.afe.log('Adding ACL %s to host %s' % (acl_name, self.hostname))
return self.afe.run('acl_group_add_hosts', id=acl_name,
hosts=[self.hostname])
def remove_acl(self, acl_name):
self.afe.log('Removing ACL %s from host %s' % (acl_name, self.hostname))
return self.afe.run('acl_group_remove_hosts', id=acl_name,
hosts=[self.hostname])
def get_labels(self):
return self.afe.get_labels(host__hostname__in=[self.hostname])
def add_labels(self, labels):
self.afe.log('Adding labels %s to host %s' % (labels, self.hostname))
return self.afe.run('host_add_labels', id=self.id, labels=labels)
def remove_labels(self, labels):
self.afe.log('Removing labels %s from host %s' % (labels,self.hostname))
return self.afe.run('host_remove_labels', id=self.id, labels=labels)
class User(RpcObject):
def __repr__(self):
return 'USER: %s' % self.login
class TestStatus(RpcObject):
"""
TKO test status object
Fields:
test_idx, hostname, testname, id
complete_count, incomplete_count, group_count, pass_count
"""
def __repr__(self):
return 'TEST STATUS: %s' % self.id
class MachineTestPairing(object):
"""
Object representing the pairing of a machine label with a control file
machine_label: use machines from this label
control_file: use this control file (by name in the frontend)
platforms: list of regexps to filter platforms by. [] => no filtering
job_label: The label (name) to give to the autotest job launched
to run this pairing. '<kernel-version> : <config> : <date>'
"""
def __init__(self, machine_label, control_file, platforms=[],
container=False, atomic_group_sched=False, synch_count=0,
testname=None, job_label=None):
self.machine_label = machine_label
self.control_file = control_file
self.platforms = platforms
self.container = container
self.atomic_group_sched = atomic_group_sched
self.synch_count = synch_count
self.testname = testname
self.job_label = job_label
def __repr__(self):
return '%s %s %s %s' % (self.machine_label, self.control_file,
self.platforms, self.container)
| gpl-2.0 |
zakuro9715/lettuce | tests/integration/lib/Django-1.2.5/tests/regressiontests/generic_relations_regress/models.py | 103 | 2612 | from django.db import models
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
__all__ = ('Link', 'Place', 'Restaurant', 'Person', 'Address',
'CharLink', 'TextLink', 'OddRelation1', 'OddRelation2',
'Contact', 'Organization', 'Note')
class Link(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey()
def __unicode__(self):
return "Link to %s id=%s" % (self.content_type, self.object_id)
class Place(models.Model):
name = models.CharField(max_length=100)
links = generic.GenericRelation(Link)
def __unicode__(self):
return "Place: %s" % self.name
class Restaurant(Place):
def __unicode__(self):
return "Restaurant: %s" % self.name
class Address(models.Model):
street = models.CharField(max_length=80)
city = models.CharField(max_length=50)
state = models.CharField(max_length=2)
zipcode = models.CharField(max_length=5)
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey()
def __unicode__(self):
return '%s %s, %s %s' % (self.street, self.city, self.state, self.zipcode)
class Person(models.Model):
account = models.IntegerField(primary_key=True)
name = models.CharField(max_length=128)
addresses = generic.GenericRelation(Address)
def __unicode__(self):
return self.name
class CharLink(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.CharField(max_length=100)
content_object = generic.GenericForeignKey()
class TextLink(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.TextField()
content_object = generic.GenericForeignKey()
class OddRelation1(models.Model):
name = models.CharField(max_length=100)
clinks = generic.GenericRelation(CharLink)
class OddRelation2(models.Model):
name = models.CharField(max_length=100)
tlinks = generic.GenericRelation(TextLink)
# models for test_q_object_or:
class Note(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey()
note = models.TextField()
class Contact(models.Model):
notes = generic.GenericRelation(Note)
class Organization(models.Model):
name = models.CharField(max_length=255)
contacts = models.ManyToManyField(Contact, related_name='organizations')
| gpl-3.0 |
cancan101/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/numpy_io_test.py | 16 | 4499 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for numpy_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import numpy_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
class NumpyIoTest(test.TestCase):
def testNumpyInputFn(self):
a = np.arange(4) * 1.0
b = np.arange(32, 36)
x = {'a': a, 'b': b}
y = np.arange(-32, -28)
with self.test_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [0, 1])
self.assertAllEqual(res[0]['b'], [32, 33])
self.assertAllEqual(res[1], [-32, -31])
session.run([features, target])
with self.assertRaises(errors.OutOfRangeError):
session.run([features, target])
coord.request_stop()
coord.join(threads)
def testNumpyInputFnWithDifferentDimensionsOfFeatures(self):
a = np.array([[1, 2], [3, 4]])
b = np.array([5, 6])
x = {'a': a, 'b': b}
y = np.arange(-32, -30)
with self.test_session() as session:
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
res = session.run([features, target])
self.assertAllEqual(res[0]['a'], [[1, 2], [3, 4]])
self.assertAllEqual(res[0]['b'], [5, 6])
self.assertAllEqual(res[1], [-32, -31])
coord.request_stop()
coord.join(threads)
def testNumpyInputFnWithXAsNonDict(self):
x = np.arange(32, 36)
y = np.arange(4)
with self.test_session():
with self.assertRaisesRegexp(TypeError, 'x must be dict'):
failing_input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
failing_input_fn()
def testNumpyInputFnWithTargetKeyAlreadyInX(self):
array = np.arange(32, 36)
x = {'__target_key__': array}
y = np.arange(4)
with self.test_session():
input_fn = numpy_io.numpy_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
input_fn()
self.assertAllEqual(x['__target_key__'], array)
self.assertAllEqual(x['__target_key___n'], y)
def testNumpyInputFnWithMismatchLengthOfInputs(self):
a = np.arange(4) * 1.0
b = np.arange(32, 36)
x = {'a': a, 'b': b}
x_mismatch_length = {'a': np.arange(1), 'b': b}
y_longer_length = np.arange(10)
with self.test_session():
with self.assertRaisesRegexp(
ValueError, 'Length of tensors in x and y is mismatched.'):
failing_input_fn = numpy_io.numpy_input_fn(
x, y_longer_length, batch_size=2, shuffle=False, num_epochs=1)
failing_input_fn()
with self.assertRaisesRegexp(
ValueError, 'Length of tensors in x and y is mismatched.'):
failing_input_fn = numpy_io.numpy_input_fn(
x=x_mismatch_length,
y=None,
batch_size=2,
shuffle=False,
num_epochs=1)
failing_input_fn()
if __name__ == '__main__':
test.main()
| apache-2.0 |
mozilla-iam/cis | python-modules/cis_processor/tests/test_operation.py | 1 | 7363 | import base64
import boto3
import json
import logging
import os
import random
from botocore.stub import Stubber
from cis_profile import profile
from cis_profile import fake_profile
from everett.ext.inifile import ConfigIniEnv
from everett.manager import ConfigManager
from everett.manager import ConfigOSEnv
from moto import mock_dynamodb2
from mock import patch
logger = logging.getLogger(__name__)
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(name)-12s %(levelname)-8s %(message)s", datefmt="%m-%d %H:%M"
)
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
def get_config():
return ConfigManager(
[ConfigIniEnv([os.environ.get("CIS_CONFIG_INI"), "~/.mozilla-cis.ini", "/etc/mozilla-cis.ini"]), ConfigOSEnv()]
)
def profile_to_vault_structure(user_profile):
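"""Flatten a CIS profile into the record shape the identity vault stores.
The sequence number is randomized here purely for test purposes.
"""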
return {
"sequence_number": str(random.randint(100000, 100000000)),
"primary_email": user_profile["primary_email"]["value"],
"profile": json.dumps(user_profile),
"user_uuid": user_profile["uuid"]["value"],
"primary_username": user_profile["primary_username"]["value"],
"id": user_profile["user_id"]["value"],
}
def kinesis_event_generate(user_profile):
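"""Wrap a user profile in the canned Kinesis event fixture, JSON-encoding
and base64-wrapping it as the record payload the processor expects.
"""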
fh = open("tests/fixture/kinesis-event.json")
kinesis_event_structure = json.loads(fh.read())
fh.close()
kinesis_event_structure["Records"][0]["kinesis"]["sequenceNumber"] = "900000000000"
kinesis_event_structure["Records"][0]["kinesis"]["parititionKey"] = "generic_publisher"
kinesis_event_structure["Records"][0]["kinesis"]["data"] = base64.b64encode(
json.dumps(user_profile).encode()
).decode()
return kinesis_event_structure
@mock_dynamodb2
class TestOperation(object):
def setup(self):
os.environ["CIS_CONFIG_INI"] = "tests/fixture/mozilla-cis.ini"
self.config = get_config()
from cis_profile import WellKnown
from cis_identity_vault import vault
os.environ["CIS_CONFIG_INI"] = "tests/fixture/mozilla-cis.ini"
well_known = WellKnown()
self.well_known_json = well_known.get_well_known()
self.dynamodb_client = boto3.client(
"dynamodb", region_name="us-west-2", aws_access_key_id="ak", aws_secret_access_key="sk"
)
self.dynamodb_resource = boto3.resource(
"dynamodb", region_name="us-west-2", aws_access_key_id="ak", aws_secret_access_key="sk"
)
self.vault_client = vault.IdentityVault()
self.vault_client.boto_session = Stubber(boto3.session.Session(region_name="us-west-2")).client
self.vault_client.dynamodb_client = self.dynamodb_client
self.vault_client.find_or_create()
self.table = self.dynamodb_resource.Table("purple-identity-vault")
self.mr_mozilla_profile = fake_profile.FakeUser(seed=1337).as_dict()
from cis_identity_vault.models import user
vault_interface = user.Profile(self.table, self.dynamodb_client, False)
vault_interface.create(profile_to_vault_structure(user_profile=self.mr_mozilla_profile))
self.mr_mozilla_change_event = kinesis_event_generate(self.mr_mozilla_profile)
@patch.object(profile.User, "verify_all_publishers")
@patch.object(profile.User, "verify_all_signatures")
def test_base_operation_object_it_should_succeed(self, verify_sigs, verify_pubs):
verify_sigs.return_value = True
verify_pubs.return_value = True
os.environ["CIS_PROCESSOR_VERIFY_SIGNATURES"] = "False"
patched_profile = self.mr_mozilla_profile
patched_profile["last_name"]["value"] = "anupdatedlastname"
kinesis_event = kinesis_event_generate(patched_profile)
from cis_processor import operation
for kinesis_record in kinesis_event["Records"]:
base_operation = operation.BaseProcessor(
event_record=kinesis_record, dynamodb_client=self.dynamodb_client, dynamodb_table=self.table
)
base_operation._load_profiles()
needs_integration = base_operation.needs_integration(
base_operation.profiles["new_profile"], base_operation.profiles["old_profile"]
)
assert needs_integration is True
assert (
base_operation.profiles["new_profile"].verify_all_publishers(base_operation.profiles["old_profile"])
is True
)
assert base_operation.process() is True
from cis_identity_vault.models import user
p = user.Profile(self.table, self.dynamodb_client, False)
p.find_by_id(id=base_operation.profiles["new_profile"].as_dict()["user_id"]["value"])
@patch.object(profile.User, "verify_all_publishers")
@patch.object(profile.User, "verify_all_signatures")
def test_base_operation_object_with_signature_testing_it_should_fail(self, verify_sigs, verify_pubs):
verify_sigs.return_value = False
verify_pubs.return_value = True
os.environ["CIS_PROCESSOR_VERIFY_SIGNATURES"] = "True"
patched_profile = self.mr_mozilla_profile
patched_profile["first_name"]["value"] = "anupdatedfirstname"
kinesis_event = kinesis_event_generate(patched_profile)
from cis_processor import operation
for kinesis_record in kinesis_event["Records"]:
base_operation = operation.BaseProcessor(
event_record=kinesis_record, dynamodb_client=self.dynamodb_client, dynamodb_table=self.table
)
base_operation._load_profiles()
needs_integration = base_operation.needs_integration(
base_operation.profiles["new_profile"], base_operation.profiles["old_profile"]
)
assert needs_integration is True
assert (
base_operation.profiles["new_profile"].verify_all_publishers(base_operation.profiles["old_profile"])
is True
)
assert base_operation.process() is False
@patch.object(profile.User, "verify_all_publishers")
@patch.object(profile.User, "verify_all_signatures")
def test_new_user_scenario(self, verify_sigs, verify_pubs):
verify_sigs.return_value = False
verify_pubs.return_value = True
os.environ["CIS_PROCESSOR_VERIFY_SIGNATURES"] = "True"
new_user_profile = fake_profile.FakeUser().as_dict()
new_user_profile["user_id"]["value"] = "harrypotter"
kinesis_event = kinesis_event_generate(new_user_profile)
from cis_processor import operation
for kinesis_record in kinesis_event["Records"]:
base_operation = operation.BaseProcessor(
event_record=kinesis_record, dynamodb_client=self.dynamodb_client, dynamodb_table=self.table
)
base_operation._load_profiles()
needs_integration = base_operation.needs_integration(
base_operation.profiles["new_profile"], base_operation.profiles["old_profile"]
)
assert needs_integration is True
assert (
base_operation.profiles["new_profile"].verify_all_publishers(base_operation.profiles["old_profile"])
is True
)
assert base_operation.process() is False
| mpl-2.0 |
manuella/Carnaval | carnaval/smb/SMB_URL.py | 1 | 22221 | # -*- coding: utf-8 -*-
# ============================================================================ #
# SMB_URL.py
#
# Copyright:
# Copyright (C) 2015 by Christopher R. Hertel
#
# $Id: SMB_URL.py; 2016-02-16 21:27:36 -0600; Christopher R. Hertel$
#
# ---------------------------------------------------------------------------- #
#
# Description:
# SMB URL composition and parsing.
#
# ---------------------------------------------------------------------------- #
#
# License:
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# See Also:
# The 0.README file included with the distribution.
#
# ---------------------------------------------------------------------------- #
# This code was developed in participation with the
# Protocol Freedom Information Foundation.
# <www.protocolfreedom.org>
# ---------------------------------------------------------------------------- #
#
# Notes:
#
# - The first attempt at writing this module failed because it relied on
# Python urlparse.urlsplit() function, which had a known bug. See:
# http://bugs.python.org/issue9374
# Essentially, when dealing with an unrecognized URI scheme, the
# urlparse functions would fail to parse the query and fragment parts,
# leaving them as part of the path. This bug existed in Python 2.7.3,
# but was fixed some time before the release of 2.7.5. It just seemed
# easier to do the parsing by hand.
#
# - The SMB URL specification has never moved beyond the draft stage
# (despite a great deal of effort). As of January 2015, the most recent
# attempt is still draft-crhertel-smb-url-12.txt, which expired in mid
# 2007.
#
# Although there is no official standard, there are several SMB client
# implementations that do support some variant of the SMB URL. Most of
# these vary slightly (and differently) from the draft specification.
#
# - The original author of this SMB URL parsing implementation is also the
# author of the SMB URL draft specifications. So there.
#
# - One thing potentially missing from the SMB URL draft specification is
# the ability, within the URL string, to specify an offset/length pair.
# This would be useful for reading chunks of a given file. I do not
# know whether a similar feature exists in other URL formats.
#
# ============================================================================ #
#
"""Carnaval Toolkit: SMB URL format parsing and packing.
The SMB URL is (was) a proposed URL format for use in identifying SMB
shares, folders, and files within SMB shares. Basically, anything
within the namespace of an SMB share, including the share itself. The
SMB URL can also be used to access the (now deprecated) Browse Service
(A.K.A. the "Network Neighborhood"). Several SMB client
implementations support the SMB URL.
This module provides the smb_url() class, which can parse, modify, and
compose SMB URL strings.
References:
[SMBURL]
Draft SMB URL specification (expired):
http://www.ietf.org/archive/id/draft-crhertel-smb-url-12.txt
[IMPCIFS]
SMB URL description:
http://www.ubiqx.org/cifs/Appendix-D.html
[PYURLPARSE]
Python urlparse module documentation:
https://docs.python.org/2/library/urlparse.html
[RFC2732]
Format for Literal IPv6 Addresses in URL's:
https://www.ietf.org/rfc/rfc2732.txt
"""
# Imports -------------------------------------------------------------------- #
#
# SMB_Core - We use the SMBerror exception class.
#
from SMB_Core import SMBerror
# Functions ------------------------------------------------------------------ #
def parseContext( context="" ):
"""Parse an SMB URL formatted NBT context into key/value tuples.
Input:
context - A string of key/value pairs. The pairs are separated
from one another by either the '&' or the ';' character.
The keys are separated from their respective values by an
equal sign ('=').
Output: A list of tuples, or None.
None is returned if the input generated an empty list.
If a list of tuples is returned, each tuple will contain one
(key, value) pair.
Doctest:
>>> print parseContext( "? a=1;&b=2; c =3; &" )
[('a', '1'), ('b', '2'), ('c', '3')]
"""
if( not context ):
return( None )
# This code is too forgiving.
# It does a lot of cleanup as it parses the input string
# and formats the resulting tuples.
context = context.lstrip( " ?&;" ).rstrip( "&;" )
ctxlist = []
for y in [ x.split( '&' ) for x in context.split( ';' ) ]:
for pair in y:
if( pair.strip() ):
key, value = pair.split( '=', 1 )
key = key.strip()
if( key ):
ctxlist.append( (key, value) )
return( ctxlist if( ctxlist ) else None )
def composeContext( contextList=[] ):
"""Create an NBT context string from a list of key/value pairs.
Input:
contextList - A list of key/value tuples, such as might be produced
by calling parseContext().
Output: A string, or None.
None is returned if the context string would have been
empty, otherwise a formatted context string is returned. The
string will not include a leading question mark ('?').
Errors:
AssertionError - Thrown if the input is neither None nor a list.
Other exceptions may be thrown if the contents of the <contextList>
cannot be parsed properly by Python.
Doctest:
>>> clst = [('a', '1'), ('b', '2'), ('c', '3')]
>>> print composeContext( clst )
a=1;b=2;c=3
>>> print composeContext( [] )
None
"""
if( not contextList ):
return( None )
assert( type( contextList ) is list ), \
"Expected a list of tuples, not a(n) %s" % type( contextList ).__name__
ctx = ''
for tup in contextList:
ctx += ( ";%s=%s" % tup )
return( ctx[1:] if( ctx ) else None )
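# A quick roundtrip sketch (not part of the original module; 'nbns' and
# 'scope' are merely example context keys): parsing and then re-composing
# a context string normalizes the separators and trims the whitespace.
#
# ctx = parseContext( "?nbns=10.0.0.1&scope=frogurt" )
# # ctx == [('nbns', '10.0.0.1'), ('scope', 'frogurt')]
# composeContext( ctx ) # -> 'nbns=10.0.0.1;scope=frogurt'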
# Classes -------------------------------------------------------------------- #
#
class smb_url( object ):
"""Compose and decompose SMB URL strings.
The SMB URL has the following syntax:
smb://[[[authdomain;]username@]hostname[:port][/pathname]][?context]
The SMB URL format is semantically overloaded. Many strings that
match the format shown above may represent either an object in the
SMB file system or a query to be passed to the NetBIOS-based Browse
Service.
Class Properties:
scheme This is the scheme identifier. When read, it always
returns the string "smb". The value cannot be changed.
Note that the draft specifications allow either "smb" or
"cifs" as a scheme identifier. The CIFS name is being
retired, however, so this module only recognizes "smb".
authdomain The authentication domain. This is a string value.
username The username for authentication; a string.
password It is generally considered to be unwise to include a
password in a visible URL string, and supporting parsing
of the password may be considered enablement, but this
usage is fairly common. The password is generally
parsed as a subfield of the username, delimited by a
colon (':').
hostname This may be either a name or an IP address. An IPv6
address must match the syntax given in RFC 2732. A name
may be either an SMB server or workgroup identifier.
port Port number. The standard ports for SMB are 139 (for
SMB over NBT Session Service) and 445 (for SMB over
Naked TCP transport). This property is an integer type,
all of the others are strings.
path The pathname. The first component of the pathname, if
it exists, is the share name. The object identified by
a pathname may be a share, a file, a directory, or some
other SMB filesystem object such as a device or a link.
context A set of key/value pairs, used to provide NBT context.
Keys and values are separated by equal signs ('=').
Key/value pairs are separated by semicolons (or by
ampersands). Eg.: "Key1=ValueA;key2=valueB;fred=ethel"
url The complete URL string. Reading this property causes
the URL to be composed from the other fields. Setting
this value causes all of the other fields to be reset
and then filled in by parsing the URL.
Except for the scheme name, any of the above can be set to None to
clear the value.
Errors:
AssertionError - Values assigned to properties may be tested to
ensure that they meet type or value requirements.
If a value is found to be wanting, we'll let you
know.
AttributeError - Thrown if an attempt is made to assign a value to
the <scheme> property, which is read-only.
ValueError - Thrown by Python if it detects bogus input that
was otherwise missed. For instance, if an attempt
is made to assign a non-numeric string to the
<port> property, which stores an integer value.
Notes:
It is completely possible to generate a bogus SMB URL string using
this class. The initial parsing is somewhat picky (yet also
somewhat forgiving) about correct syntax. Several of the property
assignments also perform syntax checks. You can bypass the syntax
checks by assigning the _<field> attributes directly. There are,
however, a few additional checks performed when the resulting URL
is composed.
Doctest:
>>> x = smb_url( "smb://fooberry" )
>>> x.url
'smb://fooberry'
>>> x.path = "/"
>>> x.url
'smb://fooberry/'
>>> x.context = " ? Froo=froo; groo=Groo "
>>> x.path = "/Foo hammer/gizmogram/peas.pie"
>>> x.url
'smb://fooberry/Foo hammer/gizmogram/peas.pie?Froo=froo;groo=Groo '
>>> x.username = "chesspieceface"
>>> x.context = "calling=me&called=you"
>>> x.path = "/hamster"
>>> print x.url
smb://chesspieceface@fooberry/hamster?calling=me;called=you
"""
def __init__( self, url=None ):
"""Create an SMB_url() object.
Input:
url - An SMB URL string, or None.
None is equivalent to "smb://", which represents a local
Browse Service (Network Neighborhood) query.
Errors: See the parse() method.
"""
self.parse( url )
def reset( self ):
"""Clear the URL, retaining only the scheme (which is assumed).
Doctest:
>>> x = smb_url( "//hello/whirld" )
>>> x.authdomain = "ploobis"
>>> x.url
'smb://ploobis;hello/whirld'
>>> x.reset()
>>> x.url
'smb://'
"""
self._authdomain = None
self._username = None
self._password = None
self._hostname = None
self._port = None
self._path = None
self._context = None
self._url = None
def parse( self, url=None ):
"""Parse an SMB URL into its component parts.
Input:
url - An SMB URL string, or None.
None is equivalent to "smb://", which represents a
local Browse Service (Network Neighborhood) query.
Errors:
SMBerror( 1000 ) - Warning.
This warning is thrown if, for example, the
input contains a URL fragment (which is not
a supported field in the SMB URL format).
A warning can be safely ignored. Warnings
are not thrown until the URL string has been
successfully parsed, and all attributes
assigned.
SMBerror( 1001 ) - A syntax error was encountered while parsing
the given URL string. The <value> attribute
of the exception will contain the offset
within the URL string at which the error was
detected.
ValueError - Thrown if the port number field could not be
interpreted as an integer.
Notes: This method resets the contents of the object to the empty
state before parsing the new URL string. All previous
state is cleared. It's a do-over.
Doctest:
>>> x = smb_url( "smb://ad;un:pw@host/share/path/file.ext" )
>>> x.context = "nbns=172.28.42.88&nodetype=H;scope=gorch;"
>>> print x.path
/share/path/file.ext
>>> x.path = ''
>>> print x.url
smb://ad;un:pw@host?nbns=172.28.42.88;nodetype=H;scope=gorch
>>> x.authdomain = None
>>> print smb_url( x.url ).url
smb://un:pw@host?nbns=172.28.42.88;nodetype=H;scope=gorch
>>> try:
... smb_url( "smb://#NoGood" )
... except SMBerror, e:
... tup = (e.eCode, e.errStr( e.eCode ), e.value)
... print "%d; %s (%d)" % tup
1000; Warning (6)
"""
# Reset the attributes, and clean up the input a bit.
self.reset()
if( not url ):
return
tmp = url.lstrip() # Clean up the url string.
pos = url.find( tmp ) # How much padding did we remove?
# Brute-force parsing. Split off the "smb://" part.
scheme, delim, tmp = tmp.partition( "//" )
if( not delim ):
raise SMBerror( 1001, "Missing initial double slash ('//')", pos )
if( scheme ):
pos += len( scheme )
if( scheme.lower() not in [ "smb", "smb:" ] ):
raise SMBerror( 1001, "Invalid scheme: '%s'" % scheme, pos )
# Trim off the fragment, if one exists. (Throw an exception later.)
tmp, x, fragment = tmp.partition( '#' )
# Trim off the context (query) portion, if it's there.
tmp, x, context = tmp.partition( '?' )
# Now we can split the hierarchical portion into netloc and path.
netloc, x, path = tmp.partition( '/' )
# Further parse the netloc portion.
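# netloc grammar: [[authdomain;]username[:password]@]hostname[:port]
# rpartition('@') is used so that an '@' inside the password stays with
# the credentials instead of being mistaken for the delimiter.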
username, x, hostname = netloc.rpartition( '@' )
authdomain, x, username = username.rpartition( ';' )
username, x, password = username.partition( ':' )
hostname, x, port = hostname.partition( ':' )
# Should have everything. Check for bugs.
if( path and not hostname ):
pos = 2 + url.find( "//" )
raise SMBerror( 1001, "Path provided, but no hostname given", pos )
# Further error checking is done by the property assignment methods.
self.port = port
self.hostname = hostname
self.password = password
self.path = path
self.context = context
self.username = username
self.authdomain = authdomain
# Now that everything has been parsed and assigned,
# see if we need to throw any warnings.
if( fragment ):
s = "Fragments have no meaning in the SMB URL format"
raise SMBerror( 1000, s, url.rfind( '#' ) )
def compose( self ):
"""Create an SMB URL string from the component parts.
Output: An SMB URL string, built from the available attributes.
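Doctest: (example added for illustration)
>>> u = smb_url( "smb://host" )
>>> u.path = "share/docs"
>>> u.compose()
'smb://host/share/docs'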
"""
# Validate.
if( self._path and not self._hostname ):
raise SMBerror( 1001, "Pathname given, but no hostname specified" )
ad = "" if( not self._authdomain ) else (self._authdomain + ';')
pw = "" if( not self._password ) else (':' + self._password)
un = "" if( not self._username ) else ("%s%s@" % (self._username, pw))
hn = "" if( not self._hostname ) else self._hostname
po = "" if( not self._port ) else (":%d" % self._port)
pa = "" if( not self._path ) else self._path
cx = "" if( not self._context ) else ('?' + self._context)
self._url = "smb://%s%s%s%s%s%s" % (ad, un, hn, po, pa, cx)
return( self._url )
def dump( self, indent=0 ):
"""Return a printable string listing the URL field contents.
Input: indent - Number of spaces to indent the formatted output.
Output: A string with a user-readable list of property contents.
Doctest:
>>> s ="//ad;un:pw@hn:139/share/path/file.ext?SCOPE=scope.id"
>>> print smb_url( s ).dump()
Scheme...............: "smb"
Authentication Domain: "ad"
Username.............: "un"
Password.............: "pw"
Hostname.............: "hn"
Port Number..........: 139
Share Path...........: "/share/path/file.ext"
Context..............: "SCOPE=scope.id"
<BLANKLINE>
"""
def fmat( val=None ):
# Output prettying function thingy.
if( val is None ):
return( "" )
if( str == type( val ) ):
return( '"%s"' % val )
return( str( val ) )
ind = ' ' * indent
s = ind + 'Scheme...............: "%s"\n' % self.scheme
s += ind + "Authentication Domain: %s\n" % fmat( self._authdomain )
s += ind + "Username.............: %s\n" % fmat( self._username )
s += ind + "Password.............: %s\n" % fmat( self._password )
s += ind + "Hostname.............: %s\n" % fmat( self._hostname )
s += ind + "Port Number..........: %s\n" % fmat( self._port )
s += ind + "Share Path...........: %s\n" % fmat( self._path )
s += ind + "Context..............: %s\n" % fmat( self._context )
return( s )
def _cleanStrField( self, fld ):
# Validate the input string.
#
# Input: fld - Either None, or a string representing a field within
# an SMB URL.
#
# Output: If <fld> is None or the empty string, this method will
# return None. Otherwise, if no errors are detected, this
# method just returns the input string.
#
# Errors: AssertionError - Thrown if the input is neither a string
# nor None.
#
# Notes: This method was originally intended to do all sorts of
# string cleanup before returning the input value. It was
# decided, however, that it was better to let higher-level
# processing catch any syntactic or semantic errors in the
# input values.
#
assert( (fld is None) or isinstance( fld, str ) ), \
"Expected a string, not a(n) %s" % type( fld ).__name__
return( fld if( fld ) else None )
@property
def scheme( self ):
"""Always returns "smb".
"""
return( "smb" ) # Self-conscious little waste of code...
@property
def authdomain( self ):
"""Get/set the Authentication Domain; string
"""
return( self._authdomain )
@authdomain.setter
def authdomain( self, ad=None ):
self._authdomain = self._cleanStrField( ad )
@property
def username( self ):
"""Get/set the Username; string
"""
return( self._username )
@username.setter
def username( self, un=None ):
self._username = self._cleanStrField( un )
@property
def password( self ):
"""Get/set the (optional and discouraged) password; string
Notes: The password field is a subfield of username. Use with
caution; don't expose passwords.
"""
return( self._password )
@password.setter
def password( self, pw=None ):
self._password = self._cleanStrField( pw )
@property
def hostname( self ):
"""Get/set the Hostname (server identifier or workgroup name); string
"""
return( self._hostname )
@hostname.setter
def hostname( self, hn=None ):
self._hostname = self._cleanStrField( hn )
@property
def port( self ):
"""Get/set the port number; integer
Errors:
ValueError - Thrown if the input is not None and cannot be
converted to an integer. The port number can
be set to None.
AssertionError - Thrown if the input, converted to an integer,
is outside of the range of an unsigned short.
(uint16_t).
"""
return( self._port )
@port.setter
def port( self, po=None ):
if( (not po) and (type( po ) is not int) ):
self._port = None
else:
ponum = int( po )
assert( (0 <= ponum) and (ponum <= 0xFFFF) ), \
    "The given port number is outside the valid range (0..65535)."
self._port = ponum
@property
def path( self ):
"""Get/set the path; string
"""
return( self._path )
@path.setter
def path( self, pa=None ):
    # None (or a blank string) clears the path.
    assert( (pa is None) or isinstance( pa, str ) ), \
        "Expected a pathname string, not a(n) %s" % type( pa ).__name__
    if( (not pa) or (not pa.strip()) ):
        self._path = None
    else:
        self._path = '/' + pa.lstrip( '/' )
@property
def context( self ):
"""Get/set the NBT context key/value pairs; string
See Also: <parseContext>, <composeContext>
"""
return( self._context )
@context.setter
def context( self, cx=None ):
assert( (cx is None) or isinstance( cx, str ) ), \
"Expected a context string, not a(n) %s" % type( cx ).__name__
self._context = composeContext( parseContext( cx ) )
@property
def url( self ):
"""Get/set the SMB URL; string
Notes: Reading the url forces it to be composed from the available
fields (thus ensuring that it is up to date).
"""
return( self.compose() )
@url.setter
def url( self, url=None ):
self.parse( url )
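# The usage sketch below was added for illustration and is not part of
# the original module; the hostname, port, path, and context values are
# arbitrary examples.
if __name__ == "__main__":
    u = smb_url()
    u.hostname = "fileserver"
    u.port = 445
    u.path = "public/readme.txt"
    u.context = "scope=example;nbns=10.0.0.1"
    print u.url
    # smb://fileserver:445/public/readme.txt?scope=example;nbns=10.0.0.1
    print u.dump( indent=2 )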
# ============================================================================ #
| agpl-3.0 |
yuhcaesar/emacsrc | .emacs.d/.python-environments/default/Lib/sre_parse.py | 4 | 28262 | #
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
import sys
from sre_constants import *
SPECIAL_CHARS = ".\\[{()*+?^$|"
REPEAT_CHARS = "*+?{"
DIGITS = set("0123456789")
OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")
WHITESPACE = set(" \t\n\r\v\f")
ESCAPES = {
r"\a": (LITERAL, ord("\a")),
r"\b": (LITERAL, ord("\b")),
r"\f": (LITERAL, ord("\f")),
r"\n": (LITERAL, ord("\n")),
r"\r": (LITERAL, ord("\r")),
r"\t": (LITERAL, ord("\t")),
r"\v": (LITERAL, ord("\v")),
r"\\": (LITERAL, ord("\\"))
}
CATEGORIES = {
r"\A": (AT, AT_BEGINNING_STRING), # start of string
r"\b": (AT, AT_BOUNDARY),
r"\B": (AT, AT_NON_BOUNDARY),
r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
r"\Z": (AT, AT_END_STRING), # end of string
}
FLAGS = {
# standard flags
"i": SRE_FLAG_IGNORECASE,
"L": SRE_FLAG_LOCALE,
"m": SRE_FLAG_MULTILINE,
"s": SRE_FLAG_DOTALL,
"x": SRE_FLAG_VERBOSE,
# extensions
"t": SRE_FLAG_TEMPLATE,
"u": SRE_FLAG_UNICODE,
}
class Pattern:
# master pattern object. keeps track of global attributes
def __init__(self):
self.flags = 0
self.open = []
self.groups = 1
self.groupdict = {}
def opengroup(self, name=None):
gid = self.groups
self.groups = gid + 1
if name is not None:
ogid = self.groupdict.get(name, None)
if ogid is not None:
raise error, ("redefinition of group name %s as group %d; "
"was group %d" % (repr(name), gid, ogid))
self.groupdict[name] = gid
self.open.append(gid)
return gid
def closegroup(self, gid):
self.open.remove(gid)
def checkgroup(self, gid):
return gid < self.groups and gid not in self.open
class SubPattern:
# a subpattern, in intermediate form
def __init__(self, pattern, data=None):
self.pattern = pattern
if data is None:
data = []
self.data = data
self.width = None
def dump(self, level=0):
nl = 1
seqtypes = type(()), type([])
for op, av in self.data:
print level*" " + op,; nl = 0
if op == "in":
# member sublanguage
print; nl = 1
for op, a in av:
print (level+1)*" " + op, a
elif op == "branch":
print; nl = 1
i = 0
for a in av[1]:
if i > 0:
print level*" " + "or"
a.dump(level+1); nl = 1
i = i + 1
elif type(av) in seqtypes:
for a in av:
if isinstance(a, SubPattern):
if not nl: print
a.dump(level+1); nl = 1
else:
print a, ; nl = 0
else:
print av, ; nl = 0
if not nl: print
def __repr__(self):
return repr(self.data)
def __len__(self):
return len(self.data)
def __delitem__(self, index):
del self.data[index]
def __getitem__(self, index):
if isinstance(index, slice):
return SubPattern(self.pattern, self.data[index])
return self.data[index]
def __setitem__(self, index, code):
self.data[index] = code
def insert(self, index, code):
self.data.insert(index, code)
def append(self, code):
self.data.append(code)
def getwidth(self):
# determine the width (min, max) for this subpattern
if self.width:
return self.width
lo = hi = 0
UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
for op, av in self.data:
if op is BRANCH:
i = MAXREPEAT - 1
j = 0
for av in av[1]:
l, h = av.getwidth()
i = min(i, l)
j = max(j, h)
lo = lo + i
hi = hi + j
elif op is CALL:
i, j = av.getwidth()
lo = lo + i
hi = hi + j
elif op is SUBPATTERN:
i, j = av[1].getwidth()
lo = lo + i
hi = hi + j
elif op in REPEATCODES:
i, j = av[2].getwidth()
lo = lo + i * av[0]
hi = hi + j * av[1]
elif op in UNITCODES:
lo = lo + 1
hi = hi + 1
elif op == SUCCESS:
break
self.width = min(lo, MAXREPEAT - 1), min(hi, MAXREPEAT)
return self.width
class Tokenizer:
def __init__(self, string):
self.string = string
self.index = 0
self.__next()
def __next(self):
if self.index >= len(self.string):
self.next = None
return
char = self.string[self.index]
if char[0] == "\\":
try:
c = self.string[self.index + 1]
except IndexError:
raise error, "bogus escape (end of line)"
char = char + c
self.index = self.index + len(char)
self.next = char
def match(self, char, skip=1):
if char == self.next:
if skip:
self.__next()
return 1
return 0
def get(self):
this = self.next
self.__next()
return this
def tell(self):
return self.index, self.next
def seek(self, index):
self.index, self.next = index
def isident(char):
return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_"
def isdigit(char):
return "0" <= char <= "9"
def isname(name):
# check that group name is a valid string
if not isident(name[0]):
return False
for char in name[1:]:
if not isident(char) and not isdigit(char):
return False
return True
def _class_escape(source, escape):
# handle escape code inside character class
code = ESCAPES.get(escape)
if code:
return code
code = CATEGORIES.get(escape)
if code and code[0] == IN:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape (exactly two digits)
while source.next in HEXDIGITS and len(escape) < 4:
escape = escape + source.get()
escape = escape[2:]
if len(escape) != 2:
raise error, "bogus escape: %s" % repr("\\" + escape)
return LITERAL, int(escape, 16) & 0xff
elif c in OCTDIGITS:
# octal escape (up to three digits)
while source.next in OCTDIGITS and len(escape) < 4:
escape = escape + source.get()
escape = escape[1:]
return LITERAL, int(escape, 8) & 0xff
elif c in DIGITS:
raise error, "bogus escape: %s" % repr(escape)
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error, "bogus escape: %s" % repr(escape)
def _escape(source, escape, state):
# handle escape code in expression
code = CATEGORIES.get(escape)
if code:
return code
code = ESCAPES.get(escape)
if code:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape
while source.next in HEXDIGITS and len(escape) < 4:
escape = escape + source.get()
if len(escape) != 4:
raise ValueError
return LITERAL, int(escape[2:], 16) & 0xff
elif c == "0":
# octal escape
while source.next in OCTDIGITS and len(escape) < 4:
escape = escape + source.get()
return LITERAL, int(escape[1:], 8) & 0xff
elif c in DIGITS:
# octal escape *or* decimal group reference (sigh)
if source.next in DIGITS:
escape = escape + source.get()
if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
source.next in OCTDIGITS):
# got three octal digits; this is an octal escape
escape = escape + source.get()
return LITERAL, int(escape[1:], 8) & 0xff
# not an octal escape, so this is a group reference
group = int(escape[1:])
if group < state.groups:
if not state.checkgroup(group):
raise error, "cannot refer to open group"
return GROUPREF, group
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error, "bogus escape: %s" % repr(escape)
def _parse_sub(source, state, nested=1):
# parse an alternation: a|b|c
items = []
itemsappend = items.append
sourcematch = source.match
while 1:
itemsappend(_parse(source, state))
if sourcematch("|"):
continue
if not nested:
break
if not source.next or sourcematch(")", 0):
break
else:
raise error, "pattern not properly closed"
if len(items) == 1:
return items[0]
subpattern = SubPattern(state)
subpatternappend = subpattern.append
# check if all items share a common prefix
while 1:
prefix = None
for item in items:
if not item:
break
if prefix is None:
prefix = item[0]
elif item[0] != prefix:
break
else:
# all subitems start with a common "prefix".
# move it out of the branch
for item in items:
del item[0]
subpatternappend(prefix)
continue # check next one
break
# check if the branch can be replaced by a character set
for item in items:
if len(item) != 1 or item[0][0] != LITERAL:
break
else:
# we can store this as a character set instead of a
# branch (the compiler may optimize this even more)
set = []
setappend = set.append
for item in items:
setappend(item[0])
subpatternappend((IN, set))
return subpattern
subpattern.append((BRANCH, (None, items)))
return subpattern
def _parse_sub_cond(source, state, condgroup):
item_yes = _parse(source, state)
if source.match("|"):
item_no = _parse(source, state)
if source.match("|"):
raise error, "conditional backref with more than two branches"
else:
item_no = None
if source.next and not source.match(")", 0):
raise error, "pattern not properly closed"
subpattern = SubPattern(state)
subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
return subpattern
_PATTERNENDERS = set("|)")
_ASSERTCHARS = set("=!<")
_LOOKBEHINDASSERTCHARS = set("=!")
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
def _parse(source, state):
# parse a simple pattern
subpattern = SubPattern(state)
# precompute constants into local variables
subpatternappend = subpattern.append
sourceget = source.get
sourcematch = source.match
_len = len
PATTERNENDERS = _PATTERNENDERS
ASSERTCHARS = _ASSERTCHARS
LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
REPEATCODES = _REPEATCODES
while 1:
if source.next in PATTERNENDERS:
break # end of subpattern
this = sourceget()
if this is None:
break # end of pattern
if state.flags & SRE_FLAG_VERBOSE:
# skip whitespace and comments
if this in WHITESPACE:
continue
if this == "#":
while 1:
this = sourceget()
if this in (None, "\n"):
break
continue
if this and this[0] not in SPECIAL_CHARS:
subpatternappend((LITERAL, ord(this)))
elif this == "[":
# character set
set = []
setappend = set.append
## if sourcematch(":"):
## pass # handle character classes
if sourcematch("^"):
setappend((NEGATE, None))
# check remaining characters
start = set[:]
while 1:
this = sourceget()
if this == "]" and set != start:
break
elif this and this[0] == "\\":
code1 = _class_escape(source, this)
elif this:
code1 = LITERAL, ord(this)
else:
raise error, "unexpected end of regular expression"
if sourcematch("-"):
# potential range
this = sourceget()
if this == "]":
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
setappend((LITERAL, ord("-")))
break
elif this:
if this[0] == "\\":
code2 = _class_escape(source, this)
else:
code2 = LITERAL, ord(this)
if code1[0] != LITERAL or code2[0] != LITERAL:
raise error, "bad character range"
lo = code1[1]
hi = code2[1]
if hi < lo:
raise error, "bad character range"
setappend((RANGE, (lo, hi)))
else:
raise error, "unexpected end of regular expression"
else:
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
# XXX: <fl> should move set optimization to compiler!
if _len(set)==1 and set[0][0] is LITERAL:
subpatternappend(set[0]) # optimization
elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
subpatternappend((NOT_LITERAL, set[1][1])) # optimization
else:
# XXX: <fl> should add charmap optimization here
subpatternappend((IN, set))
elif this and this[0] in REPEAT_CHARS:
# repeat previous item
if this == "?":
min, max = 0, 1
elif this == "*":
min, max = 0, MAXREPEAT
elif this == "+":
min, max = 1, MAXREPEAT
elif this == "{":
if source.next == "}":
subpatternappend((LITERAL, ord(this)))
continue
here = source.tell()
min, max = 0, MAXREPEAT
lo = hi = ""
while source.next in DIGITS:
lo = lo + source.get()
if sourcematch(","):
while source.next in DIGITS:
hi = hi + sourceget()
else:
hi = lo
if not sourcematch("}"):
subpatternappend((LITERAL, ord(this)))
source.seek(here)
continue
if lo:
min = int(lo)
if min >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if hi:
max = int(hi)
if max >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if max < min:
raise error("bad repeat interval")
else:
raise error, "not supported"
# figure out which item to repeat
if subpattern:
item = subpattern[-1:]
else:
item = None
if not item or (_len(item) == 1 and item[0][0] == AT):
raise error, "nothing to repeat"
if item[0][0] in REPEATCODES:
raise error, "multiple repeat"
if sourcematch("?"):
subpattern[-1] = (MIN_REPEAT, (min, max, item))
else:
subpattern[-1] = (MAX_REPEAT, (min, max, item))
elif this == ".":
subpatternappend((ANY, None))
elif this == "(":
group = 1
name = None
condgroup = None
if sourcematch("?"):
group = 0
# options
if sourcematch("P"):
# python extensions
if sourcematch("<"):
# named group: skip forward to end of name
name = ""
while 1:
char = sourceget()
if char is None:
raise error, "unterminated name"
if char == ">":
break
name = name + char
group = 1
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in group name %r" %
name)
elif sourcematch("="):
# named backreference
name = ""
while 1:
char = sourceget()
if char is None:
raise error, "unterminated name"
if char == ")":
break
name = name + char
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in backref group name "
"%r" % name)
gid = state.groupdict.get(name)
if gid is None:
raise error, "unknown group name"
subpatternappend((GROUPREF, gid))
continue
else:
char = sourceget()
if char is None:
raise error, "unexpected end of pattern"
raise error, "unknown specifier: ?P%s" % char
elif sourcematch(":"):
# non-capturing group
group = 2
elif sourcematch("#"):
# comment
while 1:
if source.next is None or source.next == ")":
break
sourceget()
if not sourcematch(")"):
raise error, "unbalanced parenthesis"
continue
elif source.next in ASSERTCHARS:
# lookahead assertions
char = sourceget()
dir = 1
if char == "<":
if source.next not in LOOKBEHINDASSERTCHARS:
raise error, "syntax error"
dir = -1 # lookbehind
char = sourceget()
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error, "unbalanced parenthesis"
if char == "=":
subpatternappend((ASSERT, (dir, p)))
else:
subpatternappend((ASSERT_NOT, (dir, p)))
continue
elif sourcematch("("):
# conditional backreference group
condname = ""
while 1:
char = sourceget()
if char is None:
raise error, "unterminated name"
if char == ")":
break
condname = condname + char
group = 2
if not condname:
raise error("missing group name")
if isname(condname):
condgroup = state.groupdict.get(condname)
if condgroup is None:
raise error, "unknown group name"
else:
try:
condgroup = int(condname)
except ValueError:
raise error, "bad character in group name"
else:
# flags
if not source.next in FLAGS:
raise error, "unexpected end of pattern"
while source.next in FLAGS:
state.flags = state.flags | FLAGS[sourceget()]
if group:
# parse group contents
if group == 2:
# anonymous group
group = None
else:
group = state.opengroup(name)
if condgroup:
p = _parse_sub_cond(source, state, condgroup)
else:
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error, "unbalanced parenthesis"
if group is not None:
state.closegroup(group)
subpatternappend((SUBPATTERN, (group, p)))
else:
while 1:
char = sourceget()
if char is None:
raise error, "unexpected end of pattern"
if char == ")":
break
raise error, "unknown extension"
elif this == "^":
subpatternappend((AT, AT_BEGINNING))
elif this == "$":
subpattern.append((AT, AT_END))
elif this and this[0] == "\\":
code = _escape(source, this, state)
subpatternappend(code)
else:
raise error, "parser error"
return subpattern
def parse(str, flags=0, pattern=None):
# parse 're' pattern into list of (opcode, argument) tuples
source = Tokenizer(str)
if pattern is None:
pattern = Pattern()
pattern.flags = flags
pattern.str = str
p = _parse_sub(source, pattern, 0)
tail = source.get()
if tail == ")":
raise error, "unbalanced parenthesis"
elif tail:
raise error, "bogus characters at end of regular expression"
if flags & SRE_FLAG_DEBUG:
p.dump()
if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
# the VERBOSE flag was switched on inside the pattern. to be
# on the safe side, we'll parse the whole thing again...
return parse(str, p.pattern.flags)
return p
def parse_template(source, pattern):
# parse 're' replacement string into list of literals and
# group references
s = Tokenizer(source)
sget = s.get
p = []
a = p.append
def literal(literal, p=p, pappend=a):
if p and p[-1][0] is LITERAL:
p[-1] = LITERAL, p[-1][1] + literal
else:
pappend((LITERAL, literal))
sep = source[:0]
if type(sep) is type(""):
makechar = chr
else:
makechar = unichr
while 1:
this = sget()
if this is None:
break # end of replacement string
if this and this[0] == "\\":
# group
c = this[1:2]
if c == "g":
name = ""
if s.match("<"):
while 1:
char = sget()
if char is None:
raise error, "unterminated group name"
if char == ">":
break
name = name + char
if not name:
raise error, "missing group name"
try:
index = int(name)
if index < 0:
raise error, "negative group number"
except ValueError:
if not isname(name):
raise error, "bad character in group name"
try:
index = pattern.groupindex[name]
except KeyError:
raise IndexError, "unknown group name"
a((MARK, index))
elif c == "0":
if s.next in OCTDIGITS:
this = this + sget()
if s.next in OCTDIGITS:
this = this + sget()
literal(makechar(int(this[1:], 8) & 0xff))
elif c in DIGITS:
isoctal = False
if s.next in DIGITS:
this = this + sget()
if (c in OCTDIGITS and this[2] in OCTDIGITS and
s.next in OCTDIGITS):
this = this + sget()
isoctal = True
literal(makechar(int(this[1:], 8) & 0xff))
if not isoctal:
a((MARK, int(this[1:])))
else:
try:
this = makechar(ESCAPES[this][1])
except KeyError:
pass
literal(this)
else:
literal(this)
# convert template to groups and literals lists
i = 0
groups = []
groupsappend = groups.append
literals = [None] * len(p)
for c, s in p:
if c is MARK:
groupsappend((i, s))
# literal[i] is already None
else:
literals[i] = s
i = i + 1
return groups, literals
def expand_template(template, match):
g = match.group
sep = match.string[:0]
groups, literals = template
literals = literals[:]
try:
for index, group in groups:
literals[index] = s = g(group)
if s is None:
raise error, "unmatched group"
except IndexError:
raise error, "invalid group reference"
return sep.join(literals)
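# Illustrative sketch, added for clarity (not part of the original
# module): parse() turns a pattern string into a SubPattern tree of
# (opcode, argument) tuples, and dump() pretty-prints that tree.
if __name__ == "__main__":
    parse(r"a(b|c)+\d").dump()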
| gpl-2.0 |
qiuzhong/crosswalk-test-suite | stability/stability-overload-android-tests/inst.apk.py | 1996 | 3186 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARAMETERS = None
ADB_CMD = "adb"
def doCMD(cmd):
# No need to handle a timeout in this short script; let the tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s uninstall org.xwalk.%s" % (
ADB_CMD, PARAMETERS.device, os.path.basename(os.path.splitext(file)[0]))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def instPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s install %s" % (ADB_CMD,
PARAMETERS.device, os.path.join(root, file))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception as e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.device:
(return_code, output) = doCMD("adb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
if not PARAMETERS.device:
print "No device found"
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
| bsd-3-clause |
areski/django | tests/proxy_models/models.py | 45 | 4514 | """
By specifying the 'proxy' Meta attribute, model subclasses can specify that
they will take data directly from the table of their base class rather
than using a new table of their own. This allows them to act as simple proxies,
providing a modified interface to the data from the base class.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# A couple of managers for testing managing overriding in proxy model cases.
class PersonManager(models.Manager):
def get_queryset(self):
return super(PersonManager, self).get_queryset().exclude(name="fred")
class SubManager(models.Manager):
def get_queryset(self):
return super(SubManager, self).get_queryset().exclude(name="wilma")
@python_2_unicode_compatible
class Person(models.Model):
"""
A simple concrete base class.
"""
name = models.CharField(max_length=50)
objects = PersonManager()
def __str__(self):
return self.name
class Abstract(models.Model):
"""
A simple abstract base class, to be used for error checking.
"""
data = models.CharField(max_length=10)
class Meta:
abstract = True
class MyPerson(Person):
"""
A proxy subclass, this should not get a new table. Overrides the default
manager.
"""
class Meta:
proxy = True
ordering = ["name"]
permissions = (
("display_users", "May display users information"),
)
objects = SubManager()
other = PersonManager()
def has_special_name(self):
return self.name.lower() == "special"
class ManagerMixin(models.Model):
excluder = SubManager()
class Meta:
abstract = True
class OtherPerson(Person, ManagerMixin):
"""
A class with the default manager from Person, plus a secondary manager.
"""
class Meta:
proxy = True
ordering = ["name"]
class StatusPerson(MyPerson):
"""
A non-proxy subclass of a proxy, it should get a new table.
"""
status = models.CharField(max_length=80)
# We can even have proxies of proxies (and subclass of those).
class MyPersonProxy(MyPerson):
class Meta:
proxy = True
class LowerStatusPerson(MyPersonProxy):
status = models.CharField(max_length=80)
@python_2_unicode_compatible
class User(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class UserProxy(User):
class Meta:
proxy = True
class UserProxyProxy(UserProxy):
class Meta:
proxy = True
# We can still use `select_related()` to include related models in our querysets.
class Country(models.Model):
name = models.CharField(max_length=50)
@python_2_unicode_compatible
class State(models.Model):
name = models.CharField(max_length=50)
country = models.ForeignKey(Country, models.CASCADE)
def __str__(self):
return self.name
class StateProxy(State):
class Meta:
proxy = True
# Proxy models still work with filters (on related fields)
# and select_related, even when mixed with model inheritance
@python_2_unicode_compatible
class BaseUser(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return ':'.join((self.__class__.__name__, self.name,))
class TrackerUser(BaseUser):
status = models.CharField(max_length=50)
class ProxyTrackerUser(TrackerUser):
class Meta:
proxy = True
@python_2_unicode_compatible
class Issue(models.Model):
summary = models.CharField(max_length=255)
assignee = models.ForeignKey(ProxyTrackerUser, models.CASCADE, related_name='issues')
def __str__(self):
return ':'.join((self.__class__.__name__, self.summary,))
class Bug(Issue):
version = models.CharField(max_length=50)
reporter = models.ForeignKey(BaseUser, models.CASCADE)
class ProxyBug(Bug):
"""
Proxy of an inherited class
"""
class Meta:
proxy = True
class ProxyProxyBug(ProxyBug):
"""
A proxy of proxy model with related field
"""
class Meta:
proxy = True
class Improvement(Issue):
"""
A model that has relation to a proxy model
or to a proxy of proxy model
"""
version = models.CharField(max_length=50)
reporter = models.ForeignKey(ProxyTrackerUser, models.CASCADE)
associated_bug = models.ForeignKey(ProxyProxyBug, models.CASCADE)
class ProxyImprovement(Improvement):
class Meta:
proxy = True
| bsd-3-clause |
skbkontur/Diamond | src/diamond/handler/mysql.py | 39 | 2931 | # coding=utf-8
"""
Insert the collected values into a mysql table
"""
from Handler import Handler
import MySQLdb
class MySQLHandler(Handler):
"""
Implements the abstract Handler class, sending data to a mysql table
"""
conn = None
def __init__(self, config=None):
"""
Create a new instance of the MySQLHandler class
"""
# Initialize Handler
Handler.__init__(self, config)
# Initialize Options
self.hostname = self.config['hostname']
self.port = int(self.config['port'])
self.username = self.config['username']
self.password = self.config['password']
self.database = self.config['database']
self.table = self.config['table']
self.col_time = self.config['col_time']
self.col_metric = self.config['col_metric']
self.col_value = self.config['col_value']
# Connect
self._connect()
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(MySQLHandler, self).get_default_config_help()
config.update({
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(MySQLHandler, self).get_default_config()
config.update({
})
return config
def __del__(self):
"""
Destroy instance of the MySQLHandler class
"""
self._close()
def process(self, metric):
"""
Process a metric
"""
# Just send the data
self._send(str(metric))
def _send(self, data):
"""
Insert the data
"""
data = data.strip().split(' ')
try:
cursor = self.conn.cursor()
cursor.execute("INSERT INTO %s (%s, %s, %s) VALUES(%%s, %%s, %%s)"
% (self.table, self.col_metric,
self.col_time, self.col_value),
(data[0], data[2], data[1]))
cursor.close()
self.conn.commit()
except BaseException, e:
# Log Error
self.log.error("MySQLHandler: Failed sending data. %s.", e)
# Attempt to re-establish the connection
self._connect()
def _connect(self):
"""
Connect to the MySQL server
"""
self._close()
self.conn = MySQLdb.Connect(host=self.hostname,
port=self.port,
user=self.username,
passwd=self.password,
db=self.database)
def _close(self):
"""
Close the connection
"""
if self.conn:
self.conn.commit()
self.conn.close()
| mit |
dfdx2/django | tests/extra_regress/tests.py | 33 | 16002 | import datetime
from collections import OrderedDict
from django.contrib.auth.models import User
from django.test import TestCase
from .models import Order, RevisionableModel, TestObject
class ExtraRegressTests(TestCase):
def setUp(self):
self.u = User.objects.create_user(
username="fred",
password="secret",
email="fred@example.com"
)
def test_regression_7314_7372(self):
"""
Regression tests for #7314 and #7372
"""
rm = RevisionableModel.objects.create(
title='First Revision',
when=datetime.datetime(2008, 9, 28, 10, 30, 0)
)
self.assertEqual(rm.pk, rm.base.pk)
rm2 = rm.new_revision()
rm2.title = "Second Revision"
rm.when = datetime.datetime(2008, 9, 28, 14, 25, 0)
rm2.save()
self.assertEqual(rm2.title, 'Second Revision')
self.assertEqual(rm2.base.title, 'First Revision')
self.assertNotEqual(rm2.pk, rm.pk)
self.assertEqual(rm2.base.pk, rm.pk)
# Queryset to match most recent revision:
qs = RevisionableModel.objects.extra(
where=["%(table)s.id IN (SELECT MAX(rev.id) FROM %(table)s rev GROUP BY rev.base_id)" % {
'table': RevisionableModel._meta.db_table,
}]
)
self.assertQuerysetEqual(
qs, [('Second Revision', 'First Revision')],
transform=lambda r: (r.title, r.base.title)
)
# Queryset to search for string in title:
qs2 = RevisionableModel.objects.filter(title__contains="Revision")
self.assertQuerysetEqual(
qs2, [
('First Revision', 'First Revision'),
('Second Revision', 'First Revision'),
],
transform=lambda r: (r.title, r.base.title),
ordered=False
)
# Following queryset should return the most recent revision:
self.assertQuerysetEqual(
qs & qs2,
[('Second Revision', 'First Revision')],
transform=lambda r: (r.title, r.base.title),
ordered=False
)
def test_extra_stay_tied(self):
# Extra select parameters should stay tied to their corresponding
# select portions. Applies when portions are updated or otherwise
# moved around.
qs = User.objects.extra(
select=OrderedDict((("alpha", "%s"), ("beta", "2"), ("gamma", "%s"))),
select_params=(1, 3)
)
qs = qs.extra(select={"beta": 4})
qs = qs.extra(select={"alpha": "%s"}, select_params=[5])
self.assertEqual(
list(qs.filter(id=self.u.id).values('alpha', 'beta', 'gamma')),
[{'alpha': 5, 'beta': 4, 'gamma': 3}]
)
def test_regression_7957(self):
"""
Regression test for #7957: Combining extra() calls should leave the
corresponding parameters associated with the right extra() bit. I.e.
internal dictionary must remain sorted.
"""
self.assertEqual(
(User.objects
.extra(select={"alpha": "%s"}, select_params=(1,))
.extra(select={"beta": "%s"}, select_params=(2,))[0].alpha),
1
)
self.assertEqual(
(User.objects
.extra(select={"beta": "%s"}, select_params=(1,))
.extra(select={"alpha": "%s"}, select_params=(2,))[0].alpha),
2
)
def test_regression_7961(self):
"""
Regression test for #7961: When not using a portion of an
extra(...) in a query, remove any corresponding parameters from the
query as well.
"""
self.assertEqual(
list(User.objects.extra(select={"alpha": "%s"}, select_params=(-6,))
.filter(id=self.u.id).values_list('id', flat=True)),
[self.u.id]
)
def test_regression_8063(self):
"""
Regression test for #8063: limiting a query shouldn't discard any
extra() bits.
"""
qs = User.objects.all().extra(where=['id=%s'], params=[self.u.id])
self.assertQuerysetEqual(qs, ['<User: fred>'])
self.assertQuerysetEqual(qs[:1], ['<User: fred>'])
def test_regression_8039(self):
"""
Regression test for #8039: Ordering sometimes removed relevant tables
from extra(). This test is the critical case: ordering uses a table,
but then removes the reference because of an optimization. The table
should still be present because of the extra() call.
"""
self.assertQuerysetEqual(
(Order.objects
.extra(where=["username=%s"], params=["fred"], tables=["auth_user"])
.order_by('created_by')),
[]
)
def test_regression_8819(self):
"""
Regression test for #8819: Fields in the extra(select=...) list
should be available to extra(order_by=...).
"""
self.assertQuerysetEqual(
User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}).distinct(),
['<User: fred>']
)
self.assertQuerysetEqual(
User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}, order_by=['extra_field']),
['<User: fred>']
)
self.assertQuerysetEqual(
User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}, order_by=['extra_field']).distinct(),
['<User: fred>']
)
def test_dates_query(self):
"""
When calling the dates() method on a queryset with extra selection
columns, we can (and should) ignore those columns. They don't change
the result and cause incorrect SQL to be produced otherwise.
"""
RevisionableModel.objects.create(
title='First Revision',
when=datetime.datetime(2008, 9, 28, 10, 30, 0)
)
self.assertSequenceEqual(
RevisionableModel.objects.extra(select={"the_answer": 'id'}).datetimes('when', 'month'),
[datetime.datetime(2008, 9, 1, 0, 0)],
)
def test_values_with_extra(self):
"""
Regression test for #10256... If there is a values() clause, Extra
columns are only returned if they are explicitly mentioned.
"""
obj = TestObject(first='first', second='second', third='third')
obj.save()
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values()
),
[{
'bar': 'second', 'third': 'third', 'second': 'second', 'whiz': 'third', 'foo': 'first',
'id': obj.pk, 'first': 'first'
}]
)
# Extra clauses after an empty values clause are still included
self.assertEqual(
list(
TestObject.objects
.values()
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
),
[{
'bar': 'second', 'third': 'third', 'second': 'second', 'whiz': 'third', 'foo': 'first',
'id': obj.pk, 'first': 'first'
}]
)
# Extra columns are ignored if not mentioned in the values() clause
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values('first', 'second')
),
[{'second': 'second', 'first': 'first'}]
)
# Extra columns after a non-empty values() clause are ignored
self.assertEqual(
list(
TestObject.objects
.values('first', 'second')
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
),
[{'second': 'second', 'first': 'first'}]
)
# Extra columns can be partially returned
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values('first', 'second', 'foo')
),
[{'second': 'second', 'foo': 'first', 'first': 'first'}]
)
# Also works if only extra columns are included
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values('foo', 'whiz')
),
[{'foo': 'first', 'whiz': 'third'}]
)
# Values list works the same way
# All columns are returned for an empty values_list()
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values_list()
),
[('first', 'second', 'third', obj.pk, 'first', 'second', 'third')]
)
# Extra columns after an empty values_list() are still included
self.assertEqual(
list(
TestObject.objects
.values_list()
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
),
[('first', 'second', 'third', obj.pk, 'first', 'second', 'third')]
)
# Extra columns ignored completely if not mentioned in values_list()
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values_list('first', 'second')
),
[('first', 'second')]
)
# Extra columns after a non-empty values_list() clause are ignored completely
self.assertEqual(
list(
TestObject.objects
.values_list('first', 'second')
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
),
[('first', 'second')]
)
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values_list('second', flat=True)
),
['second']
)
# Only the extra columns specified in the values_list() are returned
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values_list('first', 'second', 'whiz')
),
[('first', 'second', 'third')]
)
# ...also works if only extra columns are included
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values_list('foo', 'whiz')
),
[('first', 'third')]
)
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values_list('whiz', flat=True)
),
['third']
)
# ... and values are returned in the order they are specified
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values_list('whiz', 'foo')
),
[('third', 'first')]
)
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values_list('first', 'id')
),
[('first', obj.pk)]
)
self.assertEqual(
list(
TestObject.objects
.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))
.values_list('whiz', 'first', 'bar', 'id')
),
[('third', 'first', 'second', obj.pk)]
)
def test_regression_10847(self):
"""
Regression for #10847: the list of extra columns can always be
accurately evaluated. Using an inner query ensures that as_sql() is
producing correct output without requiring full evaluation and
execution of the inner query.
"""
obj = TestObject(first='first', second='second', third='third')
obj.save()
self.assertEqual(
list(TestObject.objects.extra(select={'extra': 1}).values('pk')),
[{'pk': obj.pk}]
)
self.assertQuerysetEqual(
TestObject.objects.filter(
pk__in=TestObject.objects.extra(select={'extra': 1}).values('pk')
),
['<TestObject: TestObject: first,second,third>']
)
self.assertEqual(
list(TestObject.objects.values('pk').extra(select={'extra': 1})),
[{'pk': obj.pk}]
)
self.assertQuerysetEqual(
TestObject.objects.filter(
pk__in=TestObject.objects.values('pk').extra(select={'extra': 1})
),
['<TestObject: TestObject: first,second,third>']
)
self.assertQuerysetEqual(
TestObject.objects.filter(pk=obj.pk) | TestObject.objects.extra(where=["id > %s"], params=[obj.pk]),
['<TestObject: TestObject: first,second,third>']
)
def test_regression_17877(self):
"""
Extra WHERE clauses get correctly ANDed, even when they
contain OR operations.
"""
# Test Case 1: should appear in queryset.
t = TestObject(first='a', second='a', third='a')
t.save()
# Test Case 2: should appear in queryset.
t = TestObject(first='b', second='a', third='a')
t.save()
# Test Case 3: should not appear in queryset, bug case.
t = TestObject(first='a', second='a', third='b')
t.save()
# Test Case 4: should not appear in queryset.
t = TestObject(first='b', second='a', third='b')
t.save()
# Test Case 5: should not appear in queryset.
t = TestObject(first='b', second='b', third='a')
t.save()
# Test Case 6: should not appear in queryset, bug case.
t = TestObject(first='a', second='b', third='b')
t.save()
self.assertQuerysetEqual(
TestObject.objects.extra(
where=["first = 'a' OR second = 'a'", "third = 'a'"],
),
['<TestObject: TestObject: a,a,a>', '<TestObject: TestObject: b,a,a>'],
ordered=False
)
def test_extra_values_distinct_ordering(self):
t1 = TestObject.objects.create(first='a', second='a', third='a')
t2 = TestObject.objects.create(first='a', second='b', third='b')
qs = TestObject.objects.extra(
select={'second_extra': 'second'}
).values_list('id', flat=True).distinct()
self.assertSequenceEqual(qs.order_by('second_extra'), [t1.pk, t2.pk])
self.assertSequenceEqual(qs.order_by('-second_extra'), [t2.pk, t1.pk])
# Note: the extra ordering must appear in select clause, so we get two
# non-distinct results here (this is on purpose, see #7070).
self.assertSequenceEqual(qs.order_by('-second_extra').values_list('first', flat=True), ['a', 'a'])
| bsd-3-clause |
owers19856/django-cms | cms/south_migrations/0075_use_structure.py | 18 | 20428 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth.models import Permission, Group
from django.contrib.contenttypes.models import ContentType
from django.db import connection
from django.db import models
from django.db.transaction import set_autocommit
from south.db import db
from south.v2 import DataMigration
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.model_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(DataMigration):
def forwards(self, orm):
if connection.vendor == 'sqlite':
set_autocommit(True)
ph_model = orm['cms.Placeholder']
page_model = orm['cms.Page']
user_model = orm[settings.AUTH_USER_MODEL]
try:
ph_ctype = ContentType.objects.get(app_label=ph_model._meta.app_label, model=ph_model._meta.model_name)
page_ctype = ContentType.objects.get(app_label=page_model._meta.app_label, model=page_model._meta.model_name)
permission, _ = Permission.objects.get_or_create(
codename='use_structure', content_type=ph_ctype, name=u"Can use Structure mode")
page_permission = Permission.objects.get(codename='change_page', content_type=page_ctype)
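# Grant the new permission to every staff user and group that can
# already change pages.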
for user in user_model.objects.filter(is_superuser=False, is_staff=True):
if user.has_perm("cms.change_page"):
user.user_permissions.add(permission)
for group in Group.objects.all():
if page_permission in group.permissions.all():
group.permissions.add(permission)
except ContentType.DoesNotExist:
print(u'Cannot migrate users to use_structure permission, please add the permission manually')
def backwards(self, orm):
ph_model = orm['cms.Placeholder']
user_model = orm[settings.AUTH_USER_MODEL]
ph_ctype = ContentType.objects.get(app_label=ph_model._meta.app_label, model=ph_model._meta.model_name)
permission, _ = Permission.objects.get_or_create(
codename='use_structure', content_type=ph_ctype, name=u"Can use Structure mode")
for user in user_model.objects.filter(is_superuser=False, is_staff=True):
if user.has_perm("cms.use_structure"):
user.user_permissions.remove(permission)
for group in Group.objects.all():
if permission in group.permissions.all():
group.permissions.remove(permission)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.aliaspluginmodel': {
'Meta': {'object_name': 'AliasPluginModel', '_ormbases': ['cms.CMSPlugin']},
'alias_placeholder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alias_placeholder'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'plugin': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alias_reference'", 'null': 'True', 'to': "orm['cms.CMSPlugin']"})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('path',)", 'unique_together': "(('publisher_is_draft', 'site', 'application_namespace'), ('reverse_id', 'site', 'publisher_is_draft'))", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'xframe_options': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': u"orm['%s']" % user_orm_label}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['%s']" % user_orm_label, 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': u"orm['%s']" % user_orm_label}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'cms.placeholderreference': {
'Meta': {'object_name': 'PlaceholderReference', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'placeholder_ref': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'})
},
'cms.staticplaceholder': {
'Meta': {'unique_together': "(('code', 'site'),)", 'object_name': 'StaticPlaceholder'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'creation_method': ('django.db.models.fields.CharField', [], {'default': "'code'", 'max_length': '20', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'draft': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_draft'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'public': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_public'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Title']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_usersettings'", 'unique': 'True', 'to': u"orm['%s']" % user_orm_label})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
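    # For orientation (standard South convention, not code from this
    # migration): the frozen 'models' dict above is what South uses to
    # rebuild the historical ORM, and migration code receives it through
    # the 'orm' argument rather than importing live models, e.g.
    #
    #   def backwards(self, orm):
    #       Permission = orm['auth.Permission']  # the frozen model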
complete_apps = ['cms']
| bsd-3-clause |
cynicaldevil/servo | components/script/dom/bindings/codegen/parser/tests/test_interface_maplikesetlikeiterable.py | 74 | 21227 | import WebIDL
import traceback
def WebIDLTest(parser, harness):
def shouldPass(prefix, iface, expectedMembers, numProductions=1):
p = parser.reset()
p.parse(iface)
results = p.finish()
harness.check(len(results), numProductions,
"%s - Should have production count %d" % (prefix, numProductions))
harness.ok(isinstance(results[0], WebIDL.IDLInterface),
"%s - Should be an IDLInterface" % (prefix))
# Make a copy, since we plan to modify it
expectedMembers = list(expectedMembers)
for m in results[0].members:
name = m.identifier.name
if (name, type(m)) in expectedMembers:
harness.ok(True, "%s - %s - Should be a %s" % (prefix, name,
type(m)))
expectedMembers.remove((name, type(m)))
else:
harness.ok(False, "%s - %s - Unknown symbol of type %s" %
(prefix, name, type(m)))
        # A bit of a hoop to jump through: the error string can only be
        # built when a member is actually missing, hence the length check.
if len(expectedMembers) == 0:
harness.ok(True, "Found all the members")
else:
harness.ok(False,
"Expected member not found: %s of type %s" %
(expectedMembers[0][0], expectedMembers[0][1]))
return results
def shouldFail(prefix, iface):
try:
p = parser.reset()
p.parse(iface)
p.finish()
harness.ok(False,
prefix + " - Interface passed when should've failed")
        except WebIDL.WebIDLError:
            harness.ok(True,
                       prefix + " - Interface failed as expected")
        except Exception as e:
            harness.ok(False,
                       prefix + " - Interface failed but not as a WebIDLError exception: %s" % e)
iterableMembers = [(x, WebIDL.IDLMethod) for x in ["entries", "keys",
"values", "forEach"]]
setROMembers = ([(x, WebIDL.IDLMethod) for x in ["has"]] +
[("__setlike", WebIDL.IDLMaplikeOrSetlike)] +
iterableMembers)
setROMembers.extend([("size", WebIDL.IDLAttribute)])
setRWMembers = ([(x, WebIDL.IDLMethod) for x in ["add",
"clear",
"delete"]] +
setROMembers)
setROChromeMembers = ([(x, WebIDL.IDLMethod) for x in ["__add",
"__clear",
"__delete"]] +
setROMembers)
setRWChromeMembers = ([(x, WebIDL.IDLMethod) for x in ["__add",
"__clear",
"__delete"]] +
setRWMembers)
mapROMembers = ([(x, WebIDL.IDLMethod) for x in ["get", "has"]] +
[("__maplike", WebIDL.IDLMaplikeOrSetlike)] +
iterableMembers)
mapROMembers.extend([("size", WebIDL.IDLAttribute)])
mapRWMembers = ([(x, WebIDL.IDLMethod) for x in ["set",
"clear",
"delete"]] + mapROMembers)
mapRWChromeMembers = ([(x, WebIDL.IDLMethod) for x in ["__set",
"__clear",
"__delete"]] +
mapRWMembers)
# OK, now that we've used iterableMembers to set up the above, append
# __iterable to it for the iterable<> case.
iterableMembers.append(("__iterable", WebIDL.IDLIterable))
valueIterableMembers = [("__iterable", WebIDL.IDLIterable)]
valueIterableMembers.append(("__indexedgetter", WebIDL.IDLMethod))
valueIterableMembers.append(("length", WebIDL.IDLAttribute))
disallowedIterableNames = ["keys", "entries", "values"]
disallowedMemberNames = ["forEach", "has", "size"] + disallowedIterableNames
mapDisallowedMemberNames = ["get"] + disallowedMemberNames
disallowedNonMethodNames = ["clear", "delete"]
mapDisallowedNonMethodNames = ["set"] + disallowedNonMethodNames
setDisallowedNonMethodNames = ["add"] + disallowedNonMethodNames
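    # Reading the lists above as JS-visible API: a read-write maplike is
    # expected to surface entries/keys/values/forEach plus get/has and
    # set/clear/delete with a size attribute (mirroring the JS Map API),
    # while setlike mirrors JS Set. The "__"-prefixed method entries
    # (__add, __clear, __delete, __set) only appear in the chrome-visible
    # member lists used for the JS-implemented and read-only cases below.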
#
# Simple Usage Tests
#
shouldPass("Iterable (key only)",
"""
interface Foo1 {
iterable<long>;
readonly attribute unsigned long length;
getter long(unsigned long index);
};
""", valueIterableMembers)
shouldPass("Iterable (key and value)",
"""
interface Foo1 {
iterable<long, long>;
};
""", iterableMembers,
# numProductions == 2 because of the generated iterator iface,
numProductions=2)
shouldPass("Maplike (readwrite)",
"""
interface Foo1 {
maplike<long, long>;
};
""", mapRWMembers)
shouldPass("Maplike (readwrite)",
"""
interface Foo1 {
maplike<long, long>;
};
""", mapRWMembers)
shouldPass("Maplike (readonly)",
"""
interface Foo1 {
readonly maplike<long, long>;
};
""", mapROMembers)
shouldPass("Setlike (readwrite)",
"""
interface Foo1 {
setlike<long>;
};
""", setRWMembers)
shouldPass("Setlike (readonly)",
"""
interface Foo1 {
readonly setlike<long>;
};
""", setROMembers)
shouldPass("Inheritance of maplike/setlike",
"""
interface Foo1 {
maplike<long, long>;
};
interface Foo2 : Foo1 {
};
""", mapRWMembers, numProductions=2)
shouldPass("Implements with maplike/setlike",
"""
interface Foo1 {
maplike<long, long>;
};
interface Foo2 {
};
Foo2 implements Foo1;
""", mapRWMembers, numProductions=3)
shouldPass("JS Implemented maplike interface",
"""
[JSImplementation="@mozilla.org/dom/test-interface-js-maplike;1",
Constructor()]
interface Foo1 {
setlike<long>;
};
""", setRWChromeMembers)
shouldPass("JS Implemented maplike interface",
"""
[JSImplementation="@mozilla.org/dom/test-interface-js-maplike;1",
Constructor()]
interface Foo1 {
maplike<long, long>;
};
""", mapRWChromeMembers)
#
# Multiple maplike/setlike tests
#
shouldFail("Two maplike/setlikes on same interface",
"""
interface Foo1 {
setlike<long>;
maplike<long, long>;
};
""")
shouldFail("Two iterable/setlikes on same interface",
"""
interface Foo1 {
iterable<long>;
maplike<long, long>;
};
""")
shouldFail("Two iterables on same interface",
"""
interface Foo1 {
iterable<long>;
iterable<long, long>;
};
""")
shouldFail("Two maplike/setlikes in partials",
"""
interface Foo1 {
maplike<long, long>;
};
partial interface Foo1 {
setlike<long>;
};
""")
shouldFail("Conflicting maplike/setlikes across inheritance",
"""
interface Foo1 {
maplike<long, long>;
};
interface Foo2 : Foo1 {
setlike<long>;
};
""")
shouldFail("Conflicting maplike/iterable across inheritance",
"""
interface Foo1 {
maplike<long, long>;
};
interface Foo2 : Foo1 {
iterable<long>;
};
""")
shouldFail("Conflicting maplike/setlikes across multistep inheritance",
"""
interface Foo1 {
maplike<long, long>;
};
interface Foo2 : Foo1 {
};
interface Foo3 : Foo2 {
setlike<long>;
};
""")
shouldFail("Consequential interface with conflicting maplike/setlike",
"""
interface Foo1 {
maplike<long, long>;
};
interface Foo2 {
setlike<long>;
};
Foo2 implements Foo1;
""")
shouldFail("Consequential interfaces with conflicting maplike/setlike",
"""
interface Foo1 {
maplike<long, long>;
};
interface Foo2 {
setlike<long>;
};
interface Foo3 {
};
Foo3 implements Foo1;
Foo3 implements Foo2;
""")
#
# Member name collision tests
#
def testConflictingMembers(likeMember, conflictName, expectedMembers, methodPasses):
"""
Tests for maplike/setlike member generation against conflicting member
names. If methodPasses is True, this means we expect the interface to
pass in the case of method shadowing, and expectedMembers should be the
list of interface members to check against on the passing interface.
"""
if methodPasses:
shouldPass("Conflicting method: %s and %s" % (likeMember, conflictName),
"""
interface Foo1 {
%s;
[Throws]
void %s(long test1, double test2, double test3);
};
""" % (likeMember, conflictName), expectedMembers)
else:
shouldFail("Conflicting method: %s and %s" % (likeMember, conflictName),
"""
interface Foo1 {
%s;
[Throws]
void %s(long test1, double test2, double test3);
};
""" % (likeMember, conflictName))
# Inherited conflicting methods should ALWAYS fail
shouldFail("Conflicting inherited method: %s and %s" % (likeMember, conflictName),
"""
interface Foo1 {
void %s(long test1, double test2, double test3);
};
interface Foo2 : Foo1 {
%s;
};
""" % (conflictName, likeMember))
shouldFail("Conflicting static method: %s and %s" % (likeMember, conflictName),
"""
interface Foo1 {
%s;
static void %s(long test1, double test2, double test3);
};
""" % (likeMember, conflictName))
shouldFail("Conflicting attribute: %s and %s" % (likeMember, conflictName),
"""
interface Foo1 {
                   %s;
attribute double %s;
};
""" % (likeMember, conflictName))
shouldFail("Conflicting const: %s and %s" % (likeMember, conflictName),
"""
interface Foo1 {
%s;
const double %s = 0;
};
""" % (likeMember, conflictName))
shouldFail("Conflicting static attribute: %s and %s" % (likeMember, conflictName),
"""
interface Foo1 {
%s;
static attribute long %s;
};
""" % (likeMember, conflictName))
for member in disallowedIterableNames:
testConflictingMembers("iterable<long, long>", member, iterableMembers, False)
for member in mapDisallowedMemberNames:
testConflictingMembers("maplike<long, long>", member, mapRWMembers, False)
for member in disallowedMemberNames:
testConflictingMembers("setlike<long>", member, setRWMembers, False)
for member in mapDisallowedNonMethodNames:
testConflictingMembers("maplike<long, long>", member, mapRWMembers, True)
for member in setDisallowedNonMethodNames:
testConflictingMembers("setlike<long>", member, setRWMembers, True)
shouldPass("Inheritance of maplike/setlike with child member collision",
"""
interface Foo1 {
maplike<long, long>;
};
interface Foo2 : Foo1 {
void entries();
};
""", mapRWMembers, numProductions=2)
shouldPass("Inheritance of multi-level maplike/setlike with child member collision",
"""
interface Foo1 {
maplike<long, long>;
};
interface Foo2 : Foo1 {
};
interface Foo3 : Foo2 {
void entries();
};
""", mapRWMembers, numProductions=3)
shouldFail("Interface with consequential maplike/setlike interface member collision",
"""
interface Foo1 {
void entries();
};
interface Foo2 {
maplike<long, long>;
};
Foo1 implements Foo2;
""")
shouldFail("Maplike interface with consequential interface member collision",
"""
interface Foo1 {
maplike<long, long>;
};
interface Foo2 {
void entries();
};
Foo1 implements Foo2;
""")
shouldPass("Consequential Maplike interface with inherited interface member collision",
"""
interface Foo1 {
maplike<long, long>;
};
interface Foo2 {
void entries();
};
interface Foo3 : Foo2 {
};
Foo3 implements Foo1;
""", mapRWMembers, numProductions=4)
shouldPass("Inherited Maplike interface with consequential interface member collision",
"""
interface Foo1 {
maplike<long, long>;
};
interface Foo2 {
void entries();
};
interface Foo3 : Foo1 {
};
Foo3 implements Foo2;
""", mapRWMembers, numProductions=4)
shouldFail("Inheritance of name collision with child maplike/setlike",
"""
interface Foo1 {
void entries();
};
interface Foo2 : Foo1 {
maplike<long, long>;
};
""")
shouldFail("Inheritance of multi-level name collision with child maplike/setlike",
"""
interface Foo1 {
void entries();
};
interface Foo2 : Foo1 {
};
interface Foo3 : Foo2 {
maplike<long, long>;
};
""")
shouldPass("Inheritance of attribute collision with parent maplike/setlike",
"""
interface Foo1 {
maplike<long, long>;
};
interface Foo2 : Foo1 {
attribute double size;
};
""", mapRWMembers, numProductions=2)
shouldPass("Inheritance of multi-level attribute collision with parent maplike/setlike",
"""
interface Foo1 {
maplike<long, long>;
};
interface Foo2 : Foo1 {
};
interface Foo3 : Foo2 {
attribute double size;
};
""", mapRWMembers, numProductions=3)
shouldFail("Inheritance of attribute collision with child maplike/setlike",
"""
interface Foo1 {
attribute double size;
};
interface Foo2 : Foo1 {
maplike<long, long>;
};
""")
shouldFail("Inheritance of multi-level attribute collision with child maplike/setlike",
"""
interface Foo1 {
attribute double size;
};
interface Foo2 : Foo1 {
};
interface Foo3 : Foo2 {
maplike<long, long>;
};
""")
shouldFail("Inheritance of attribute/rw function collision with child maplike/setlike",
"""
interface Foo1 {
attribute double set;
};
interface Foo2 : Foo1 {
maplike<long, long>;
};
""")
shouldFail("Inheritance of const/rw function collision with child maplike/setlike",
"""
interface Foo1 {
const double set = 0;
};
interface Foo2 : Foo1 {
maplike<long, long>;
};
""")
shouldPass("Inheritance of rw function with same name in child maplike/setlike",
"""
interface Foo1 {
maplike<long, long>;
};
interface Foo2 : Foo1 {
void clear();
};
""", mapRWMembers, numProductions=2)
shouldFail("Inheritance of unforgeable attribute collision with child maplike/setlike",
"""
interface Foo1 {
[Unforgeable]
attribute double size;
};
interface Foo2 : Foo1 {
maplike<long, long>;
};
""")
shouldFail("Inheritance of multi-level unforgeable attribute collision with child maplike/setlike",
"""
interface Foo1 {
[Unforgeable]
attribute double size;
};
interface Foo2 : Foo1 {
};
interface Foo3 : Foo2 {
maplike<long, long>;
};
""")
shouldPass("Implemented interface with readonly allowable overrides",
"""
interface Foo1 {
readonly setlike<long>;
readonly attribute boolean clear;
};
""", setROMembers + [("clear", WebIDL.IDLAttribute)])
shouldPass("JS Implemented read-only interface with readonly allowable overrides",
"""
[JSImplementation="@mozilla.org/dom/test-interface-js-maplike;1",
Constructor()]
interface Foo1 {
readonly setlike<long>;
readonly attribute boolean clear;
};
""", setROChromeMembers + [("clear", WebIDL.IDLAttribute)])
shouldFail("JS Implemented read-write interface with non-readwrite allowable overrides",
"""
[JSImplementation="@mozilla.org/dom/test-interface-js-maplike;1",
Constructor()]
interface Foo1 {
setlike<long>;
readonly attribute boolean clear;
};
""")
r = shouldPass("Check proper override of clear/delete/set",
"""
interface Foo1 {
maplike<long, long>;
long clear(long a, long b, double c, double d);
long set(long a, long b, double c, double d);
long delete(long a, long b, double c, double d);
};
""", mapRWMembers)
for m in r[0].members:
if m.identifier.name in ["clear", "set", "delete"]:
harness.ok(m.isMethod(), "%s should be a method" % m.identifier.name)
harness.check(m.maxArgCount, 4, "%s should have 4 arguments" % m.identifier.name)
harness.ok(not m.isMaplikeOrSetlikeOrIterableMethod(),
"%s should not be a maplike/setlike function" % m.identifier.name)
| mpl-2.0 |
willprice/weboob | modules/cuisineaz/test.py | 4 | 1210 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
import itertools
class CuisineazTest(BackendTest):
MODULE = 'cuisineaz'
def test_recipe(self):
recipes = list(itertools.islice(self.backend.iter_recipes(u'purée'), 0, 20))
assert len(recipes)
for recipe in recipes:
full_recipe = self.backend.get_recipe(recipe.id)
assert full_recipe.instructions
assert full_recipe.ingredients
assert full_recipe.title
| agpl-3.0 |
genonfire/bbgo | boards/models.py | 1 | 4035 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse_lazy
from django.utils.translation import ugettext as _
class Board(models.Model):
"""Board of boards"""
    # A tuple rather than a set keeps the choices in a stable order, so
    # the rendered select widget and generated migrations stay
    # deterministic across processes.
    BOARD_STATUS = (
        ('1normal', _('status_normal')),
        ('2temp', _('status_temp')),
        ('3notice', _('status_notice')),
        ('4warning', _('status_warning')),
        ('5hidden', _('status_hidden')),
        ('6deleted', _('status_deleted')),
    )
table = models.IntegerField(default=0)
status = models.CharField(max_length=10, choices=BOARD_STATUS, default='1normal')
user = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now_add=True)
ip = models.GenericIPAddressField()
category = models.CharField(max_length=23, blank=True)
subject = models.CharField(max_length=41)
content = models.TextField()
view_count = models.IntegerField(default=0)
reply_count = models.IntegerField(default=0)
like_count = models.IntegerField(default=0)
dislike_count = models.IntegerField(default=0)
like_users = models.ManyToManyField(
User, related_name="board_like_users", default='', blank=True)
dislike_users = models.ManyToManyField(
User, related_name="board_dislike_users", default='', blank=True)
reference = models.CharField(max_length=1855, default='', blank=True)
has_image = models.BooleanField(default=False)
has_video = models.BooleanField(default=False)
def get_absolute_url(self):
"""Back to list"""
return reverse_lazy('boards:show_list', args=[self.table, 1])
def get_article_url(self):
"""Back to article"""
return reverse_lazy('boards:show_article', args=[self.id])
def get_edit_url(self):
"""Stay editing"""
return reverse_lazy('boards:edit_article', args=[self.id])
def get_status_text(self):
"""Get status text"""
if self.status == '1normal':
return _('status_normal')
elif self.status == '2temp':
return _('status_temp')
elif self.status == '3notice':
return _('status_notice')
elif self.status == '4warning':
return _('status_warning')
elif self.status == '5hidden':
return _('status_hidden')
elif self.status == '6deleted':
return _('status_deleted')
def get_image_text(self):
"""Get image text"""
return '<img src="/upload/django-summernote/'
def get_video_text(self):
"""Get video text"""
return '<iframe frameborder="0" src="//www.youtube.com/'
class Reply(models.Model):
"""Reply of boards"""
    # Tuple instead of a set for a stable choice order, as with
    # BOARD_STATUS above.
    REPLY_STATUS = (
        ('1normal', _('status_normal')),
        ('5hidden', _('status_hidden')),
        ('6deleted', _('status_deleted')),
    )
article_id = models.IntegerField(default=0)
reply_id = models.IntegerField(default=0)
reply_to = models.CharField(max_length=150, default='', blank=True)
status = models.CharField(
max_length=10, choices=REPLY_STATUS, default='1normal')
user = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now_add=True)
ip = models.GenericIPAddressField()
content = models.TextField(max_length=settings.REPLY_TEXT_MAX)
image = models.ImageField(upload_to="reply-images/%Y-%m-%d/", blank=True)
like_count = models.IntegerField(default=0)
dislike_count = models.IntegerField(default=0)
like_users = models.ManyToManyField(
User, related_name="reply_like_users", default='', blank=True)
dislike_users = models.ManyToManyField(
User, related_name="reply_dislike_users", default='', blank=True)
| mit |
MadManRises/Madgine | shared/assimp/contrib/gtest/test/gtest_help_test.py | 2968 | 5856 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import gtest_test_utils
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
re.sub('^--', '/', LIST_TESTS_FLAG),
re.sub('_', '-', LIST_TESTS_FLAG)]
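# For the default LIST_TESTS_FLAG these variants work out to
# '-gtest_list_tests', '/gtest_list_tests' and '--gtest-list-tests':
# plausible-looking but incorrectly styled spellings of the flag.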
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
[PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
HELP_REGEX = re.compile(
FLAG_PREFIX + r'list_tests.*' +
FLAG_PREFIX + r'filter=.*' +
FLAG_PREFIX + r'also_run_disabled_tests.*' +
FLAG_PREFIX + r'repeat=.*' +
FLAG_PREFIX + r'shuffle.*' +
FLAG_PREFIX + r'random_seed=.*' +
FLAG_PREFIX + r'color=.*' +
FLAG_PREFIX + r'print_time.*' +
FLAG_PREFIX + r'output=.*' +
FLAG_PREFIX + r'break_on_failure.*' +
FLAG_PREFIX + r'throw_on_failure.*' +
FLAG_PREFIX + r'catch_exceptions=0.*',
re.DOTALL)
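# Note: with re.DOTALL the '.*' separators match across newlines, so the
# flags above only need to appear in this relative order somewhere in the
# help output, not on consecutive lines.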
def RunWithFlag(flag):
"""Runs gtest_help_test_ with the given flag.
Returns:
the exit code and the text output as a tuple.
Args:
flag: the command-line flag to pass to gtest_help_test_, or None.
"""
if flag is None:
command = [PROGRAM_PATH]
else:
command = [PROGRAM_PATH, flag]
child = gtest_test_utils.Subprocess(command)
return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
"""Tests the --help flag and its equivalent forms."""
def TestHelpFlag(self, flag):
"""Verifies correct behavior when help flag is specified.
    The right message must be printed and the tests must be
    skipped when the given flag is specified.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assertEquals(0, exit_code)
self.assert_(HELP_REGEX.search(output), output)
if IS_LINUX:
self.assert_(STREAM_RESULT_TO_FLAG in output, output)
else:
self.assert_(STREAM_RESULT_TO_FLAG not in output, output)
if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
else:
self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)
def TestNonHelpFlag(self, flag):
"""Verifies correct behavior when no help flag is specified.
Verifies that when no help flag is specified, the tests are run
and the help message is not printed.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assert_(exit_code != 0)
self.assert_(not HELP_REGEX.search(output), output)
def testPrintsHelpWithFullFlag(self):
self.TestHelpFlag('--help')
def testPrintsHelpWithShortFlag(self):
self.TestHelpFlag('-h')
def testPrintsHelpWithQuestionFlag(self):
self.TestHelpFlag('-?')
def testPrintsHelpWithWindowsStyleQuestionFlag(self):
self.TestHelpFlag('/?')
def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
self.TestHelpFlag(UNKNOWN_FLAG)
def testPrintsHelpWithIncorrectFlagStyle(self):
for incorrect_flag in INCORRECT_FLAG_VARIANTS:
self.TestHelpFlag(incorrect_flag)
def testRunsTestsWithoutHelpFlag(self):
"""Verifies that when no help flag is specified, the tests are run
and the help message is not printed."""
self.TestNonHelpFlag(None)
def testRunsTestsWithGtestInternalFlag(self):
"""Verifies that the tests are run and no help message is printed when
a flag starting with Google Test prefix and 'internal_' is supplied."""
self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
gtest_test_utils.Main()
| mit |
Lab603/PicEncyclopedias | jni-build/jni/include/tensorflow/python/kernel_tests/tensor_array_ops_test.py | 5 | 36967 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tensor_array_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import tensor_array_grad
from tensorflow.python.ops import tensor_array_ops
class TensorArrayCPUTest(tf.test.TestCase):
_use_gpu = False
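  # Every test below passes use_gpu=self._use_gpu to test_session(), so a
  # GPU variant can presumably reuse the whole suite by subclassing this
  # class with _use_gpu = True.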
def testTensorArrayWriteRead(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
def _testTensorArrayWritePack(self, tf_dtype):
dtype = tf_dtype.as_numpy_dtype()
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
if tf_dtype == tf.string:
# In Python3, np.str is unicode, while we always want bytes
convert = lambda x: np.asarray(x).astype("|S")
else:
convert = lambda x: np.asarray(x).astype(dtype)
w0 = ta.write(0, convert([[4.0, 5.0]]))
w1 = w0.write(1, convert([[6.0, 7.0]]))
w2 = w1.write(2, convert([[8.0, 9.0]]))
c0 = w2.pack()
self.assertAllEqual(
convert([[[4.0, 5.0]], [[6.0, 7.0]], [[8.0, 9.0]]]), c0.eval())
def testTensorArrayWritePack(self):
self._testTensorArrayWritePack(tf.float32)
self._testTensorArrayWritePack(tf.float64)
self._testTensorArrayWritePack(tf.int32)
self._testTensorArrayWritePack(tf.int64)
self._testTensorArrayWritePack(tf.complex64)
self._testTensorArrayWritePack(tf.complex128)
self._testTensorArrayWritePack(tf.string)
def _testTensorArrayWriteConcat(self, tf_dtype):
dtype = tf_dtype.as_numpy_dtype()
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3, infer_shape=False)
if tf_dtype == tf.string:
# In Python3, np.str is unicode, while we always want bytes
convert = lambda x: np.asarray(x).astype("|S")
else:
convert = lambda x: np.asarray(x).astype(dtype)
w0 = ta.write(0, convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0]]))
w1 = w0.write(1, convert([[6.0, 7.0], [106.0, 107.0]]))
w2 = w1.write(2, convert([[8.0, 9.0]]))
c0 = w2.concat()
self.assertAllEqual(
convert([[4.0, 5.0],
[104.0, 105.0],
[204.0, 205.0],
[6.0, 7.0],
[106.0, 107.0],
[8.0, 9.0]]), c0.eval())
def testTensorArrayWriteConcat(self):
self._testTensorArrayWriteConcat(tf.float32)
self._testTensorArrayWriteConcat(tf.float64)
self._testTensorArrayWriteConcat(tf.int32)
self._testTensorArrayWriteConcat(tf.int64)
self._testTensorArrayWriteConcat(tf.complex64)
self._testTensorArrayWriteConcat(tf.complex128)
self._testTensorArrayWriteConcat(tf.string)
def testTensorArrayUnpackWrongMajorSizeFails(self):
with self.test_session():
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
with self.assertRaisesOpError(
r"Input value must have first dimension "
r"equal to the array size \(2 vs. 3\)"):
ta.unpack([1.0, 2.0]).flow.eval()
def testTensorArrayPackNotAllValuesAvailableFails(self):
with self.test_session():
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
with self.assertRaisesOpError(
"Could not read from TensorArray index 1 "
"because it has not yet been written to."):
ta.write(0, [[4.0, 5.0]]).pack().eval()
def _testTensorArrayUnpackRead(self, tf_dtype):
dtype = tf_dtype.as_numpy_dtype()
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
if tf_dtype is tf.string:
# In Python3, np.str is unicode, while we always want bytes
convert = lambda x: np.asarray(x).astype("|S")
else:
convert = lambda x: np.asarray(x).astype(dtype)
# Unpack a vector into scalars
w0 = ta.unpack(convert([1.0, 2.0, 3.0]))
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert(1.0), d0)
self.assertAllEqual(convert(2.0), d1)
self.assertAllEqual(convert(3.0), d2)
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
# Unpack a matrix into vectors
w1 = ta.unpack(convert([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]]))
r0 = w1.read(0)
r1 = w1.read(1)
r2 = w1.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([1.0, 1.1]), d0)
self.assertAllEqual(convert([2.0, 2.1]), d1)
self.assertAllEqual(convert([3.0, 3.1]), d2)
def testTensorArrayUnpackRead(self):
self._testTensorArrayUnpackRead(tf.float32)
self._testTensorArrayUnpackRead(tf.float64)
self._testTensorArrayUnpackRead(tf.int32)
self._testTensorArrayUnpackRead(tf.int64)
self._testTensorArrayUnpackRead(tf.complex64)
self._testTensorArrayUnpackRead(tf.complex128)
self._testTensorArrayUnpackRead(tf.string)
def _testTensorArraySplitRead(self, tf_dtype):
dtype = tf_dtype.as_numpy_dtype()
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3, infer_shape=False)
if tf_dtype == tf.string:
# In Python3, np.str is unicode, while we always want bytes
convert = lambda x: np.asarray(x).astype("|S")
else:
convert = lambda x: np.asarray(x).astype(dtype)
# Split an empty vector
lengths = tf.constant([0, 0, 0])
w0 = ta.split(convert([]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([]), d2)
# Split a vector
lengths = tf.constant([2, 0, 1])
w0 = ta.split(
convert([1.0, 2.0, 3.0]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([1.0, 2.0]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([3.0]), d2)
# Split a matrix
lengths = tf.constant([2, 0, 1])
w0 = ta.split(
convert([[1.0, 101.0], [2.0, 201.0], [3.0, 301.0]]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([[1.0, 101.0], [2.0, 201.0]]), d0)
self.assertAllEqual(convert([]).reshape(0, 2), d1)
self.assertAllEqual(convert([[3.0, 301.0]]), d2)
def testTensorArraySplitRead(self):
self._testTensorArraySplitRead(tf.float32)
self._testTensorArraySplitRead(tf.float64)
self._testTensorArraySplitRead(tf.int32)
self._testTensorArraySplitRead(tf.int64)
self._testTensorArraySplitRead(tf.complex64)
self._testTensorArraySplitRead(tf.complex128)
self._testTensorArraySplitRead(tf.string)
def testTensorGradArrayWriteRead(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=False)
g_ta = ta.grad("grad")
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
g_w0 = g_ta.write(0, [[5.0, 6.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, -2.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
d0, d1, d2, g_d0, g_d1, g_d2 = session.run([r0, r1, r2, g_r0, g_r1, g_r2])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
self.assertAllEqual([[5.0, 6.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual(-2.0, g_d2)
def testTensorGradArrayDynamicWriteRead(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=0, dynamic_size=True,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
g_ta = w2.grad("grad") # Get gradient array here so we know the shape
s = w2.size()
g_s = g_ta.size()
g_w0 = g_ta.write(0, [[5.0, 6.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, -2.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
d0, d1, d2, g_d0, g_d1, g_d2, vs, g_vs = session.run([
r0, r1, r2, g_r0, g_r1, g_r2, s, g_s])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
self.assertAllEqual([[5.0, 6.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual(-2.0, g_d2)
self.assertAllEqual(3, vs)
self.assertAllEqual(3, g_vs)
def testTensorGradAccessTwiceReceiveSameObject(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
g_ta_0 = ta.grad("grad")
g_ta_1 = ta.grad("grad")
with tf.control_dependencies([g_ta_0.write(0, [[4.0, 5.0]]).flow]):
# Write with one gradient handle, read with another copy of it
r1_0 = g_ta_1.read(0)
t_g_ta_0, t_g_ta_1, d_r1_0 = session.run(
[g_ta_0.handle, g_ta_1.handle, r1_0])
self.assertAllEqual(t_g_ta_0, t_g_ta_1)
self.assertAllEqual([[4.0, 5.0]], d_r1_0)
def testTensorArrayWriteWrongIndexOrDataTypeFails(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
# Test writing the wrong datatype
with self.assertRaisesOpError(
"TensorArray dtype is float but Op is trying to write dtype string"):
ta.write(-1, "wrong_type_scalar").flow.eval()
# Test writing to a negative index
with self.assertRaisesOpError(
"Tried to write to index -1 but array is not "
"resizeable and size is: 3"):
ta.write(-1, 3.0).flow.eval()
# Test reading from too large an index
with self.assertRaisesOpError(
"Tried to write to index 3 but array is not "
"resizeable and size is: 3"):
ta.write(3, 3.0).flow.eval()
def testTensorArrayReadWrongIndexOrDataTypeFails(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
w0 = ta.write(0, [[4.0, 5.0]])
# Test reading wrong datatype
r0_bad = gen_data_flow_ops._tensor_array_read(
handle=w0.handle, index=0, dtype=tf.int64, flow_in=w0.flow)
with self.assertRaisesOpError(
"TensorArray dtype is float but Op requested dtype int64."):
r0_bad.eval()
# Test reading from a different index than the one we wrote to
r1 = w0.read(1)
with self.assertRaisesOpError(
"Could not read from TensorArray index 1 because "
"it has not yet been written to."):
r1.eval()
# Test reading from a negative index
with self.assertRaisesOpError(
r"Tried to read from index -1 but array size is: 3"):
ta.read(-1).eval()
# Test reading from too large an index
with self.assertRaisesOpError(
"Tried to read from index 3 but array size is: 3"):
ta.read(3).eval()
def testTensorArrayWriteMultipleFails(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
with self.assertRaisesOpError(
"Could not write to TensorArray index 2 because "
"it has already been written to."):
ta.write(2, 3.0).write(2, 3.0).flow.eval()
def testTensorArrayConcatIncompatibleShapesFails(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=False)
w1 = ta.write(0, 3.0)
w2 = w1.write(1, 4.0)
w3 = w2.write(2, [3.0])
with self.assertRaisesOpError(
"Concat saw a scalar shape at index 0 but requires at least vectors"):
w3.concat().eval()
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=False)
w1 = ta.write(0, [3.0])
w2 = w1.write(1, [4.0])
w3 = w2.write(2, [[3.0]])
with self.assertRaisesOpError(
r"TensorArray has inconsistent shapes. Index 0 has "
r"\(excepting dimension 0\) shape: \[\] but index 2 has \(excepting "
r"dimension 0\) shape: \[1\]"):
w3.concat().eval()
def testTensorArraySplitIncompatibleShapesFails(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=False)
with self.assertRaisesOpError(
r"Expected lengths to be a vector, received shape: \[\]"):
lengths = tf.placeholder(tf.int64)
ta.split([1.0, 2.0, 3.0], lengths).flow.eval(feed_dict={lengths: 1})
with self.assertRaisesOpError(
r"Expected sum of lengths to be equal to values.shape\[0\], "
r"but sum of lengths is 1 and value's shape is: \[3\]"):
ta.split([1.0, 2.0, 3.0], [1]).flow.eval()
with self.assertRaisesOpError(
r"Expected value to be at least a vector, but received shape: \[\]"):
ta.split(1.0, [1]).flow.eval()
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=2, infer_shape=False)
with self.assertRaisesOpError(
r"TensorArray's size is not equal to the size of lengths "
r"\(2 vs. 1\), and the TensorArray is not marked as "
r"dynamically resizeable"):
ta.split([1.0], [1]).flow.eval()
def _testTensorArrayWriteGradientAddMultipleAdds(self, dtype):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name="foo", size=3, infer_shape=False)
ta_grad = ta.grad("grad")
c = lambda x: np.asarray(x, dtype=dtype.as_numpy_dtype)
w0 = ta.write(2, c(3.0))
w1 = w0.write(2, c(4.0))
w0_grad = ta_grad.write(2, c(3.0))
w1_grad = w0_grad.write(2, c(4.0))
w2_grad = w1_grad.write(2, c(5.0))
# Assert that aggregation works correctly
self.assertAllEqual(c(12.00), w2_grad.read(2).eval())
# Assert that if multiple_writes_aggregate is not enabled,
# multiple writes raise an exception.
with self.assertRaisesOpError(
r"TensorArray foo_.*: Could not write to TensorArray index 2 because "
r"it has already been written to."):
w1.flow.eval()
# Using differing shapes causes an exception
wb0_grad = ta_grad.write(1, c(1.0))
wb1_grad = wb0_grad.write(1, c([1.0]))
with self.assertRaisesOpError(
r"Could not aggregate to TensorArray index 1 because the "
r"existing shape is \[\] but the new input shape is \[1\]"):
wb1_grad.flow.eval()
def testTensorArrayWriteGradientAddMultipleAdds(self):
for dtype in (tf.int32, tf.int64, tf.float32,
tf.float64, tf.complex64, tf.complex128):
self._testTensorArrayWriteGradientAddMultipleAdds(dtype)
def testMultiTensorArray(self):
with self.test_session(use_gpu=self._use_gpu):
h1 = tensor_array_ops.TensorArray(
size=1, dtype=tf.float32, tensor_array_name="foo")
w1 = h1.write(0, 4.0)
r1 = w1.read(0)
h2 = tensor_array_ops.TensorArray(
size=1, dtype=tf.float32, tensor_array_name="bar")
w2 = h2.write(0, 5.0)
r2 = w2.read(0)
r = r1 + r2
self.assertAllClose(9.0, r.eval())
def testDuplicateTensorArrayHasDifferentName(self):
with self.test_session(use_gpu=self._use_gpu) as session:
h1 = tensor_array_ops.TensorArray(
size=1, dtype=tf.float32, tensor_array_name="foo")
c1 = h1.write(0, 4.0)
h2 = tensor_array_ops.TensorArray(
size=1, dtype=tf.float32, tensor_array_name="foo")
c2 = h2.write(0, 5.0)
_, _, c1h, c2h = session.run([c1.flow, c2.flow, c1.handle, c2.handle])
c1h = [x.decode("ascii") for x in c1h]
c2h = [x.decode("ascii") for x in c2h]
self.assertEqual(c1h[0], "_tensor_arrays")
self.assertEqual(c2h[0], "_tensor_arrays")
self.assertTrue(c1h[1].startswith("foo_"))
self.assertTrue(c2h[1].startswith("foo_"))
self.assertNotEqual(c1h[1], c2h[1])
def _testTensorArrayGradientWriteReadType(self, dtype):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.as_dtype(dtype), tensor_array_name="foo", size=3,
infer_shape=False)
c = lambda x: np.array(x, dtype=dtype)
value_0 = tf.constant(c([[4.0, 5.0]]))
value_1 = tf.constant(c(3.0))
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
r0 = w1.read(0)
r1 = w1.read(1)
r0_2 = w1.read(0)
# Test individual components' gradients
grad_just_r0 = tf.gradients(
ys=[r0], xs=[value_0], grad_ys=[c([[2.0, 3.0]])])
grad_just_r0_vals = session.run(grad_just_r0)
self.assertAllEqual(c([[2.0, 3.0]]), grad_just_r0_vals[0])
grad_r0_r0_2 = tf.gradients(
ys=[r0, r0_2], xs=[value_0],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]])])
grad_r0_r0_2_vals = session.run(grad_r0_r0_2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_r0_r0_2_vals[0])
grad_just_r1 = tf.gradients(
ys=[r1], xs=[value_1], grad_ys=[c(-2.0)])
grad_just_r1_vals = session.run(grad_just_r1)
self.assertAllEqual(c(-2.0), grad_just_r1_vals[0])
# Test combined gradients
grad = tf.gradients(
ys=[r0, r0_2, r1], xs=[value_0, value_1],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]]), c(-2.0)])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_vals[0])
self.assertAllEqual(c(-2.0), grad_vals[1])
def testTensorArrayGradientWriteRead(self):
for dtype in (np.float32, np.float64, np.int32,
np.int64, np.complex64, np.complex128):
self._testTensorArrayGradientWriteReadType(dtype)
def testTensorArrayGradientWritePackConcatAndRead(self):
with self.test_session(use_gpu=self._use_gpu) as sess:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=2,
clear_after_read=False)
value_0 = tf.constant([-1.0, 1.0])
value_1 = tf.constant([-10.0, 10.0])
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
p0 = w1.pack()
r0 = w1.read(0)
s0 = w1.concat()
# Test gradient accumulation between read(0), pack(), and concat()
with tf.control_dependencies([p0, r0, s0]):
grad_r = tf.gradients(
ys=[p0, r0, s0], xs=[value_0, value_1],
grad_ys=[
[[2.0, 3.0], [4.0, 5.0]], # pack gradient
[-0.5, 1.5], # read(0) gradient
[20.0, 30.0, 40.0, 50.0]]) # concat gradient
grad_vals = sess.run(grad_r) # 2 + 2 entries
self.assertAllClose([2.0 - 0.5 + 20.0, 3.0 + 1.5 + 30.0], grad_vals[0])
self.assertAllEqual([4.0 + 40.0, 5.0 + 50.0], grad_vals[1])
def testTensorArrayReadTwice(self):
with self.test_session(use_gpu=self._use_gpu):
value = tf.constant([[1.0, -1.0], [10.0, -10.0]])
ta_readonce = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=2)
w_readonce = ta_readonce.unpack(value)
r0_readonce = w_readonce.read(0)
with tf.control_dependencies([r0_readonce]):
r1_readonce = w_readonce.read(0)
with self.assertRaisesOpError(
r"Could not read index 0 twice because it was cleared after a "
r"previous read \(perhaps try setting clear_after_read = false\?\)"):
r1_readonce.eval()
ta_readtwice = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=2,
clear_after_read=False)
w_readtwice = ta_readtwice.unpack(value)
r0_readtwice = w_readtwice.read(0)
with tf.control_dependencies([r0_readtwice]):
r1_readtwice = w_readtwice.read(0)
self.assertAllEqual([1.0, -1.0], r1_readtwice.eval())
def testTensorArrayGradientUnpackRead(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=2,
clear_after_read=False)
value = tf.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unpack(value)
r0 = w.read(0)
r0_1 = w.read(0)
r1 = w.read(1)
# Test combined gradients + aggregation of read(0)
grad = tf.gradients(
ys=[r0, r0_1, r1], xs=[value],
grad_ys=[[2.0, 3.0], [-1.5, 1.5], [4.0, 5.0]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0 - 1.5, 3.0 + 1.5], [4.0, 5.0]], grad_vals[0])
def testTensorArrayGradientSplitConcat(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=2)
value = tf.constant([[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
w = ta.split(value, [2, 1])
r = w.concat()
# Test combined gradients
grad = tf.gradients(
ys=[r], xs=[value],
grad_ys=[[[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual(
[[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]], grad_vals[0])
def testTensorArrayGradientDynamicUnpackRead(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=0, dynamic_size=True)
value = tf.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unpack(value)
r0 = w.read(0)
r1 = w.read(1)
# Test combined gradients + aggregation of read(0)
grad = tf.gradients(
ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
def testCloseTensorArray(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
c1 = ta.close()
session.run(c1)
def testSizeTensorArray(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
s = ta.size()
self.assertAllEqual(3, s.eval())
def testWriteCloseTensorArray(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [3.0])
w1.close().run() # Expected to run without problems
def _testWhileLoopWritePackGradients(self, dynamic_size, dtype):
np_dtype = dtype.as_numpy_dtype
with self.test_session(use_gpu=self._use_gpu) as session:
v0 = tf.identity(np.arange(3*5, dtype=np_dtype).reshape(3, 5))
var = tf.Variable(np.arange(100, 105, dtype=np_dtype))
state0 = tf.identity(np.array([1] * 5, dtype=np_dtype))
ta = tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name="foo",
size=0 if dynamic_size else 3, dynamic_size=dynamic_size)
time_0 = tf.identity(0)
def body(time, ta_t, state):
sliced = tf.slice(v0, begin=tf.pack([time, 0]), size=[1, -1])
sliced = tf.squeeze(sliced)
out = sliced + var + state
state += sliced
ta_t = ta_t.write(time, out)
return (time+1, ta_t, state)
(unused_0, h_final, unused_2) = tf.while_loop(
cond=lambda time, unused_1, unused_2: time < 3,
body=body,
loop_vars=(time_0, ta, state0),
parallel_iterations=3)
vout = h_final.pack()
grad_val = -np.arange(3*5, dtype=np_dtype).reshape(3, 5)
v0_grad = tf.gradients([vout], [v0], [grad_val])[0]
state0_grad = tf.gradients([vout], [state0], [grad_val])[0]
var_grad = tf.gradients([vout], [var], [grad_val])[0]
tf.initialize_all_variables().run()
state0_t, var_t, v0_t, vout_t, v0_grad_t, var_grad_t, state0_grad_t = (
session.run([state0, var, v0, vout, v0_grad, var_grad, state0_grad]))
just_v0_grad_t, = session.run([v0_grad])
# state = [ state0 | state0 + v0[0] | state0 + v0[0] + v0[1] ]
# vout = [ v0[0] + var + state[0] |
# v0[1] + var + state[1] |
# v0[2] + var + state[2] ]
# = [ v0[0] + var + state0 |
# v0[1] + var + state0 + v0[0] |
# v0[2] + var + state0 + v0[0] + v0[1] ]
#
# d(vout[0])/d(v0) = [1 | 0 | 0 ]
# d(vout[1])/d(v0) = [1 | 1 | 0 ]
# d(vout[2])/d(v0) = [1 | 1 | 1 ]
# d(vout)/d(var) = [1 | 1 | 1]
# d(vout)/d(state0) = [ 1 | 1 | 1 ]
state_per_time = np.array([
state0_t,
state0_t + v0_t[0, :],
state0_t + v0_t[0, :] + v0_t[1, :]])
# Compare forward prop
self.assertAllClose(v0_t + var_t + state_per_time, vout_t)
# Compare backward prop
expected_v0_grad_t = np.array([
grad_val[0, :] + grad_val[1, :] + grad_val[2, :],
grad_val[1, :] + grad_val[2, :],
grad_val[2, :]])
self.assertAllEqual(expected_v0_grad_t, v0_grad_t)
self.assertAllEqual(expected_v0_grad_t, just_v0_grad_t)
self.assertAllClose(grad_val.sum(axis=0), var_grad_t)
self.assertAllClose(grad_val.sum(axis=0), state0_grad_t)
def testWhileLoopWritePackGradients(self):
self._testWhileLoopWritePackGradients(
dynamic_size=False, dtype=tf.float32)
# TODO(ebrevdo): re-enable when While supports non-float32 gradients.
# self._testWhileLoopWritePackGradients(
# dynamic_size=False, dtype=tf.int64)
def testWhileLoopDynamicWritePackGradients(self):
self._testWhileLoopWritePackGradients(
dynamic_size=True, dtype=tf.float32)
def testSumOfTwoReadVariablesWithoutRepeatGrad(self):
with self.test_session(use_gpu=self._use_gpu) as session:
a = tf.identity(np.arange(3*5, dtype=np.float32).reshape(3, 5) + 1)
b = tf.identity(np.arange(3*5, dtype=np.float32).reshape(3, 5) + 1 + 3*5)
ta = tensor_array_ops.TensorArray(dtype=tf.float32, size=2)
ta = ta.write(0, a, name="write_a")
ta = ta.write(1, b, name="write_b")
c = (ta.read(0, name="read_a_0") + # a + b
ta.read(1, name="read_b_0"))
g0 = -(np.arange(3*5, dtype=np.float32).reshape(3, 5) + 1)
grad_a = tf.gradients([c], [a], [g0])[0] # d(a+b)/da = 1
grad_b = tf.gradients([c], [b], [g0])[0] # d(a+b)/db = 1
# Test gradients calculated individually
grad_a_t, = session.run([grad_a])
self.assertAllEqual(grad_a_t, g0)
grad_b_t, = session.run([grad_b])
self.assertAllEqual(grad_b_t, g0)
# Test gradients calculated jointly
joint_grad_a_t, joint_grad_b_t = session.run([grad_a, grad_b])
self.assertAllEqual(joint_grad_a_t, g0)
self.assertAllEqual(joint_grad_b_t, g0)
def _grad_source_for_name(self, name):
return tensor_array_grad._GetGradSource(tf.constant(0, name=name))
def testGetGradSource_Invalid(self):
with self.assertRaises(ValueError):
self._grad_source_for_name("")
with self.assertRaises(ValueError):
self._grad_source_for_name("foo")
with self.assertRaises(ValueError):
self._grad_source_for_name("foo/bar")
def testGetGradSource_NoEnclosingScope(self):
self.assertEqual("gradients:0", self._grad_source_for_name("gradients"))
self.assertEqual("gradients_0:0", self._grad_source_for_name("gradients_0"))
self.assertEqual("gradients", self._grad_source_for_name("gradients/foo"))
self.assertEqual(
"gradients_0", self._grad_source_for_name("gradients_0/foo"))
self.assertEqual(
"gradients", self._grad_source_for_name("gradients/foo/bar"))
self.assertEqual(
"gradients_0", self._grad_source_for_name("gradients_0/foo/bar"))
def testGetGradSource_EnclosingScope(self):
self.assertEqual(
"foo/gradients:0", self._grad_source_for_name("foo/gradients"))
self.assertEqual(
"foo/gradients_0:0", self._grad_source_for_name("foo/gradients_0"))
self.assertEqual(
"foo/gradients", self._grad_source_for_name("foo/gradients/bar"))
self.assertEqual(
"foo/gradients_0", self._grad_source_for_name("foo/gradients_0/bar"))
self.assertEqual(
"foo/bar/gradients",
self._grad_source_for_name("foo/bar/gradients/baz"))
self.assertEqual(
"foo/bar/gradients_0",
self._grad_source_for_name("foo/bar/gradients_0/baz"))
def testGetGradSource_NestedUsesInnermost(self):
self.assertEqual(
"foo/gradients/bar/gradients_0",
self._grad_source_for_name("foo/gradients/bar/gradients_0/baz"))
def testWriteShape(self):
with self.test_session():
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
c0 = tf.constant([4.0, 5.0])
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
c1 = tf.constant([6.0, 7.0])
w1 = w0.write(1, c1)
r0 = w1.read(0)
r1 = w1.read(1)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
self.assertAllEqual(c1.get_shape(), r1.get_shape())
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
c2 = tf.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w0.write(0, c2)
def testUnpackShape(self):
with self.test_session():
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo",
size=0, dynamic_size=True, infer_shape=True)
value = tf.constant([[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
w0 = ta.unpack(value)
r0 = w0.read(0)
self.assertAllEqual((2,), r0.get_shape())
c1 = tf.constant([4.0, 5.0])
w1 = w0.write(3, c1)
r1 = w1.read(0)
self.assertAllEqual(c1.get_shape(), r1.get_shape())
c2 = tf.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w1.write(4, c2)
def testSplitShape(self):
with self.test_session():
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo",
size=0, dynamic_size=True, infer_shape=True)
value = tf.constant([[1.0, -1.0], [2.0, -2.0], [3.0, -3.0]])
w0 = ta.split(value, [1, 1, 1])
r0 = w0.read(0)
self.assertAllEqual((1, 2), r0.get_shape())
ta1 = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo1",
size=0, dynamic_size=True, infer_shape=True)
w0 = ta1.split(value, [1, 2])
r0 = w0.read(0)
self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
def testWriteUnknownShape(self):
with self.test_session():
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=True)
c0 = tf.placeholder(tf.float32)
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
def testGradientWhenNotAllComponentsRead(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(dtype=tf.float32, size=2)
x = tf.constant([2.0, 3.0])
w = ta.unpack(x)
r0 = w.read(0)
# calculate (dr0/dx0, dr0/dx1). since r0 = x0, gradients are (1, 0).
grad_r0 = tf.gradients(ys=[r0], xs=[x], grad_ys=[1.0])
grad_r0_vals = session.run(grad_r0)[0]
self.assertAllEqual(grad_r0_vals, [1.0, 0.0])
def testTensorArrayUnpackDynamic(self):
with self.test_session(use_gpu=self._use_gpu) as sess:
ta = tensor_array_ops.TensorArray(dtype=tf.float32, size=3,
dynamic_size=True)
x = tf.constant([1.0, 2.0, 3.0])
w0 = ta.unpack(x)
w1 = w0.write(3, 4.0)
r = w1.pack()
self.assertAllEqual(np.array([1.0, 2.0, 3.0, 4.0]), r.eval())
grad = tf.gradients(ys=[r], xs=[x])
self.assertAllEqual(np.array([1.0, 1.0, 1.0]),
sess.run(grad)[0])
def testTensorArraySplitDynamic(self):
with self.test_session(use_gpu=self._use_gpu) as sess:
ta = tensor_array_ops.TensorArray(dtype=tf.float32, size=3,
dynamic_size=True)
x = tf.constant([1.0, 2.0, 3.0])
w0 = ta.split(x, [1, 1, 1])
w1 = w0.write(3, [4.0])
r = w1.concat()
self.assertAllEqual(np.array([1.0, 2.0, 3.0, 4.0]), r.eval())
grad = tf.gradients(ys=[r], xs=[x])
self.assertAllEqual(np.array([1.0, 1.0, 1.0]),
sess.run(grad)[0])
def testTensorArrayEvalEmpty(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(dtype=tf.float32,
size=0,
dynamic_size=False,
infer_shape=False)
with self.assertRaisesOpError(
"TensorArray has size zero, but element shape <unknown> is not fully "
"defined. Currently only static shapes are supported when packing "
"zero-size TensorArrays."):
ta.pack().eval()
def testTensorArrayEvalEmptyWithDefault(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(dtype=tf.float32,
size=0,
dynamic_size=False,
infer_shape=True)
self.assertEqual(0, ta.size().eval())
ta.unpack(tf.zeros([0, 3, 5]))
self.assertAllEqual([0, 3, 5], ta.pack().eval().shape)
# Concatenating zero tensors along their first dimension gives a
# first dimension of zero
self.assertAllEqual([0, 5], ta.concat().eval().shape)
class TensorArrayGPUTest(TensorArrayCPUTest):
_use_gpu = True
if __name__ == "__main__":
tf.test.main()
| mit |
DTse/cm_kernel_lge_d620 | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
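#
# Example invocation (a sketch; assumes the raw_syscalls tracepoints are
# available on the running kernel):
#   perf record -e raw_syscalls:sys_exit -a -- sleep 5
#   perf script -s failed-syscalls-by-pid.py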
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
patsissons/Flexget | tests/test_manipulate.py | 22 | 1990 | from __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase
class TestManipulate(FlexGetBase):
__yaml__ = """
tasks:
test_1:
mock:
- {title: 'abc FOO'}
manipulate:
- title:
replace:
regexp: FOO
format: BAR
test_2:
mock:
- {title: '1234 abc'}
manipulate:
- title:
extract: \d+\s*(.*)
test_multiple_edits:
mock:
- {title: 'abc def'}
manipulate:
- title:
replace:
regexp: abc
format: "123"
- title:
extract: \d+\s+(.*)
test_phase:
mock:
- {title: '1234 abc'}
manipulate:
- title:
phase: metainfo
extract: \d+\s*(.*)
test_remove:
mock:
- {title: 'abc', description: 'def'}
manipulate:
- description: { remove: yes }
"""
def test_replace(self):
self.execute_task('test_1')
assert self.task.find_entry('entries', title='abc BAR'), 'replace failed'
def test_extract(self):
self.execute_task('test_2')
assert self.task.find_entry('entries', title='abc'), 'extract failed'
def test_multiple_edits(self):
self.execute_task('test_multiple_edits')
assert self.task.find_entry('entries', title='def'), 'multiple edits on 1 field failed'
def test_phase(self):
self.execute_task('test_phase')
assert self.task.find_entry('entries', title='abc'), 'extract failed at metainfo phase'
def test_remove(self):
self.execute_task('test_remove')
assert 'description' not in self.task.find_entry('entries', title='abc'), 'remove failed'
| mit |
rigdenlab/ample | ample/testing/run_tests.py | 2 | 1720 | #!/usr/bin/env ccp4-python
import argparse
from ample.testing import integration_util, unittest_util
from ample.util.argparse_util import add_core_options, add_cluster_submit_options
__author__ = "Felix Simkovic"
__date__ = "25-Mar-2016"
def run_integration(argd):
m = integration_util.AMPLEIntegrationFramework(test_cases=argd['test_cases'], run_dir=argd['run_dir'])
if argd['clean']:
m.clean() if argd['test_cases'] else m.clean(clean_dir=True)
else:
m.run(**argd)
def run_unittest(argd):
m = unittest_util.AMPLEUnittestFramework()
m.run(buffer=argd['buffer'], cases=argd['test_cases'], verbosity=argd['verbosity'])
def main():
desc = """ccp4-python -m ample.testing <command> [<args>]
Available tests include:
integration Integration testing of typical Ample routines
unittest Unittesting of all Ample subroutines
"""
parser = argparse.ArgumentParser(prog="run_tests.py", usage=desc)
# Add options that work on all runtypes
add_core_options(parser)
suboptions = parser.add_subparsers(help="Testing framework options")
integ = suboptions.add_parser("integration", help="Integration testing with examples")
integ.set_defaults(which="integration")
integration_util.add_cmd_options(integ)
add_cluster_submit_options(integ)
unit = suboptions.add_parser("unittest", help="Unittest all functions")
unit.set_defaults(which="unittest")
unittest_util.add_cmd_options(unit)
argd = vars(parser.parse_args())
which_test = argd['which']
if which_test == 'integration':
run_integration(argd)
elif which_test == 'unittest':
run_unittest(argd)
if __name__ == "__main__":
main()
| bsd-3-clause |
christabor/MoAL | MOAL/data_structures/graphs/multigraph.py | 1 | 3056 | # -*- coding: utf-8 -*-
__author__ = """Chris Tabor (dxdstudio@gmail.com)"""
if __name__ == '__main__':
from os import getcwd
from os import sys
sys.path.append(getcwd())
from MOAL.helpers.display import Section
from MOAL.helpers.display import print_h3
from MOAL.helpers import datamaker as dmkr
from MOAL.data_structures.graphs.graphs import Graph
import pygraphviz as pgv
DEBUG = True if __name__ == '__main__' else False
class MultiGraph(Graph):
def render_graph(self, filename, **kwargs):
"""Override the rendered, allowing color specification for
loops vs. normal edges"""
g = pgv.AGraph(**kwargs)
for name, data in self.vertices.iteritems():
g.add_node(name)
curr = g.get_node(name)
curr.attr['label'] = data['val'] if data['val'] else name
for edge in data['edges']:
if edge == name:
g.add_edge(str(edge), str(name), color='red')
else:
g.add_edge(str(edge), str(name), color='blue')
g.layout()
g.draw(filename)
class LooplessMultiGraph(MultiGraph):
"""From Wikipedia:
"Not all authors allow multigraphs to have loops."
"""
def __init__(self, vertices={}):
for vertex, data in vertices.iteritems():
if vertex in data['edges']:
raise ValueError(
'Loop "{}" is not allowed on vertices.'.format(vertex))
return super(LooplessMultiGraph, self).__init__(vertices=vertices)
def __setitem__(self, *args):
key, vertices = args
if key in vertices['edges']:
raise ValueError('Loop {} is not allowed on vertices.'.format(key))
return super(LooplessMultiGraph, self).__setitem__(*args)
if DEBUG:
with Section('Multi-graph'):
mgraph_rand = MultiGraph(dmkr.random_graph(max_edges=5))
print_h3('Random multi-graph')
print(mgraph_rand)
# Multiple edges pointing to each other
mgraph = MultiGraph({
0: {'edges': [1, 2, 3], 'val': 'A'},
1: {'edges': [0, 3, 2, 1], 'val': 'B'},
2: {'edges': [0, 1, 3, 2], 'val': 'C'},
3: {'edges': [0, 1, 2, 3], 'val': 'D'},
})
print_h3('Specific multi-graph')
print(mgraph)
mgraph.render_graph('mgraph.png', strict=False)
print_h3('Specific loopless multi-graph')
try:
lmgraph = LooplessMultiGraph({
0: {'edges': [1, 2, 3], 'val': 'A'},
1: {'edges': [0, 3, 2, 1], 'val': 'B'},
2: {'edges': [0, 1, 3, 2], 'val': 'C'},
3: {'edges': [0, 1, 2, 3], 'val': 'D'},
})
except ValueError:
lmgraph = LooplessMultiGraph({
0: {'edges': [1, 2, 3], 'val': 'A'},
1: {'edges': [0, 3, 2], 'val': 'B'},
2: {'edges': [0, 1, 3], 'val': 'C'},
3: {'edges': [0, 1, 2], 'val': 'D'},
})
print(lmgraph)
| apache-2.0 |
waseem18/bedrock | vendor-local/packages/PyYAML/tests/lib/test_constructor.py | 55 | 9214 |
import yaml
import pprint
import datetime
try:
set
except NameError:
from sets import Set as set
import yaml.tokens
def execute(code):
# Under Python 2, `exec` runs in this function's local namespace, so the
# executed snippet is expected to bind a local name `value`, returned here.
exec code
return value
def _make_objects():
global MyLoader, MyDumper, MyTestClass1, MyTestClass2, MyTestClass3, YAMLObject1, YAMLObject2, \
AnObject, AnInstance, AState, ACustomState, InitArgs, InitArgsWithState, \
NewArgs, NewArgsWithState, Reduce, ReduceWithState, MyInt, MyList, MyDict, \
FixedOffset, today, execute
class MyLoader(yaml.Loader):
pass
class MyDumper(yaml.Dumper):
pass
class MyTestClass1:
def __init__(self, x, y=0, z=0):
self.x = x
self.y = y
self.z = z
def __eq__(self, other):
if isinstance(other, MyTestClass1):
return (self.__class__, self.__dict__) == (other.__class__, other.__dict__)
else:
return False
def construct1(constructor, node):
mapping = constructor.construct_mapping(node)
return MyTestClass1(**mapping)
def represent1(representer, native):
return representer.represent_mapping("!tag1", native.__dict__)
yaml.add_constructor("!tag1", construct1, Loader=MyLoader)
yaml.add_representer(MyTestClass1, represent1, Dumper=MyDumper)
class MyTestClass2(MyTestClass1, yaml.YAMLObject):
yaml_loader = MyLoader
yaml_dumper = MyDumper
yaml_tag = "!tag2"
def from_yaml(cls, constructor, node):
x = constructor.construct_yaml_int(node)
return cls(x=x)
from_yaml = classmethod(from_yaml)
def to_yaml(cls, representer, native):
return representer.represent_scalar(cls.yaml_tag, str(native.x))
to_yaml = classmethod(to_yaml)
class MyTestClass3(MyTestClass2):
yaml_tag = "!tag3"
def from_yaml(cls, constructor, node):
mapping = constructor.construct_mapping(node)
if '=' in mapping:
x = mapping['=']
del mapping['=']
mapping['x'] = x
return cls(**mapping)
from_yaml = classmethod(from_yaml)
def to_yaml(cls, representer, native):
return representer.represent_mapping(cls.yaml_tag, native.__dict__)
to_yaml = classmethod(to_yaml)
class YAMLObject1(yaml.YAMLObject):
yaml_loader = MyLoader
yaml_dumper = MyDumper
yaml_tag = '!foo'
def __init__(self, my_parameter=None, my_another_parameter=None):
self.my_parameter = my_parameter
self.my_another_parameter = my_another_parameter
def __eq__(self, other):
if isinstance(other, YAMLObject1):
return (self.__class__, self.__dict__) == (other.__class__, other.__dict__)
else:
return False
class YAMLObject2(yaml.YAMLObject):
yaml_loader = MyLoader
yaml_dumper = MyDumper
yaml_tag = '!bar'
def __init__(self, foo=1, bar=2, baz=3):
self.foo = foo
self.bar = bar
self.baz = baz
def __getstate__(self):
return {1: self.foo, 2: self.bar, 3: self.baz}
def __setstate__(self, state):
self.foo = state[1]
self.bar = state[2]
self.baz = state[3]
def __eq__(self, other):
if isinstance(other, YAMLObject2):
return (self.__class__, self.__dict__) == (other.__class__, other.__dict__)
else:
return False
class AnObject(object):
def __new__(cls, foo=None, bar=None, baz=None):
self = object.__new__(cls)
self.foo = foo
self.bar = bar
self.baz = baz
return self
def __cmp__(self, other):
return cmp((type(self), self.foo, self.bar, self.baz),
(type(other), other.foo, other.bar, other.baz))
def __eq__(self, other):
return type(self) is type(other) and \
(self.foo, self.bar, self.baz) == (other.foo, other.bar, other.baz)
class AnInstance:
def __init__(self, foo=None, bar=None, baz=None):
self.foo = foo
self.bar = bar
self.baz = baz
def __cmp__(self, other):
return cmp((type(self), self.foo, self.bar, self.baz),
(type(other), other.foo, other.bar, other.baz))
def __eq__(self, other):
return type(self) is type(other) and \
(self.foo, self.bar, self.baz) == (other.foo, other.bar, other.baz)
class AState(AnInstance):
def __getstate__(self):
return {
'_foo': self.foo,
'_bar': self.bar,
'_baz': self.baz,
}
def __setstate__(self, state):
self.foo = state['_foo']
self.bar = state['_bar']
self.baz = state['_baz']
class ACustomState(AnInstance):
def __getstate__(self):
return (self.foo, self.bar, self.baz)
def __setstate__(self, state):
self.foo, self.bar, self.baz = state
class InitArgs(AnInstance):
def __getinitargs__(self):
return (self.foo, self.bar, self.baz)
def __getstate__(self):
return {}
class InitArgsWithState(AnInstance):
def __getinitargs__(self):
return (self.foo, self.bar)
def __getstate__(self):
return self.baz
def __setstate__(self, state):
self.baz = state
class NewArgs(AnObject):
def __getnewargs__(self):
return (self.foo, self.bar, self.baz)
def __getstate__(self):
return {}
class NewArgsWithState(AnObject):
def __getnewargs__(self):
return (self.foo, self.bar)
def __getstate__(self):
return self.baz
def __setstate__(self, state):
self.baz = state
class Reduce(AnObject):
def __reduce__(self):
return self.__class__, (self.foo, self.bar, self.baz)
class ReduceWithState(AnObject):
def __reduce__(self):
return self.__class__, (self.foo, self.bar), self.baz
def __setstate__(self, state):
self.baz = state
class MyInt(int):
def __eq__(self, other):
return type(self) is type(other) and int(self) == int(other)
class MyList(list):
def __init__(self, n=1):
self.extend([None]*n)
def __eq__(self, other):
return type(self) is type(other) and list(self) == list(other)
class MyDict(dict):
def __init__(self, n=1):
for k in range(n):
self[k] = None
def __eq__(self, other):
return type(self) is type(other) and dict(self) == dict(other)
class FixedOffset(datetime.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return datetime.timedelta(0)
today = datetime.date.today()
def _load_code(expression):
return eval(expression)
def _serialize_value(data):
if isinstance(data, list):
return '[%s]' % ', '.join(map(_serialize_value, data))
elif isinstance(data, dict):
items = []
for key, value in data.items():
key = _serialize_value(key)
value = _serialize_value(value)
items.append("%s: %s" % (key, value))
items.sort()
return '{%s}' % ', '.join(items)
elif isinstance(data, datetime.datetime):
return repr(data.utctimetuple())
elif isinstance(data, unicode):
return data.encode('utf-8')
elif isinstance(data, float) and data != data:
return '?'
else:
return str(data)
def test_constructor_types(data_filename, code_filename, verbose=False):
_make_objects()
native1 = None
native2 = None
try:
native1 = list(yaml.load_all(open(data_filename, 'rb'), Loader=MyLoader))
if len(native1) == 1:
native1 = native1[0]
native2 = _load_code(open(code_filename, 'rb').read())
try:
if native1 == native2:
return
except TypeError:
pass
if verbose:
print "SERIALIZED NATIVE1:"
print _serialize_value(native1)
print "SERIALIZED NATIVE2:"
print _serialize_value(native2)
assert _serialize_value(native1) == _serialize_value(native2), (native1, native2)
finally:
if verbose:
print "NATIVE1:"
pprint.pprint(native1)
print "NATIVE2:"
pprint.pprint(native2)
test_constructor_types.unittest = ['.data', '.code']
if __name__ == '__main__':
import sys, test_constructor
sys.modules['test_constructor'] = sys.modules['__main__']
import test_appliance
test_appliance.run(globals())
| mpl-2.0 |
mylons/incubator-airflow | airflow/hooks/sqlite_hook.py | 62 | 1026 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sqlite3
from airflow.hooks.dbapi_hook import DbApiHook
class SqliteHook(DbApiHook):
"""
Interact with SQLite.
"""
conn_name_attr = 'sqlite_conn_id'
default_conn_name = 'sqlite_default'
supports_autocommit = False
def get_conn(self):
"""
Returns a sqlite connection object
"""
conn = self.get_connection(self.sqlite_conn_id)
conn = sqlite3.connect(conn.host)
return conn
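# Example usage (a sketch; assumes an Airflow connection named 'sqlite_default'
# whose host field holds the path to a SQLite database file):
#   hook = SqliteHook()
#   tables = hook.get_records("SELECT name FROM sqlite_master WHERE type='table'")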
| apache-2.0 |
hujiajie/pa-chromium | tools/sharding_supervisor/sharding_supervisor.py | 9 | 2601 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defer to run_test_cases.py."""
import os
import optparse
import sys
ROOT_DIR = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
def pop_known_arguments(args):
"""Extracts known arguments from the args if present."""
rest = []
run_test_cases_extra_args = []
for arg in args:
if arg.startswith(('--gtest_filter=', '--gtest_output=')):
run_test_cases_extra_args.append(arg)
elif arg == '--run-manual':
run_test_cases_extra_args.append(arg)
elif arg == '--gtest_print_time':
# Ignore.
pass
elif 'interactive_ui_tests' in arg:
# Run this test in a single thread. It is useful to run it under
# run_test_cases so automatic flaky test workaround is still used.
run_test_cases_extra_args.append('-j1')
rest.append(arg)
elif 'browser_tests' in arg:
# Test cases in this executable fire up *a lot* of child processes,
# causing huge memory bottleneck. So use less than N-cpus jobs.
run_test_cases_extra_args.append('--use-less-jobs')
rest.append(arg)
else:
rest.append(arg)
return run_test_cases_extra_args, rest
def main():
parser = optparse.OptionParser()
group = optparse.OptionGroup(
parser, 'Compability flag with the old sharding_supervisor')
group.add_option(
'--no-color', action='store_true', help='Ignored')
group.add_option(
'--retry-failed', action='store_true', help='Ignored')
group.add_option(
'-t', '--timeout', type='int', help='Kept as --timeout')
group.add_option(
'--total-slaves', type='int', default=1, help='Converted to --index')
group.add_option(
'--slave-index', type='int', default=0, help='Converted to --shards')
parser.add_option_group(group)
parser.disable_interspersed_args()
options, args = parser.parse_args()
swarm_client_dir = os.path.join(ROOT_DIR, 'tools', 'swarm_client')
sys.path.insert(0, swarm_client_dir)
cmd = [
'--shards', str(options.total_slaves),
'--index', str(options.slave_index),
'--no-dump',
'--no-cr',
]
if options.timeout is not None:
cmd.extend(['--timeout', str(options.timeout)])
run_test_cases_extra_args, rest = pop_known_arguments(args)
import run_test_cases # pylint: disable=F0401
return run_test_cases.main(cmd + run_test_cases_extra_args + ['--'] + rest)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
mbrochh/django-filer | filer/models/imagemodels.py | 8 | 7627 | #-*- coding: utf-8 -*-
import logging
try:
from PIL import Image as PILImage
except ImportError:
try:
import Image as PILImage
except ImportError:
raise ImportError("The Python Imaging Library was not found.")
from datetime import datetime
from django.core import urlresolvers
from django.db import models
from django.utils.translation import ugettext_lazy as _
from filer import settings as filer_settings
from filer.models.filemodels import File
from filer.utils.filer_easy_thumbnails import FilerThumbnailer
from filer.utils.pil_exif import get_exif_for_file
import os
logger = logging.getLogger("filer")
class Image(File):
SIDEBAR_IMAGE_WIDTH = 210
DEFAULT_THUMBNAILS = {
'admin_clipboard_icon': {'size': (32, 32), 'crop': True,
'upscale': True},
'admin_sidebar_preview': {'size': (SIDEBAR_IMAGE_WIDTH, 10000)},
'admin_directory_listing_icon': {'size': (48, 48),
'crop': True, 'upscale': True},
'admin_tiny_icon': {'size': (32, 32), 'crop': True, 'upscale': True},
}
file_type = 'Image'
_icon = "image"
_height = models.IntegerField(null=True, blank=True)
_width = models.IntegerField(null=True, blank=True)
date_taken = models.DateTimeField(_('date taken'), null=True, blank=True,
editable=False)
default_alt_text = models.CharField(_('default alt text'), max_length=255, blank=True, null=True)
default_caption = models.CharField(_('default caption'), max_length=255, blank=True, null=True)
author = models.CharField(_('author'), max_length=255, null=True, blank=True)
must_always_publish_author_credit = models.BooleanField(_('must always publish author credit'), default=False)
must_always_publish_copyright = models.BooleanField(_('must always publish copyright'), default=False)
subject_location = models.CharField(_('subject location'), max_length=64, null=True, blank=True,
default=None)
@classmethod
def matches_file_type(cls, iname, ifile, request):
# This was originally in admin/clipboardadmin.py it was inside of a try
# except, I have moved it here outside of a try except because I can't
# figure out just what kind of exception this could generate... all it was
# doing for me was obscuring errors...
# --Dave Butler <croepha@gmail.com>
iext = os.path.splitext(iname)[1].lower()
return iext in ['.jpg', '.jpeg', '.png', '.gif']
def save(self, *args, **kwargs):
if self.date_taken is None:
try:
exif_date = self.exif.get('DateTimeOriginal', None)
if exif_date is not None:
d, t = str.split(exif_date.values)
year, month, day = d.split(':')
hour, minute, second = t.split(':')
self.date_taken = datetime(
int(year), int(month), int(day),
int(hour), int(minute), int(second))
except:
pass
if self.date_taken is None:
self.date_taken = datetime.now()
self.has_all_mandatory_data = self._check_validity()
try:
# do this more efficient somehow?
self.file.seek(0)
self._width, self._height = PILImage.open(self.file).size
except Exception:
# probably the image is missing. nevermind.
pass
super(Image, self).save(*args, **kwargs)
def _check_validity(self):
if not self.name:
return False
return True
def sidebar_image_ratio(self):
if self.width:
return float(self.width) / float(self.SIDEBAR_IMAGE_WIDTH)
else:
return 1.0
def _get_exif(self):
if hasattr(self, '_exif_cache'):
return self._exif_cache
else:
if self.file:
self._exif_cache = get_exif_for_file(self.file.path)
else:
self._exif_cache = {}
return self._exif_cache
exif = property(_get_exif)
def has_edit_permission(self, request):
return self.has_generic_permission(request, 'edit')
def has_read_permission(self, request):
return self.has_generic_permission(request, 'read')
def has_add_children_permission(self, request):
return self.has_generic_permission(request, 'add_children')
def has_generic_permission(self, request, permission_type):
"""
Return true if the current user has permission on this
image. Return the string 'ALL' if the user has all rights.
"""
user = request.user
if not user.is_authenticated() or not user.is_staff:
return False
elif user.is_superuser:
return True
elif user == self.owner:
return True
elif self.folder:
return self.folder.has_generic_permission(request, permission_type)
else:
return False
@property
def label(self):
if self.name in ['', None]:
return self.original_filename or 'unnamed file'
else:
return self.name
@property
def width(self):
return self._width or 0
@property
def height(self):
return self._height or 0
@property
def icons(self):
_icons = {}
for size in filer_settings.FILER_ADMIN_ICON_SIZES:
try:
thumbnail_options = {
'size': (int(size), int(size)),
'crop': True,
'upscale': True,
'subject_location': self.subject_location}
thumb = self.file.get_thumbnail(thumbnail_options)
_icons[size] = thumb.url
except Exception, e:
# catch exception and manage it. We can re-raise it for debugging
# purposes and/or just logging it, provided user configured
# proper logging configuration
if filer_settings.FILER_ENABLE_LOGGING:
logger.error('Error while generating icons: %s',e)
if filer_settings.FILER_DEBUG:
raise e
return _icons
@property
def thumbnails(self):
_thumbnails = {}
for name, opts in Image.DEFAULT_THUMBNAILS.items():
try:
opts.update({'subject_location': self.subject_location})
thumb = self.file.get_thumbnail(opts)
_thumbnails[name] = thumb.url
except Exception,e:
# catch exception and manage it. We can re-raise it for debugging
# purposes and/or just logging it, provided user configured
# proper logging configuration
if filer_settings.FILER_ENABLE_LOGGING:
logger.error('Error while generating thumbnail: %s',e)
if filer_settings.FILER_DEBUG:
raise e
return _thumbnails
@property
def easy_thumbnails_thumbnailer(self):
tn = FilerThumbnailer(
file=self.file.file, name=self.file.name,
source_storage=self.file.source_storage,
thumbnail_storage=self.file.thumbnail_storage,
thumbnail_basedir=self.file.thumbnail_basedir)
return tn
class Meta:
app_label = 'filer'
verbose_name = _('image')
verbose_name_plural = _('images')
| bsd-3-clause |
homyPi/homyPi_raspberryApp | src/python/rabbitConnection.py | 2 | 6760 | import logging
import threading
import json
import inspect
import sys
import traceback
import rabbitpy
import time
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
'-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
logging.basicConfig(filename='module.log',level=logging.INFO, format=LOG_FORMAT)
'''
class RabbitConnection(threading.Thread):
handlers = []
def __init__(self, EXCHANGE, ROUTING_KEY, QUEUE = "", exchange_type = "direct"):
threading.Thread.__init__(self)
self.onConnectedCallback = None
self.running = False
self.EXCHANGE = EXCHANGE
self.QUEUE = QUEUE
self.ROUTING_KEY = ROUTING_KEY
self.connection = rabbitpy.Connection()
# Open the channel, declare and bind the exchange and queue
with self.connection.channel() as channel:
# Declare the exchange
exchange = rabbitpy.Exchange(channel, self.EXCHANGE, exchange_type = exchange_type)
exchange.declare()
# Declare the queue
queue = rabbitpy.Queue(channel, self.QUEUE, auto_delete = False)
queue.declare()
# Bind the queue to the exchange
queue.bind(self.EXCHANGE, self.ROUTING_KEY)
if self.QUEUE == "":
self.QUEUE = str(queue.name)
def consumer(self):
"""Consume
:param rabbitpy.Connection connection: The connection to consume on
"""
try:
print("start consuming")
print("waiting for messages")
self.queueConsuming = rabbitpy.Queue(self.connection.channel(), self.QUEUE, auto_delete = True)
for message in self.queueConsuming.consume():
self.handleMessage(message.body)
message.ack()
except KeyboardInterrupt:
print 'Exited consumer'
def emit(self, message, data = None, type = None):
with self.connection.channel() as channel:
LOGGER.info(message+": "+str(data)+ " to "+str(self.EXCHANGE)+":"+str(self.ROUTING_KEY)+ " type="+str(type))
if data is None:
if type is not None:
body = json.dumps({"message": message, "type": type})
else:
body = json.dumps({"message": message})
else:
if type is not None:
body = json.dumps({"message": message,"data": data, "type": type})
else:
body = json.dumps({"message": message,"data": data})
message = rabbitpy.Message(channel, body)
message.publish(self.EXCHANGE, self.ROUTING_KEY)
def handleMessage(self, body):
try:
data = json.loads(str(body))
if "message" in data:
LOGGER.info("got message: "+str(data['message']))
LOGGER.info("looking in " + str(len(self.handlers)) + " handlers")
for handler in self.handlers:
LOGGER.info(str(data['message'])+" == "+str(handler[0]))
if data['message'] == handler[0]:
LOGGER.info("found "+str(handler[1]))
args = inspect.getargspec(handler[1]).args
if 'self' in args:
nbArg = len(args) - 1
else:
nbArg = len(args)
LOGGER.info("function takes " + str(nbArg) + " args");
if nbArg == 0:
try:
handler[1]()
except:
print "Unexpected error:", sys.exc_info()[0]
print sys.exc_info()[1]
print traceback.format_exc()
else:
try:
if "data" in data:
LOGGER.info(data)
handler[1](data["data"])
else:
handler[1](None)
except:
print "Unexpected error:", sys.exc_info()[0]
print sys.exc_info()[1]
LOGGER.error(traceback.format_exc())
break;
except ValueError:
LOGGER.warn("unable to load " + str(body))
def addHandler(self, message, callback):
LOGGER.info("adding handler for " + message)
self.handlers.append([message, callback])
def connected(self):
LOGGER.info("Sending connected callbacks")
if self.onConnectedCallback is not None:
self.onConnectedCallback()
def onConnected(self, callback):
self.onConnectedCallback = callback
def run(self):
print("run")
self.running = True
self.connected()
self.consumer()
def stop(self):
print("stop")
self.running = False
self.queueConsuming.stop_consuming()
class ServerRequester(RabbitConnection):
def __init__(self, routing_key):
RabbitConnection.__init__(self, "serverRequest", routing_key, exchange_type = "topic")
self.start()
def emit(self, message, data = None, type = None):
with self.connection.channel() as channel:
LOGGER.debug(message+": "+str(data)+ " to "+str(self.EXCHANGE)+":"+str(self.ROUTING_KEY)+ " type="+str(type))
if data is None:
if type is not None:
body = json.dumps({"message": message, "type": type})
else:
body = json.dumps({"message": message})
else:
if type is not None:
body = json.dumps({"message": message,"data": data, "type": type})
else:
body = json.dumps({"message": message,"data": data})
message = rabbitpy.Message(channel, body, properties={"reply_to": str(self.queueConsuming.name)});
message.publish(self.EXCHANGE, self.ROUTING_KEY)
if __name__ == "__main__":
try:
rabbitConnection = RabbitConnection("player", "player")
print("starting consumer")
rabbitConnection.start()
server = ServerRequester("serverRequest.player")
while True:
time.sleep(0.5)
except KeyboardInterrupt:
server.stop()
server.join()
rabbitConnection.stop()
rabbitConnection.join()
sys.exit(2)
''' | mit |
GoSteven/Diary | django/contrib/auth/tokens.py | 12 | 3288 | from datetime import date
from django.conf import settings
from django.utils.hashcompat import sha_constructor
from django.utils.http import int_to_base36, base36_to_int
from django.utils.crypto import constant_time_compare, salted_hmac
class PasswordResetTokenGenerator(object):
"""
Strategy object used to generate and check tokens for the password
reset mechanism.
"""
def make_token(self, user):
"""
Returns a token that can be used once to do a password reset
for the given user.
"""
return self._make_token_with_timestamp(user, self._num_days(self._today()))
def check_token(self, user, token):
"""
Check that a password reset token is correct for a given user.
"""
# Parse the token
try:
ts_b36, hash = token.split("-")
except ValueError:
return False
try:
ts = base36_to_int(ts_b36)
except ValueError:
return False
# Check that the timestamp/uid has not been tampered with
if not constant_time_compare(self._make_token_with_timestamp(user, ts), token):
# Fallback to Django 1.2 method for compatibility.
# PendingDeprecationWarning <- here to remind us to remove this in
# Django 1.5
if not constant_time_compare(self._make_token_with_timestamp_old(user, ts), token):
return False
# Check the timestamp is within limit
if (self._num_days(self._today()) - ts) > settings.PASSWORD_RESET_TIMEOUT_DAYS:
return False
return True
def _make_token_with_timestamp(self, user, timestamp):
# timestamp is number of days since 2001-1-1. Converted to
# base 36, this gives us a 3 digit string until about 2121
ts_b36 = int_to_base36(timestamp)
# By hashing on the internal state of the user and using state
# that is sure to change (the password salt will change as soon as
# the password is set, at least for current Django auth, and
# last_login will also change), we produce a hash that will be
# invalid as soon as it is used.
# We limit the hash to 20 chars to keep URL short
key_salt = "django.contrib.auth.tokens.PasswordResetTokenGenerator"
value = unicode(user.id) + \
user.password + user.last_login.strftime('%Y-%m-%d %H:%M:%S') + \
unicode(timestamp)
hash = salted_hmac(key_salt, value).hexdigest()[::2]
return "%s-%s" % (ts_b36, hash)
def _make_token_with_timestamp_old(self, user, timestamp):
# The Django 1.2 method
ts_b36 = int_to_base36(timestamp)
hash = sha_constructor(settings.SECRET_KEY + unicode(user.id) +
user.password + user.last_login.strftime('%Y-%m-%d %H:%M:%S') +
unicode(timestamp)).hexdigest()[::2]
return "%s-%s" % (ts_b36, hash)
def _num_days(self, dt):
return (dt - date(2001,1,1)).days
def _today(self):
# Used for mocking in tests
return date.today()
default_token_generator = PasswordResetTokenGenerator()
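# Example usage (a sketch; assumes `user` is a saved auth User instance):
#   token = default_token_generator.make_token(user)
#   assert default_token_generator.check_token(user, token)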
| bsd-3-clause |
thiagopnts/servo | tests/wpt/web-platform-tests/subresource-integrity/tools/list_hashes.py | 13 | 2129 | from os import path, listdir
from hashlib import sha512, sha384, sha256, md5
from base64 import b64encode
from random import randint
import ed25519
import re
DIR = path.normpath(path.join(__file__, "..", ".."))
'''
Yield the full path of each JavaScript and CSS file in the test directory.
'''
def js_and_css_files():
for f in listdir(DIR):
full_path = path.join(DIR, f)
if path.isfile(full_path) and (f.endswith(".js") or f.endswith(".css")):
yield full_path
'''
Base64-encode a binary digest (subresource integrity uses standard base64, padding included).
'''
def format_digest(digest):
return b64encode(digest)
'''
Generate an encoded sha512 URI.
'''
def sha512_uri(content):
return "sha512-%s" % format_digest(sha512(content).digest())
'''
Generate an encoded sha384 URI.
'''
def sha384_uri(content):
return "sha384-%s" % format_digest(sha384(content).digest())
'''
Generate an encoded sha256 URI.
'''
def sha256_uri(content):
return "sha256-%s" % format_digest(sha256(content).digest())
'''
Generate an encoded ed25519 signature.
'''
def ed25519_signature(private_public_key, content):
signature = ed25519.signature(content, *private_public_key)
return "ed25519-%s" % format_digest(signature)
'''
Generate private + public key pair for ed25519 signatures.
'''
def ed25519_key_pair():
secret_key = ''.join(chr(randint(0, 255)) for _ in range(0,32))
public_key = ed25519.publickey(secret_key)
return (secret_key, public_key)
'''
Generate an encoded md5 digest URI.
'''
def md5_uri(content):
return "md5-%s" % format_digest(md5(content).digest())
def main():
ed25519_key = ed25519_key_pair()
for file in js_and_css_files():
print "Listing hash values for %s" % file
with open(file, "r") as content_file:
content = content_file.read()
print "\tSHA512 integrity: %s" % sha512_uri(content)
print "\tSHA384 integrity: %s" % sha384_uri(content)
print "\tSHA256 integrity: %s" % sha256_uri(content)
print "\tMD5 integrity: %s" % md5_uri(content)
print "\tEd25519 integrity: %s" % ed25519_signature(ed25519_key, content)
print "\nEd25519 public key (used above): %s" % format_digest(ed25519_key[1])
if __name__ == "__main__":
main()
| mpl-2.0 |
beatrizjesus/my-first-blog | pasta/Lib/site-packages/django/core/management/commands/test.py | 267 | 3933 | import logging
import os
import sys
from django.conf import settings
from django.core.management.base import BaseCommand
from django.test.utils import get_runner
class Command(BaseCommand):
help = 'Discover and run tests in the specified modules or the current directory.'
requires_system_checks = False
def __init__(self):
self.test_runner = None
super(Command, self).__init__()
def run_from_argv(self, argv):
"""
Pre-parse the command line to extract the value of the --testrunner
option. This allows a test runner to define additional command line
arguments.
"""
option = '--testrunner='
for arg in argv[2:]:
if arg.startswith(option):
self.test_runner = arg[len(option):]
break
super(Command, self).run_from_argv(argv)
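# For example (a sketch; `myapp.runner.CustomRunner` is a hypothetical dotted
# path), a custom runner can be selected directly on the command line:
#   ./manage.py test --testrunner=myapp.runner.CustomRunner myapp.tests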
def add_arguments(self, parser):
parser.add_argument('args', metavar='test_label', nargs='*',
help='Module paths to test; can be modulename, modulename.TestCase or modulename.TestCase.test_method')
parser.add_argument('--noinput',
action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument('--failfast',
action='store_true', dest='failfast', default=False,
help='Tells Django to stop running the test suite after first '
'failed test.')
parser.add_argument('--testrunner',
action='store', dest='testrunner',
help='Tells Django to use specified test runner class instead of '
'the one specified by the TEST_RUNNER setting.')
parser.add_argument('--liveserver',
action='store', dest='liveserver', default=None,
help='Overrides the default address where the live server (used '
'with LiveServerTestCase) is expected to run from. The '
'default value is localhost:8081.')
test_runner_class = get_runner(settings, self.test_runner)
if hasattr(test_runner_class, 'option_list'):
# Keeping compatibility with both optparse and argparse at this level
# would be too heavy for a non-critical item
raise RuntimeError(
"The method to extend accepted command-line arguments by the "
"test management command has changed in Django 1.8. Please "
"create an add_arguments class method to achieve this.")
if hasattr(test_runner_class, 'add_arguments'):
test_runner_class.add_arguments(parser)
def execute(self, *args, **options):
if options['verbosity'] > 0:
# ensure that deprecation warnings are displayed during testing
# the following state is assumed:
# logging.capturewarnings is true
# a "default" level warnings filter has been added for
# DeprecationWarning. See django.conf.LazySettings._configure_logging
logger = logging.getLogger('py.warnings')
handler = logging.StreamHandler()
logger.addHandler(handler)
super(Command, self).execute(*args, **options)
if options['verbosity'] > 0:
# remove the testing-specific handler
logger.removeHandler(handler)
def handle(self, *test_labels, **options):
from django.conf import settings
from django.test.utils import get_runner
TestRunner = get_runner(settings, options.get('testrunner'))
if options.get('liveserver') is not None:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options['liveserver']
del options['liveserver']
test_runner = TestRunner(**options)
failures = test_runner.run_tests(test_labels)
if failures:
sys.exit(bool(failures))
| mit |
roadmapper/ansible | lib/ansible/plugins/loader.py | 11 | 39388 | # (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> and others
# (c) 2017, Toshio Kuratomi <tkuratomi@ansible.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import glob
import os
import os.path
import sys
import warnings
from collections import defaultdict
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes, to_text, to_native
from ansible.module_utils.six import string_types
from ansible.parsing.utils.yaml import from_yaml
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.plugins import get_plugin_class, MODULE_CACHE, PATH_CACHE, PLUGIN_PATH_CACHE
from ansible.utils.collection_loader import AnsibleCollectionLoader, AnsibleFlatMapLoader, AnsibleCollectionRef
from ansible.utils.display import Display
from ansible.utils.plugin_docs import add_fragments
try:
import importlib.util
imp = None
except ImportError:
import imp
# HACK: keep Python 2.6 controller tests happy in CI until they're properly split
try:
from importlib import import_module
except ImportError:
import_module = __import__
display = Display()
def get_all_plugin_loaders():
return [(name, obj) for (name, obj) in globals().items() if isinstance(obj, PluginLoader)]
def add_all_plugin_dirs(path):
''' add any existing plugin dirs in the path provided '''
b_path = to_bytes(path, errors='surrogate_or_strict')
if os.path.isdir(b_path):
for name, obj in get_all_plugin_loaders():
if obj.subdir:
plugin_path = os.path.join(b_path, to_bytes(obj.subdir))
if os.path.isdir(plugin_path):
obj.add_directory(to_text(plugin_path))
else:
display.warning("Ignoring invalid path provided to plugin path: '%s' is not a directory" % to_text(path))
def get_shell_plugin(shell_type=None, executable=None):
if not shell_type:
# default to sh
shell_type = 'sh'
# mostly for backwards compat
if executable:
if isinstance(executable, string_types):
shell_filename = os.path.basename(executable)
try:
shell = shell_loader.get(shell_filename)
except Exception:
shell = None
if shell is None:
for shell in shell_loader.all():
if shell_filename in shell.COMPATIBLE_SHELLS:
shell_type = shell.SHELL_FAMILY
break
else:
raise AnsibleError("Either a shell type or a shell executable must be provided ")
shell = shell_loader.get(shell_type)
if not shell:
raise AnsibleError("Could not find the shell plugin required (%s)." % shell_type)
if executable:
setattr(shell, 'executable', executable)
return shell
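# Example (a sketch): resolve the shell plugin matching a bash executable:
#   shell = get_shell_plugin(executable='/bin/bash')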
def add_dirs_to_loader(which_loader, paths):
loader = getattr(sys.modules[__name__], '%s_loader' % which_loader)
for path in paths:
loader.add_directory(path, with_subdir=True)
class PluginLoader:
'''
PluginLoader loads plugins from the configured plugin directories.
It searches for plugins by iterating through the combined list of play basedirs, configured
paths, and the python path. The first match is used.
'''
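# A loader instance is typically constructed once per plugin type, e.g. this
# sketch mirrors how an action-plugin loader would be declared:
#   action_loader = PluginLoader('ActionModule', 'ansible.plugins.action',
#                                C.DEFAULT_ACTION_PLUGIN_PATH, 'action_plugins',
#                                required_base_class='ActionBase')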
def __init__(self, class_name, package, config, subdir, aliases=None, required_base_class=None):
aliases = {} if aliases is None else aliases
self.class_name = class_name
self.base_class = required_base_class
self.package = package
self.subdir = subdir
# FIXME: remove alias dict in favor of alias by symlink?
self.aliases = aliases
if config and not isinstance(config, list):
config = [config]
elif not config:
config = []
self.config = config
if class_name not in MODULE_CACHE:
MODULE_CACHE[class_name] = {}
if class_name not in PATH_CACHE:
PATH_CACHE[class_name] = None
if class_name not in PLUGIN_PATH_CACHE:
PLUGIN_PATH_CACHE[class_name] = defaultdict(dict)
# hold dirs added at runtime outside of config
self._extra_dirs = []
# caches
self._module_cache = MODULE_CACHE[class_name]
self._paths = PATH_CACHE[class_name]
self._plugin_path_cache = PLUGIN_PATH_CACHE[class_name]
self._searched_paths = set()
def _clear_caches(self):
if C.OLD_PLUGIN_CACHE_CLEARING:
self._paths = None
else:
# reset global caches
MODULE_CACHE[self.class_name] = {}
PATH_CACHE[self.class_name] = None
PLUGIN_PATH_CACHE[self.class_name] = defaultdict(dict)
# reset internal caches
self._module_cache = MODULE_CACHE[self.class_name]
self._paths = PATH_CACHE[self.class_name]
self._plugin_path_cache = PLUGIN_PATH_CACHE[self.class_name]
self._searched_paths = set()
def __setstate__(self, data):
'''
Deserializer.
'''
class_name = data.get('class_name')
package = data.get('package')
config = data.get('config')
subdir = data.get('subdir')
aliases = data.get('aliases')
base_class = data.get('base_class')
PATH_CACHE[class_name] = data.get('PATH_CACHE')
PLUGIN_PATH_CACHE[class_name] = data.get('PLUGIN_PATH_CACHE')
self.__init__(class_name, package, config, subdir, aliases, base_class)
self._extra_dirs = data.get('_extra_dirs', [])
self._searched_paths = data.get('_searched_paths', set())
def __getstate__(self):
'''
Serializer.
'''
return dict(
class_name=self.class_name,
base_class=self.base_class,
package=self.package,
config=self.config,
subdir=self.subdir,
aliases=self.aliases,
_extra_dirs=self._extra_dirs,
_searched_paths=self._searched_paths,
PATH_CACHE=PATH_CACHE[self.class_name],
PLUGIN_PATH_CACHE=PLUGIN_PATH_CACHE[self.class_name],
)
def format_paths(self, paths):
''' Returns a string suitable for printing of the search path '''
# Uses a list to get the order right
ret = []
for i in paths:
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
def print_paths(self):
return self.format_paths(self._get_paths(subdirs=False))
def _all_directories(self, dir):
results = []
results.append(dir)
for root, subdirs, files in os.walk(dir, followlinks=True):
if '__init__.py' in files:
for x in subdirs:
results.append(os.path.join(root, x))
return results
def _get_package_paths(self, subdirs=True):
''' Gets the path of a Python package '''
if not self.package:
return []
if not hasattr(self, 'package_path'):
m = __import__(self.package)
parts = self.package.split('.')[1:]
for parent_mod in parts:
m = getattr(m, parent_mod)
self.package_path = os.path.dirname(m.__file__)
if subdirs:
return self._all_directories(self.package_path)
return [self.package_path]
def _get_paths(self, subdirs=True):
''' Return a list of paths to search for plugins in '''
# FIXME: This is potentially buggy if subdirs is sometimes True and sometimes False.
# In current usage, everything calls this with subdirs=True except for module_utils_loader and ansible-doc
# which always calls it with subdirs=False. So there currently isn't a problem with this caching.
if self._paths is not None:
return self._paths
ret = self._extra_dirs[:]
# look in any configured plugin paths, allow one level deep for subcategories
if self.config is not None:
for path in self.config:
path = os.path.realpath(os.path.expanduser(path))
if subdirs:
contents = glob.glob("%s/*" % path) + glob.glob("%s/*/*" % path)
for c in contents:
if os.path.isdir(c) and c not in ret:
ret.append(c)
if path not in ret:
ret.append(path)
# look for any plugins installed in the package subtree
# Note package path always gets added last so that every other type of
# path is searched before it.
ret.extend(self._get_package_paths(subdirs=subdirs))
# HACK: because powershell modules are in the same directory
# hierarchy as other modules we have to process them last. This is
# because powershell only works on windows but the other modules work
# anywhere (possibly including windows if the correct language
# interpreter is installed). the non-powershell modules can have any
# file extension and thus powershell modules are picked up in that.
# The non-hack way to fix this is to have powershell modules be
# a different PluginLoader/ModuleLoader. But that requires changing
# other things too (known thing to change would be PATHS_CACHE,
# PLUGIN_PATHS_CACHE, and MODULE_CACHE. Since those three dicts key
# on the class_name and neither regular modules nor powershell modules
# would have class_names, they would not work as written.
#
# The expected sort order is paths in the order in 'ret' with paths ending in '/windows' at the end,
# also in the original order they were found in 'ret'.
# The .sort() method is guaranteed to be stable, so original order is preserved.
ret.sort(key=lambda p: p.endswith('/windows'))
# cache and return the result
self._paths = ret
return ret
def _load_config_defs(self, name, module, path):
''' Reads plugin docs to find configuration setting definitions, to push to config manager for later use '''
# plugins w/o class name don't support config
if self.class_name:
type_name = get_plugin_class(self.class_name)
# if type name != 'module_doc_fragment':
if type_name in C.CONFIGURABLE_PLUGINS:
dstring = AnsibleLoader(getattr(module, 'DOCUMENTATION', ''), file_name=path).get_single_data()
if dstring:
add_fragments(dstring, path, fragment_loader=fragment_loader)
if dstring and 'options' in dstring and isinstance(dstring['options'], dict):
C.config.initialize_plugin_configuration_definitions(type_name, name, dstring['options'])
display.debug('Loaded config def from plugin (%s/%s)' % (type_name, name))
def add_directory(self, directory, with_subdir=False):
''' Adds an additional directory to the search path '''
directory = os.path.realpath(directory)
if directory is not None:
if with_subdir:
directory = os.path.join(directory, self.subdir)
if directory not in self._extra_dirs:
# append the directory and invalidate the path cache
self._extra_dirs.append(directory)
self._clear_caches()
display.debug('Added %s to loader search path' % (directory))
def _find_fq_plugin(self, fq_name, extension):
"""Search builtin paths to find a plugin. No external paths are searched,
meaning plugins inside roles inside collections will be ignored.
"""
plugin_type = AnsibleCollectionRef.legacy_plugin_dir_to_plugin_type(self.subdir)
acr = AnsibleCollectionRef.from_fqcr(fq_name, plugin_type)
n_resource = to_native(acr.resource, errors='strict')
# we want this before the extension is added
full_name = '{0}.{1}'.format(acr.n_python_package_name, n_resource)
if extension:
n_resource += extension
pkg = sys.modules.get(acr.n_python_package_name)
if not pkg:
# FIXME: there must be cheaper/safer way to do this
pkg = import_module(acr.n_python_package_name)
# if the package is one of our flatmaps, we need to consult its loader to find the path, since the file could be
# anywhere in the tree
if hasattr(pkg, '__loader__') and isinstance(pkg.__loader__, AnsibleFlatMapLoader):
try:
file_path = pkg.__loader__.find_file(n_resource)
return full_name, to_text(file_path)
except IOError:
# this loader already takes care of extensionless files, so if we didn't find it, just bail
return None, None
pkg_path = os.path.dirname(pkg.__file__)
n_resource_path = os.path.join(pkg_path, n_resource)
# FIXME: and is file or file link or ...
if os.path.exists(n_resource_path):
return full_name, to_text(n_resource_path)
# look for any matching extension in the package location (sans filter)
ext_blacklist = ['.pyc', '.pyo']
found_files = [f for f in glob.iglob(os.path.join(pkg_path, n_resource) + '.*') if os.path.isfile(f) and os.path.splitext(f)[1] not in ext_blacklist]
if not found_files:
return None, None
if len(found_files) > 1:
# TODO: warn?
pass
return full_name, to_text(found_files[0])
def find_plugin(self, name, mod_type='', ignore_deprecated=False, check_aliases=False, collection_list=None):
''' Find a plugin named name '''
return self.find_plugin_with_name(name, mod_type, ignore_deprecated, check_aliases, collection_list)[1]
def find_plugin_with_name(self, name, mod_type='', ignore_deprecated=False, check_aliases=False, collection_list=None):
''' Find a plugin named name '''
global _PLUGIN_FILTERS
if name in _PLUGIN_FILTERS[self.package]:
return None, None
if mod_type:
suffix = mod_type
elif self.class_name:
# Ansible plugins that run in the controller process (most plugins)
suffix = '.py'
else:
# Only Ansible Modules. Ansible modules can be any executable so
# they can have any suffix
suffix = ''
# FIXME: need this right now so we can still load shipped PS module_utils- come up with a more robust solution
if (AnsibleCollectionRef.is_valid_fqcr(name) or collection_list) and not name.startswith('Ansible'):
if '.' in name or not collection_list:
candidates = [name]
else:
candidates = ['{0}.{1}'.format(c, name) for c in collection_list]
# TODO: keep actual errors, not just assembled messages
errors = []
for candidate_name in candidates:
try:
# HACK: refactor this properly
if candidate_name.startswith('ansible.legacy'):
# 'ansible.legacy' refers to the plugin finding behavior used before collections existed.
# They need to search 'library' and the various '*_plugins' directories in order to find the file.
full_name = name
p = self._find_plugin_legacy(name.replace('ansible.legacy.', '', 1), ignore_deprecated, check_aliases, suffix)
else:
# 'ansible.builtin' should be handled here. This means only internal, or builtin, paths are searched.
full_name, p = self._find_fq_plugin(candidate_name, suffix)
if p:
return full_name, p
except Exception as ex:
errors.append(to_native(ex))
if errors:
display.debug(msg='plugin lookup for {0} failed; errors: {1}'.format(name, '; '.join(errors)))
return None, None
# if we got here, there's no collection list and it's not an FQ name, so do legacy lookup
return name, self._find_plugin_legacy(name, ignore_deprecated, check_aliases, suffix)
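    # Hedged example of the candidate expansion above (names illustrative):
    # looking up 'ping' with collection_list=['ns.col1', 'ns.col2'] tries
    # 'ns.col1.ping' and then 'ns.col2.ping', while a dotted name such as
    # 'ns.col1.ping' is tried as-is.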
def _find_plugin_legacy(self, name, ignore_deprecated=False, check_aliases=False, suffix=None):
"""Search library and various *_plugins paths in order to find the file.
This was behavior prior to the existence of collections.
"""
if check_aliases:
name = self.aliases.get(name, name)
# The particular cache to look for modules within. This matches the
# requested mod_type
pull_cache = self._plugin_path_cache[suffix]
try:
return pull_cache[name]
except KeyError:
# Cache miss. Now let's find the plugin
pass
# TODO: Instead of using the self._paths cache (PATH_CACHE) and
# self._searched_paths we could use an iterator. Before enabling that
# we need to make sure we don't want to add additional directories
# (add_directory()) once we start using the iterator. Currently, it
# looks like _get_paths() never forces a cache refresh so if we expect
# additional directories to be added later, it is buggy.
for path in (p for p in self._get_paths() if p not in self._searched_paths and os.path.isdir(p)):
display.debug('trying %s' % path)
try:
full_paths = (os.path.join(path, f) for f in os.listdir(path))
            except OSError as e:
                display.warning("Error accessing plugin paths: %s" % to_text(e))
                continue
for full_path in (f for f in full_paths if os.path.isfile(f) and not f.endswith('__init__.py')):
full_name = os.path.basename(full_path)
# HACK: We have no way of executing python byte compiled files as ansible modules so specifically exclude them
# FIXME: I believe this is only correct for modules and module_utils.
            # For all other plugins we want .pyc and .pyo to be valid
if any(full_path.endswith(x) for x in C.BLACKLIST_EXTS):
continue
splitname = os.path.splitext(full_name)
base_name = splitname[0]
try:
extension = splitname[1]
except IndexError:
extension = ''
# Module found, now enter it into the caches that match this file
if base_name not in self._plugin_path_cache['']:
self._plugin_path_cache[''][base_name] = full_path
if full_name not in self._plugin_path_cache['']:
self._plugin_path_cache[''][full_name] = full_path
if base_name not in self._plugin_path_cache[extension]:
self._plugin_path_cache[extension][base_name] = full_path
if full_name not in self._plugin_path_cache[extension]:
self._plugin_path_cache[extension][full_name] = full_path
self._searched_paths.add(path)
try:
return pull_cache[name]
except KeyError:
# Didn't find the plugin in this directory. Load modules from the next one
pass
# if nothing is found, try finding alias/deprecated
if not name.startswith('_'):
alias_name = '_' + name
# We've already cached all the paths at this point
if alias_name in pull_cache:
if not ignore_deprecated and not os.path.islink(pull_cache[alias_name]):
# FIXME: this is not always the case, some are just aliases
display.deprecated('%s is kept for backwards compatibility but usage is discouraged. ' # pylint: disable=ansible-deprecated-no-version
'The module documentation details page may explain more about this rationale.' % name.lstrip('_'))
return pull_cache[alias_name]
return None
def has_plugin(self, name, collection_list=None):
''' Checks if a plugin named name exists '''
try:
return self.find_plugin(name, collection_list=collection_list) is not None
except Exception as ex:
if isinstance(ex, AnsibleError):
raise
# log and continue, likely an innocuous type/package loading failure in collections import
display.debug('has_plugin error: {0}'.format(to_text(ex)))
__contains__ = has_plugin
def _load_module_source(self, name, path):
# avoid collisions across plugins
if name.startswith('ansible_collections.'):
full_name = name
else:
full_name = '.'.join([self.package, name])
if full_name in sys.modules:
# Avoids double loading, See https://github.com/ansible/ansible/issues/13110
return sys.modules[full_name]
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
if imp is None:
spec = importlib.util.spec_from_file_location(to_native(full_name), to_native(path))
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
sys.modules[full_name] = module
else:
with open(to_bytes(path), 'rb') as module_file:
# to_native is used here because imp.load_source's path is for tracebacks and python's traceback formatting uses native strings
module = imp.load_source(to_native(full_name), to_native(path), module_file)
return module
def _update_object(self, obj, name, path):
# set extra info on the module, in case we want it later
setattr(obj, '_original_path', path)
setattr(obj, '_load_name', name)
def get(self, name, *args, **kwargs):
''' instantiates a plugin of the given name using arguments '''
found_in_cache = True
class_only = kwargs.pop('class_only', False)
collection_list = kwargs.pop('collection_list', None)
if name in self.aliases:
name = self.aliases[name]
name, path = self.find_plugin_with_name(name, collection_list=collection_list)
if path is None:
return None
if path not in self._module_cache:
self._module_cache[path] = self._load_module_source(name, path)
self._load_config_defs(name, self._module_cache[path], path)
found_in_cache = False
obj = getattr(self._module_cache[path], self.class_name)
if self.base_class:
# The import path is hardcoded and should be the right place,
# so we are not expecting an ImportError.
module = __import__(self.package, fromlist=[self.base_class])
# Check whether this obj has the required base class.
try:
plugin_class = getattr(module, self.base_class)
except AttributeError:
return None
if not issubclass(obj, plugin_class):
return None
self._display_plugin_load(self.class_name, name, self._searched_paths, path, found_in_cache=found_in_cache, class_only=class_only)
if not class_only:
try:
obj = obj(*args, **kwargs)
except TypeError as e:
if "abstract" in e.args[0]:
# Abstract Base Class. The found plugin file does not
# fully implement the defined interface.
return None
raise
self._update_object(obj, name, path)
return obj
def _display_plugin_load(self, class_name, name, searched_paths, path, found_in_cache=None, class_only=None):
''' formats data to display debug info for plugin loading, also avoids processing unless really needed '''
if C.DEFAULT_DEBUG:
msg = 'Loading %s \'%s\' from %s' % (class_name, os.path.basename(name), path)
if len(searched_paths) > 1:
msg = '%s (searched paths: %s)' % (msg, self.format_paths(searched_paths))
if found_in_cache or class_only:
msg = '%s (found_in_cache=%s, class_only=%s)' % (msg, found_in_cache, class_only)
display.debug(msg)
def all(self, *args, **kwargs):
'''
Iterate through all plugins of this type
A plugin loader is initialized with a specific type. This function is an iterator returning
all of the plugins of that type to the caller.
:kwarg path_only: If this is set to True, then we return the paths to where the plugins reside
instead of an instance of the plugin. This conflicts with class_only and both should
not be set.
:kwarg class_only: If this is set to True then we return the python class which implements
a plugin rather than an instance of the plugin. This conflicts with path_only and both
should not be set.
:kwarg _dedupe: By default, we only return one plugin per plugin name. Deduplication happens
in the same way as the :meth:`get` and :meth:`find_plugin` methods resolve which plugin
should take precedence. If this is set to False, then we return all of the plugins
found, including those with duplicate names. In the case of duplicates, the order in
which they are returned is the one that would take precedence first, followed by the
others in decreasing precedence order. This should only be used by subclasses which
want to manage their own deduplication of the plugins.
:*args: Any extra arguments are passed to each plugin when it is instantiated.
:**kwargs: Any extra keyword arguments are passed to each plugin when it is instantiated.
'''
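        # Hedged usage sketch (loader instances are defined at the bottom of
        # this file; exact results depend on the installed plugin set):
        #   for path in lookup_loader.all(path_only=True):
        #       print(path)               # one path per deduplicated plugin name
        #   for plugin in lookup_loader.all():
        #       print(plugin._load_name)  # instantiated plugin objects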
# TODO: Change the signature of this method to:
# def all(return_type='instance', args=None, kwargs=None):
# if args is None: args = []
# if kwargs is None: kwargs = {}
# return_type can be instance, class, or path.
# These changes will mean that plugin parameters won't conflict with our params and
# will also make it impossible to request both a path and a class at the same time.
#
# Move _dedupe to be a class attribute, CUSTOM_DEDUPE, with subclasses for filters and
# tests setting it to True
global _PLUGIN_FILTERS
dedupe = kwargs.pop('_dedupe', True)
path_only = kwargs.pop('path_only', False)
class_only = kwargs.pop('class_only', False)
# Having both path_only and class_only is a coding bug
if path_only and class_only:
raise AnsibleError('Do not set both path_only and class_only when calling PluginLoader.all()')
all_matches = []
found_in_cache = True
for i in self._get_paths():
all_matches.extend(glob.glob(os.path.join(i, "*.py")))
loaded_modules = set()
for path in sorted(all_matches, key=os.path.basename):
name = os.path.splitext(path)[0]
basename = os.path.basename(name)
if basename == '__init__' or basename in _PLUGIN_FILTERS[self.package]:
continue
if dedupe and basename in loaded_modules:
continue
loaded_modules.add(basename)
if path_only:
yield path
continue
if path not in self._module_cache:
try:
if self.subdir in ('filter_plugins', 'test_plugins'):
# filter and test plugin files can contain multiple plugins
# they must have a unique python module name to prevent them from shadowing each other
full_name = '{0}_{1}'.format(abs(hash(path)), basename)
else:
full_name = basename
module = self._load_module_source(full_name, path)
self._load_config_defs(basename, module, path)
except Exception as e:
display.warning("Skipping plugin (%s) as it seems to be invalid: %s" % (path, to_text(e)))
continue
self._module_cache[path] = module
found_in_cache = False
try:
obj = getattr(self._module_cache[path], self.class_name)
except AttributeError as e:
display.warning("Skipping plugin (%s) as it seems to be invalid: %s" % (path, to_text(e)))
continue
if self.base_class:
# The import path is hardcoded and should be the right place,
# so we are not expecting an ImportError.
module = __import__(self.package, fromlist=[self.base_class])
# Check whether this obj has the required base class.
try:
plugin_class = getattr(module, self.base_class)
except AttributeError:
continue
if not issubclass(obj, plugin_class):
continue
self._display_plugin_load(self.class_name, basename, self._searched_paths, path, found_in_cache=found_in_cache, class_only=class_only)
if not class_only:
try:
obj = obj(*args, **kwargs)
except TypeError as e:
display.warning("Skipping plugin (%s) as it seems to be incomplete: %s" % (path, to_text(e)))
self._update_object(obj, basename, path)
yield obj
class Jinja2Loader(PluginLoader):
"""
PluginLoader optimized for Jinja2 plugins
The filter and test plugins are Jinja2 plugins encapsulated inside of our plugin format.
The way the calling code is setup, we need to do a few things differently in the all() method
"""
def find_plugin(self, name, collection_list=None):
        # Nothing using Jinja2Loader uses this method. We can't use the base class version because
# we deduplicate differently than the base class
if '.' in name:
return super(Jinja2Loader, self).find_plugin(name, collection_list=collection_list)
raise AnsibleError('No code should call find_plugin for Jinja2Loaders (Not implemented)')
def get(self, name, *args, **kwargs):
        # Nothing using Jinja2Loader uses this method. We can't use the base class version because
# we deduplicate differently than the base class
if '.' in name:
return super(Jinja2Loader, self).get(name, *args, **kwargs)
        raise AnsibleError('No code should call get for Jinja2Loaders (Not implemented)')
def all(self, *args, **kwargs):
"""
Differences with :meth:`PluginLoader.all`:
* We do not deduplicate ansible plugin names. This is because we don't care about our
plugin names, here. We care about the names of the actual jinja2 plugins which are inside
of our plugins.
* We reverse the order of the list of plugins compared to other PluginLoaders. This is
because of how calling code chooses to sync the plugins from the list. It adds all the
Jinja2 plugins from one of our Ansible plugins into a dict. Then it adds the Jinja2
plugins from the next Ansible plugin, overwriting any Jinja2 plugins that had the same
name. This is an encapsulation violation (the PluginLoader should not know about what
calling code does with the data) but we're pushing the common code here. We'll fix
this in the future by moving more of the common code into this PluginLoader.
* We return a list. We could iterate the list instead but that's extra work for no gain because
the API receiving this doesn't care. It just needs an iterable
"""
# We don't deduplicate ansible plugin names. Instead, calling code deduplicates jinja2
# plugin names.
kwargs['_dedupe'] = False
# We have to instantiate a list of all plugins so that we can reverse it. We reverse it so
# that calling code will deduplicate this correctly.
plugins = [p for p in super(Jinja2Loader, self).all(*args, **kwargs)]
plugins.reverse()
return plugins
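# Hedged sketch of the consumer-side dedupe contract described in
# Jinja2Loader.all() above (names illustrative, not from this file):
#   j2_filters = {}
#   for fm in filter_loader.all():        # lowest precedence first (reversed)
#       j2_filters.update(fm.filters())   # later, higher-precedence entries win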
def _load_plugin_filter():
filters = defaultdict(frozenset)
user_set = False
if C.PLUGIN_FILTERS_CFG is None:
filter_cfg = '/etc/ansible/plugin_filters.yml'
else:
filter_cfg = C.PLUGIN_FILTERS_CFG
user_set = True
if os.path.exists(filter_cfg):
with open(filter_cfg, 'rb') as f:
try:
filter_data = from_yaml(f.read())
except Exception as e:
display.warning(u'The plugin filter file, {0} was not parsable.'
u' Skipping: {1}'.format(filter_cfg, to_text(e)))
return filters
try:
version = filter_data['filter_version']
except KeyError:
display.warning(u'The plugin filter file, {0} was invalid.'
u' Skipping.'.format(filter_cfg))
return filters
# Try to convert for people specifying version as a float instead of string
version = to_text(version)
version = version.strip()
if version == u'1.0':
# Modules and action plugins share the same blacklist since the difference between the
# two isn't visible to the users
try:
filters['ansible.modules'] = frozenset(filter_data['module_blacklist'])
except TypeError:
display.warning(u'Unable to parse the plugin filter file {0} as'
u' module_blacklist is not a list.'
u' Skipping.'.format(filter_cfg))
return filters
filters['ansible.plugins.action'] = filters['ansible.modules']
else:
display.warning(u'The plugin filter file, {0} was a version not recognized by this'
u' version of Ansible. Skipping.'.format(filter_cfg))
else:
if user_set:
display.warning(u'The plugin filter file, {0} does not exist.'
u' Skipping.'.format(filter_cfg))
# Specialcase the stat module as Ansible can run very few things if stat is blacklisted.
if 'stat' in filters['ansible.modules']:
raise AnsibleError('The stat module was specified in the module blacklist file, {0}, but'
' Ansible will not function without the stat module. Please remove stat'
' from the blacklist.'.format(to_native(filter_cfg)))
return filters
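# Hedged example of a plugin_filters.yml that the parser above accepts
# (module names are placeholders):
#   filter_version: '1.0'
#   module_blacklist:
#     - some_module
#     - another_module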
def _configure_collection_loader():
if not any((isinstance(l, AnsibleCollectionLoader) for l in sys.meta_path)):
sys.meta_path.insert(0, AnsibleCollectionLoader(C.config))
# TODO: All of the following is initialization code It should be moved inside of an initialization
# function which is called at some point early in the ansible and ansible-playbook CLI startup.
_PLUGIN_FILTERS = _load_plugin_filter()
_configure_collection_loader()
# doc fragments first
fragment_loader = PluginLoader(
'ModuleDocFragment',
'ansible.plugins.doc_fragments',
C.DOC_FRAGMENT_PLUGIN_PATH,
'doc_fragments',
)
action_loader = PluginLoader(
'ActionModule',
'ansible.plugins.action',
C.DEFAULT_ACTION_PLUGIN_PATH,
'action_plugins',
required_base_class='ActionBase',
)
cache_loader = PluginLoader(
'CacheModule',
'ansible.plugins.cache',
C.DEFAULT_CACHE_PLUGIN_PATH,
'cache_plugins',
)
callback_loader = PluginLoader(
'CallbackModule',
'ansible.plugins.callback',
C.DEFAULT_CALLBACK_PLUGIN_PATH,
'callback_plugins',
)
connection_loader = PluginLoader(
'Connection',
'ansible.plugins.connection',
C.DEFAULT_CONNECTION_PLUGIN_PATH,
'connection_plugins',
aliases={'paramiko': 'paramiko_ssh'},
required_base_class='ConnectionBase',
)
shell_loader = PluginLoader(
'ShellModule',
'ansible.plugins.shell',
'shell_plugins',
'shell_plugins',
)
module_loader = PluginLoader(
'',
'ansible.modules',
C.DEFAULT_MODULE_PATH,
'library',
)
module_utils_loader = PluginLoader(
'',
'ansible.module_utils',
C.DEFAULT_MODULE_UTILS_PATH,
'module_utils',
)
# NB: dedicated loader is currently necessary because PS module_utils expects "with subdir" lookup where
# regular module_utils doesn't. This can be revisited once we have more granular loaders.
ps_module_utils_loader = PluginLoader(
'',
'ansible.module_utils',
C.DEFAULT_MODULE_UTILS_PATH,
'module_utils',
)
lookup_loader = PluginLoader(
'LookupModule',
'ansible.plugins.lookup',
C.DEFAULT_LOOKUP_PLUGIN_PATH,
'lookup_plugins',
required_base_class='LookupBase',
)
filter_loader = Jinja2Loader(
'FilterModule',
'ansible.plugins.filter',
C.DEFAULT_FILTER_PLUGIN_PATH,
'filter_plugins',
)
test_loader = Jinja2Loader(
'TestModule',
'ansible.plugins.test',
C.DEFAULT_TEST_PLUGIN_PATH,
'test_plugins'
)
strategy_loader = PluginLoader(
'StrategyModule',
'ansible.plugins.strategy',
C.DEFAULT_STRATEGY_PLUGIN_PATH,
'strategy_plugins',
required_base_class='StrategyBase',
)
terminal_loader = PluginLoader(
'TerminalModule',
'ansible.plugins.terminal',
C.DEFAULT_TERMINAL_PLUGIN_PATH,
'terminal_plugins',
required_base_class='TerminalBase'
)
vars_loader = PluginLoader(
'VarsModule',
'ansible.plugins.vars',
C.DEFAULT_VARS_PLUGIN_PATH,
'vars_plugins',
)
cliconf_loader = PluginLoader(
'Cliconf',
'ansible.plugins.cliconf',
C.DEFAULT_CLICONF_PLUGIN_PATH,
'cliconf_plugins',
required_base_class='CliconfBase'
)
netconf_loader = PluginLoader(
'Netconf',
'ansible.plugins.netconf',
C.DEFAULT_NETCONF_PLUGIN_PATH,
'netconf_plugins',
required_base_class='NetconfBase'
)
inventory_loader = PluginLoader(
'InventoryModule',
'ansible.plugins.inventory',
C.DEFAULT_INVENTORY_PLUGIN_PATH,
'inventory_plugins'
)
httpapi_loader = PluginLoader(
'HttpApi',
'ansible.plugins.httpapi',
C.DEFAULT_HTTPAPI_PLUGIN_PATH,
'httpapi_plugins',
required_base_class='HttpApiBase',
)
become_loader = PluginLoader(
'BecomeModule',
'ansible.plugins.become',
C.BECOME_PLUGIN_PATH,
'become_plugins'
)
| gpl-3.0 |
bpsinc-native/src_third_party_webpagereplay | third_party/dns/rdtypes/ANY/LOC.py | 248 | 12571 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import struct
import dns.exception
import dns.rdata
_pows = (1L, 10L, 100L, 1000L, 10000L, 100000L, 1000000L, 10000000L,
100000000L, 1000000000L, 10000000000L)
def _exponent_of(what, desc):
exp = None
for i in xrange(len(_pows)):
if what // _pows[i] == 0L:
exp = i - 1
break
if exp is None or exp < 0:
raise dns.exception.SyntaxError("%s value out of bounds" % desc)
return exp
def _float_to_tuple(what):
if what < 0:
sign = -1
what *= -1
else:
sign = 1
what = long(round(what * 3600000))
degrees = int(what // 3600000)
what -= degrees * 3600000
minutes = int(what // 60000)
what -= minutes * 60000
seconds = int(what // 1000)
what -= int(seconds * 1000)
what = int(what)
return (degrees * sign, minutes, seconds, what)
def _tuple_to_float(what):
if what[0] < 0:
sign = -1
value = float(what[0]) * -1
else:
sign = 1
value = float(what[0])
value += float(what[1]) / 60.0
value += float(what[2]) / 3600.0
value += float(what[3]) / 3600000.0
return sign * value
def _encode_size(what, desc):
    what = long(what)
exponent = _exponent_of(what, desc) & 0xF
base = what // pow(10, exponent) & 0xF
return base * 16 + exponent
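# Worked example (derived from the code above): _encode_size(10000, 'size')
# yields base 1 and exponent 4, i.e. 1 * 16 + 4 == 0x14 (10000 cm == 100 m).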
def _decode_size(what, desc):
exponent = what & 0x0F
if exponent > 9:
raise dns.exception.SyntaxError("bad %s exponent" % desc)
base = (what & 0xF0) >> 4
if base > 9:
raise dns.exception.SyntaxError("bad %s base" % desc)
return long(base) * pow(10, exponent)
class LOC(dns.rdata.Rdata):
"""LOC record
@ivar latitude: latitude
@type latitude: (int, int, int, int) tuple specifying the degrees, minutes,
seconds, and milliseconds of the coordinate.
@ivar longitude: longitude
@type longitude: (int, int, int, int) tuple specifying the degrees,
minutes, seconds, and milliseconds of the coordinate.
@ivar altitude: altitude
@type altitude: float
@ivar size: size of the sphere
@type size: float
@ivar horizontal_precision: horizontal precision
@type horizontal_precision: float
@ivar vertical_precision: vertical precision
@type vertical_precision: float
@see: RFC 1876"""
__slots__ = ['latitude', 'longitude', 'altitude', 'size',
'horizontal_precision', 'vertical_precision']
def __init__(self, rdclass, rdtype, latitude, longitude, altitude,
size=1.0, hprec=10000.0, vprec=10.0):
"""Initialize a LOC record instance.
The parameters I{latitude} and I{longitude} may be either a 4-tuple
of integers specifying (degrees, minutes, seconds, milliseconds),
or they may be floating point values specifying the number of
degrees. The other parameters are floats."""
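        # Illustrative note: latitude=42.5 is normalized below via
        # _float_to_tuple() to (42, 30, 0, 0), i.e. 42 degrees 30 minutes.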
super(LOC, self).__init__(rdclass, rdtype)
if isinstance(latitude, int) or isinstance(latitude, long):
latitude = float(latitude)
if isinstance(latitude, float):
latitude = _float_to_tuple(latitude)
self.latitude = latitude
if isinstance(longitude, int) or isinstance(longitude, long):
longitude = float(longitude)
if isinstance(longitude, float):
longitude = _float_to_tuple(longitude)
self.longitude = longitude
self.altitude = float(altitude)
self.size = float(size)
self.horizontal_precision = float(hprec)
self.vertical_precision = float(vprec)
def to_text(self, origin=None, relativize=True, **kw):
if self.latitude[0] > 0:
lat_hemisphere = 'N'
lat_degrees = self.latitude[0]
else:
lat_hemisphere = 'S'
lat_degrees = -1 * self.latitude[0]
if self.longitude[0] > 0:
long_hemisphere = 'E'
long_degrees = self.longitude[0]
else:
long_hemisphere = 'W'
long_degrees = -1 * self.longitude[0]
text = "%d %d %d.%03d %s %d %d %d.%03d %s %0.2fm" % (
lat_degrees, self.latitude[1], self.latitude[2], self.latitude[3],
lat_hemisphere, long_degrees, self.longitude[1], self.longitude[2],
self.longitude[3], long_hemisphere, self.altitude / 100.0
)
if self.size != 1.0 or self.horizontal_precision != 10000.0 or \
self.vertical_precision != 10.0:
text += " %0.2fm %0.2fm %0.2fm" % (
self.size / 100.0, self.horizontal_precision / 100.0,
self.vertical_precision / 100.0
)
return text
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
latitude = [0, 0, 0, 0]
longitude = [0, 0, 0, 0]
size = 1.0
hprec = 10000.0
vprec = 10.0
latitude[0] = tok.get_int()
t = tok.get_string()
if t.isdigit():
latitude[1] = int(t)
t = tok.get_string()
if '.' in t:
(seconds, milliseconds) = t.split('.')
if not seconds.isdigit():
raise dns.exception.SyntaxError('bad latitude seconds value')
latitude[2] = int(seconds)
if latitude[2] >= 60:
raise dns.exception.SyntaxError('latitude seconds >= 60')
l = len(milliseconds)
if l == 0 or l > 3 or not milliseconds.isdigit():
raise dns.exception.SyntaxError('bad latitude milliseconds value')
if l == 1:
m = 100
elif l == 2:
m = 10
else:
m = 1
latitude[3] = m * int(milliseconds)
t = tok.get_string()
elif t.isdigit():
latitude[2] = int(t)
t = tok.get_string()
if t == 'S':
latitude[0] *= -1
elif t != 'N':
raise dns.exception.SyntaxError('bad latitude hemisphere value')
longitude[0] = tok.get_int()
t = tok.get_string()
if t.isdigit():
longitude[1] = int(t)
t = tok.get_string()
if '.' in t:
(seconds, milliseconds) = t.split('.')
if not seconds.isdigit():
raise dns.exception.SyntaxError('bad longitude seconds value')
longitude[2] = int(seconds)
if longitude[2] >= 60:
raise dns.exception.SyntaxError('longitude seconds >= 60')
l = len(milliseconds)
if l == 0 or l > 3 or not milliseconds.isdigit():
raise dns.exception.SyntaxError('bad longitude milliseconds value')
if l == 1:
m = 100
elif l == 2:
m = 10
else:
m = 1
longitude[3] = m * int(milliseconds)
t = tok.get_string()
elif t.isdigit():
longitude[2] = int(t)
t = tok.get_string()
if t == 'W':
longitude[0] *= -1
elif t != 'E':
raise dns.exception.SyntaxError('bad longitude hemisphere value')
t = tok.get_string()
if t[-1] == 'm':
t = t[0 : -1]
altitude = float(t) * 100.0 # m -> cm
token = tok.get().unescape()
if not token.is_eol_or_eof():
value = token.value
if value[-1] == 'm':
value = value[0 : -1]
size = float(value) * 100.0 # m -> cm
token = tok.get().unescape()
if not token.is_eol_or_eof():
value = token.value
if value[-1] == 'm':
value = value[0 : -1]
hprec = float(value) * 100.0 # m -> cm
token = tok.get().unescape()
if not token.is_eol_or_eof():
value = token.value
if value[-1] == 'm':
value = value[0 : -1]
vprec = float(value) * 100.0 # m -> cm
tok.get_eol()
return cls(rdclass, rdtype, latitude, longitude, altitude,
size, hprec, vprec)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
if self.latitude[0] < 0:
sign = -1
degrees = long(-1 * self.latitude[0])
else:
sign = 1
degrees = long(self.latitude[0])
milliseconds = (degrees * 3600000 +
self.latitude[1] * 60000 +
self.latitude[2] * 1000 +
self.latitude[3]) * sign
latitude = 0x80000000L + milliseconds
if self.longitude[0] < 0:
sign = -1
degrees = long(-1 * self.longitude[0])
else:
sign = 1
degrees = long(self.longitude[0])
milliseconds = (degrees * 3600000 +
self.longitude[1] * 60000 +
self.longitude[2] * 1000 +
self.longitude[3]) * sign
longitude = 0x80000000L + milliseconds
altitude = long(self.altitude) + 10000000L
size = _encode_size(self.size, "size")
hprec = _encode_size(self.horizontal_precision, "horizontal precision")
vprec = _encode_size(self.vertical_precision, "vertical precision")
wire = struct.pack("!BBBBIII", 0, size, hprec, vprec, latitude,
longitude, altitude)
file.write(wire)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(version, size, hprec, vprec, latitude, longitude, altitude) = \
struct.unpack("!BBBBIII", wire[current : current + rdlen])
if latitude > 0x80000000L:
latitude = float(latitude - 0x80000000L) / 3600000
else:
latitude = -1 * float(0x80000000L - latitude) / 3600000
if latitude < -90.0 or latitude > 90.0:
raise dns.exception.FormError("bad latitude")
if longitude > 0x80000000L:
longitude = float(longitude - 0x80000000L) / 3600000
else:
longitude = -1 * float(0x80000000L - longitude) / 3600000
if longitude < -180.0 or longitude > 180.0:
raise dns.exception.FormError("bad longitude")
altitude = float(altitude) - 10000000.0
size = _decode_size(size, "size")
hprec = _decode_size(hprec, "horizontal precision")
vprec = _decode_size(vprec, "vertical precision")
return cls(rdclass, rdtype, latitude, longitude, altitude,
size, hprec, vprec)
from_wire = classmethod(from_wire)
def _cmp(self, other):
f = cStringIO.StringIO()
self.to_wire(f)
wire1 = f.getvalue()
f.seek(0)
f.truncate()
other.to_wire(f)
wire2 = f.getvalue()
f.close()
return cmp(wire1, wire2)
def _get_float_latitude(self):
return _tuple_to_float(self.latitude)
def _set_float_latitude(self, value):
self.latitude = _float_to_tuple(value)
float_latitude = property(_get_float_latitude, _set_float_latitude,
doc="latitude as a floating point value")
def _get_float_longitude(self):
return _tuple_to_float(self.longitude)
def _set_float_longitude(self, value):
self.longitude = _float_to_tuple(value)
float_longitude = property(_get_float_longitude, _set_float_longitude,
doc="longitude as a floating point value")
| apache-2.0 |
ojake/django | tests/template_tests/test_nodelist.py | 151 | 3039 | from unittest import TestCase
from django.template import Context, Engine
from django.template.base import TextNode, VariableNode
from django.utils import six
class NodelistTest(TestCase):
@classmethod
def setUpClass(cls):
cls.engine = Engine()
super(NodelistTest, cls).setUpClass()
def test_for(self):
template = self.engine.from_string('{% for i in 1 %}{{ a }}{% endfor %}')
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
def test_if(self):
template = self.engine.from_string('{% if x %}{{ a }}{% endif %}')
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
def test_ifequal(self):
template = self.engine.from_string('{% ifequal x y %}{{ a }}{% endifequal %}')
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
def test_ifchanged(self):
template = self.engine.from_string('{% ifchanged x %}{{ a }}{% endifchanged %}')
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
class TextNodeTest(TestCase):
def test_textnode_repr(self):
engine = Engine()
for temptext, reprtext in [
("Hello, world!", "<TextNode: u'Hello, world!'>"),
("One\ntwo.", "<TextNode: u'One\\ntwo.'>"),
]:
template = engine.from_string(temptext)
texts = template.nodelist.get_nodes_by_type(TextNode)
if six.PY3:
reprtext = reprtext.replace("u'", "'")
self.assertEqual(repr(texts[0]), reprtext)
class ErrorIndexTest(TestCase):
"""
Checks whether index of error is calculated correctly in
template debugger in for loops. Refs ticket #5831
"""
def test_correct_exception_index(self):
tests = [
('{% load bad_tag %}{% for i in range %}{% badsimpletag %}{% endfor %}', (38, 56)),
('{% load bad_tag %}{% for i in range %}{% for j in range %}{% badsimpletag %}{% endfor %}{% endfor %}', (58, 76)),
('{% load bad_tag %}{% for i in range %}{% badsimpletag %}{% for j in range %}Hello{% endfor %}{% endfor %}', (38, 56)),
('{% load bad_tag %}{% for i in range %}{% for j in five %}{% badsimpletag %}{% endfor %}{% endfor %}', (38, 57)),
('{% load bad_tag %}{% for j in five %}{% badsimpletag %}{% endfor %}', (18, 37)),
]
context = Context({
'range': range(5),
'five': 5,
})
engine = Engine(debug=True, libraries={'bad_tag': 'template_tests.templatetags.bad_tag'})
for source, expected_error_source_index in tests:
template = engine.from_string(source)
try:
template.render(context)
except (RuntimeError, TypeError) as e:
debug = e.template_debug
self.assertEqual((debug['start'], debug['end']), expected_error_source_index)
| bsd-3-clause |
tchellomello/home-assistant | homeassistant/components/meteo_france/const.py | 1 | 4988 | """Meteo-France component constants."""
from homeassistant.const import (
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_TIMESTAMP,
PERCENTAGE,
PRESSURE_HPA,
SPEED_KILOMETERS_PER_HOUR,
TEMP_CELSIUS,
)
DOMAIN = "meteo_france"
PLATFORMS = ["sensor", "weather"]
COORDINATOR_FORECAST = "coordinator_forecast"
COORDINATOR_RAIN = "coordinator_rain"
COORDINATOR_ALERT = "coordinator_alert"
UNDO_UPDATE_LISTENER = "undo_update_listener"
ATTRIBUTION = "Data provided by Météo-France"
CONF_CITY = "city"
FORECAST_MODE_HOURLY = "hourly"
FORECAST_MODE_DAILY = "daily"
FORECAST_MODE = [FORECAST_MODE_HOURLY, FORECAST_MODE_DAILY]
ATTR_NEXT_RAIN_1_HOUR_FORECAST = "1_hour_forecast"
ATTR_NEXT_RAIN_DT_REF = "forecast_time_ref"
ENTITY_NAME = "name"
ENTITY_UNIT = "unit"
ENTITY_ICON = "icon"
ENTITY_DEVICE_CLASS = "device_class"
ENTITY_ENABLE = "enable"
ENTITY_API_DATA_PATH = "data_path"
SENSOR_TYPES = {
"pressure": {
ENTITY_NAME: "Pressure",
ENTITY_UNIT: PRESSURE_HPA,
ENTITY_ICON: None,
ENTITY_DEVICE_CLASS: DEVICE_CLASS_PRESSURE,
ENTITY_ENABLE: False,
ENTITY_API_DATA_PATH: "current_forecast:sea_level",
},
"rain_chance": {
ENTITY_NAME: "Rain chance",
ENTITY_UNIT: PERCENTAGE,
ENTITY_ICON: "mdi:weather-rainy",
ENTITY_DEVICE_CLASS: None,
ENTITY_ENABLE: True,
ENTITY_API_DATA_PATH: "probability_forecast:rain:3h",
},
"snow_chance": {
ENTITY_NAME: "Snow chance",
ENTITY_UNIT: PERCENTAGE,
ENTITY_ICON: "mdi:weather-snowy",
ENTITY_DEVICE_CLASS: None,
ENTITY_ENABLE: True,
ENTITY_API_DATA_PATH: "probability_forecast:snow:3h",
},
"freeze_chance": {
ENTITY_NAME: "Freeze chance",
ENTITY_UNIT: PERCENTAGE,
ENTITY_ICON: "mdi:snowflake",
ENTITY_DEVICE_CLASS: None,
ENTITY_ENABLE: True,
ENTITY_API_DATA_PATH: "probability_forecast:freezing",
},
"wind_speed": {
ENTITY_NAME: "Wind speed",
ENTITY_UNIT: SPEED_KILOMETERS_PER_HOUR,
ENTITY_ICON: "mdi:weather-windy",
ENTITY_DEVICE_CLASS: None,
ENTITY_ENABLE: False,
ENTITY_API_DATA_PATH: "current_forecast:wind:speed",
},
"next_rain": {
ENTITY_NAME: "Next rain",
ENTITY_UNIT: None,
ENTITY_ICON: None,
ENTITY_DEVICE_CLASS: DEVICE_CLASS_TIMESTAMP,
ENTITY_ENABLE: True,
ENTITY_API_DATA_PATH: None,
},
"temperature": {
ENTITY_NAME: "Temperature",
ENTITY_UNIT: TEMP_CELSIUS,
ENTITY_ICON: None,
ENTITY_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ENTITY_ENABLE: False,
ENTITY_API_DATA_PATH: "current_forecast:T:value",
},
"uv": {
ENTITY_NAME: "UV",
ENTITY_UNIT: None,
ENTITY_ICON: "mdi:sunglasses",
ENTITY_DEVICE_CLASS: None,
ENTITY_ENABLE: True,
ENTITY_API_DATA_PATH: "today_forecast:uv",
},
"weather_alert": {
ENTITY_NAME: "Weather alert",
ENTITY_UNIT: None,
ENTITY_ICON: "mdi:weather-cloudy-alert",
ENTITY_DEVICE_CLASS: None,
ENTITY_ENABLE: True,
ENTITY_API_DATA_PATH: None,
},
"precipitation": {
ENTITY_NAME: "Daily precipitation",
ENTITY_UNIT: "mm",
ENTITY_ICON: "mdi:cup-water",
ENTITY_DEVICE_CLASS: None,
ENTITY_ENABLE: True,
ENTITY_API_DATA_PATH: "today_forecast:precipitation:24h",
},
"cloud": {
ENTITY_NAME: "Cloud cover",
ENTITY_UNIT: PERCENTAGE,
ENTITY_ICON: "mdi:weather-partly-cloudy",
ENTITY_DEVICE_CLASS: None,
ENTITY_ENABLE: True,
ENTITY_API_DATA_PATH: "current_forecast:clouds",
},
}
CONDITION_CLASSES = {
"clear-night": ["Nuit Claire", "Nuit claire"],
"cloudy": ["Très nuageux", "Couvert"],
"fog": [
"Brume ou bancs de brouillard",
"Brume",
"Brouillard",
"Brouillard givrant",
],
"hail": ["Risque de grêle", "Risque de grèle"],
"lightning": ["Risque d'orages", "Orages"],
"lightning-rainy": ["Pluie orageuses", "Pluies orageuses", "Averses orageuses"],
"partlycloudy": [
"Ciel voilé",
"Ciel voilé nuit",
"Éclaircies",
"Eclaircies",
"Peu nuageux",
],
"pouring": ["Pluie forte"],
"rainy": [
"Bruine / Pluie faible",
"Bruine",
"Pluie faible",
"Pluies éparses / Rares averses",
"Pluies éparses",
"Rares averses",
"Pluie modérée",
"Pluie / Averses",
"Averses",
"Pluie",
],
"snowy": [
"Neige / Averses de neige",
"Neige",
"Averses de neige",
"Neige forte",
"Quelques flocons",
],
"snowy-rainy": ["Pluie et neige", "Pluie verglaçante"],
"sunny": ["Ensoleillé"],
"windy": [],
"windy-variant": [],
"exceptional": [],
}
| apache-2.0 |
iohannez/gnuradio | gr-vocoder/examples/codec2_audio_loopback.py | 7 | 1623 | #!/usr/bin/env python
#
# Copyright 2005,2007,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr
from gnuradio import audio
from gnuradio import blocks
from gnuradio import vocoder
from gnuradio.vocoder import codec2
def build_graph():
tb = gr.top_block()
src = audio.source(8000)
src_scale = blocks.multiply_const_ff(32767)
f2s = blocks.float_to_short()
enc = vocoder.codec2_encode_sp(codec2.MODE_2400)
dec = vocoder.codec2_decode_ps(codec2.MODE_2400)
s2f = blocks.short_to_float()
sink_scale = blocks.multiply_const_ff(1.0 / 32767.)
sink = audio.sink(8000)
tb.connect(src, src_scale, f2s, enc, dec, s2f, sink_scale, sink)
return tb
if __name__ == '__main__':
tb = build_graph()
tb.start()
    input('Press Enter to exit: ')
tb.stop()
tb.wait()
| gpl-3.0 |
dbertha/odoo | addons/stock/report/__init__.py | 376 | 1088 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product_stock
import report_stock
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
stevenewey/wagtail | wagtail/bin/wagtail.py | 10 | 2330 | #!/usr/bin/env python
from __future__ import print_function, absolute_import
import os
import errno
import sys
from optparse import OptionParser
from django.core.management import ManagementUtility
def create_project(parser, options, args):
# Validate args
if len(args) < 2:
parser.error("Please specify a name for your wagtail installation")
elif len(args) > 2:
parser.error("Too many arguments")
project_name = args[1]
# Make sure given name is not already in use by another python package/module.
try:
__import__(project_name)
except ImportError:
pass
else:
parser.error("'%s' conflicts with the name of an existing "
"Python module and cannot be used as a project "
"name. Please try another name." % project_name)
# Make sure directory does not already exist
if os.path.exists(project_name):
print('A directory called %(project_name)s already exists. \
Please choose another name for your wagtail project or remove the existing directory.' % {'project_name': project_name})
sys.exit(errno.EEXIST)
print("Creating a wagtail project called %(project_name)s" % {'project_name': project_name})
# Create the project from the wagtail template using startapp
# First find the path to wagtail
import wagtail
wagtail_path = os.path.dirname(wagtail.__file__)
template_path = os.path.join(wagtail_path, 'project_template')
# Call django-admin startproject
utility = ManagementUtility([
'django-admin.py', 'startproject',
'--template=' + template_path,
'--name=Vagrantfile', '--ext=html,rst',
project_name
])
utility.execute()
print("Success! %(project_name)s is created" % {'project_name': project_name})
COMMANDS = {
'start': create_project,
}
def main():
# Parse options
parser = OptionParser(usage="Usage: %prog start project_name")
(options, args) = parser.parse_args()
# Find command
try:
command = args[0]
except IndexError:
parser.print_help()
return
if command in COMMANDS:
COMMANDS[command](parser, options, args)
else:
parser.error("Unrecognised command: " + command)
if __name__ == "__main__":
main()
| bsd-3-clause |
fkolacek/FIT-VUT | bp-revok/python/lib/python2.7/copy.py | 2 | 11239 | """Generic (shallow and deep) copying operations.
Interface summary:
import copy
x = copy.copy(y) # make a shallow copy of y
x = copy.deepcopy(y) # make a deep copy of y
For module specific errors, copy.Error is raised.
The difference between shallow and deep copying is only relevant for
compound objects (objects that contain other objects, like lists or
class instances).
- A shallow copy constructs a new compound object and then (to the
extent possible) inserts *the same objects* into it that the
original contains.
- A deep copy constructs a new compound object and then, recursively,
inserts *copies* into it of the objects found in the original.
Two problems often exist with deep copy operations that don't exist
with shallow copy operations:
a) recursive objects (compound objects that, directly or indirectly,
contain a reference to themselves) may cause a recursive loop
b) because deep copy copies *everything* it may copy too much, e.g.
administrative data structures that should be shared even between
copies
Python's deep copy operation avoids these problems by:
a) keeping a table of objects already copied during the current
copying pass
b) letting user-defined classes override the copying operation or the
set of components copied
This version does not copy types like module, class, function, method,
nor stack trace, stack frame, nor file, socket, window, nor array, nor
any similar types.
Classes can use the same interfaces to control copying that they use
to control pickling: they can define methods called __getinitargs__(),
__getstate__() and __setstate__(). See the documentation for module
"pickle" for information on these methods.
"""
import types
import weakref
from copy_reg import dispatch_table
class Error(Exception):
pass
error = Error # backward compatibility
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
__all__ = ["Error", "copy", "deepcopy"]
def copy(x):
"""Shallow copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
cls = type(x)
copier = _copy_dispatch.get(cls)
if copier:
return copier(x)
copier = getattr(cls, "__copy__", None)
if copier:
return copier(x)
reductor = dispatch_table.get(cls)
if reductor:
rv = reductor(x)
else:
reductor = getattr(x, "__reduce_ex__", None)
if reductor:
rv = reductor(2)
else:
reductor = getattr(x, "__reduce__", None)
if reductor:
rv = reductor()
else:
raise Error("un(shallow)copyable object of type %s" % cls)
return _reconstruct(x, rv, 0)
_copy_dispatch = d = {}
def _copy_immutable(x):
return x
for t in (type(None), int, long, float, bool, str, tuple,
frozenset, type, xrange, types.ClassType,
types.BuiltinFunctionType, type(Ellipsis),
types.FunctionType, weakref.ref):
d[t] = _copy_immutable
for name in ("ComplexType", "UnicodeType", "CodeType"):
t = getattr(types, name, None)
if t is not None:
d[t] = _copy_immutable
def _copy_with_constructor(x):
return type(x)(x)
for t in (list, dict, set):
d[t] = _copy_with_constructor
def _copy_with_copy_method(x):
return x.copy()
if PyStringMap is not None:
d[PyStringMap] = _copy_with_copy_method
def _copy_inst(x):
if hasattr(x, '__copy__'):
return x.__copy__()
if hasattr(x, '__getinitargs__'):
args = x.__getinitargs__()
y = x.__class__(*args)
else:
y = _EmptyClass()
y.__class__ = x.__class__
if hasattr(x, '__getstate__'):
state = x.__getstate__()
else:
state = x.__dict__
if hasattr(y, '__setstate__'):
y.__setstate__(state)
else:
y.__dict__.update(state)
return y
d[types.InstanceType] = _copy_inst
del d
def deepcopy(x, memo=None, _nil=[]):
"""Deep copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
if memo is None:
memo = {}
d = id(x)
y = memo.get(d, _nil)
if y is not _nil:
return y
cls = type(x)
copier = _deepcopy_dispatch.get(cls)
if copier:
y = copier(x, memo)
else:
try:
issc = issubclass(cls, type)
except TypeError: # cls is not a class (old Boost; see SF #502085)
issc = 0
if issc:
y = _deepcopy_atomic(x, memo)
else:
copier = getattr(x, "__deepcopy__", None)
if copier:
y = copier(memo)
else:
reductor = dispatch_table.get(cls)
if reductor:
rv = reductor(x)
else:
reductor = getattr(x, "__reduce_ex__", None)
if reductor:
rv = reductor(2)
else:
reductor = getattr(x, "__reduce__", None)
if reductor:
rv = reductor()
else:
raise Error(
"un(deep)copyable object of type %s" % cls)
y = _reconstruct(x, rv, 1, memo)
memo[d] = y
_keep_alive(x, memo) # Make sure x lives at least as long as d
return y
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x, memo):
return x
d[type(None)] = _deepcopy_atomic
d[type(Ellipsis)] = _deepcopy_atomic
d[int] = _deepcopy_atomic
d[long] = _deepcopy_atomic
d[float] = _deepcopy_atomic
d[bool] = _deepcopy_atomic
try:
d[complex] = _deepcopy_atomic
except NameError:
pass
d[str] = _deepcopy_atomic
try:
d[unicode] = _deepcopy_atomic
except NameError:
pass
try:
d[types.CodeType] = _deepcopy_atomic
except AttributeError:
pass
d[type] = _deepcopy_atomic
d[xrange] = _deepcopy_atomic
d[types.ClassType] = _deepcopy_atomic
d[types.BuiltinFunctionType] = _deepcopy_atomic
d[types.FunctionType] = _deepcopy_atomic
d[weakref.ref] = _deepcopy_atomic
def _deepcopy_list(x, memo):
y = []
memo[id(x)] = y
for a in x:
y.append(deepcopy(a, memo))
return y
d[list] = _deepcopy_list
def _deepcopy_tuple(x, memo):
y = []
for a in x:
y.append(deepcopy(a, memo))
d = id(x)
try:
return memo[d]
except KeyError:
pass
for i in range(len(x)):
if x[i] is not y[i]:
y = tuple(y)
break
else:
y = x
memo[d] = y
return y
d[tuple] = _deepcopy_tuple
def _deepcopy_dict(x, memo):
y = {}
memo[id(x)] = y
for key, value in x.iteritems():
y[deepcopy(key, memo)] = deepcopy(value, memo)
return y
d[dict] = _deepcopy_dict
if PyStringMap is not None:
d[PyStringMap] = _deepcopy_dict
def _deepcopy_method(x, memo): # Copy instance methods
return type(x)(x.im_func, deepcopy(x.im_self, memo), x.im_class)
_deepcopy_dispatch[types.MethodType] = _deepcopy_method
def _keep_alive(x, memo):
"""Keeps a reference to the object x in the memo.
Because we remember objects by their id, we have
to assure that possibly temporary objects are kept
alive by referencing them.
We store a reference at the id of the memo, which should
normally not be used unless someone tries to deepcopy
the memo itself...
"""
try:
memo[id(memo)].append(x)
except KeyError:
# aha, this is the first one :-)
memo[id(memo)]=[x]
def _deepcopy_inst(x, memo):
if hasattr(x, '__deepcopy__'):
return x.__deepcopy__(memo)
if hasattr(x, '__getinitargs__'):
args = x.__getinitargs__()
args = deepcopy(args, memo)
y = x.__class__(*args)
else:
y = _EmptyClass()
y.__class__ = x.__class__
memo[id(x)] = y
if hasattr(x, '__getstate__'):
state = x.__getstate__()
else:
state = x.__dict__
state = deepcopy(state, memo)
if hasattr(y, '__setstate__'):
y.__setstate__(state)
else:
y.__dict__.update(state)
return y
d[types.InstanceType] = _deepcopy_inst
def _reconstruct(x, info, deep, memo=None):
if isinstance(info, str):
return x
assert isinstance(info, tuple)
if memo is None:
memo = {}
n = len(info)
assert n in (2, 3, 4, 5)
callable, args = info[:2]
if n > 2:
state = info[2]
else:
state = {}
if n > 3:
listiter = info[3]
else:
listiter = None
if n > 4:
dictiter = info[4]
else:
dictiter = None
if deep:
args = deepcopy(args, memo)
y = callable(*args)
memo[id(x)] = y
if listiter is not None:
for item in listiter:
if deep:
item = deepcopy(item, memo)
y.append(item)
if dictiter is not None:
for key, value in dictiter:
if deep:
key = deepcopy(key, memo)
value = deepcopy(value, memo)
y[key] = value
if state:
if deep:
state = deepcopy(state, memo)
if hasattr(y, '__setstate__'):
y.__setstate__(state)
else:
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
else:
slotstate = None
if state is not None:
y.__dict__.update(state)
if slotstate is not None:
for key, value in slotstate.iteritems():
setattr(y, key, value)
return y
del d
del types
# Helper for instance creation without calling __init__
class _EmptyClass:
pass
def _test():
l = [None, 1, 2L, 3.14, 'xyzzy', (1, 2L), [3.14, 'abc'],
{'abc': 'ABC'}, (), [], {}]
l1 = copy(l)
print l1==l
l1 = map(copy, l)
print l1==l
l1 = deepcopy(l)
print l1==l
class C:
def __init__(self, arg=None):
self.a = 1
self.arg = arg
if __name__ == '__main__':
import sys
file = sys.argv[0]
else:
file = __file__
self.fp = open(file)
self.fp.close()
def __getstate__(self):
return {'a': self.a, 'arg': self.arg}
def __setstate__(self, state):
for key, value in state.iteritems():
setattr(self, key, value)
def __deepcopy__(self, memo=None):
new = self.__class__(deepcopy(self.arg, memo))
new.a = self.a
return new
c = C('argument sketch')
l.append(c)
l2 = copy(l)
print l == l2
print l
print l2
l2 = deepcopy(l)
print l == l2
print l
print l2
l.append({l[1]: l, 'xyz': l[2]})
l3 = copy(l)
import repr
print map(repr.repr, l)
print map(repr.repr, l1)
print map(repr.repr, l2)
print map(repr.repr, l3)
l3 = deepcopy(l)
import repr
print map(repr.repr, l)
print map(repr.repr, l1)
print map(repr.repr, l2)
print map(repr.repr, l3)
if __name__ == '__main__':
_test()
| apache-2.0 |
domob1812/PyBitmessage | src/tr.py | 15 | 1424 | import shared
import os
# This is used so that the translateText function can be used when we are in daemon mode and not using any QT functions.
class translateClass:
def __init__(self, context, text):
self.context = context
self.text = text
def arg(self,argument):
if '%' in self.text:
return translateClass(self.context, self.text.replace('%','',1)) # This doesn't actually do anything with the arguments because we don't have a UI in which to display this information anyway.
else:
return self.text
def _translate(context, text):
return translateText(context, text)
def translateText(context, text):
if not shared.safeConfigGetBoolean('bitmessagesettings', 'daemon'):
try:
from PyQt4 import QtCore, QtGui
except Exception as err:
print 'PyBitmessage requires PyQt unless you want to run it as a daemon and interact with it using the API. You can download PyQt from http://www.riverbankcomputing.com/software/pyqt/download or by searching Google for \'PyQt Download\'. If you want to run in daemon mode, see https://bitmessage.org/wiki/Daemon'
print 'Error message:', err
os._exit(0)
return QtGui.QApplication.translate(context, text)
else:
if '%' in text:
return translateClass(context, text.replace('%','',1))
else:
            return text
| mit |
fdibaldassarre/mload | src/MangaManager.py | 1 | 5710 | #!/usr/bin/env python3
from datetime import datetime
from .Items import Manga
from .Items import Chapter
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
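# e.g. datetime(2020, 1, 31, 8, 5, 0).strftime(DATE_FORMAT) == '2020-01-31 08:05:00'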
class MangaManager():
def __init__(self, mmachine, db, connection):
self.db = db
self.connection = connection
self.mmachine = mmachine
self.log = self.mmachine.getLoggerFor(self)
def getAllManga(self):
self.log.info('getAllManga == Get all manga')
query = 'SELECT Id, Name, Module, LastChapter, CheckValue, FolderName, LastUpdate FROM Manga ORDER BY Name'
self.db.execute(query)
manga_list = []
results = self.db.fetchall()
for data in results:
manga = self.createMangaFromDB(data)
manga_list.append(manga)
return sorted(manga_list, key=lambda manga: manga.getName())
def addResultToArchive(self, result, last_chapter):
# add result to archive
self.log.info('addResultToArchive == Add result: ' + result.name + ', last chapter:' + str(last_chapter))
query = 'INSERT INTO Manga(Name, Module, LastChapter, CheckValue, FolderName, LastUpdate) VALUES (?,?,?,1,?,?)'
folder_name = result.name.replace('/', '_')
now = datetime.now()
last_update = now.strftime(DATE_FORMAT)
self.db.execute(query, (result.name, result.module.name, str(last_chapter), folder_name, last_update))
self.connection.commit()
last_inserted_id = self.db.lastrowid
manga = self.getMangaFromId(last_inserted_id)
return manga
def getMangaFromId(self, manga_id):
query = 'SELECT Id, Name, Module, LastChapter, CheckValue, FolderName, LastUpdate FROM Manga WHERE Id = ?'
self.db.execute(query, (manga_id,))
data = self.db.fetchone()
manga = self.createMangaFromDB(data)
return manga
def addChapterToManga(self, manga, chapter):
self.addChaptersToManga(manga, [chapter])
def addChaptersToManga(self, manga, chapters):
arg_list = []
''' NOTE: if I have a duplicate chapter the query stops before trying to add the other chapters
for chapter in chapters:
arg_list.append( (chapter.getNumber(), manga.id) )
query = 'INSERT INTO Chapters(Number, MangaId) VALUES (?,?)'
try:
self.db.executemany( query, arg_list )
except Exception:
pass # Some chapter numbers were already in the table, not a problem
'''
query = 'INSERT INTO Chapters(Number, MangaId) VALUES (?,?)'
for chapter in chapters:
try:
self.db.execute(query, (chapter.getNumber(), manga.id))
except Exception:
# duplicate chapter, not a problem
pass
self.connection.commit()
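# Alternative sketch (assuming the backing store is SQLite): letting SQL
# skip duplicates makes a single executemany() call safe again:
#   query = 'INSERT OR IGNORE INTO Chapters(Number, MangaId) VALUES (?,?)'
#   self.db.executemany(query, [(c.getNumber(), manga.id) for c in chapters])
#   self.connection.commit()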
def refreshLastUpdateTime(self, manga):
query = 'UPDATE Manga SET LastUpdate = ? WHERE Id = ?'
now = datetime.now()
now_str = now.strftime(DATE_FORMAT)
self.db.execute(query, (now_str, manga.id))
self.connection.commit()
def setLastChapter(self, manga, chapter):
query = 'UPDATE Manga SET LastChapter = ? WHERE Id = ?'
self.db.execute(query, (str(chapter), manga.id))
self.connection.commit()
def removeChaptersFromManga(self, manga, chapters):
arg_list = []
for chapter in chapters:
arg_list.append((manga.id, chapter.getNumber()))
query = 'DELETE FROM Chapters WHERE MangaId = ? AND Number = ?'
self.db.executemany(query, arg_list)
self.connection.commit()
def editManga(self, manga, name, last_chapter, folder_name, check):
query = 'UPDATE Manga SET Name = ?, LastChapter = ?, FolderName = ?, CheckValue = ? WHERE Id = ?'
self.db.execute(query, (name, last_chapter, folder_name, check, manga.id))
self.connection.commit()
def changeModuleFor(self, manga, new_module_name):
query = 'UPDATE Manga SET Module = ? WHERE Id = ?'
self.db.execute(query, (new_module_name, manga.id))
self.connection.commit()
def removeManga(self, manga):
# delete OPT
self.removeOPTFiles(manga)
# delete chapters
self.removeAllChapters(manga)
# delete manga entry
query = 'DELETE FROM Manga WHERE Id = ?'
self.db.execute(query, (manga.id,))
self.connection.commit()
def removeOPTFiles(self, manga):
if manga.module is not None:
manga_ext = manga.module.getMangaEXT(manga)
manga.module.deleteManga(manga_ext)
# NOTE: I may use manga_ext.delete() directly
def removeAllChapters(self, manga):
query = 'DELETE FROM Chapters WHERE MangaId = ?'
self.db.execute(query, (manga.id,))
self.connection.commit()
def getChapters(self, manga):
query = 'SELECT Id, Number, MangaId FROM Chapters WHERE MangaId = ?'
self.db.execute(query, (manga.id,))
chapters_list = []
results = self.db.fetchall()
for data in results:
chapter = self.createChapterFromDB(data, manga)
chapters_list.append(chapter)
return chapters_list
def createMangaFromDB(self, data):
manga = Manga.new()
manga.id = data[0]
manga.name = data[1]
manga.module_name = data[2]
if manga.module_name in self.mmachine.modules:
mod = self.mmachine.modules[manga.module_name]
manga.setModule(mod)
manga.last_chapter = Chapter.ChapterNumber(data[3])
manga.check = int(data[4])
manga.folder_name = data[5]
manga.last_update = datetime.strptime(data[6], DATE_FORMAT)
return manga
def createChapterFromDB(self, data, manga):
chapter = Chapter.new()
chapter.id = data[0]
chapter.setNumber(data[1])
chapter.setManga(manga)
return chapter
def create(mmachine):
mm = MangaManager(mmachine, mmachine.manga_db, mmachine.manga_db_connection)
return mm
| gpl-3.0 |
andela-earinde/bellatrix-py | app/js/lib/lib/modules/grp.py | 2 | 1782 |
""" This module provides ctypes version of cpython's grp module
"""
from _pwdgrp_cffi import ffi, lib
import _structseq
try: from __pypy__ import builtinify
except ImportError: builtinify = lambda f: f
class struct_group:
__metaclass__ = _structseq.structseqtype
name = "grp.struct_group"
gr_name = _structseq.structseqfield(0)
gr_passwd = _structseq.structseqfield(1)
gr_gid = _structseq.structseqfield(2)
gr_mem = _structseq.structseqfield(3)
def _group_from_gstruct(res):
i = 0
members = []
while res.gr_mem[i]:
members.append(ffi.string(res.gr_mem[i]))
i += 1
return struct_group([
ffi.string(res.gr_name),
ffi.string(res.gr_passwd),
res.gr_gid,
members])
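# e.g. _group_from_gstruct builds a struct_group out of a 4-item sequence:
#   ('wheel', 'x', 10, ['root', 'alice']) # gr_name, gr_passwd, gr_gid, gr_mem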
@builtinify
def getgrgid(gid):
res = lib.getgrgid(gid)
if not res:
# XXX maybe check error eventually
raise KeyError(gid)
return _group_from_gstruct(res)
@builtinify
def getgrnam(name):
if not isinstance(name, basestring):
raise TypeError("expected string")
name = str(name)
res = lib.getgrnam(name)
if not res:
raise KeyError("'getgrnam(): name not found: %s'" % name)
return _group_from_gstruct(res)
@builtinify
def getgrall():
lib.setgrent()
lst = []
while 1:
p = lib.getgrent()
if not p:
break
lst.append(_group_from_gstruct(p))
lib.endgrent()
return lst
__all__ = ('struct_group', 'getgrgid', 'getgrnam', 'getgrall')
if __name__ == "__main__":
from os import getgid
gid = getgid()
pw = getgrgid(gid)
print("gid %s: %s" % (pw.gr_gid, pw))
name = pw.gr_name
print("name %r: %s" % (name, getgrnam(name)))
print("All:")
for pw in getgrall():
print(pw)
| mit |
ttx/summitsched | cheddar/sched.py | 1 | 4427 | # Copyright 2015 Thierry Carrez <thierry@openstack.org>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
from cheddar.models import Track
from cheddar.session import Session
from cheddar.tracklead import is_valid_track
class API:
def __init__(self, settings):
self.schedurl = "http://%s/api" % settings.SCHED_SITE
self.api_key = settings.SCHED_API_KEY
def _call_sched(self, operation, **payload):
payload['api_key'] = self.api_key
payload['format'] = "json"
r = requests.post("%s/%s" % (self.schedurl, operation), params=payload)
if r.text != u'Ok':
return r.json()
else:
return {}
def _sched_to_session(self, schedjson):
session = Session(schedjson['event_key'])
session.start = schedjson['event_start']
session.end = schedjson['event_end']
session.room = schedjson['venue']
session.style = 'WORKROOM'
if session.id.startswith("Fish-"):
session.style = 'FISHBOWL'
if session.id.startswith("Meet-"):
session.style = 'MEETUP'
elements = session.id.split("-")
if len(elements) < 2:
session.maintrack = ""
else:
session.maintrack = elements[1]
session.extratracks = schedjson['event_type'].replace(
session.maintrack,"")
session.extratracks = session.extratracks.strip(" ,")
session.set_title(schedjson['name'])
try:
session.set_desc(schedjson['description'])
except KeyError:
session.description = "tbd"
return session
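# Assumed key convention (sketch): "<Style>-<Track>-<index>", e.g.
# "Fish-Nova-3" -> FISHBOWL on track "Nova", "Meet-Swift-1" -> MEETUP on
# track "Swift"; any other prefix falls back to WORKROOM.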
def _all_sessions(self):
ret = self._call_sched('session/list')
sessions = []
for sessionjson in ret:
sessions.append(self._sched_to_session(sessionjson))
return sessions
def list_sessions(self, trackid):
t = Track.objects.get(id=trackid)
def track_match(a):
return a.maintrack == t.name
filtered = filter(track_match, self._all_sessions())
return sorted(filtered, key=lambda x: x.start)
def get_session(self, sessionkey):
for session in self._all_sessions():
if session.id == sessionkey:
return session
raise IndexError
def modify_session(self, sessionkey, session):
# Sched clears "venue" information if you don't pass it again
old_session = self.get_session(sessionkey)
alltracks = session.maintrack
for track in session.extratracks.split(","):
track = track.strip().capitalize()
if is_valid_track(track):
alltracks = "%s, %s" % (alltracks, track)
name = session.get_title()
description = session.get_desc()
self._call_sched('session/mod',
session_key=sessionkey,
name=name,
session_type=alltracks,
description=description,
venue=old_session.room)
def swap_sessions(self, sessionkey, session, session2key, session2):
self.modify_session(sessionkey, session2)
self.modify_session(session2key, session)
def create_session(self, index, day, starttime, endtime, title,
desc, track, room, style):
key = "%s-%s-%d" % (style.lower().capitalize()[0:4], track, index)
self._call_sched('session/add',
session_key=key,
name=title,
session_start=day + " " + starttime,
session_end=day + " " + endtime,
session_type=track,
description=desc,
venue=room)
| apache-2.0 |
ravangen/Wilson-Cam | camera.py | 1 | 1457 | from __future__ import print_function
from os import getenv, path
from subprocess import call
import sys
import dropbox
DROPBOX_IMAGE_NAME = '/img.jpeg'
FSWEBCAM_CONFIG_NAME = 'fswebcam.config'
def error(*objs):
print(*objs, file=sys.stderr)
def generate(file_name='image.jpeg'):
directory = path.dirname(path.realpath(__file__))
config_path = path.join(directory, FSWEBCAM_CONFIG_NAME)
image_path = path.join(directory, file_name)
call('fswebcam --config {config} {image}'.format(config=config_path, image=image_path), shell=True)
return image_path
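# Example fswebcam.config (sketch; fswebcam reads one long-option per line):
#   device /dev/video0
#   resolution 1280x720
#   jpeg 85
#   no-banner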
def upload(file_path):
access_token = getenv('DROPBOX_TOKEN')
if not access_token:
raise ValueError('DROPBOX_TOKEN not set')
client = dropbox.client.DropboxClient(access_token)
# Get metadata of the current version of image
try:
file_metadata = client.metadata(DROPBOX_IMAGE_NAME)
except dropbox.rest.ErrorResponse:
file_metadata = {}
parent_rev = file_metadata.get('rev')
# Upload new version of image
file_obj = open(file_path, 'rb')
try:
response = client.put_file(full_path=DROPBOX_IMAGE_NAME, file_obj=file_obj, parent_rev=parent_rev)
print('uploaded: ', response)
except dropbox.rest.ErrorResponse as e:
error('error: ', e.body)
finally:
file_obj.close()
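# NOTE: passing parent_rev lets put_file overwrite the current revision in
# place; without it, the v1 API would save an upload to an existing path
# under a conflicted name (e.g. "img (1).jpeg") instead.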
def main():
file_path = generate()
upload(file_path)
if __name__ == '__main__':
main()
| mit |
dauledk/tamigo_docs | docs/conf.py | 1 | 9838 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Tamigo webservices documentation build configuration file, created by
# sphinx-quickstart on Sat Jun 18 12:05:07 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
#'sphinxcontrib.napoleon',
'sphinx.ext.mathjax',
'sphinx.ext.extlinks']
autodoc_member_order = 'bysource'
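# sphinx.ext.extlinks is enabled above but not configured in this file; a
# minimal mapping would look like this (hypothetical base URL):
# extlinks = {'api': ('https://api.tamigo.com/%s', 'api ')}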
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Tamigo webservices'
copyright = '2016, tamigo'
author = 'Tamigo'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.7.3'
# The full version, including alpha/beta/rc tags.
release = '1.7.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'Tamigo webservices v1.7.3'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
html_domain_indices = False
# If false, no index is generated.
#
html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'tamigoapidoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Tamigowebservices.tex', 'Tamigo webservices Documentation',
'tamigo', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tamigowebservices', 'Tamigo webservices Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Tamigowebservices', 'Tamigo webservices Documentation',
author, 'Tamigowebservices', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| mit |
Hellybean/android_kernel_samsung_jf | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
yarda/dslib | pkcs7/asn1_models/digest_info.py | 1 | 1387 |
#* dslib - Python library for Datove schranky
#* Copyright (C) 2009-2012 CZ.NIC, z.s.p.o. (http://www.nic.cz)
#*
#* This library is free software; you can redistribute it and/or
#* modify it under the terms of the GNU Library General Public
#* License as published by the Free Software Foundation; either
#* version 2 of the License, or (at your option) any later version.
#*
#* This library is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#* Library General Public License for more details.
#*
#* You should have received a copy of the GNU Library General Public
#* License along with this library; if not, write to the Free Software
#* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#*
'''
Created on Dec 9, 2009
'''
# dslib imports
from pyasn1.type import tag,namedtype,univ
from pyasn1 import error
# local imports
from general_types import AlgorithmIdentifier
class DigestInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType("digestAgorithm", AlgorithmIdentifier()),
namedtype.NamedType("digest", univ.OctetString())
)
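# Usage sketch (decoding a DER-encoded DigestInfo with pyasn1):
#   from pyasn1.codec.der import decoder
#   info, _ = decoder.decode(der_bytes, asn1Spec=DigestInfo())
#   algorithm = info.getComponentByName('digestAgorithm') # (sic) field name
#   digest = info.getComponentByName('digest')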
| lgpl-2.1 |
Bachmann1234/letsencrypt | letshelp-letsencrypt/letshelp_letsencrypt/apache.py | 36 | 11029 | #!/usr/bin/env python
"""Let's Encrypt Apache configuration submission script"""
import argparse
import atexit
import contextlib
import os
import re
import shutil
import subprocess
import sys
import tarfile
import tempfile
import textwrap
_DESCRIPTION = """
Let's Help is a simple script you can run to help out the Let's Encrypt
project. Since Let's Encrypt will support automatically configuring HTTPS on
many servers, we want to test this functionality on as many configurations as
possible. This script will create a sanitized copy of your Apache
configuration, notifying you of the files that have been selected. If (and only
if) you approve this selection, these files will be sent to the Let's Encrypt
developers.
"""
_NO_APACHECTL = """
Unable to find `apachectl` which is required for this script to work. If it is
installed, please run this script again with the --apache-ctl command line
argument and the path to the binary.
"""
# Keywords likely to be found in filenames of sensitive files
_SENSITIVE_FILENAME_REGEX = re.compile(r"^(?!.*proxy_fdpass).*pass.*$|private|"
r"secret|cert|crt|key|rsa|dsa|pw|\.pem|"
r"\.der|\.p12|\.pfx|\.p7b")
def make_and_verify_selection(server_root, temp_dir):
"""Copies server_root to temp_dir and verifies selection with the user
:param str server_root: Path to the Apache server root
:param str temp_dir: Path to the temporary directory to copy files to
"""
copied_files, copied_dirs = copy_config(server_root, temp_dir)
print textwrap.fill("A secure copy of the files that have been selected "
"for submission has been created under {0}. All "
"comments have been removed and the files are only "
"accessible by the current user. A list of the files "
"that have been included is shown below. Please make "
"sure that this selection does not contain private "
"keys, passwords, or any other sensitive "
"information.".format(temp_dir))
print "\nFiles:"
for copied_file in copied_files:
print copied_file
print "Directories (including all contained files):"
for copied_dir in copied_dirs:
print copied_dir
sys.stdout.write("\nIs it safe to submit these files? ")
while True:
ans = raw_input("(Y)es/(N)o: ").lower()
if ans.startswith("y"):
return
elif ans.startswith("n"):
sys.exit("Your files were not submitted")
def copy_config(server_root, temp_dir):
"""Safely copies server_root to temp_dir and returns copied files
:param str server_root: Absolute path to the Apache server root
:param str temp_dir: Path to the temporary directory to copy files to
:returns: List of copied files and a list of leaf directories where
all contained files were copied
:rtype: `tuple` of `list` of `str`
"""
copied_files, copied_dirs = [], []
dir_len = len(os.path.dirname(server_root))
for config_path, config_dirs, config_files in os.walk(server_root):
temp_path = os.path.join(temp_dir, config_path[dir_len + 1:])
os.mkdir(temp_path)
copied_all = True
copied_files_in_current_dir = []
for config_file in config_files:
config_file_path = os.path.join(config_path, config_file)
temp_file_path = os.path.join(temp_path, config_file)
if os.path.islink(config_file_path):
os.symlink(os.readlink(config_file_path), temp_file_path)
elif safe_config_file(config_file_path):
copy_file_without_comments(config_file_path, temp_file_path)
copied_files_in_current_dir.append(config_file_path)
else:
copied_all = False
# If copied all files in leaf directory
if copied_all and not config_dirs:
copied_dirs.append(config_path)
else:
copied_files += copied_files_in_current_dir
return copied_files, copied_dirs
def copy_file_without_comments(source, destination):
"""Copies source to destination, removing comments
:param str source: Path to the file to be copied
:param str destination: Path where source should be copied to
"""
with open(source, "r") as infile:
with open(destination, "w") as outfile:
for line in infile:
if not (line.isspace() or line.lstrip().startswith("#")):
outfile.write(line)
def safe_config_file(config_file):
"""Returns True if config_file can be safely copied
:param str config_file: Path to an Apache configuration file
:returns: True if config_file can be safely copied
:rtype: bool
"""
config_file_lower = config_file.lower()
if _SENSITIVE_FILENAME_REGEX.search(config_file_lower):
return False
proc = subprocess.Popen(["file", config_file],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
file_output, _ = proc.communicate()
if "ASCII" in file_output:
possible_password_file = empty_or_all_comments = True
with open(config_file) as config_fd:
for line in config_fd:
if not (line.isspace() or line.lstrip().startswith("#")):
empty_or_all_comments = False
if line.startswith("-----BEGIN"):
return False
elif ":" not in line:
possible_password_file = False
# If file isn't empty or commented out and could be a password file,
# don't include it in selection. It is safe to include the file if
# it consists solely of comments because comments are removed before
# submission.
return empty_or_all_comments or not possible_password_file
return False
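# Heuristic summary (sketch): an ASCII file is copied only if its name does
# not look sensitive, it contains no PEM "-----BEGIN" block, and it is not
# made up solely of "user:hash"-style lines (a probable password file);
# files that are empty or all comments are always safe to copy.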
def setup_tempdir(args):
"""Creates a temporary directory and necessary files for config
:param argparse.Namespace args: Parsed command line arguments
:returns: Path to temporary directory
:rtype: str
"""
tempdir = tempfile.mkdtemp()
with open(os.path.join(tempdir, "config_file"), "w") as config_fd:
config_fd.write(args.config_file + "\n")
proc = subprocess.Popen([args.apache_ctl, "-v"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
with open(os.path.join(tempdir, "version"), "w") as version_fd:
version_fd.write(proc.communicate()[0])
proc = subprocess.Popen([args.apache_ctl, "-d", args.server_root, "-f",
args.config_file, "-M"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
with open(os.path.join(tempdir, "modules"), "w") as modules_fd:
modules_fd.write(proc.communicate()[0])
proc = subprocess.Popen([args.apache_ctl, "-d", args.server_root, "-f",
args.config_file, "-t", "-D", "DUMP_VHOSTS"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
with open(os.path.join(tempdir, "vhosts"), "w") as vhosts_fd:
vhosts_fd.write(proc.communicate()[0])
return tempdir
def verify_config(args):
"""Verifies server_root and config_file specify a valid config
:param argparse.Namespace args: Parsed command line arguments
"""
with open(os.devnull, "w") as devnull:
try:
subprocess.check_call([args.apache_ctl, "-d", args.server_root,
"-f", args.config_file, "-t"],
stdout=devnull, stderr=subprocess.STDOUT)
except OSError:
sys.exit(_NO_APACHECTL)
except subprocess.CalledProcessError:
sys.exit("Syntax check from apachectl failed")
def locate_config(apache_ctl):
"""Uses the apachectl binary to find configuration files
:param str apache_ctl: Path to `apachectl` binary
:returns: Path to Apache server root and main configuration file
:rtype: `tuple` of `str`
"""
try:
proc = subprocess.Popen([apache_ctl, "-V"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, _ = proc.communicate()
except OSError:
sys.exit(_NO_APACHECTL)
server_root = config_file = ""
for line in output.splitlines():
# Relevant output lines are of the form: -D DIRECTIVE="VALUE"
if "HTTPD_ROOT" in line:
server_root = line[line.find('"') + 1:-1]
elif "SERVER_CONFIG_FILE" in line:
config_file = line[line.find('"') + 1:-1]
if not (server_root and config_file):
sys.exit("Unable to locate Apache configuration. Please run this "
"script again and specify --server-root and --config-file")
return server_root, config_file
def get_args():
"""Parses command line arguments
:returns: Parsed command line options
:rtype: argparse.Namespace
"""
parser = argparse.ArgumentParser(description=_DESCRIPTION)
parser.add_argument("-c", "--apache-ctl", default="apachectl",
help="path to the `apachectl` binary")
parser.add_argument("-d", "--server-root",
help=("location of the root directory of your Apache "
"configuration"))
parser.add_argument("-f", "--config-file",
help=("location of your main Apache configuration "
"file relative to the server root"))
args = parser.parse_args()
# args.server_root XOR args.config_file
if bool(args.server_root) != bool(args.config_file):
sys.exit("If either --server-root and --config-file are specified, "
"they both must be included")
elif args.server_root and args.config_file:
args.server_root = os.path.abspath(args.server_root)
args.config_file = os.path.abspath(args.config_file)
if args.config_file.startswith(args.server_root):
args.config_file = args.config_file[len(args.server_root) + 1:]
else:
sys.exit("This script expects the Apache configuration file to be "
"inside the server root")
return args
def main():
"""Main script execution"""
args = get_args()
if args.server_root is None:
args.server_root, args.config_file = locate_config(args.apache_ctl)
verify_config(args)
tempdir = setup_tempdir(args)
atexit.register(lambda: shutil.rmtree(tempdir))
make_and_verify_selection(args.server_root, tempdir)
tarpath = os.path.join(tempdir, "config.tar.gz")
# contextlib.closing used for py26 support
with contextlib.closing(tarfile.open(tarpath, mode="w:gz")) as tar:
tar.add(tempdir, arcname=".")
# TODO: Submit tarpath
if __name__ == "__main__":
main() # pragma: no cover
| apache-2.0 |
Godiyos/python-for-android | python-modules/twisted/twisted/spread/pb.py | 57 | 47156 | # -*- test-case-name: twisted.test.test_pb -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Perspective Broker
\"This isn\'t a professional opinion, but it's probably got enough
internet to kill you.\" --glyph
Introduction
============
This is a broker for proxies for and copies of objects. It provides a
translucent interface layer to those proxies.
The protocol is not opaque, because it provides objects which represent the
remote proxies and require no context (server references, IDs) to operate on.
It is not transparent because it does I{not} attempt to make remote objects
behave identically, or even similarly, to local objects. Method calls are
invoked asynchronously, and specific rules are applied when serializing
arguments.
To get started, begin with L{PBClientFactory} and L{PBServerFactory}.
@author: Glyph Lefkowitz
"""
import random
import new
import types
from zope.interface import implements, Interface
# Twisted Imports
from twisted.python import log, failure, reflect
from twisted.python.versions import Version
from twisted.python.deprecate import deprecated
from twisted.python.hashlib import md5
from twisted.internet import defer, protocol
from twisted.cred.portal import Portal
from twisted.cred.credentials import IAnonymous, ICredentials
from twisted.cred.credentials import IUsernameHashedPassword, Anonymous
from twisted.persisted import styles
from twisted.python.components import registerAdapter
from twisted.spread.interfaces import IJellyable, IUnjellyable
from twisted.spread.jelly import jelly, unjelly, globalSecurity
from twisted.spread import banana
from twisted.spread.flavors import Serializable
from twisted.spread.flavors import Referenceable, NoSuchMethod
from twisted.spread.flavors import Root, IPBRoot
from twisted.spread.flavors import ViewPoint
from twisted.spread.flavors import Viewable
from twisted.spread.flavors import Copyable
from twisted.spread.flavors import Jellyable
from twisted.spread.flavors import Cacheable
from twisted.spread.flavors import RemoteCopy
from twisted.spread.flavors import RemoteCache
from twisted.spread.flavors import RemoteCacheObserver
from twisted.spread.flavors import copyTags
from twisted.spread.flavors import setUnjellyableForClass
from twisted.spread.flavors import setUnjellyableFactoryForClass
from twisted.spread.flavors import setUnjellyableForClassTree
# These three are backwards compatibility aliases for the previous three.
# Ultimately they should be deprecated. -exarkun
from twisted.spread.flavors import setCopierForClass
from twisted.spread.flavors import setFactoryForClass
from twisted.spread.flavors import setCopierForClassTree
MAX_BROKER_REFS = 1024
portno = 8787
class ProtocolError(Exception):
"""
This error is raised when an invalid protocol statement is received.
"""
class DeadReferenceError(ProtocolError):
"""
This error is raised when a method is called on a dead reference (one whose
broker has been disconnected).
"""
class Error(Exception):
"""
This error can be raised to generate known error conditions.
When a PB callable method (perspective_, remote_, view_) raises
this error, it indicates that a traceback should not be printed,
but instead, the string representation of the exception should be
sent.
"""
class RemoteMethod:
"""This is a translucent reference to a remote message.
"""
def __init__(self, obj, name):
"""Initialize with a L{RemoteReference} and the name of this message.
"""
self.obj = obj
self.name = name
def __cmp__(self, other):
return cmp((self.obj, self.name), other)
def __hash__(self):
return hash((self.obj, self.name))
def __call__(self, *args, **kw):
"""Asynchronously invoke a remote method.
"""
return self.obj.broker._sendMessage('',self.obj.perspective, self.obj.luid, self.name, args, kw)
def noOperation(*args, **kw):
"""
Do nothing.
Neque porro quisquam est qui dolorem ipsum quia dolor sit amet,
consectetur, adipisci velit...
"""
noOperation = deprecated(Version("twisted", 8, 2, 0))(noOperation)
class PBConnectionLost(Exception):
pass
def printTraceback(tb):
"""
Print a traceback (string) to the standard log.
"""
log.msg('Perspective Broker Traceback:' )
log.msg(tb)
printTraceback = deprecated(Version("twisted", 8, 2, 0))(printTraceback)
class IPerspective(Interface):
"""
per*spec*tive, n. : The relationship of aspects of a subject to each
other and to a whole: 'a perspective of history'; 'a need to view
the problem in the proper perspective'.
This is a Perspective Broker-specific wrapper for an avatar. That
is to say, a PB-published view on to the business logic for the
system's concept of a 'user'.
The concept of attached/detached is no longer implemented by the
framework. The realm is expected to implement such semantics if
needed.
"""
def perspectiveMessageReceived(broker, message, args, kwargs):
"""
This method is called when a network message is received.
@arg broker: The Perspective Broker.
@type message: str
@arg message: The name of the method called by the other end.
@type args: list in jelly format
@arg args: The arguments that were passed by the other end. It
is recommend that you use the `unserialize' method of the
broker to decode this.
@type kwargs: dict in jelly format
@arg kwargs: The keyword arguments that were passed by the
other end. It is recommended that you use the
`unserialize' method of the broker to decode this.
@rtype: A jelly list.
@return: It is recommended that you use the `serialize' method
of the broker on whatever object you need to return to
generate the return value.
"""
class Avatar:
"""
A default IPerspective implementor.
This class is intended to be subclassed, and a realm should return
an instance of such a subclass when IPerspective is requested of
it.
A peer requesting a perspective will receive only a
L{RemoteReference} to a pb.Avatar. When a method is called on
that L{RemoteReference}, it will translate to a method on the
remote perspective named 'perspective_methodname'. (For more
information on invoking methods on other objects, see
L{flavors.ViewPoint}.)
"""
implements(IPerspective)
def perspectiveMessageReceived(self, broker, message, args, kw):
"""
This method is called when a network message is received.
This will call::
self.perspective_%(message)s(*broker.unserialize(args),
**broker.unserialize(kw))
to handle the method; subclasses of Avatar are expected to
implement methods using this naming convention.
"""
args = broker.unserialize(args, self)
kw = broker.unserialize(kw, self)
method = getattr(self, "perspective_%s" % message)
try:
state = method(*args, **kw)
except TypeError:
log.msg("%s didn't accept %s and %s" % (method, args, kw))
raise
return broker.serialize(state, self, method, args, kw)
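# Example subclass (sketch): a method named perspective_send becomes
# reachable from the client as remoteRef.callRemote("send", ...):
#   class ChatUser(Avatar):
#       def perspective_send(self, text):
#           ...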
class AsReferenceable(Referenceable):
"""
A reference directed towards another object.
"""
def __init__(self, object, messageType="remote"):
self.remoteMessageReceived = getattr(
object, messageType + "MessageReceived")
class RemoteReference(Serializable, styles.Ephemeral):
"""
A translucent reference to a remote object.
I may be a reference to a L{flavors.ViewPoint}, a
L{flavors.Referenceable}, or an L{IPerspective} implementor (e.g.,
pb.Avatar). From the client's perspective, it is not possible to
tell which except by convention.
I am a \"translucent\" reference because although no additional
bookkeeping overhead is given to the application programmer for
manipulating a reference, return values are asynchronous.
See also L{twisted.internet.defer}.
@ivar broker: The broker I am obtained through.
@type broker: L{Broker}
"""
implements(IUnjellyable)
def __init__(self, perspective, broker, luid, doRefCount):
"""(internal) Initialize me with a broker and a locally-unique ID.
The ID is unique only to the particular Perspective Broker
instance.
"""
self.luid = luid
self.broker = broker
self.doRefCount = doRefCount
self.perspective = perspective
self.disconnectCallbacks = []
def notifyOnDisconnect(self, callback):
"""Register a callback to be called if our broker gets disconnected.
This callback will be called with one argument, this instance.
"""
assert callable(callback)
self.disconnectCallbacks.append(callback)
if len(self.disconnectCallbacks) == 1:
self.broker.notifyOnDisconnect(self._disconnected)
def dontNotifyOnDisconnect(self, callback):
"""Remove a callback that was registered with notifyOnDisconnect."""
self.disconnectCallbacks.remove(callback)
if not self.disconnectCallbacks:
self.broker.dontNotifyOnDisconnect(self._disconnected)
def _disconnected(self):
"""Called if we are disconnected and have callbacks registered."""
for callback in self.disconnectCallbacks:
callback(self)
self.disconnectCallbacks = None
def jellyFor(self, jellier):
"""If I am being sent back to where I came from, serialize as a local backreference.
"""
if jellier.invoker:
assert self.broker == jellier.invoker, "Can't send references to brokers other than their own."
return "local", self.luid
else:
return "unpersistable", "References cannot be serialized"
def unjellyFor(self, unjellier, unjellyList):
self.__init__(unjellier.invoker.unserializingPerspective, unjellier.invoker, unjellyList[1], 1)
return self
def callRemote(self, _name, *args, **kw):
"""Asynchronously invoke a remote method.
@type _name: C{string}
@param _name: the name of the remote method to invoke
@param args: arguments to serialize for the remote function
@param kw: keyword arguments to serialize for the remote function.
@rtype: L{twisted.internet.defer.Deferred}
@returns: a Deferred which will be fired when the result of
this remote call is received.
"""
# note that we use '_name' instead of 'name' so the user can call
# remote methods with 'name' as a keyword parameter, like this:
# ref.callRemote("getPeopleNamed", count=12, name="Bob")
return self.broker._sendMessage('',self.perspective, self.luid,
_name, args, kw)
def remoteMethod(self, key):
"""Get a L{RemoteMethod} for this key.
"""
return RemoteMethod(self, key)
def __cmp__(self,other):
"""Compare me [to another L{RemoteReference}].
"""
if isinstance(other, RemoteReference):
if other.broker == self.broker:
return cmp(self.luid, other.luid)
return cmp(self.broker, other)
def __hash__(self):
"""Hash me.
"""
return self.luid
def __del__(self):
"""Do distributed reference counting on finalization.
"""
if self.doRefCount:
self.broker.sendDecRef(self.luid)
setUnjellyableForClass("remote", RemoteReference)
class Local:
"""(internal) A reference to a local object.
"""
def __init__(self, object, perspective=None):
"""Initialize.
"""
self.object = object
self.perspective = perspective
self.refcount = 1
def __repr__(self):
return "<pb.Local %r ref:%s>" % (self.object, self.refcount)
def incref(self):
"""Increment and return my reference count.
"""
self.refcount = self.refcount + 1
return self.refcount
def decref(self):
"""Decrement and return my reference count.
"""
self.refcount = self.refcount - 1
return self.refcount
##
# Failure
##
class CopyableFailure(failure.Failure, Copyable):
"""
A L{flavors.RemoteCopy} and L{flavors.Copyable} version of
L{twisted.python.failure.Failure} for serialization.
"""
unsafeTracebacks = 0
def getStateToCopy(self):
"""
Collect state related to the exception which occurred, discarding
state which cannot reasonably be serialized.
"""
state = self.__dict__.copy()
state['tb'] = None
state['frames'] = []
state['stack'] = []
if isinstance(self.value, failure.Failure):
state['value'] = failure2Copyable(self.value, self.unsafeTracebacks)
else:
state['value'] = str(self.value) # Exception instance
if isinstance(self.type, str):
state['type'] = self.type
else:
state['type'] = reflect.qual(self.type) # Exception class
if self.unsafeTracebacks:
state['traceback'] = self.getTraceback()
else:
state['traceback'] = 'Traceback unavailable\n'
return state
class CopiedFailure(RemoteCopy, failure.Failure):
def printTraceback(self, file=None, elideFrameworkCode=0, detail='default'):
if file is None:
file = log.logfile
file.write("Traceback from remote host -- ")
file.write(self.traceback)
printBriefTraceback = printTraceback
printDetailedTraceback = printTraceback
setUnjellyableForClass(CopyableFailure, CopiedFailure)
def failure2Copyable(fail, unsafeTracebacks=0):
f = new.instance(CopyableFailure, fail.__dict__)
f.unsafeTracebacks = unsafeTracebacks
return f
class Broker(banana.Banana):
"""I am a broker for objects.
"""
version = 6
username = None
factory = None
def __init__(self, isClient=1, security=globalSecurity):
banana.Banana.__init__(self, isClient)
self.disconnected = 0
self.disconnects = []
self.failures = []
self.connects = []
self.localObjects = {}
self.security = security
self.pageProducers = []
self.currentRequestID = 0
self.currentLocalID = 0
# Some terms:
# PUID: process unique ID; return value of id() function. type "int".
# LUID: locally unique ID; an ID unique to an object mapped over this
# connection. type "int"
# GUID: (not used yet) globally unique ID; an ID for an object which
# may be on a redirected or meta server. Type as yet undecided.
# Dictionary mapping LUIDs to local objects.
# set above to allow root object to be assigned before connection is made
# self.localObjects = {}
# Dictionary mapping PUIDs to LUIDs.
self.luids = {}
# Dictionary mapping LUIDs to local (remotely cached) objects. Remotely
# cached means that they're objects which originate here, and were
# copied remotely.
self.remotelyCachedObjects = {}
# Dictionary mapping PUIDs to (cached) LUIDs
self.remotelyCachedLUIDs = {}
# Dictionary mapping (remote) LUIDs to (locally cached) objects.
self.locallyCachedObjects = {}
self.waitingForAnswers = {}
# Mapping from LUIDs to weakref objects with callbacks for performing
# any local cleanup which may be necessary for the corresponding
# object once it no longer exists.
self._localCleanup = {}
def resumeProducing(self):
"""Called when the consumer attached to me runs out of buffer.
"""
# Go backwards over the list so we can remove indexes from it as we go
for pageridx in xrange(len(self.pageProducers)-1, -1, -1):
pager = self.pageProducers[pageridx]
pager.sendNextPage()
if not pager.stillPaging():
del self.pageProducers[pageridx]
if not self.pageProducers:
self.transport.unregisterProducer()
# Streaming producer methods; not necessary to implement.
def pauseProducing(self):
pass
def stopProducing(self):
pass
def registerPageProducer(self, pager):
self.pageProducers.append(pager)
if len(self.pageProducers) == 1:
self.transport.registerProducer(self, 0)
def expressionReceived(self, sexp):
"""Evaluate an expression as it's received.
"""
if isinstance(sexp, types.ListType):
command = sexp[0]
methodName = "proto_%s" % command
method = getattr(self, methodName, None)
if method:
method(*sexp[1:])
else:
self.sendCall("didNotUnderstand", command)
else:
raise ProtocolError("Non-list expression received.")
def proto_version(self, vnum):
"""Protocol message: (version version-number)
Check to make sure that both ends of the protocol are speaking
the same version dialect.
"""
if vnum != self.version:
raise ProtocolError("Version Incompatibility: %s %s" % (self.version, vnum))
def sendCall(self, *exp):
"""Utility method to send an expression to the other side of the connection.
"""
self.sendEncoded(exp)
def proto_didNotUnderstand(self, command):
"""Respond to stock 'C{didNotUnderstand}' message.
Log the command that was not understood and continue. (Note:
this will probably be changed to close the connection or raise
an exception in the future.)
"""
log.msg("Didn't understand command: %r" % command)
def connectionReady(self):
"""Initialize. Called after Banana negotiation is done.
"""
self.sendCall("version", self.version)
for notifier in self.connects:
try:
notifier()
except:
log.deferr()
self.connects = None
if self.factory: # in tests we won't have factory
self.factory.clientConnectionMade(self)
def connectionFailed(self):
# XXX should never get called anymore? check!
for notifier in self.failures:
try:
notifier()
except:
log.deferr()
self.failures = None
waitingForAnswers = None
def connectionLost(self, reason):
"""The connection was lost.
"""
self.disconnected = 1
# nuke potential circular references.
self.luids = None
if self.waitingForAnswers:
for d in self.waitingForAnswers.values():
try:
d.errback(failure.Failure(PBConnectionLost(reason)))
except:
log.deferr()
# Assure all Cacheable.stoppedObserving are called
for lobj in self.remotelyCachedObjects.values():
cacheable = lobj.object
perspective = lobj.perspective
try:
cacheable.stoppedObserving(perspective, RemoteCacheObserver(self, cacheable, perspective))
except:
log.deferr()
# Loop on a copy to prevent notifiers to mixup
# the list by calling dontNotifyOnDisconnect
for notifier in self.disconnects[:]:
try:
notifier()
except:
log.deferr()
self.disconnects = None
self.waitingForAnswers = None
self.localSecurity = None
self.remoteSecurity = None
self.remotelyCachedObjects = None
self.remotelyCachedLUIDs = None
self.locallyCachedObjects = None
self.localObjects = None
def notifyOnDisconnect(self, notifier):
"""Call the given callback when the Broker disconnects."""
assert callable(notifier)
self.disconnects.append(notifier)
def notifyOnFail(self, notifier):
"""Call the given callback if the Broker fails to connect."""
assert callable(notifier)
self.failures.append(notifier)
def notifyOnConnect(self, notifier):
"""Call the given callback when the Broker connects."""
assert callable(notifier)
if self.connects is None:
try:
notifier()
except:
log.err()
else:
self.connects.append(notifier)
def dontNotifyOnDisconnect(self, notifier):
"""Remove a callback from list of disconnect callbacks."""
try:
self.disconnects.remove(notifier)
except ValueError:
pass
def localObjectForID(self, luid):
"""
Get a local object for a locally unique ID.
@return: An object previously stored with L{registerReference} or
C{None} if there is no object which corresponds to the given
identifier.
"""
lob = self.localObjects.get(luid)
if lob is None:
return
return lob.object
maxBrokerRefsViolations = 0
def registerReference(self, object):
"""Get an ID for a local object.
Store a persistent reference to a local object and map its id()
to a generated, session-unique ID and return that ID.
"""
assert object is not None
puid = object.processUniqueID()
luid = self.luids.get(puid)
if luid is None:
if len(self.localObjects) > MAX_BROKER_REFS:
self.maxBrokerRefsViolations = self.maxBrokerRefsViolations + 1
if self.maxBrokerRefsViolations > 3:
self.transport.loseConnection()
raise Error("Maximum PB reference count exceeded. "
"Goodbye.")
raise Error("Maximum PB reference count exceeded.")
luid = self.newLocalID()
self.localObjects[luid] = Local(object)
self.luids[puid] = luid
else:
self.localObjects[luid].incref()
return luid
def setNameForLocal(self, name, object):
"""Store a special (string) ID for this object.
This is how you specify a 'base' set of objects that the remote
protocol can connect to.
"""
assert object is not None
self.localObjects[name] = Local(object)
def remoteForName(self, name):
"""Returns an object from the remote name mapping.
Note that this does not check the validity of the name, only
creates a translucent reference for it.
"""
return RemoteReference(None, self, name, 0)
def cachedRemotelyAs(self, instance, incref=0):
"""Returns an ID that says what this instance is cached as remotely, or C{None} if it's not.
"""
puid = instance.processUniqueID()
luid = self.remotelyCachedLUIDs.get(puid)
if (luid is not None) and (incref):
self.remotelyCachedObjects[luid].incref()
return luid
def remotelyCachedForLUID(self, luid):
"""Returns an instance which is cached remotely, with this LUID.
"""
return self.remotelyCachedObjects[luid].object
def cacheRemotely(self, instance):
"""
XXX"""
puid = instance.processUniqueID()
luid = self.newLocalID()
if len(self.remotelyCachedObjects) > MAX_BROKER_REFS:
self.maxBrokerRefsViolations = self.maxBrokerRefsViolations + 1
if self.maxBrokerRefsViolations > 3:
self.transport.loseConnection()
raise Error("Maximum PB cache count exceeded. "
"Goodbye.")
raise Error("Maximum PB cache count exceeded.")
self.remotelyCachedLUIDs[puid] = luid
# This table may not be necessary -- for now, it's to make sure that no
# monkey business happens with id(instance)
self.remotelyCachedObjects[luid] = Local(instance, self.serializingPerspective)
return luid
def cacheLocally(self, cid, instance):
"""(internal)
Store a non-filled-out cached instance locally.
"""
self.locallyCachedObjects[cid] = instance
def cachedLocallyAs(self, cid):
instance = self.locallyCachedObjects[cid]
return instance
def serialize(self, object, perspective=None, method=None, args=None, kw=None):
"""Jelly an object according to the remote security rules for this broker.
"""
if isinstance(object, defer.Deferred):
object.addCallbacks(self.serialize, lambda x: x,
callbackKeywords={
'perspective': perspective,
'method': method,
'args': args,
'kw': kw
})
return object
# XXX This call is NOT REENTRANT and testing for reentrancy is just
# crazy, so it likely won't be. Don't ever write methods that call the
# broker's serialize() method recursively (e.g. sending a method call
# from within a getState (this causes concurrency problems anyway so
# you really, really shouldn't do it))
# self.jellier = _NetJellier(self)
self.serializingPerspective = perspective
self.jellyMethod = method
self.jellyArgs = args
self.jellyKw = kw
try:
return jelly(object, self.security, None, self)
finally:
self.serializingPerspective = None
self.jellyMethod = None
self.jellyArgs = None
self.jellyKw = None
def unserialize(self, sexp, perspective = None):
"""Unjelly an sexp according to the local security rules for this broker.
"""
self.unserializingPerspective = perspective
try:
return unjelly(sexp, self.security, None, self)
finally:
self.unserializingPerspective = None
def newLocalID(self):
"""Generate a new LUID.
"""
self.currentLocalID = self.currentLocalID + 1
return self.currentLocalID
def newRequestID(self):
"""Generate a new request ID.
"""
self.currentRequestID = self.currentRequestID + 1
return self.currentRequestID
def _sendMessage(self, prefix, perspective, objectID, message, args, kw):
pbc = None
pbe = None
answerRequired = 1
if kw.has_key('pbcallback'):
pbc = kw['pbcallback']
del kw['pbcallback']
if kw.has_key('pberrback'):
pbe = kw['pberrback']
del kw['pberrback']
if kw.has_key('pbanswer'):
assert (not pbe) and (not pbc), "You can't specify a no-answer requirement."
answerRequired = kw['pbanswer']
del kw['pbanswer']
if self.disconnected:
raise DeadReferenceError("Calling Stale Broker")
try:
netArgs = self.serialize(args, perspective=perspective, method=message)
netKw = self.serialize(kw, perspective=perspective, method=message)
except:
return defer.fail(failure.Failure())
requestID = self.newRequestID()
if answerRequired:
rval = defer.Deferred()
self.waitingForAnswers[requestID] = rval
if pbc or pbe:
log.msg('warning! using deprecated "pbcallback"')
rval.addCallbacks(pbc, pbe)
else:
rval = None
self.sendCall(prefix+"message", requestID, objectID, message, answerRequired, netArgs, netKw)
return rval
def proto_message(self, requestID, objectID, message, answerRequired, netArgs, netKw):
self._recvMessage(self.localObjectForID, requestID, objectID, message, answerRequired, netArgs, netKw)
def proto_cachemessage(self, requestID, objectID, message, answerRequired, netArgs, netKw):
self._recvMessage(self.cachedLocallyAs, requestID, objectID, message, answerRequired, netArgs, netKw)
def _recvMessage(self, findObjMethod, requestID, objectID, message, answerRequired, netArgs, netKw):
"""Received a message-send.
Look up message based on object, unserialize the arguments, and
invoke it with args, and send an 'answer' or 'error' response.
"""
try:
object = findObjMethod(objectID)
if object is None:
raise Error("Invalid Object ID")
netResult = object.remoteMessageReceived(self, message, netArgs, netKw)
except Error, e:
if answerRequired:
# If the error is Jellyable or explicitly allowed via our
# security options, send it back and let the code on the
# other end deal with unjellying. If it isn't Jellyable,
# wrap it in a CopyableFailure, which ensures it can be
# unjellied on the other end. We have to do this because
# all errors must be sent back.
if isinstance(e, Jellyable) or self.security.isClassAllowed(e.__class__):
self._sendError(e, requestID)
else:
self._sendError(CopyableFailure(e), requestID)
except:
if answerRequired:
log.msg("Peer will receive following PB traceback:", isError=True)
f = CopyableFailure()
self._sendError(f, requestID)
log.err()
else:
if answerRequired:
if isinstance(netResult, defer.Deferred):
args = (requestID,)
netResult.addCallbacks(self._sendAnswer, self._sendFailureOrError,
callbackArgs=args, errbackArgs=args)
# XXX Should this be done somewhere else?
else:
self._sendAnswer(netResult, requestID)
##
# success
##
def _sendAnswer(self, netResult, requestID):
"""(internal) Send an answer to a previously sent message.
"""
self.sendCall("answer", requestID, netResult)
def proto_answer(self, requestID, netResult):
"""(internal) Got an answer to a previously sent message.
Look up the appropriate callback and call it.
"""
d = self.waitingForAnswers[requestID]
del self.waitingForAnswers[requestID]
d.callback(self.unserialize(netResult))
##
# failure
##
def _sendFailureOrError(self, fail, requestID):
"""
Call L{_sendError} or L{_sendFailure}, depending on whether C{fail}
represents an L{Error} subclass or not.
"""
if fail.check(Error) is None:
self._sendFailure(fail, requestID)
else:
self._sendError(fail, requestID)
def _sendFailure(self, fail, requestID):
"""Log error and then send it."""
log.msg("Peer will receive following PB traceback:")
log.err(fail)
self._sendError(fail, requestID)
def _sendError(self, fail, requestID):
"""(internal) Send an error for a previously sent message.
"""
if isinstance(fail, failure.Failure):
# If the failures value is jellyable or allowed through security,
# send the value
if (isinstance(fail.value, Jellyable) or
self.security.isClassAllowed(fail.value.__class__)):
fail = fail.value
elif not isinstance(fail, CopyableFailure):
fail = failure2Copyable(fail, self.factory.unsafeTracebacks)
if isinstance(fail, CopyableFailure):
fail.unsafeTracebacks = self.factory.unsafeTracebacks
self.sendCall("error", requestID, self.serialize(fail))
def proto_error(self, requestID, fail):
"""(internal) Deal with an error.
"""
d = self.waitingForAnswers[requestID]
del self.waitingForAnswers[requestID]
d.errback(self.unserialize(fail))
##
# refcounts
##
def sendDecRef(self, objectID):
"""(internal) Send a DECREF directive.
"""
self.sendCall("decref", objectID)
def proto_decref(self, objectID):
"""(internal) Decrement the reference count of an object.
If the reference count is zero, it will free the reference to this
object.
"""
refs = self.localObjects[objectID].decref()
if refs == 0:
puid = self.localObjects[objectID].object.processUniqueID()
del self.luids[puid]
del self.localObjects[objectID]
self._localCleanup.pop(puid, lambda: None)()
##
# caching
##
def decCacheRef(self, objectID):
"""(internal) Send a DECACHE directive.
"""
self.sendCall("decache", objectID)
def proto_decache(self, objectID):
"""(internal) Decrement the reference count of a cached object.
If the reference count is zero, free the reference, then send an
'uncached' directive.
"""
refs = self.remotelyCachedObjects[objectID].decref()
# log.msg('decaching: %s #refs: %s' % (objectID, refs))
if refs == 0:
lobj = self.remotelyCachedObjects[objectID]
cacheable = lobj.object
perspective = lobj.perspective
# TODO: force_decache needs to be able to force-invalidate a
# cacheable reference.
try:
cacheable.stoppedObserving(perspective, RemoteCacheObserver(self, cacheable, perspective))
except:
log.deferr()
puid = cacheable.processUniqueID()
del self.remotelyCachedLUIDs[puid]
del self.remotelyCachedObjects[objectID]
self.sendCall("uncache", objectID)
def proto_uncache(self, objectID):
"""(internal) Tell the client it is now OK to uncache an object.
"""
# log.msg("uncaching locally %d" % objectID)
obj = self.locallyCachedObjects[objectID]
obj.broker = None
## def reallyDel(obj=obj):
## obj.__really_del__()
## obj.__del__ = reallyDel
del self.locallyCachedObjects[objectID]
def respond(challenge, password):
"""Respond to a challenge.
This is useful for challenge/response authentication.
"""
m = md5()
m.update(password)
hashedPassword = m.digest()
m = md5()
m.update(hashedPassword)
m.update(challenge)
doubleHashedPassword = m.digest()
return doubleHashedPassword
def challenge():
"""I return some random data."""
crap = ''
for x in range(random.randrange(15,25)):
crap = crap + chr(random.randint(65,90))
crap = md5(crap).digest()
return crap
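# A minimal sketch of how challenge() and respond() pair up (the password
# below is a hypothetical placeholder; _PortalAuthChallenger further down
# performs the real server-side check):
#   c = challenge()                       # server picks random bytes
#   r = respond(c, 's3cret')              # client: md5(md5(password) + c)
#   m = md5(); m.update(md5('s3cret').digest()); m.update(c)
#   assert r == m.digest()                # server recomputes and compares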
class PBClientFactory(protocol.ClientFactory):
"""
Client factory for PB brokers.
As with all client factories, use with reactor.connectTCP/SSL/etc.
getPerspective and getRootObject can be called either before or
after the connection is made.
"""
protocol = Broker
unsafeTracebacks = False
def __init__(self, unsafeTracebacks=False, security=globalSecurity):
"""
@param unsafeTracebacks: if set, tracebacks for exceptions will be sent
over the wire.
@type unsafeTracebacks: C{bool}
@param security: security options used by the broker, default to
C{globalSecurity}.
@type security: L{twisted.spread.jelly.SecurityOptions}
"""
self.unsafeTracebacks = unsafeTracebacks
self.security = security
self._reset()
def buildProtocol(self, addr):
"""
Build the broker instance, passing the security options to it.
"""
p = self.protocol(isClient=True, security=self.security)
p.factory = self
return p
def _reset(self):
self.rootObjectRequests = [] # list of deferred
self._broker = None
self._root = None
def _failAll(self, reason):
deferreds = self.rootObjectRequests
self._reset()
for d in deferreds:
d.errback(reason)
def clientConnectionFailed(self, connector, reason):
self._failAll(reason)
def clientConnectionLost(self, connector, reason, reconnecting=0):
"""Reconnecting subclasses should call with reconnecting=1."""
if reconnecting:
# any pending requests will go to next connection attempt
# so we don't fail them.
self._broker = None
self._root = None
else:
self._failAll(reason)
def clientConnectionMade(self, broker):
self._broker = broker
self._root = broker.remoteForName("root")
ds = self.rootObjectRequests
self.rootObjectRequests = []
for d in ds:
d.callback(self._root)
def getRootObject(self):
"""Get root object of remote PB server.
@return: Deferred of the root object.
"""
if self._broker and not self._broker.disconnected:
return defer.succeed(self._root)
d = defer.Deferred()
self.rootObjectRequests.append(d)
return d
def disconnect(self):
"""If the factory is connected, close the connection.
Note that if you set up the factory to reconnect, you will need to
implement extra logic to prevent automatic reconnection after this
is called.
"""
if self._broker:
self._broker.transport.loseConnection()
def _cbSendUsername(self, root, username, password, client):
return root.callRemote("login", username).addCallback(
self._cbResponse, password, client)
def _cbResponse(self, (challenge, challenger), password, client):
return challenger.callRemote("respond", respond(challenge, password), client)
def _cbLoginAnonymous(self, root, client):
"""
Attempt an anonymous login on the given remote root object.
@type root: L{RemoteReference}
@param root: The object on which to attempt the login, most likely
returned by a call to L{PBClientFactory.getRootObject}.
@param client: A jellyable object which will be used as the I{mind}
parameter for the login attempt.
@rtype: L{Deferred}
@return: A L{Deferred} which will be called back with a
L{RemoteReference} to an avatar when anonymous login succeeds, or
which will errback if anonymous login fails.
"""
return root.callRemote("loginAnonymous", client)
def login(self, credentials, client=None):
"""
Login and get perspective from remote PB server.
Currently the following credentials are supported::
L{twisted.cred.credentials.IUsernamePassword}
L{twisted.cred.credentials.IAnonymous}
@rtype: L{Deferred}
@return: A L{Deferred} which will be called back with a
L{RemoteReference} for the avatar logged in to, or which will
errback if login fails.
"""
d = self.getRootObject()
if IAnonymous.providedBy(credentials):
d.addCallback(self._cbLoginAnonymous, client)
else:
d.addCallback(
self._cbSendUsername, credentials.username,
credentials.password, client)
return d
class PBServerFactory(protocol.ServerFactory):
"""
Server factory for perspective broker.
Login is done using a Portal object, whose realm is expected to return
avatars implementing IPerspective. The credential checkers in the portal
should accept IUsernameHashedPassword or IUsernameMD5Password.
Alternatively, any object providing or adaptable to L{IPBRoot} can be
used instead of a portal to provide the root object of the PB server.
"""
unsafeTracebacks = False
# object broker factory
protocol = Broker
def __init__(self, root, unsafeTracebacks=False, security=globalSecurity):
"""
@param root: factory providing the root Referenceable used by the broker.
@type root: object providing or adaptable to L{IPBRoot}.
@param unsafeTracebacks: if set, tracebacks for exceptions will be sent
over the wire.
@type unsafeTracebacks: C{bool}
@param security: security options used by the broker, default to
C{globalSecurity}.
@type security: L{twisted.spread.jelly.SecurityOptions}
"""
self.root = IPBRoot(root)
self.unsafeTracebacks = unsafeTracebacks
self.security = security
def buildProtocol(self, addr):
"""
Return a Broker attached to the factory (as the service provider).
"""
proto = self.protocol(isClient=False, security=self.security)
proto.factory = self
proto.setNameForLocal("root", self.root.rootObject(proto))
return proto
def clientConnectionMade(self, protocol):
# XXX does this method make any sense?
pass
class IUsernameMD5Password(ICredentials):
"""
I encapsulate a username and a hashed password.
This credential is used for username/password over PB. CredentialCheckers
which check this kind of credential must store the passwords in plaintext
form or as an MD5 digest.
@type username: C{str} or C{Deferred}
@ivar username: The username associated with these credentials.
"""
def checkPassword(password):
"""
Validate these credentials against the correct password.
@type password: C{str}
@param password: The correct, plaintext password against which to
check.
@rtype: C{bool} or L{Deferred}
@return: C{True} if the credentials represented by this object match the
given password, C{False} if they do not, or a L{Deferred} which will
be called back with one of these values.
"""
def checkMD5Password(password):
"""
Validate these credentials against the correct MD5 digest of the
password.
@type password: C{str}
@param password: The correct MD5 digest of a password against which to
check.
@rtype: C{bool} or L{Deferred}
@return: C{True} if the credentials represented by this object match the
given digest, C{False} if they do not, or a L{Deferred} which will
be called back with one of these values.
"""
class _PortalRoot:
"""Root object, used to login to portal."""
implements(IPBRoot)
def __init__(self, portal):
self.portal = portal
def rootObject(self, broker):
return _PortalWrapper(self.portal, broker)
registerAdapter(_PortalRoot, Portal, IPBRoot)
class _JellyableAvatarMixin:
"""
Helper class for code that deals with avatars which PB must be capable of
sending to a peer.
"""
def _cbLogin(self, (interface, avatar, logout)):
"""
Ensure that the avatar to be returned to the client is jellyable and
set up disconnection notification to call the realm's logout object.
"""
if not IJellyable.providedBy(avatar):
avatar = AsReferenceable(avatar, "perspective")
puid = avatar.processUniqueID()
def dereferenceLogout():
self.broker.dontNotifyOnDisconnect(logout)
logout()
self.broker._localCleanup[puid] = dereferenceLogout
# No special helper function is necessary for notifyOnDisconnect
# because dereference callbacks won't be invoked if the connection is
# randomly dropped. I'm not sure those are ideal semantics, but this
# is the only user of the (private) API at the moment and it works just
# fine as things are. -exarkun
self.broker.notifyOnDisconnect(logout)
return avatar
class _PortalWrapper(Referenceable, _JellyableAvatarMixin):
"""
Root Referenceable object, used to login to portal.
"""
def __init__(self, portal, broker):
self.portal = portal
self.broker = broker
def remote_login(self, username):
"""
Start of username/password login.
"""
c = challenge()
return c, _PortalAuthChallenger(self.portal, self.broker, username, c)
def remote_loginAnonymous(self, mind):
"""
Attempt an anonymous login.
@param mind: An object to use as the mind parameter to the portal login
call (possibly None).
@rtype: L{Deferred}
@return: A Deferred which will be called back with an avatar when login
succeeds or which will be errbacked if login fails somehow.
"""
d = self.portal.login(Anonymous(), mind, IPerspective)
d.addCallback(self._cbLogin)
return d
class _PortalAuthChallenger(Referenceable, _JellyableAvatarMixin):
"""
Called with response to password challenge.
"""
implements(IUsernameHashedPassword, IUsernameMD5Password)
def __init__(self, portal, broker, username, challenge):
self.portal = portal
self.broker = broker
self.username = username
self.challenge = challenge
def remote_respond(self, response, mind):
self.response = response
d = self.portal.login(self, mind, IPerspective)
d.addCallback(self._cbLogin)
return d
# IUsernameHashedPassword:
def checkPassword(self, password):
return self.checkMD5Password(md5(password).digest())
# IUsernameMD5Password
def checkMD5Password(self, md5Password):
md = md5()
md.update(md5Password)
md.update(self.challenge)
correct = md.digest()
return self.response == correct
__all__ = [
# Everything from flavors is exposed publicly here.
'IPBRoot', 'Serializable', 'Referenceable', 'NoSuchMethod', 'Root',
'ViewPoint', 'Viewable', 'Copyable', 'Jellyable', 'Cacheable',
'RemoteCopy', 'RemoteCache', 'RemoteCacheObserver', 'copyTags',
'setUnjellyableForClass', 'setUnjellyableFactoryForClass',
'setUnjellyableForClassTree',
'MAX_BROKER_REFS', 'portno',
'ProtocolError', 'DeadReferenceError', 'Error', 'PBConnectionLost',
'RemoteMethod', 'IPerspective', 'Avatar', 'AsReferenceable',
'RemoteReference', 'CopyableFailure', 'CopiedFailure', 'failure2Copyable',
'Broker', 'respond', 'challenge', 'PBClientFactory', 'PBServerFactory',
'IUsernameMD5Password',
]
| apache-2.0 |
DeercoderResearch/convnet | py/layer.py | 5 | 2635 | from edge import *
def ChooseLayer(layer_proto):
if layer_proto.activation == convnet_config_pb2.Layer.LINEAR:
return Layer(layer_proto)
elif layer_proto.activation == convnet_config_pb2.Layer.RECTIFIED_LINEAR:
return ReLULayer(layer_proto)
elif layer_proto.activation == convnet_config_pb2.Layer.SOFTMAX:
return SoftmaxLayer(layer_proto)
else:
raise Exception('Layer type not implemented.')
class Layer(object):
def __init__(self, layer_proto):
self.num_channels_ = layer_proto.num_channels
self.is_input_ = True
self.is_output_ = True
self.incoming_edge_ = []
self.outgoing_edge_ = []
self.image_size_y_ = layer_proto.image_size_y
self.image_size_x_ = layer_proto.image_size_x
self.name_ = layer_proto.name
self.dropprob_ = layer_proto.dropprob
self.dropout_scale_up_at_train_time_ = True
self.gaussian_dropout_ = layer_proto.gaussian_dropout
self.state_ = None
def GetName(self):
return self.name_
def GetNumChannels(self):
return self.num_channels_
def IsInput(self):
return self.is_input_
def SetSize(self, image_size_y, image_size_x):
self.image_size_y_ = image_size_y
self.image_size_x_ = image_size_x
def GetSize(self):
return self.image_size_y_, self.image_size_x_
def GetNumDims(self):
return self.image_size_y_ * self.image_size_x_ * self.num_channels_
def AllocateMemory(self, batch_size):
layer_size = self.num_channels_ * self.image_size_y_ * self.image_size_x_
if self.state_ is not None:
self.state_.free_device_memory()
self.state_ = cm.empty((batch_size, layer_size))
self.state_.set_shape4d((batch_size, self.image_size_x_, self.image_size_y_, self.num_channels_))
self.state_.assign(0)
def GetState(self):
return self.state_
def AddIncomingEdge(self, e):
self.incoming_edge_.append(e)
self.is_input_ = False
def AddOutgoingEdge(self, e):
self.outgoing_edge_.append(e)
self.is_output_ = False
def ApplyActivation(self):
pass
def ApplyDropout(self):
if self.dropprob_ > 0 and not self.dropout_scale_up_at_train_time_ \
and not self.gaussian_dropout_:
# Scale down.
self.state_.mult(1 - self.dropprob_)
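# Note on the scale-down branch in ApplyDropout: when
# dropout_scale_up_at_train_time_ is False, the usual convention is to
# rescale at test time; e.g. with dropprob_ = 0.2 (an illustrative value)
# the state is multiplied by 0.8 so its expected magnitude matches
# training, where units were zeroed with probability 0.2.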
class ReLULayer(Layer):
def __init__(self, layer_proto):
super(ReLULayer, self).__init__(layer_proto)
def ApplyActivation(self):
self.state_.lower_bound(0)
self.ApplyDropout()
class SoftmaxLayer(Layer):
def __init__(self, layer_proto):
super(SoftmaxLayer, self).__init__(layer_proto)
def ApplyActivation(self):
self.state_.apply_softmax_row_major()
self.ApplyDropout()
| bsd-2-clause |
nth10sd/funfuzz | src/funfuzz/autobisectjs/known_broken_earliest_working.py | 1 | 9896 | # coding=utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
"""Known broken changeset ranges of SpiderMonkey are specified in this file.
"""
import platform
import subprocess
from pkg_resources import parse_version
def hgrange(first_bad, first_good): # pylint: disable=missing-param-doc,missing-return-doc,missing-return-type-doc
# pylint: disable=missing-type-doc
"""Like "first_bad::first_good", but includes branches/csets that never got the first_good fix."""
# NB: mercurial's descendants(x) includes x
# So this revset expression includes first_bad, but does not include first_good.
# NB: hg log -r "(descendants(id(badddddd)) - descendants(id(baddddddd)))" happens to return the empty set,
# like we want"
return f"(descendants(id({first_bad}))-descendants(id({first_good})))"
def known_broken_ranges(options): # pylint: disable=missing-param-doc,missing-return-doc,missing-return-type-doc
# pylint: disable=missing-type-doc
"""Return a list of revsets corresponding to known-busted revisions."""
# Paste numbers into: https://hg.mozilla.org/mozilla-central/rev/<number> to get hgweb link.
# To add to the list:
# - (1) tells you when the brokenness started:
#       <python executable> -m funfuzz.autobisectjs --compilationFailedLabel=bad -e FAILINGREV
# - (2) tells you when the brokenness ended:
#       <python executable> -m funfuzz.autobisectjs --compilationFailedLabel=bad -s FAILINGREV
# ANCIENT FIXME: It might make sense to avoid (or note) these in checkBlameParents.
skips = [
# Fx60, broken spidermonkey
hgrange("4c72627cfc6c2dafb4590637fe1f3b5a24e133a4", "926f80f2c5ccaa5b0374b48678d62c304cbc9a68"),
# Fx63, broken spidermonkey
hgrange("1fb7ddfad86d5e085c4f2af23a2519d37e45a3e4", "5202cfbf8d60ffbb1ad9c385eda725992fc43d7f"),
# Fx64, broken spidermonkey
hgrange("aae4f349fa588aa844cfb14fae278b776aed6cb7", "c5fbbf959e23a4f33d450cb6c64ef739e09fbe13"),
# Fx66, broken spidermonkey
hgrange("f611bc50d11cae1f48cc44d1468f2c34ec46e287", "39d0c50a2209e0f0c982b1d121765c9dc950e161"),
# Fx69, broken spidermonkey
hgrange("1e4c1b283ba3e4260e1f52bd3b4cba8805bc28b9", "7fd7b5ac5743c0b219fc823441e09d84143f306a"),
# Fx69, broken spidermonkey
hgrange("36ceb8f15cb9fd797cced7f4f37c2691916b72d5", "25663e783e96b0c1a879685c295955fa2eaaf8d8"),
]
if platform.system() == "Darwin":
skips.extend([
# Fx68, see bug 1544418
hgrange("3d0236f985f83c6b2f4800f814c004e0a2902468", "32cef42080b1f7443dfe767652ea44e0dafbfd9c"),
])
if platform.system() == "Linux":
skips.extend([
# Fx56-57, failure specific to GCC 5 (and probably earlier) - supposedly works on GCC 6, see bug 1386011
hgrange("e94dceac80907abd4b579ddc8b7c202bbf461ec7", "516c01f62d840744648768b6fac23feb770ffdc1"),
])
if platform.machine() == "aarch64":
skips.extend([
# Fx54, see bug 1336344
hgrange("e8bb22053e65e2a82456e9243a07af023a8ebb13", "999757e9e5a576c884201746546a3420a92f7447"),
])
if not options.disableProfiling:
skips.extend([
# Fx54-55, to bypass the following month-long breakage, use "--disable-profiling", see bug 1339190
hgrange("aa1da5ed8a0719e0ab424e672d2f477b70ef593c", "5a03382283ae0a020b2a2d84bbbc91ff13cb2130"),
])
glibc_version = subprocess.run(["ldd", "--version"],
check=True,
encoding="utf-8",
stdout=subprocess.PIPE).stdout.splitlines()[0].split()[-1]
if parse_version(glibc_version) >= parse_version("2.28"):
skips.extend([
# Fx62-67, to bypass this 9-month-long breakage, use Ubuntu 18.04 or glibc < 2.28, see bug 1533969
# Or try and construct a one line patch to be applied during each compile
hgrange("e8d4a24e47a943db327206a4680fb75c156f9086", "7b85bf9c5210e5679fa6cfad92466a6e2ba30232"),
])
if platform.system() == "Windows":
skips.extend([
# Fx72, see bug 1598709
hgrange("0ae96da6fdb236f70579eb2ca10cbe3cf992aa1f", "130b1fe87279432128efd58fda9d9d452f55a466"),
])
if not options.enableDbg:
skips.extend([
# Fx58-59, broken opt builds w/ --enable-gczeal
hgrange("c5561749c1c64793c31699d46bbf12cc0c69815c", "f4c15a88c937e8b3940f5c1922142a6ffb137320"),
# Fx66, broken opt builds w/ --enable-gczeal
hgrange("247e265373eb26566e94303fa42b1237b80295d9", "e4aa68e2a85b027c5498bf8d8f379b06d07df6c2"),
])
if options.enableMoreDeterministic:
skips.extend([
# Fx68, see bug 1542980
hgrange("427b854cdb1c47ce6a643f83245914d66dca4382", "4c4e45853808229f832e32f6bcdbd4c92a72b13b"),
])
if options.enableSimulatorArm32:
skips.extend([
# Fx57-61, broken 32-bit ARM-simulator builds
hgrange("284002382c21842a7ebb39dcf53d5d34fd3f7692", "05669ce25b032bf83ca38e082e6f2c1bf683ed19"),
])
return skips
def earliest_known_working_rev(_options, flags, skip_revs): # pylint: disable=missing-param-doc,missing-return-doc
# pylint: disable=missing-return-type-doc,missing-type-doc,too-many-branches,too-complex,too-many-statements
"""Return a revset which evaluates to the first revision of the shell that compiles with |options|
and runs jsfunfuzz successfully with |flags|."""
# Require at least Mac OS X 10.13
assert (not platform.system() == "Darwin") or (parse_version(platform.mac_ver()[0]) >= parse_version("10.13"))
cpu_count_flag = False
for entry in flags: # flags is a list of flags, and the option must exactly match.
if "--cpu-count=" in entry:
cpu_count_flag = True
required = []
# These should be in descending order, or bisection will break at earlier changesets.
if "--nursery-bigints=on" in flags or \
"--nursery-bigints=off" in flags: # 1st w/--nursery-bigints=on, see bug 1530372
required.append("a0d1fb0a86b04c74a8809c35230382f90cdfe779") # m-c 509086 Fx74
if "--enable-weak-refs" in flags: # 1st w/--enable-weak-refs, see bug 1587098
required.append("f273ec2ec0aecce1938a78f01925764d02af2ad2") # m-c 500139 Fx72
if platform.system() == "Windows": # 1st w/ working Windows builds w/a recent Win10 SDK and Rust 1.38+
required.append("fbcb7dcd82acfc9196c0dfd60e28248c25a4583b") # m-c 497927 Fx71
if "--parser-deferred-alloc" in flags: # 1st w/--parser-deferred-alloc, see bug 1580378
required.append("d84743fd31a19e9fed54722203ad3222af993fa8") # m-c 494269 Fx71
# Note that m-c rev 481620:2e490776b07e35013ae07a47798a983f482ffaa3 is the first with blinterp in-tree test fixes
if set(["--blinterp-eager", "--no-blinterp",
"--blinterp"]).intersection(flags): # 1st w/--blinterp-eager,--no-blinterp,--blinterp, see bug 1562129
required.append("2e490776b07e35013ae07a47798a983f482ffaa3") # m-c 481620 Fx69
if "--enable-experimental-fields" in flags: # 1st w/--enable-experimental-fields, see bug 1529758
required.append("7a1ad6647c22bd34a6c70e67dc26e5b83f71cea4") # m-c 463705 Fx67
# Note that m-c rev 457581:4b74d76e55a819852c8fa925efd25c57fdf35c9d is the first with BigInt on by default
if set(["--wasm-compiler=none", "--wasm-compiler=baseline+ion", "--wasm-compiler=baseline", "--wasm-compiler=ion",
"--wasm-compiler=cranelift"]).intersection(flags): # 1st w/--wasm-compiler=none/<others>, see bug 1509441
required.append("48dc14f79fb0a51ca796257a4179fe6f16b71b14") # m-c 455252 Fx66
if "--more-compartments" in flags: # 1st w/--more-compartments, see bug 1518753
required.append("450b8f0cbb4e494b399ebcf23a33b8d9cb883245") # m-c 453627 Fx66
if "--no-streams" in flags: # 1st w/ working --no-streams, see bug 1501734
required.append("c6a8b4d451afa922c4838bd202749c7e131cf05e") # m-c 442977 Fx65
if platform.system() == "Darwin": # 1st w/ successful Xcode 10.3 builds, see bug 1270217
required.append("6b7ace4745e30ba914ea8350bfc7fa12f2980c54") # m-c 420996 Fx62
if "--wasm-gc" in flags: # 1st w/--wasm-gc, see bug 1445272
required.append("302befe7689abad94a75f66ded82d5e71b558dc4") # m-c 413255 Fx61
if "--nursery-strings=on" in flags or \
"--nursery-strings=off" in flags: # 1st w/--nursery-strings=on, see bug 903519
required.append("321c29f4850882a2f0220a4dc041c53992c47992") # m-c 406115 Fx60
if "--spectre-mitigations=on" in flags or \
"--spectre-mitigations=off" in flags: # 1st w/--spectre-mitigations=on, see bug 1430053
required.append("a98f615965d73f6462924188fc2b1f2a620337bb") # m-c 399868 Fx59
if "--test-wasm-await-tier2" in flags: # 1st w/--test-wasm-await-tier2, see bug 1388785
required.append("b1dc87a94262c1bf2747d2bf560e21af5deb3174") # m-c 387188 Fx58
if cpu_count_flag: # 1st w/--cpu-count=<NUM>, see bug 1206770
required.append("1b55231e6628e70f0c2ee2b2cb40a1e9861ac4b4") # m-c 380023 Fx57
# 1st w/ revised template literals, see bug 1317375
required.append("bb868860dfc35876d2d9c421c037c75a4fb9b3d2") # m-c 330353 Fx53
return f"first(({common_descendants(required)}) - ({skip_revs}))"
def common_descendants(revs): # pylint: disable=missing-docstring,missing-return-doc,missing-return-type-doc
return " and ".join(f"descendants({r})" for r in revs)
| mpl-2.0 |
s20121035/rk3288_android5.1_repo | external/lldb/test/python_api/symbol-context/TestSymbolContext.py | 2 | 3795 | """
Test SBSymbolContext APIs.
"""
import os, time
import re
import unittest2
import lldb, lldbutil
from lldbtest import *
class SymbolContextAPITestCase(TestBase):
mydir = os.path.join("python_api", "symbol-context")
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
@python_api_test
@dsym_test
def test_with_dsym(self):
"""Exercise SBSymbolContext API extensively."""
self.buildDsym()
self.symbol_context()
@python_api_test
@dwarf_test
def test_with_dwarf(self):
"""Exercise SBSymbolContext API extensively."""
self.buildDwarf()
self.symbol_context()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number of function 'c'.
self.line = line_number('main.c', '// Find the line number of function "c" here.')
def symbol_context(self):
"""Get an SBSymbolContext object and call its many methods."""
exe = os.path.join(os.getcwd(), "a.out")
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Now create a breakpoint on main.c by name 'c'.
breakpoint = target.BreakpointCreateByName('c', 'a.out')
#print "breakpoint:", breakpoint
self.assertTrue(breakpoint and
breakpoint.GetNumLocations() == 1,
VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(None, None, os.getcwd())
self.assertTrue(process, PROCESS_IS_VALID)
# Frame #0 should be on self.line.
from lldbutil import get_stopped_thread
thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
self.assertTrue(thread.IsValid(), "There should be a thread stopped due to breakpoint")
frame0 = thread.GetFrameAtIndex(0)
self.assertTrue(frame0.GetLineEntry().GetLine() == self.line)
# Now get the SBSymbolContext from this frame. We want everything. :-)
context = frame0.GetSymbolContext(lldb.eSymbolContextEverything)
self.assertTrue(context)
# Get the description of this module.
module = context.GetModule()
desc = lldbutil.get_description(module)
self.expect(desc, "The module should match", exe=False,
substrs = [os.path.join(self.mydir, 'a.out')])
compileUnit = context.GetCompileUnit()
self.expect(str(compileUnit), "The compile unit should match", exe=False,
substrs = [os.path.join(self.mydir, 'main.c')])
function = context.GetFunction()
self.assertTrue(function)
#print "function:", function
block = context.GetBlock()
self.assertTrue(block)
#print "block:", block
lineEntry = context.GetLineEntry()
#print "line entry:", lineEntry
self.expect(lineEntry.GetFileSpec().GetDirectory(), "The line entry should have the correct directory",
exe=False,
substrs = [self.mydir])
self.expect(lineEntry.GetFileSpec().GetFilename(), "The line entry should have the correct filename",
exe=False,
substrs = ['main.c'])
self.assertTrue(lineEntry.GetLine() == self.line,
"The line entry's line number should match ")
symbol = context.GetSymbol()
self.assertTrue(function.GetName() == symbol.GetName() and symbol.GetName() == 'c',
"The symbol name should be 'c'")
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
| gpl-3.0 |
bocon13/buck | programs/test_buck_tool.py | 16 | 3146 | # Copyright 2016-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from buck_tool import CommandLineArgs
class TestCommandLineArgs(unittest.TestCase):
def test_empty_command(self):
args = CommandLineArgs(["buck"])
self.assertEqual(args.command, None)
self.assertEqual(args.buck_options, [])
self.assertEqual(args.command_options, [])
self.assertTrue(args.is_help(), "With no arguments should show help")
def test_single_command(self):
args = CommandLineArgs(["buck", "clean"])
self.assertEqual(args.command, "clean")
self.assertEqual(args.buck_options, [])
self.assertEqual(args.command_options, [])
self.assertFalse(args.is_help())
def test_global_short_help(self):
args = CommandLineArgs(["buck", "-h"])
self.assertEqual(args.command, None)
self.assertEqual(args.buck_options, ["-h"])
self.assertEqual(args.command_options, [])
self.assertTrue(args.is_help())
def test_global_help(self):
args = CommandLineArgs(["buck", "--help"])
self.assertEqual(args.command, None)
self.assertEqual(args.buck_options, ["--help"])
self.assertEqual(args.command_options, [])
self.assertTrue(args.is_help())
def test_global_version(self):
args = CommandLineArgs(["buck", "--version"])
self.assertEqual(args.command, None)
self.assertEqual(args.buck_options, ["--version"])
self.assertEqual(args.command_options, [])
self.assertTrue(args.is_help(), "--version does not require a build")
def test_command_help(self):
args = CommandLineArgs(["buck", "clean", "--help"])
self.assertEqual(args.command, "clean")
self.assertEqual(args.buck_options, [])
self.assertEqual(args.command_options, ["--help"])
self.assertTrue(args.is_help())
def test_help_command(self):
args = CommandLineArgs(["buck", "--help", "clean"])
self.assertEqual(args.command, "clean")
self.assertEqual(args.buck_options, ["--help"])
self.assertEqual(args.command_options, [])
self.assertFalse(args.is_help(), "Global --help ignored with command")
def test_command_all(self):
args = CommandLineArgs(["buck", "--help", "--version", "clean", "--help", "all"])
self.assertEqual(args.command, "clean")
self.assertEqual(args.buck_options, ["--help", "--version"])
self.assertEqual(args.command_options, ["--help", "all"])
self.assertTrue(args.is_help())
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
beni55/gunicorn | tests/requests/valid/016.py | 40 | 2376 | certificate = """-----BEGIN CERTIFICATE-----\r\n
MIIFbTCCBFWgAwIBAgICH4cwDQYJKoZIhvcNAQEFBQAwcDELMAkGA1UEBhMCVUsx\r\n
ETAPBgNVBAoTCGVTY2llbmNlMRIwEAYDVQQLEwlBdXRob3JpdHkxCzAJBgNVBAMT\r\n
AkNBMS0wKwYJKoZIhvcNAQkBFh5jYS1vcGVyYXRvckBncmlkLXN1cHBvcnQuYWMu\r\n
dWswHhcNMDYwNzI3MTQxMzI4WhcNMDcwNzI3MTQxMzI4WjBbMQswCQYDVQQGEwJV\r\n
SzERMA8GA1UEChMIZVNjaWVuY2UxEzARBgNVBAsTCk1hbmNoZXN0ZXIxCzAJBgNV\r\n
BAcTmrsogriqMWLAk1DMRcwFQYDVQQDEw5taWNoYWVsIHBhcmQYJKoZIhvcNAQEB\r\n
BQADggEPADCCAQoCggEBANPEQBgl1IaKdSS1TbhF3hEXSl72G9J+WC/1R64fAcEF\r\n
W51rEyFYiIeZGx/BVzwXbeBoNUK41OK65sxGuflMo5gLflbwJtHBRIEKAfVVp3YR\r\n
gW7cMA/s/XKgL1GEC7rQw8lIZT8RApukCGqOVHSi/F1SiFlPDxuDfmdiNzL31+sL\r\n
0iwHDdNkGjy5pyBSB8Y79dsSJtCW/iaLB0/n8Sj7HgvvZJ7x0fr+RQjYOUUfrePP\r\n
u2MSpFyf+9BbC/aXgaZuiCvSR+8Snv3xApQY+fULK/xY8h8Ua51iXoQ5jrgu2SqR\r\n
wgA7BUi3G8LFzMBl8FRCDYGUDy7M6QaHXx1ZWIPWNKsCAwEAAaOCAiQwggIgMAwG\r\n
1UdEwEB/wQCMAAwEQYJYIZIAYb4QgHTTPAQDAgWgMA4GA1UdDwEB/wQEAwID6DAs\r\n
BglghkgBhvhCAQ0EHxYdVUsgZS1TY2llbmNlIFVzZXIgQ2VydGlmaWNhdGUwHQYD\r\n
VR0OBBYEFDTt/sf9PeMaZDHkUIldrDYMNTBZMIGaBgNVHSMEgZIwgY+AFAI4qxGj\r\n
loCLDdMVKwiljjDastqooXSkcjBwMQswCQYDVQQGEwJVSzERMA8GA1UEChMIZVNj\r\n
aWVuY2UxEjAQBgNVBAsTCUF1dGhvcml0eTELMAkGA1UEAxMCQ0ExLTArBgkqhkiG\r\n
9w0BCQEWHmNhLW9wZXJhdG9yQGdyaWQtc3VwcG9ydC5hYy51a4IBADApBgNVHRIE\r\n
IjAggR5jYS1vcGVyYXRvckBncmlkLXN1cHBvcnQuYWMudWswGQYDVR0gBBIwEDAO\r\n
BgwrBgEEAdkvAQEBAQYwPQYJYIZIAYb4QgEEBDAWLmh0dHA6Ly9jYS5ncmlkLXN1\r\n
cHBvcnQuYWMudmT4sopwqlBWsvcHViL2NybC9jYWNybC5jcmwwPQYJYIZIAYb4Qg\r\n
EDBDAWLmh0dHA6Ly9jYS5ncmlkLXN1cHBvcnQuYWMudWsvcHViL2NybC9jYWNybC\r\n
5jcmwwPwYDVR0fBDgwNjA0oDKgMIYuaHR0cDovL2NhLmdyaWQt5hYy51ay9wdWIv\r\n
Y3JsL2NhY3JsLmNybDANBgkqhkiG9w0BAQUFAAOCAQEAS/U4iiooBENGW/Hwmmd3\r\n
XCy6Zrt08YjKCzGNjorT98g8uGsqYjSxv/hmi0qlnlHs+k/3Iobc3LjS5AMYr5L8\r\n
UO7OSkgFFlLHQyC9JzPfmLCAugvzEbyv4Olnsr8hbxF1MbKZoQxUZtMVu29wjfXk\r\n
hTeApBv7eaKCWpSp7MCbvgzm74izKhu3vlDk9w6qVrxePfGgpKPqfHiOoGhFnbTK\r\n
wTC6o2xq5y0qZ03JonF7OJspEd3I5zKY3E+ov7/ZhW6DqT8UFvsAdjvQbXyhV8Eu\r\n
Yhixw1aKEPzNjNowuIseVogKOLXxWI5vAi5HgXdS0/ES5gDGsABo4fqovUKlgop3\r\n
RA==\r\n
-----END CERTIFICATE-----""".replace("\n\n", "\n")
request = {
"method": "GET",
"uri": uri("/"),
"version": (1, 1),
"headers": [("X-SSL-CERT", certificate)],
"body": b""
}
| mit |
azatoth/scons | test/implicit-cache/GetOption.py | 5 | 1778 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that SetOption/GetOption('implicit_cache') works and can
be overridden from the command line.
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """
assert not GetOption('implicit_cache')
SetOption('implicit_cache', 1)
assert GetOption('implicit_cache')
""")
test.run()
test.write('SConstruct', """
assert GetOption('implicit_cache')
SetOption('implicit_cache', 0)
assert GetOption('implicit_cache')
""")
test.run(arguments='--implicit-cache')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
znoland3/zachdemo | venvdir/lib/python3.4/site-packages/pip/_vendor/html5lib/treewalkers/lxmletree.py | 436 | 5992 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from lxml import etree
from ..treebuilders.etree import tag_regexp
from . import _base
from .. import ihatexml
def ensure_str(s):
if s is None:
return None
elif isinstance(s, text_type):
return s
else:
return s.decode("utf-8", "strict")
class Root(object):
def __init__(self, et):
self.elementtree = et
self.children = []
if et.docinfo.internalDTD:
self.children.append(Doctype(self,
ensure_str(et.docinfo.root_name),
ensure_str(et.docinfo.public_id),
ensure_str(et.docinfo.system_url)))
root = et.getroot()
node = root
while node.getprevious() is not None:
node = node.getprevious()
while node is not None:
self.children.append(node)
node = node.getnext()
self.text = None
self.tail = None
def __getitem__(self, key):
return self.children[key]
def getnext(self):
return None
def __len__(self):
return 1
class Doctype(object):
def __init__(self, root_node, name, public_id, system_id):
self.root_node = root_node
self.name = name
self.public_id = public_id
self.system_id = system_id
self.text = None
self.tail = None
def getnext(self):
return self.root_node.children[1]
class FragmentRoot(Root):
def __init__(self, children):
self.children = [FragmentWrapper(self, child) for child in children]
self.text = self.tail = None
def getnext(self):
return None
class FragmentWrapper(object):
def __init__(self, fragment_root, obj):
self.root_node = fragment_root
self.obj = obj
if hasattr(self.obj, 'text'):
self.text = ensure_str(self.obj.text)
else:
self.text = None
if hasattr(self.obj, 'tail'):
self.tail = ensure_str(self.obj.tail)
else:
self.tail = None
def __getattr__(self, name):
return getattr(self.obj, name)
def getnext(self):
siblings = self.root_node.children
idx = siblings.index(self)
if idx < len(siblings) - 1:
return siblings[idx + 1]
else:
return None
def __getitem__(self, key):
return self.obj[key]
def __bool__(self):
return bool(self.obj)
def getparent(self):
return None
def __str__(self):
return str(self.obj)
def __unicode__(self):
return str(self.obj)
def __len__(self):
return len(self.obj)
class TreeWalker(_base.NonRecursiveTreeWalker):
def __init__(self, tree):
if hasattr(tree, "getroot"):
tree = Root(tree)
elif isinstance(tree, list):
tree = FragmentRoot(tree)
_base.NonRecursiveTreeWalker.__init__(self, tree)
self.filter = ihatexml.InfosetFilter()
def getNodeDetails(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
return _base.TEXT, ensure_str(getattr(node, key))
elif isinstance(node, Root):
return (_base.DOCUMENT,)
elif isinstance(node, Doctype):
return _base.DOCTYPE, node.name, node.public_id, node.system_id
elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"):
return _base.TEXT, node.obj
elif node.tag == etree.Comment:
return _base.COMMENT, ensure_str(node.text)
elif node.tag == etree.Entity:
return _base.ENTITY, ensure_str(node.text)[1:-1] # strip &;
else:
# This is assumed to be an ordinary element
match = tag_regexp.match(ensure_str(node.tag))
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = ensure_str(node.tag)
attrs = {}
for name, value in list(node.attrib.items()):
name = ensure_str(name)
value = ensure_str(value)
match = tag_regexp.match(name)
if match:
attrs[(match.group(1), match.group(2))] = value
else:
attrs[(None, name)] = value
return (_base.ELEMENT, namespace, self.filter.fromXmlName(tag),
attrs, len(node) > 0 or node.text)
def getFirstChild(self, node):
assert not isinstance(node, tuple), "Text nodes have no children"
assert len(node) or node.text, "Node has no children"
if node.text:
return (node, "text")
else:
return node[0]
def getNextSibling(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
if key == "text":
# XXX: we cannot use a "bool(node) and node[0] or None" construct here
# because node[0] might evaluate to False if it has no child element
if len(node):
return node[0]
else:
return None
else: # tail
return node.getnext()
return (node, "tail") if node.tail else node.getnext()
def getParentNode(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
if key == "text":
return node
# else: fallback to "normal" processing
return node.getparent()
| mit |
ropik/androguard | androguard/core/analysis/risk.py | 24 | 51139 | # This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# risks from classes.dex:
# API <-> Permissions
# method X is more dangerous than another one
# const-string -> apk-tool
# v0 <- X
# v1 <- Y
# v10 <- X
# v11 <- Y
# CALL( v0, v1 )
# obfuscated names
GENERAL_RISK = 0
DANGEROUS_RISK = 1
SIGNATURE_SYSTEM_RISK = 2
SIGNATURE_RISK = 3
NORMAL_RISK = 4
MONEY_RISK = 5
SMS_RISK = 6
PHONE_RISK = 7
INTERNET_RISK = 8
PRIVACY_RISK = 9
DYNAMIC_RISK = 10
BINARY_RISK = 11
EXPLOIT_RISK = 12
RISK_VALUES = {
DANGEROUS_RISK : 5,
SIGNATURE_SYSTEM_RISK : 10,
SIGNATURE_RISK : 10,
NORMAL_RISK : 0,
MONEY_RISK : 5,
SMS_RISK : 5,
PHONE_RISK : 5,
INTERNET_RISK : 2,
PRIVACY_RISK : 5,
DYNAMIC_RISK : 5,
BINARY_RISK : 10,
EXPLOIT_RISK : 15,
}
GENERAL_PERMISSIONS_RISK = {
"dangerous" : DANGEROUS_RISK,
"signatureOrSystem" : SIGNATURE_SYSTEM_RISK,
"signatureOrSystemOrDevelopment" : SIGNATURE_SYSTEM_RISK,
"signature" : SIGNATURE_RISK,
"normal" : NORMAL_RISK,
}
PERMISSIONS_RISK = {
"SEND_SMS" : [ MONEY_RISK, SMS_RISK ],
"RECEIVE_SMS" : [ SMS_RISK ],
"READ_SMS" : [ SMS_RISK ],
"WRITE_SMS" : [ SMS_RISK ],
"RECEIVE_SMS" : [ SMS_RISK ],
"RECEIVE_MMS" : [ SMS_RISK ],
"PHONE_CALL" : [ MONEY_RISK ],
"PROCESS_OUTGOING_CALLS" : [ MONEY_RISK ],
"CALL_PRIVILEGED" : [ MONEY_RISK ],
"INTERNET" : [ MONEY_RISK, INTERNET_RISK ],
"READ_PHONE_STATE" : [ PRIVACY_RISK ],
"READ_CONTACTS" : [ PRIVACY_RISK ],
"READ_HISTORY_BOOKMARKS" : [ PRIVACY_RISK ],
"ACCESS_FINE_LOCATION" : [ PRIVACY_RISK ],
"ACCESS_COARSE_LOCATION" : [ PRIVACY_RISK ],
}
LOW_RISK = "low"
AVERAGE_RISK = "average"
HIGH_RISK = "high"
UNACCEPTABLE_RISK = "unacceptable"
NULL_MALWARE_RISK = "null"
AVERAGE_MALWARE_RISK = "average"
HIGH_MALWARE_RISK = "high"
UNACCEPTABLE_MALWARE_RISK = "unacceptable"
from androguard.core.androconf import error, warning, debug, set_debug, get_debug
from androguard.core.bytecodes import dvm
from androguard.core.analysis import analysis
from androguard.core.bytecodes.dvm_permissions import DVM_PERMISSIONS
import re, copy
def add_system_rule(system, rule_name, rule):
system.rules[ rule_name ] = rule
def create_system_risk():
try:
import fuzzy
except ImportError:
error("please install pyfuzzy to use this module !")
import fuzzy.System
import fuzzy.InputVariable
import fuzzy.fuzzify.Plain
import fuzzy.OutputVariable
import fuzzy.defuzzify.COGS
import fuzzy.defuzzify.COG
import fuzzy.defuzzify.MaxRight
import fuzzy.defuzzify.MaxLeft
import fuzzy.defuzzify.LM
import fuzzy.set.Polygon
import fuzzy.set.Singleton
import fuzzy.set.Triangle
import fuzzy.Adjective
import fuzzy.operator.Input
import fuzzy.operator.Compound
import fuzzy.norm.Min
import fuzzy.norm.Max
import fuzzy.Rule
import fuzzy.defuzzify.Dict
system = fuzzy.System.System()
input_Dangerous_Risk = fuzzy.InputVariable.InputVariable(fuzzify=fuzzy.fuzzify.Plain.Plain())
input_Money_Risk = fuzzy.InputVariable.InputVariable(fuzzify=fuzzy.fuzzify.Plain.Plain())
input_Privacy_Risk = fuzzy.InputVariable.InputVariable(fuzzify=fuzzy.fuzzify.Plain.Plain())
input_Binary_Risk = fuzzy.InputVariable.InputVariable(fuzzify=fuzzy.fuzzify.Plain.Plain())
input_Internet_Risk = fuzzy.InputVariable.InputVariable(fuzzify=fuzzy.fuzzify.Plain.Plain())
input_Dynamic_Risk = fuzzy.InputVariable.InputVariable(fuzzify=fuzzy.fuzzify.Plain.Plain())
# Input variables
# Dangerous Risk
system.variables["input_Dangerous_Risk"] = input_Dangerous_Risk
input_Dangerous_Risk.adjectives[LOW_RISK] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(0.0, 1.0), (8.0, 1.0), (12.0, 0.0)]) )
input_Dangerous_Risk.adjectives[AVERAGE_RISK] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(8.0, 0.0), (50.0, 1.0), (60.0, 0.0)]) )
input_Dangerous_Risk.adjectives[HIGH_RISK] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(50.0, 0.0), (85.0, 1.0), (95.0, 0.0)]) )
input_Dangerous_Risk.adjectives[UNACCEPTABLE_RISK] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(85.0, 0.0), (100.0, 1.0)]) )
# Money Risk
system.variables["input_Money_Risk"] = input_Money_Risk
input_Money_Risk.adjectives[LOW_RISK] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(0.0, 1.0), (2.0, 1.0), (3.0, 0.0)]) )
input_Money_Risk.adjectives[UNACCEPTABLE_RISK] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(4.0, 0.0), (5.0, 1.0), (30.0, 1.0)]) )
# Privacy Risk
system.variables["input_Privacy_Risk"] = input_Privacy_Risk
input_Privacy_Risk.adjectives[LOW_RISK] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(0.0, 1.0), (6.0, 1.0), (10.0, 0.0)]) )
input_Privacy_Risk.adjectives[HIGH_RISK] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(6.0, 0.0), (10.0, 1.0), (20.0, 0.0)]) )
input_Privacy_Risk.adjectives[UNACCEPTABLE_RISK] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(15.0, 0.0), (20.0, 1.0), (30.0, 1.0)]) )
# Binary Risk
system.variables["input_Binary_Risk"] = input_Binary_Risk
input_Binary_Risk.adjectives[LOW_RISK] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(0.0, 1.0), (6.0, 1.0), (10.0, 0.0)]) )
input_Binary_Risk.adjectives[AVERAGE_RISK] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(6.0, 0.0), (10.0, 1.0), (15.0, 0.0)]) )
input_Binary_Risk.adjectives[HIGH_RISK] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(10.0, 0.0), (20.0, 1.0), (24.0, 0.0)]) )
input_Binary_Risk.adjectives[UNACCEPTABLE_RISK] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(23.0, 0.0), (30.0, 1.0), (40.0, 1.0)]) )
# Internet Risk
system.variables["input_Internet_Risk"] = input_Internet_Risk
#input_Internet_Risk.adjectives[LOW_RISK] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(0.0, 1.0), (1.0, 1.0)]) )
input_Internet_Risk.adjectives[HIGH_RISK] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(1.0, 0.0), (5.0, 1.0), (30.0, 1.0)]) )
# Dynamic Risk
system.variables["input_Dynamic_Risk"] = input_Dynamic_Risk
input_Dynamic_Risk.adjectives[LOW_RISK] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(0.0, 1.0), (2.0, 1.0), (3.0, 0.0)]))
input_Dynamic_Risk.adjectives[UNACCEPTABLE_RISK] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(4.0, 0.0), (5.0, 1.0), (50.0, 1.0)]) )
# Output variables
output_malware_risk = fuzzy.OutputVariable.OutputVariable(
defuzzify=fuzzy.defuzzify.COGS.COGS(),
description="malware risk",
min=0.0,max=100.0,
)
#output_malware_risk = fuzzy.OutputVariable.OutputVariable(defuzzify=fuzzy.defuzzify.Dict.Dict())
output_malware_risk.adjectives[NULL_MALWARE_RISK] = fuzzy.Adjective.Adjective(fuzzy.set.Singleton.Singleton(0.0))
output_malware_risk.adjectives[AVERAGE_MALWARE_RISK] = fuzzy.Adjective.Adjective(fuzzy.set.Singleton.Singleton(30.0))
output_malware_risk.adjectives[HIGH_MALWARE_RISK] = fuzzy.Adjective.Adjective(fuzzy.set.Singleton.Singleton(60.0))
output_malware_risk.adjectives[UNACCEPTABLE_MALWARE_RISK] = fuzzy.Adjective.Adjective(fuzzy.set.Singleton.Singleton(100.0))
system.variables["output_malware_risk"] = output_malware_risk
# Rules
#RULE 0: DYNAMIC
add_system_rule(system, "r0", fuzzy.Rule.Rule(
adjective=[system.variables["output_malware_risk"].adjectives[NULL_MALWARE_RISK]],
operator=fuzzy.operator.Input.Input( system.variables["input_Dynamic_Risk"].adjectives[LOW_RISK] )
)
)
add_system_rule(system, "r0a", fuzzy.Rule.Rule(
adjective=[system.variables["output_malware_risk"].adjectives[UNACCEPTABLE_MALWARE_RISK]],
operator=fuzzy.operator.Input.Input( system.variables["input_Dynamic_Risk"].adjectives[UNACCEPTABLE_RISK] )
)
)
#RULE 1: MONEY
add_system_rule(system, "r1", fuzzy.Rule.Rule(
adjective=[system.variables["output_malware_risk"].adjectives[NULL_MALWARE_RISK]],
operator=fuzzy.operator.Input.Input( system.variables["input_Money_Risk"].adjectives[LOW_RISK] )
)
)
add_system_rule(system, "r1a", fuzzy.Rule.Rule(
adjective=[system.variables["output_malware_risk"].adjectives[UNACCEPTABLE_MALWARE_RISK]],
operator=fuzzy.operator.Input.Input( system.variables["input_Money_Risk"].adjectives[UNACCEPTABLE_RISK] )
)
)
#RULE 3 : BINARY
add_system_rule(system, "r3", fuzzy.Rule.Rule(
adjective=[system.variables["output_malware_risk"].adjectives[AVERAGE_MALWARE_RISK]],
operator=fuzzy.operator.Input.Input( system.variables["input_Binary_Risk"].adjectives[AVERAGE_RISK] )
)
)
add_system_rule(system, "r3a", fuzzy.Rule.Rule(
adjective=[system.variables["output_malware_risk"].adjectives[HIGH_MALWARE_RISK]],
operator=fuzzy.operator.Input.Input( system.variables["input_Binary_Risk"].adjectives[HIGH_RISK] )
)
)
add_system_rule(system, "r3b", fuzzy.Rule.Rule(
adjective=[system.variables["output_malware_risk"].adjectives[UNACCEPTABLE_MALWARE_RISK]],
operator=fuzzy.operator.Input.Input( system.variables["input_Binary_Risk"].adjectives[UNACCEPTABLE_RISK] )
)
)
# PRIVACY + INTERNET
add_system_rule(system, "r5", fuzzy.Rule.Rule(
adjective=[system.variables["output_malware_risk"].adjectives[HIGH_MALWARE_RISK]],
operator=fuzzy.operator.Compound.Compound(
fuzzy.norm.Min.Min(),
fuzzy.operator.Input.Input( system.variables["input_Privacy_Risk"].adjectives[LOW_RISK] ),
fuzzy.operator.Input.Input( system.variables["input_Internet_Risk"].adjectives[HIGH_RISK] ) )
)
)
add_system_rule(system, "r5a", fuzzy.Rule.Rule(
adjective=[system.variables["output_malware_risk"].adjectives[UNACCEPTABLE_MALWARE_RISK]],
operator=fuzzy.operator.Compound.Compound(
fuzzy.norm.Min.Min(),
fuzzy.operator.Input.Input( system.variables["input_Privacy_Risk"].adjectives[HIGH_RISK] ),
fuzzy.operator.Input.Input( system.variables["input_Internet_Risk"].adjectives[HIGH_RISK] ) )
)
)
add_system_rule(system, "r6", fuzzy.Rule.Rule(
adjective=[system.variables["output_malware_risk"].adjectives[HIGH_MALWARE_RISK]],
operator=fuzzy.operator.Input.Input( system.variables["input_Dangerous_Risk"].adjectives[HIGH_RISK] )
)
)
add_system_rule(system, "r6a", fuzzy.Rule.Rule(
adjective=[system.variables["output_malware_risk"].adjectives[UNACCEPTABLE_MALWARE_RISK]],
operator=fuzzy.operator.Input.Input( system.variables["input_Dangerous_Risk"].adjectives[UNACCEPTABLE_RISK] )
)
)
return system
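# A hedged usage sketch (the input figures are made up, and this assumes
# pyfuzzy's System.calculate(input_dict, output_dict) API):
#   system = create_system_risk()
#   out = {"output_malware_risk": 0.0}
#   system.calculate({"input_Dangerous_Risk": 20.0, "input_Money_Risk": 5.0,
#                     "input_Privacy_Risk": 10.0, "input_Binary_Risk": 0.0,
#                     "input_Internet_Risk": 2.0, "input_Dynamic_Risk": 0.0},
#                    out)
#   # out["output_malware_risk"] now holds the defuzzified risk in [0, 100]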
PERFECT_SCORE = "perfect"
HIGH_SCORE = "high"
AVERAGE_SCORE = "average"
LOW_SCORE = "low"
NULL_METHOD_SCORE = "null"
AVERAGE_METHOD_SCORE = "average"
HIGH_METHOD_SCORE = "high"
PERFECT_METHOD_SCORE = "perfect"
def create_system_method_score():
try:
import fuzzy
except ImportError:
error("please install pyfuzzy to use this module !")
import fuzzy.System
import fuzzy.InputVariable
import fuzzy.fuzzify.Plain
import fuzzy.OutputVariable
import fuzzy.defuzzify.COGS
import fuzzy.set.Polygon
import fuzzy.set.Singleton
import fuzzy.set.Triangle
import fuzzy.Adjective
import fuzzy.operator.Input
import fuzzy.operator.Compound
import fuzzy.norm.Min
import fuzzy.norm.Max
import fuzzy.Rule
system = fuzzy.System.System()
input_Length_MS = fuzzy.InputVariable.InputVariable(fuzzify=fuzzy.fuzzify.Plain.Plain())
input_Match_MS = fuzzy.InputVariable.InputVariable(fuzzify=fuzzy.fuzzify.Plain.Plain())
input_AndroidEntropy_MS = fuzzy.InputVariable.InputVariable(fuzzify=fuzzy.fuzzify.Plain.Plain())
input_JavaEntropy_MS = fuzzy.InputVariable.InputVariable(fuzzify=fuzzy.fuzzify.Plain.Plain())
input_Permissions_MS = fuzzy.InputVariable.InputVariable(fuzzify=fuzzy.fuzzify.Plain.Plain())
input_Similarity_MS = fuzzy.InputVariable.InputVariable(fuzzify=fuzzy.fuzzify.Plain.Plain())
# Input variables
# Length
system.variables["input_Length_MS"] = input_Length_MS
input_Length_MS.adjectives[LOW_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(0.0, 1.0), (50.0, 1.0), (100.0, 0.0)]) )
input_Length_MS.adjectives[AVERAGE_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(50.0, 0.0), (100.0, 1.0), (150.0, 1.0), (300.0, 0.0)]) )
input_Length_MS.adjectives[HIGH_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(150.0, 0.0), (200.0, 1.0), (300.0, 1.0), (400.0, 0.0)]) )
input_Length_MS.adjectives[PERFECT_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(350.0, 0.0), (400.0, 1.0), (500.0, 1.0)]) )
# Match
system.variables["input_Match_MS"] = input_Match_MS
input_Match_MS.adjectives[LOW_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(0.0, 1.0), (20.0, 1.0), (50.0, 0.0)]) )
input_Match_MS.adjectives[AVERAGE_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(40.0, 0.0), (45.0, 1.0), (60.0, 1.0), (80.0, 0.0)]) )
input_Match_MS.adjectives[HIGH_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(75.0, 0.0), (90.0, 1.0), (98.0, 1.0), (99.0, 0.0)]) )
input_Match_MS.adjectives[PERFECT_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(98.0, 0.0), (100.0, 1.0)]) )
#input_Match_MS.adjectives[PERFECT_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Singleton.Singleton( 100.0 ) )
# Android Entropy
system.variables["input_AndroidEntropy_MS"] = input_AndroidEntropy_MS
input_AndroidEntropy_MS.adjectives[LOW_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(0.0, 1.0), (2.0, 1.0), (4.0, 0.0)]) )
input_AndroidEntropy_MS.adjectives[HIGH_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(3.0, 0.0), (4.0, 1.0), (30.0, 1.0)]) )
# Java Entropy
system.variables["input_JavaEntropy_MS"] = input_JavaEntropy_MS
input_JavaEntropy_MS.adjectives[LOW_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(0.0, 1.0), (2.0, 1.0), (4.0, 0.0)]) )
input_JavaEntropy_MS.adjectives[HIGH_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(3.0, 0.0), (4.0, 1.0), (30.0, 1.0)]) )
# Permissions
system.variables["input_Permissions_MS"] = input_Permissions_MS
input_Permissions_MS.adjectives[LOW_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(0.0, 1.0), (3.0, 1.0), (4.0, 0.0)]) )
input_Permissions_MS.adjectives[AVERAGE_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(3.0, 0.0), (4.0, 1.0), (8.0, 1.0), (9.0, 0.0)]) )
input_Permissions_MS.adjectives[HIGH_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(8.0, 0.0), (10.0, 1.0), (12.0, 1.0), (13.0, 0.0)]) )
input_Permissions_MS.adjectives[PERFECT_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(12.0, 0.0), (13.0, 1.0), (20.0, 1.0)]) )
# Similarity Match
system.variables["input_Similarity_MS"] = input_Similarity_MS
input_Similarity_MS.adjectives[HIGH_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(0.0, 1.0), (0.1, 1.0), (0.3, 0.0)]) )
input_Similarity_MS.adjectives[LOW_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(0.3, 0.0), (0.35, 1.0), (0.4, 1.0)]) )
# Output variables
output_method_score = fuzzy.OutputVariable.OutputVariable(
defuzzify=fuzzy.defuzzify.COGS.COGS(),
description="method score",
min=0.0,max=100.0,
)
output_method_score.adjectives[NULL_METHOD_SCORE] = fuzzy.Adjective.Adjective(fuzzy.set.Singleton.Singleton(0.0))
output_method_score.adjectives[AVERAGE_METHOD_SCORE] = fuzzy.Adjective.Adjective(fuzzy.set.Singleton.Singleton(50.0))
output_method_score.adjectives[HIGH_METHOD_SCORE] = fuzzy.Adjective.Adjective(fuzzy.set.Singleton.Singleton(80.0))
output_method_score.adjectives[PERFECT_METHOD_SCORE] = fuzzy.Adjective.Adjective(fuzzy.set.Singleton.Singleton(100.0))
system.variables["output_method_score"] = output_method_score
add_system_rule(system, "android entropy null", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_score"].adjectives[NULL_METHOD_SCORE]],
operator=fuzzy.operator.Input.Input( system.variables["input_AndroidEntropy_MS"].adjectives[LOW_SCORE] ))
)
add_system_rule(system, "java entropy null", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_score"].adjectives[NULL_METHOD_SCORE]],
operator=fuzzy.operator.Input.Input( system.variables["input_JavaEntropy_MS"].adjectives[LOW_SCORE] ))
)
add_system_rule(system, "permissions null", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_score"].adjectives[NULL_METHOD_SCORE]],
operator=fuzzy.operator.Input.Input( system.variables["input_Permissions_MS"].adjectives[LOW_SCORE] ))
)
add_system_rule(system, "permissions average", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_score"].adjectives[AVERAGE_METHOD_SCORE]],
operator=fuzzy.operator.Input.Input( system.variables["input_Permissions_MS"].adjectives[AVERAGE_SCORE] ))
)
add_system_rule(system, "permissions high", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_score"].adjectives[HIGH_METHOD_SCORE]],
operator=fuzzy.operator.Input.Input( system.variables["input_Permissions_MS"].adjectives[HIGH_SCORE] ))
)
add_system_rule(system, "permissions perfect", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_score"].adjectives[PERFECT_METHOD_SCORE]],
operator=fuzzy.operator.Input.Input( system.variables["input_Permissions_MS"].adjectives[PERFECT_SCORE] ))
)
add_system_rule(system, "similarity low", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_score"].adjectives[NULL_METHOD_SCORE]],
operator=fuzzy.operator.Input.Input( system.variables["input_Similarity_MS"].adjectives[LOW_SCORE] ))
)
add_system_rule(system, "length match perfect", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_score"].adjectives[PERFECT_METHOD_SCORE]],
operator=fuzzy.operator.Compound.Compound(
fuzzy.norm.Min.Min(),
fuzzy.operator.Input.Input( system.variables["input_Length_MS"].adjectives[PERFECT_SCORE] ),
fuzzy.operator.Input.Input( system.variables["input_Match_MS"].adjectives[PERFECT_SCORE] ) )
)
)
add_system_rule(system, "length match null", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_score"].adjectives[NULL_METHOD_SCORE]],
operator=fuzzy.operator.Compound.Compound(
fuzzy.norm.Min.Min(),
fuzzy.operator.Input.Input( system.variables["input_Length_MS"].adjectives[LOW_SCORE] ),
fuzzy.operator.Input.Input( system.variables["input_Match_MS"].adjectives[PERFECT_SCORE] ) )
)
)
add_system_rule(system, "length AndroidEntropy perfect", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_score"].adjectives[HIGH_METHOD_SCORE]],
operator=fuzzy.operator.Compound.Compound(
fuzzy.norm.Min.Min(),
fuzzy.operator.Input.Input( system.variables["input_Length_MS"].adjectives[PERFECT_SCORE] ),
fuzzy.operator.Input.Input( system.variables["input_AndroidEntropy_MS"].adjectives[HIGH_SCORE] ) )
)
)
add_system_rule(system, "length JavaEntropy perfect", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_score"].adjectives[HIGH_METHOD_SCORE]],
operator=fuzzy.operator.Compound.Compound(
fuzzy.norm.Min.Min(),
fuzzy.operator.Input.Input( system.variables["input_Length_MS"].adjectives[PERFECT_SCORE] ),
fuzzy.operator.Input.Input( system.variables["input_JavaEntropy_MS"].adjectives[HIGH_SCORE] ) )
)
)
add_system_rule(system, "length similarity perfect", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_score"].adjectives[PERFECT_METHOD_SCORE]],
operator=fuzzy.operator.Compound.Compound(
fuzzy.norm.Min.Min(),
fuzzy.operator.Input.Input( system.variables["input_Length_MS"].adjectives[PERFECT_SCORE] ),
fuzzy.operator.Input.Input( system.variables["input_Similarity_MS"].adjectives[HIGH_SCORE] ),
)
)
)
add_system_rule(system, "length similarity average", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_score"].adjectives[HIGH_METHOD_SCORE]],
operator=fuzzy.operator.Compound.Compound(
fuzzy.norm.Min.Min(),
fuzzy.operator.Input.Input( system.variables["input_Length_MS"].adjectives[AVERAGE_SCORE] ),
fuzzy.operator.Input.Input( system.variables["input_Similarity_MS"].adjectives[HIGH_SCORE] ),
)
)
)
return system
def create_system_method_one_score():
try:
import fuzzy
except ImportError:
error("please install pyfuzzy to use this module!")
import fuzzy.System
import fuzzy.InputVariable
import fuzzy.fuzzify.Plain
import fuzzy.OutputVariable
import fuzzy.defuzzify.COGS
import fuzzy.set.Polygon
import fuzzy.set.Singleton
import fuzzy.set.Triangle
import fuzzy.Adjective
import fuzzy.operator.Input
import fuzzy.operator.Compound
import fuzzy.norm.Min
import fuzzy.norm.Max
import fuzzy.Rule
system = fuzzy.System.System()
input_Length_MS = fuzzy.InputVariable.InputVariable(fuzzify=fuzzy.fuzzify.Plain.Plain())
input_AndroidEntropy_MS = fuzzy.InputVariable.InputVariable(fuzzify=fuzzy.fuzzify.Plain.Plain())
input_JavaEntropy_MS = fuzzy.InputVariable.InputVariable(fuzzify=fuzzy.fuzzify.Plain.Plain())
input_Permissions_MS = fuzzy.InputVariable.InputVariable(fuzzify=fuzzy.fuzzify.Plain.Plain())
# Input variables
# Length
system.variables["input_Length_MS"] = input_Length_MS
input_Length_MS.adjectives[LOW_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(0.0, 1.0), (50.0, 1.0), (100.0, 0.0)]) )
input_Length_MS.adjectives[AVERAGE_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(50.0, 0.0), (100.0, 1.0), (150.0, 1.0), (300.0, 0.0)]) )
input_Length_MS.adjectives[HIGH_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(150.0, 0.0), (200.0, 1.0), (300.0, 1.0), (400.0, 0.0)]) )
input_Length_MS.adjectives[PERFECT_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(350.0, 0.0), (400.0, 1.0), (500.0, 1.0)]) )
# Android Entropy
system.variables["input_AndroidEntropy_MS"] = input_AndroidEntropy_MS
input_AndroidEntropy_MS.adjectives[LOW_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(0.0, 1.0), (2.0, 1.0), (4.0, 0.0)]) )
input_AndroidEntropy_MS.adjectives[HIGH_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(3.0, 0.0), (4.0, 1.0), (30.0, 1.0)]) )
# Java Entropy
system.variables["input_JavaEntropy_MS"] = input_JavaEntropy_MS
input_JavaEntropy_MS.adjectives[LOW_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(0.0, 1.0), (2.0, 1.0), (4.0, 0.0)]) )
input_JavaEntropy_MS.adjectives[HIGH_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(3.0, 0.0), (4.0, 1.0), (30.0, 1.0)]) )
# Permissions
system.variables["input_Permissions_MS"] = input_Permissions_MS
input_Permissions_MS.adjectives[LOW_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(0.0, 1.0), (3.0, 1.0), (4.0, 0.0)]) )
input_Permissions_MS.adjectives[AVERAGE_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(3.0, 0.0), (4.0, 1.0), (8.0, 1.0), (9.0, 0.0)]) )
input_Permissions_MS.adjectives[HIGH_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(8.0, 0.0), (10.0, 1.0), (12.0, 1.0), (13.0, 0.0)]) )
input_Permissions_MS.adjectives[PERFECT_SCORE] = fuzzy.Adjective.Adjective( fuzzy.set.Polygon.Polygon([(12.0, 0.0), (13.0, 1.0), (20.0, 1.0)]) )
# Output variables
output_method_score = fuzzy.OutputVariable.OutputVariable(
defuzzify=fuzzy.defuzzify.COGS.COGS(),
description="method one score",
min=0.0,max=100.0,
)
output_method_score.adjectives[NULL_METHOD_SCORE] = fuzzy.Adjective.Adjective(fuzzy.set.Singleton.Singleton(0.0))
output_method_score.adjectives[AVERAGE_METHOD_SCORE] = fuzzy.Adjective.Adjective(fuzzy.set.Singleton.Singleton(50.0))
output_method_score.adjectives[HIGH_METHOD_SCORE] = fuzzy.Adjective.Adjective(fuzzy.set.Singleton.Singleton(80.0))
output_method_score.adjectives[PERFECT_METHOD_SCORE] = fuzzy.Adjective.Adjective(fuzzy.set.Singleton.Singleton(100.0))
system.variables["output_method_one_score"] = output_method_score
add_system_rule(system, "android entropy null", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_one_score"].adjectives[NULL_METHOD_SCORE]],
operator=fuzzy.operator.Input.Input( system.variables["input_AndroidEntropy_MS"].adjectives[LOW_SCORE] ))
)
add_system_rule(system, "java entropy null", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_one_score"].adjectives[NULL_METHOD_SCORE]],
operator=fuzzy.operator.Input.Input( system.variables["input_JavaEntropy_MS"].adjectives[LOW_SCORE] ))
)
add_system_rule(system, "permissions null", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_one_score"].adjectives[NULL_METHOD_SCORE]],
operator=fuzzy.operator.Input.Input( system.variables["input_Permissions_MS"].adjectives[LOW_SCORE] ))
)
add_system_rule(system, "permissions average", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_one_score"].adjectives[AVERAGE_METHOD_SCORE]],
operator=fuzzy.operator.Input.Input( system.variables["input_Permissions_MS"].adjectives[AVERAGE_SCORE] ))
)
add_system_rule(system, "permissions high", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_one_score"].adjectives[HIGH_METHOD_SCORE]],
operator=fuzzy.operator.Input.Input( system.variables["input_Permissions_MS"].adjectives[HIGH_SCORE] ))
)
add_system_rule(system, "permissions perfect", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_one_score"].adjectives[PERFECT_METHOD_SCORE]],
operator=fuzzy.operator.Input.Input( system.variables["input_Permissions_MS"].adjectives[PERFECT_SCORE] ))
)
add_system_rule(system, "length permissions perfect", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_one_score"].adjectives[PERFECT_METHOD_SCORE]],
operator=fuzzy.operator.Compound.Compound(
fuzzy.norm.Min.Min(),
fuzzy.operator.Input.Input( system.variables["input_Length_MS"].adjectives[PERFECT_SCORE] ),
fuzzy.operator.Input.Input( system.variables["input_Permissions_MS"].adjectives[PERFECT_SCORE] ) )
)
)
add_system_rule(system, "length AndroidEntropy perfect", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_one_score"].adjectives[HIGH_METHOD_SCORE]],
operator=fuzzy.operator.Compound.Compound(
fuzzy.norm.Min.Min(),
fuzzy.operator.Input.Input( system.variables["input_Length_MS"].adjectives[PERFECT_SCORE] ),
fuzzy.operator.Input.Input( system.variables["input_AndroidEntropy_MS"].adjectives[HIGH_SCORE] ) )
)
)
add_system_rule(system, "length JavaEntropy perfect", fuzzy.Rule.Rule(
adjective=[system.variables["output_method_one_score"].adjectives[HIGH_METHOD_SCORE]],
operator=fuzzy.operator.Compound.Compound(
fuzzy.norm.Min.Min(),
fuzzy.operator.Input.Input( system.variables["input_Length_MS"].adjectives[PERFECT_SCORE] ),
fuzzy.operator.Input.Input( system.variables["input_JavaEntropy_MS"].adjectives[HIGH_SCORE] ) )
)
)
return system
def export_system(system, directory):
from fuzzy.doc.plot.gnuplot import doc
d = doc.Doc(directory)
d.createDoc(system)
import fuzzy.doc.structure.dot.dot
import subprocess
for name,rule in system.rules.items():
cmd = "dot -T png -o '%s/fuzzy-Rule %s.png'" % (directory,name)
f = subprocess.Popen(cmd, shell=True, bufsize=32768, stdin=subprocess.PIPE).stdin
fuzzy.doc.structure.dot.dot.print_header(f,"XXX")
fuzzy.doc.structure.dot.dot.print_dot(rule,f,system,"")
fuzzy.doc.structure.dot.dot.print_footer(f)
cmd = "dot -T png -o '%s/fuzzy-System.png'" % directory
f = subprocess.Popen(cmd, shell=True, bufsize=32768, stdin=subprocess.PIPE).stdin
fuzzy.doc.structure.dot.dot.printDot(system,f)
d.overscan=0
in_vars = [name for name,var in system.variables.items() if isinstance(var,fuzzy.InputVariable.InputVariable)]
out_vars = [name for name,var in system.variables.items() if isinstance(var,fuzzy.OutputVariable.OutputVariable)]
if len(in_vars) == 2 and not (
isinstance(system.variables[in_vars[0]].fuzzify,fuzzy.fuzzify.Dict.Dict)
or
isinstance(system.variables[in_vars[1]].fuzzify,fuzzy.fuzzify.Dict.Dict)
):
for out_var in out_vars:
args = []
if isinstance(system.variables[out_var].defuzzify,fuzzy.defuzzify.Dict.Dict):
for adj in system.variables[out_var].adjectives:
d.create3DPlot_adjective(system, in_vars[0], in_vars[1], out_var, adj, {})
else:
d.create3DPlot(system, in_vars[0], in_vars[1], out_var, {})
class RiskIndicator(object):
def __init__(self):
self.risk_analysis_obj = []
def add_risk_analysis(self, obj):
self.risk_analysis_obj.append( obj )
def with_apk(self, apk_file):
if apk_file.is_valid_APK():
d = dvm.DalvikVMFormat( apk_file.get_dex() )
dx = analysis.uVMAnalysis( d )
return self.with_apk_direct(apk_file, d, dx)
return {}
def with_apk_direct(self, apk_file, d, dx):
res = {}
for i in self.risk_analysis_obj:
res[ i.get_name() ] = i.with_apk( apk_file, d, dx )
return res
def with_dex(self, dex_file):
"""
@param dex_file : a buffer
@rtype : return the risk of the dex file (from 0.0 to 100.0)
"""
d = dvm.DalvikVMFormat( dex_file )
dx = analysis.uVMAnalysis( d )
return self.with_dex_direct(d, dx)
def with_dex_direct(self, d, dx):
res = {}
for i in self.risk_analysis_obj:
res[ i.get_name() ] = i.with_dex( d, dx )
return res
class FuzzyRisk(object):
"""
Calculate the risk to install a specific android application by using:
Permissions:
- dangerous
- signatureOrSystem
- signature
- normal
- money
- internet
- sms
- call
- privacy
API:
- DexClassLoader
Files:
- binary file
- shared library
note : pyfuzzy without fcl support (don't install antlr)
"""
def __init__(self):
self.system = create_system_risk()
# export_system( self.system, "./output" )
self.system_method_risk = create_system_method_one_score()
def get_name(self):
return "FuzzyRisk"
def with_apk(self, apk_file, d, dx):
"""
@param apk_file : an L{APK} object
@rtype : return the risk of the apk file (from 0.0 to 100.0)
"""
risks = { DANGEROUS_RISK : 0.0,
MONEY_RISK : 0.0,
PRIVACY_RISK : 0.0,
INTERNET_RISK : 0.0,
BINARY_RISK : 0.0,
DYNAMIC_RISK : 0.0,
}
self.__eval_risk_perm( apk_file.get_details_permissions(), risks )
self.__eval_risk_dyn( dx, risks )
self.__eval_risk_bin( apk_file.get_files_types(), risks )
val = self.__eval_risks( risks )
return val
def with_dex(self, vm, vmx):
risks = { DANGEROUS_RISK : 0.0,
MONEY_RISK : 0.0,
PRIVACY_RISK : 0.0,
INTERNET_RISK : 0.0,
BINARY_RISK : 0.0,
DYNAMIC_RISK : 0.0,
}
d = {}
for i in vmx.get_permissions( [] ):
d[ i ] = DVM_PERMISSIONS["MANIFEST_PERMISSION"][i]
self.__eval_risk_perm( d, risks )
self.__eval_risk_dyn( vmx, risks )
val = self.__eval_risks( risks )
return val
def test(self, vm, vmx):  # vm/vmx must be passed in; they are not attributes of FuzzyRisk
##########################
score_order_sign = {}
import sys
sys.path.append("./elsim")
from elsim.elsign.libelsign import libelsign
for method in vm.get_methods():
if method.get_length() < 80:
continue
score_order_sign[ method ] = self.get_method_score( method.get_length(),
libelsign.entropy( vmx.get_method_signature(method, "L4", { "L4" : { "arguments" : ["Landroid"] } } ).get_string() ),
libelsign.entropy( vmx.get_method_signature(method, "L4", { "L4" : { "arguments" : ["Ljava"] } } ).get_string() ),
map(lambda perm : (perm, DVM_PERMISSIONS["MANIFEST_PERMISSION"][ perm ]), vmx.get_permissions_method( method )),
)
for v in sorted(score_order_sign, key=lambda x : score_order_sign[x], reverse=True):
print v.get_name(), v.get_class_name(), v.get_descriptor(), v.get_length(), score_order_sign[ v ]
##########################
return score_order_sign
def __eval_risk_perm(self, list_details_permissions, risks):
for i in list_details_permissions:
permission = i
if "." in permission:
permission = permission.split(".")[-1]
# print permission, GENERAL_PERMISSIONS_RISK[ list_details_permissions[ i ][0] ]
risk_type = GENERAL_PERMISSIONS_RISK[ list_details_permissions[ i ][0] ]
risks[ DANGEROUS_RISK ] += RISK_VALUES [ risk_type ]
try:
for j in PERMISSIONS_RISK[ permission ]:
risks[ j ] += RISK_VALUES[ j ]
except KeyError:
pass
def __eval_risk_dyn(self, vmx, risks):
for m, _ in vmx.tainted_packages.get_packages():
if m.get_name() == "Ldalvik/system/DexClassLoader;":
for path in m.get_paths():
if path.get_access_flag() == analysis.TAINTED_PACKAGE_CREATE:
risks[ DYNAMIC_RISK ] = RISK_VALUES[ DYNAMIC_RISK ]
return
def __eval_risk_bin(self, list_details_files, risks):
for i in list_details_files:
if "ELF" in list_details_files[ i ]:
# shared library
if "shared" in list_details_files[ i ]:
risks[ BINARY_RISK ] += RISK_VALUES [ BINARY_RISK ]
# binary
else:
risks[ BINARY_RISK ] += RISK_VALUES [ EXPLOIT_RISK ]
def __eval_risks(self, risks):
output_values = {"output_malware_risk" : 0.0}
input_val = {}
input_val['input_Dangerous_Risk'] = risks[ DANGEROUS_RISK ]
input_val['input_Money_Risk'] = risks[ MONEY_RISK ]
input_val['input_Privacy_Risk'] = risks[ PRIVACY_RISK ]
input_val['input_Binary_Risk'] = risks[ BINARY_RISK ]
input_val['input_Internet_Risk'] = risks[ INTERNET_RISK ]
input_val['input_Dynamic_Risk'] = risks[ DYNAMIC_RISK ]
#print input_val,
self.system.calculate(input=input_val, output = output_values)
val = output_values[ "output_malware_risk" ]
return { "VALUE" : val }
def get_method_score(self, length, android_entropy, java_entropy, permissions):
val_permissions = 0
for i in permissions:
val_permissions += RISK_VALUES[ GENERAL_PERMISSIONS_RISK[ i[1][0] ] ]
try:
for j in PERMISSIONS_RISK[ i[0] ]:
val_permissions += RISK_VALUES[ j ]
except KeyError:
pass
print length, android_entropy, java_entropy, val_permissions
output_values = {"output_method_one_score" : 0.0}
input_val = {}
input_val['input_Length_MS'] = length
input_val['input_AndroidEntropy_MS'] = android_entropy
input_val['input_JavaEntropy_MS'] = java_entropy
input_val['input_Permissions_MS'] = val_permissions
self.system_method_risk.calculate(input=input_val, output = output_values)
score = output_values[ "output_method_one_score" ]
return score
def simulate(self, risks):
return self.__eval_risks( risks )
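# Illustrative usage sketch (not from the original source; the file path and
# the "apk" import are assumptions based on how androguard is used above):
#
#   ri = RiskIndicator()
#   ri.add_risk_analysis( FuzzyRisk() )
#   res = ri.with_apk( apk.APK("/path/to/app.apk") )
#   print res["FuzzyRisk"]["VALUE"]   # risk from 0.0 to 100.0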
class RedFlags(object):
# APK
# BINARY
# shared library
# executable
# dex
# apk
# jar
# shell script
# Perm
# SMS
# Call
# Money
# Internet
# Privacy
# Normal
# Dangerous
# Signature
# System
# DEX
# Obfuscation
def __init__(self):
self.flags = { "APK" : {
"SHARED LIBRARIES" : 0, # presence of shared libraries (ELF)
"EXECUTABLE" : 0, # presence of executables (ELF)
"DEX" : 0, # presence of dex files
"APK" : 0, # presence of APK files
"ZIP" : 0, # presence of zip files
"SHELL_SCRIPT" : 0, # presence of shell scripts
},
"PERM" : {
"SMS" : 0, # presence of permissions which can manipulate sms
"CALL" : 0, # presence of permissions which can perform a call
"GPS" : 0, # presence of permissions which can manipulate your location
"MONEY" : 0, # presence of permissions which can result to a payement
"INTERNET" : 0, # presence of permissions which can access to internet
"PRIVACY" : 0, # presence of permissions which can access to private information
"NORMAL" : 0, # "The default value. A lower-risk permission that gives requesting applications access to isolated application-level features, with minimal risk to other applications, the system, or the user"
"DANGEROUS" : 0, # "A higher-risk permission that would give a requesting application access to private user data or control over the device that can negatively impact the user"
"SIGNATURE" : 0, # "A permission that the system grants only if the requesting application is signed with the same certificate as the application that declared the permission"
"SIGNATUREORSYSTEM" : 0, # "A permission that the system grants only to applications that are in the Android system image or that are signed with the same certificates as those in the system image"
},
"DEX" : {
"REFLECTION" : 0, # presence of the reflection API
"NATIVE" : 0, # presence of loading a shared library
"DYNAMIC" : 0, # presence of loading dynamically a new dex file
"CRYPTO" : 0, # presence of crypto functions
},
#"OBFUSCATION" : { # presence of obfuscation techniques
#}
}
self.flags_dex = { "DEX" : self.flags["DEX"] }
def get_name(self):
return "RedFlags"
def with_apk(self, apk_file, d, dx):
flags = copy.deepcopy( self.flags )
self.analyze_apk( apk_file, flags["APK"] )
self.analyze_axml( apk_file, flags["PERM"] )
self.analyze_dex( d, dx, flags["DEX"] )
return flags
def with_dex(self, d, dx):
flags = copy.deepcopy( self.flags_dex )  # deep copy so repeated calls do not mutate the shared "DEX" dict
self.analyze_dex( d, dx, flags["DEX"] )
return flags
def analyze_apk(self, a, flags):
elf_executable = [ re.compile("ELF.+executable.+"), "EXECUTABLE" ]
lib_elf = [ re.compile("ELF.+shared object"), "SHARED LIBRARIES" ]
apk_file = [ re.compile("Android application package file"), "APK" ]
dex_file = [ re.compile("Dalvik dex file version 035"), "DEX", re.compile("^classes.dex$") ]
script_file = [ re.compile("script text executable"), "SHELL_SCRIPT" ]
zip_file = [ re.compile("^Zip archive data.+"), "ZIP" ]
regexp = [ elf_executable, lib_elf, apk_file, dex_file, script_file, zip_file ]
files_types = a.get_files_types()
for i in files_types:
for j in regexp:
if j[0].search( files_types[i] ) is not None:
if len(j) < 3:
flags[j[1]] += 1
else:
if j[2].search( i ) is None:
flags[j[1]] += 1
def analyze_axml(self, a, flags):
perms = {
"SEND_SMS" : [ "MONEY", "SMS" ],
"SEND_SMS_NO_CONFIRMATION" : [ "MONEY", "SMS"],
"READ_SMS" : [ "SMS", "PRIVACY" ],
"WRITE_SMS" : [ "MONEY", "SMS" ],
"RECEIVE_SMS" : [ "SMS", "PRIVACY" ],
"RECEIVE_MMS" : [ "SMS", "PRIVACY" ],
"PHONE_CALL" : [ "MONEY", "CALL" ],
"PROCESS_OUTGOING_CALLS" : [ "MONEY", "CALL" ],
"CALL_PRIVILEGED" : [ "MONEY", "CALL" ],
"INTERNET" : [ "INTERNET" ],
"READ_PHONE_STATE" : [ "PRIVACY" ],
"READ_CONTACTS" : [ "PRIVACY" ],
"WRITE_CONTACTS" : [ "PRIVACY" ],
"READ_HISTORY_BOOKMARKS" : [ "PRIVACY" ],
"WRITE_HISTORY_BOOKMARKS" : [ "PRIVACY" ],
"READ_PROFILE" : [ "PRIVACY" ],
"WRITE_PROFILE" : [ "PRIVACY" ],
"READ_SOCIAL_STREAM" : [ "PRIVACY" ],
"WRITE_SOCIAL_STREAM" : [ "PRIVACY" ],
"READ_CALENDAR" : [ "PRIVACY" ],
"WRITE_CALENDAR" : [ "PRIVACY" ],
"READ_USER_DICTIONARY" : [ "PRIVACY" ],
"WRITE_USER_DICTIONARY" : [ "PRIVACY" ],
"SET_ALARM" : [ "PRIVACY" ],
"ADD_VOICEMAIL" : [ "PRIVACY" ],
"GET_ACCOUNTS" : [ "PRIVACY" ],
"MANAGE_ACCOUNTS" : [ "PRIVACY" ],
"RECORD_AUDIO" : [ "PRIVACY" ],
"CAMERA" : [ "PRIVACY" ],
"ACCESS_FINE_LOCATION" : [ "PRIVACY", "GPS" ],
"ACCESS_COARSE_LOCATION" : [ "PRIVACY", "GPS" ],
"ACCESS_LOCATION_EXTRA_COMMANS" : [ "GPS"],
"INSTALL_LOCATION_PROVIDER" : [ "GPS" ],
}
for i in a.get_permissions():
perm = i.split(".")[-1]
try:
flags[ DVM_PERMISSIONS["MANIFEST_PERMISSION"][perm][0].upper() ] += 1
for j in perms:
if j == perm:
for k in perms[j]:
flags[k] += 1
except KeyError:
debug("Unknown permission %s" % perm)
def analyze_dex(self, d, dx, flags):
flags["REFLECTION"] = int( analysis.is_reflection_code(dx) )
flags["NATIVE"] = int( analysis.is_native_code(dx) )
flags["DYNAMIC"] = int( analysis.is_dyn_code(dx) )
flags["CRYPTO"] = int( analysis.is_crypto_code(dx) )
class MethodScore(object):
def __init__(self, length, matches, android_entropy, java_entropy, permissions, similarity_matches):
self.system = create_system_method_score()
#export_system( self.system, "./output" )
val_permissions = 0
for i in permissions:
val_permissions += RISK_VALUES[ GENERAL_PERMISSIONS_RISK[ i[1][0] ] ]
try:
for j in PERMISSIONS_RISK[ i[0] ]:
val_permissions += RISK_VALUES[ j ]
except KeyError:
pass
print length, matches, android_entropy, java_entropy, similarity_matches, val_permissions
output_values = {"output_method_score" : 0.0}
input_val = {}
input_val['input_Length_MS'] = length
input_val['input_Match_MS'] = matches
input_val['input_AndroidEntropy_MS'] = android_entropy
input_val['input_JavaEntropy_MS'] = java_entropy
input_val['input_Permissions_MS'] = val_permissions
input_val['input_Similarity_MS'] = similarity_matches
self.system.calculate(input=input_val, output = output_values)
self.score = output_values[ "output_method_score" ]
def get_score(self):
return self.score
| apache-2.0 |
rosmo/ansible | lib/ansible/modules/packaging/os/rhn_register.py | 17 | 15063 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) James Laska
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: rhn_register
short_description: Manage Red Hat Network registration using the C(rhnreg_ks) command
description:
- Manage registration to the Red Hat Network.
version_added: "1.2"
author:
- James Laska (@jlaska)
notes:
- This is for older Red Hat products. You probably want the M(redhat_subscription) module instead.
- In order to register a system, C(rhnreg_ks) requires either a username and password, or an activationkey.
requirements:
- rhnreg_ks
- either libxml2 or lxml
options:
state:
description:
- Whether to register (C(present)), or unregister (C(absent)) a system.
type: str
choices: [ absent, present ]
default: present
username:
description:
- Red Hat Network username.
type: str
password:
description:
- Red Hat Network password.
type: str
server_url:
description:
- Specify an alternative Red Hat Network server URL.
- The default is the current value of I(serverURL) from C(/etc/sysconfig/rhn/up2date).
type: str
activationkey:
description:
- Supply an activation key for use with registration.
type: str
profilename:
description:
- Supply a profile name for use with registration.
type: str
version_added: "2.0"
ca_cert:
description:
- Supply a custom ssl CA certificate file for use with registration.
type: path
version_added: "2.1"
aliases: [ sslcacert ]
systemorgid:
description:
- Supply an organizational id for use with registration.
type: str
version_added: "2.1"
channels:
description:
- Optionally specify a list of channels to subscribe to upon successful registration.
type: list
default: []
enable_eus:
description:
- If C(yes), extended update support will be requested.
type: bool
default: no
nopackages:
description:
- If C(yes), the registered node will not upload its installed packages information to Satellite server.
type: bool
default: no
version_added: "2.5"
'''
EXAMPLES = r'''
- name: Unregister system from RHN
rhn_register:
state: absent
username: joe_user
password: somepass
- name: Register as user with password and auto-subscribe to available content
rhn_register:
state: present
username: joe_user
password: somepass
- name: Register with activationkey and enable extended update support
rhn_register:
state: present
activationkey: 1-222333444
enable_eus: yes
- name: Register with activationkey and set a profilename which may differ from the hostname
rhn_register:
state: present
activationkey: 1-222333444
profilename: host.example.com.custom
- name: Register as user with password against a satellite server
rhn_register:
state: present
username: joe_user
password: somepass
server_url: https://xmlrpc.my.satellite/XMLRPC
- name: Register as user with password and enable channels
rhn_register:
state: present
username: joe_user
password: somepass
channels: rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1
'''
RETURN = r'''
# Default return values
'''
import os
import sys
# Attempt to import rhn client tools
sys.path.insert(0, '/usr/share/rhn')
try:
import up2date_client
import up2date_client.config
HAS_UP2DATE_CLIENT = True
except ImportError:
HAS_UP2DATE_CLIENT = False
# INSERT REDHAT SNIPPETS
from ansible.module_utils import redhat
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import urllib, xmlrpc_client
class Rhn(redhat.RegistrationBase):
def __init__(self, module=None, username=None, password=None):
redhat.RegistrationBase.__init__(self, module, username, password)
self.config = self.load_config()
self.server = None
self.session = None
def logout(self):
if self.session is not None:
self.server.auth.logout(self.session)
def load_config(self):
'''
Read configuration from /etc/sysconfig/rhn/up2date
'''
if not HAS_UP2DATE_CLIENT:
return None
config = up2date_client.config.initUp2dateConfig()
return config
@property
def server_url(self):
return self.config['serverURL']
@property
def hostname(self):
'''
Return the non-xmlrpc RHN hostname. This is a convenience method
used for displaying a more readable RHN hostname.
Returns: str
'''
url = urllib.parse.urlparse(self.server_url)
return url[1].replace('xmlrpc.', '')
@property
def systemid(self):
systemid = None
xpath_str = "//member[name='system_id']/value/string"
if os.path.isfile(self.config['systemIdPath']):
fd = open(self.config['systemIdPath'], 'r')
xml_data = fd.read()
fd.close()
# Ugh, xml parsing time ...
# First, try parsing with libxml2 ...
if systemid is None:
try:
import libxml2
doc = libxml2.parseDoc(xml_data)
ctxt = doc.xpathNewContext()
systemid = ctxt.xpathEval(xpath_str)[0].content
doc.freeDoc()
ctxt.xpathFreeContext()
except ImportError:
pass
# m-kay, let's try with lxml now ...
if systemid is None:
try:
from lxml import etree
root = etree.fromstring(xml_data)
systemid = root.xpath(xpath_str)[0].text
except ImportError:
raise Exception('"libxml2" or "lxml" is required for this module.')
# Strip the 'ID-' prefix
if systemid is not None and systemid.startswith('ID-'):
systemid = systemid[3:]
return int(systemid)
@property
def is_registered(self):
'''
Determine whether the current system is registered.
Returns: True|False
'''
return os.path.isfile(self.config['systemIdPath'])
def configure_server_url(self, server_url):
'''
Configure server_url for registration
'''
self.config.set('serverURL', server_url)
self.config.save()
def enable(self):
'''
Prepare the system for RHN registration. This includes ...
* enabling the rhnplugin yum plugin
* disabling the subscription-manager yum plugin
'''
redhat.RegistrationBase.enable(self)
self.update_plugin_conf('rhnplugin', True)
self.update_plugin_conf('subscription-manager', False)
def register(self, enable_eus=False, activationkey=None, profilename=None, sslcacert=None, systemorgid=None, nopackages=False):
'''
Register system to RHN. If enable_eus=True, extended update
support will be requested.
'''
register_cmd = ['/usr/sbin/rhnreg_ks', '--force']
if self.username:
register_cmd.extend(['--username', self.username, '--password', self.password])
if self.server_url:
register_cmd.extend(['--serverUrl', self.server_url])
if enable_eus:
register_cmd.append('--use-eus-channel')
if nopackages:
register_cmd.append('--nopackages')
if activationkey is not None:
register_cmd.extend(['--activationkey', activationkey])
if profilename is not None:
register_cmd.extend(['--profilename', profilename])
if sslcacert is not None:
register_cmd.extend(['--sslCACert', sslcacert])
if systemorgid is not None:
register_cmd.extend(['--systemorgid', systemorgid])
rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True)
def api(self, method, *args):
'''
Convenience RPC wrapper
'''
if self.server is None:
if self.hostname != 'rhn.redhat.com':
url = "https://%s/rpc/api" % self.hostname
else:
url = "https://xmlrpc.%s/rpc/api" % self.hostname
self.server = xmlrpc_client.ServerProxy(url)
self.session = self.server.auth.login(self.username, self.password)
func = getattr(self.server, method)
return func(self.session, *args)
def unregister(self):
'''
Unregister a previously registered system
'''
# Initiate RPC connection
self.api('system.deleteSystems', [self.systemid])
# Remove systemid file
os.unlink(self.config['systemIdPath'])
def subscribe(self, channels):
if not channels:
return
if self._is_hosted():
current_channels = self.api('channel.software.listSystemChannels', self.systemid)
new_channels = [item['channel_label'] for item in current_channels]
new_channels.extend(channels)
return self.api('channel.software.setSystemChannels', self.systemid, list(new_channels))
else:
current_channels = self.api('channel.software.listSystemChannels', self.systemid)
current_channels = [item['label'] for item in current_channels]
new_base = None
new_childs = []
for ch in channels:
if ch in current_channels:
continue
if self.api('channel.software.getDetails', ch)['parent_channel_label'] == '':
new_base = ch
else:
if ch not in new_childs:
new_childs.append(ch)
out_base = 0
out_childs = 0
if new_base:
out_base = self.api('system.setBaseChannel', self.systemid, new_base)
if new_childs:
out_childs = self.api('system.setChildChannels', self.systemid, new_childs)
return out_base and out_childs
def _is_hosted(self):
'''
Return True if we are running against Hosted (rhn.redhat.com) or
False otherwise (when running against Satellite or Spacewalk)
'''
return 'rhn.redhat.com' in self.hostname
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
username=dict(type='str'),
password=dict(type='str', no_log=True),
server_url=dict(type='str'),
activationkey=dict(type='str', no_log=True),
profilename=dict(type='str'),
ca_cert=dict(type='path', aliases=['sslcacert']),
systemorgid=dict(type='str'),
enable_eus=dict(type='bool', default=False),
nopackages=dict(type='bool', default=False),
channels=dict(type='list', default=[]),
),
# username/password is required for state=absent, or if channels is not empty
# (basically anything that uses self.api requires username/password) but it doesn't
# look like we can express that with required_if/required_together/mutually_exclusive
# only username+password can be used for unregister
required_if=[['state', 'absent', ['username', 'password']]],
)
if not HAS_UP2DATE_CLIENT:
module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?")
server_url = module.params['server_url']
username = module.params['username']
password = module.params['password']
state = module.params['state']
activationkey = module.params['activationkey']
profilename = module.params['profilename']
sslcacert = module.params['ca_cert']
systemorgid = module.params['systemorgid']
channels = module.params['channels']
enable_eus = module.params['enable_eus']
nopackages = module.params['nopackages']
rhn = Rhn(module=module, username=username, password=password)
# use the provided server url and persist it to the rhn config.
if server_url:
rhn.configure_server_url(server_url)
if not rhn.server_url:
module.fail_json(
msg="No serverURL was found (from either the 'server_url' module arg or the config file option 'serverURL' in /etc/sysconfig/rhn/up2date)"
)
# Ensure system is registered
if state == 'present':
# Check for missing parameters ...
if not (activationkey or rhn.username or rhn.password):
module.fail_json(msg="Missing arguments, must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, rhn.username,
rhn.password))
if not activationkey and not (rhn.username and rhn.password):
module.fail_json(msg="Missing arguments, If registering without an activationkey, must supply username or password")
# Register system
if rhn.is_registered:
module.exit_json(changed=False, msg="System already registered.")
try:
rhn.enable()
rhn.register(enable_eus, activationkey, profilename, sslcacert, systemorgid, nopackages)
rhn.subscribe(channels)
except Exception as exc:
module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, exc))
finally:
rhn.logout()
module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname)
# Ensure system is *not* registered
if state == 'absent':
if not rhn.is_registered:
module.exit_json(changed=False, msg="System already unregistered.")
if not (rhn.username and rhn.password):
module.fail_json(msg="Missing arguments, the system is currently registered and unregistration requires a username and password")
try:
rhn.unregister()
except Exception as exc:
module.fail_json(msg="Failed to unregister: %s" % exc)
finally:
rhn.logout()
module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname)
if __name__ == '__main__':
main()
| gpl-3.0 |
t0mk/ansible | contrib/inventory/docker.py | 36 | 33532 | #!/usr/bin/env python
#
# (c) 2016 Paul Durivage <paul.durivage@gmail.com>
# Chris Houseknecht <house@redhat.com>
# James Tanner <jtanner@redhat.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
Docker Inventory Script
=======================
The inventory script generates dynamic inventory by making API requests to one or more Docker APIs. It's dynamic
because the inventory is generated at run-time rather than being read from a static file. The script generates the
inventory by connecting to one or many Docker APIs and inspecting the containers it finds at each API. Which APIs the
script contacts can be defined using environment variables or a configuration file.
Requirements
------------
Using the docker modules requires having docker-py <https://docker-py.readthedocs.org/en/stable/>
installed on the host running Ansible. To install docker-py:
pip install docker-py
Run for Specific Host
---------------------
When run for a specific container using the --host option this script returns the following hostvars:
{
"ansible_ssh_host": "",
"ansible_ssh_port": 0,
"docker_apparmorprofile": "",
"docker_args": [],
"docker_config": {
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"/hello"
],
"Domainname": "",
"Entrypoint": null,
"Env": null,
"Hostname": "9f2f80b0a702",
"Image": "hello-world",
"Labels": {},
"OnBuild": null,
"OpenStdin": false,
"StdinOnce": false,
"Tty": false,
"User": "",
"Volumes": null,
"WorkingDir": ""
},
"docker_created": "2016-04-18T02:05:59.659599249Z",
"docker_driver": "aufs",
"docker_execdriver": "native-0.2",
"docker_execids": null,
"docker_graphdriver": {
"Data": null,
"Name": "aufs"
},
"docker_hostconfig": {
"Binds": null,
"BlkioWeight": 0,
"CapAdd": null,
"CapDrop": null,
"CgroupParent": "",
"ConsoleSize": [
0,
0
],
"ContainerIDFile": "",
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuShares": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": null,
"Dns": null,
"DnsOptions": null,
"DnsSearch": null,
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "",
"KernelMemory": 0,
"Links": null,
"LogConfig": {
"Config": {},
"Type": "json-file"
},
"LxcConf": null,
"Memory": 0,
"MemoryReservation": 0,
"MemorySwap": 0,
"MemorySwappiness": null,
"NetworkMode": "default",
"OomKillDisable": false,
"PidMode": "host",
"PortBindings": null,
"Privileged": false,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"RestartPolicy": {
"MaximumRetryCount": 0,
"Name": ""
},
"SecurityOpt": [
"label:disable"
],
"UTSMode": "",
"Ulimits": null,
"VolumeDriver": "",
"VolumesFrom": null
},
"docker_hostnamepath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hostname",
"docker_hostspath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hosts",
"docker_id": "9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14",
"docker_image": "0a6ba66e537a53a5ea94f7c6a99c534c6adb12e3ed09326d4bf3b38f7c3ba4e7",
"docker_logpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/9f2f80b0a702361d1ac432e6a-json.log",
"docker_mountlabel": "",
"docker_mounts": [],
"docker_name": "/hello-world",
"docker_networksettings": {
"Bridge": "",
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"HairpinMode": false,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"MacAddress": "",
"Networks": {
"bridge": {
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": ""
}
},
"Ports": null,
"SandboxID": "",
"SandboxKey": "",
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null
},
"docker_path": "/hello",
"docker_processlabel": "",
"docker_resolvconfpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/resolv.conf",
"docker_restartcount": 0,
"docker_short_id": "9f2f80b0a7023",
"docker_state": {
"Dead": false,
"Error": "",
"ExitCode": 0,
"FinishedAt": "2016-04-18T02:06:00.296619369Z",
"OOMKilled": false,
"Paused": false,
"Pid": 0,
"Restarting": false,
"Running": false,
"StartedAt": "2016-04-18T02:06:00.272065041Z",
"Status": "exited"
}
}
Groups
------
When run in --list mode (the default), container instances are grouped by:
- container id
- container name
- container short id
- image_name (image_<image name>)
- docker_host
- running
- stopped
Configuration:
--------------
You can control the behavior of the inventory script by passing arguments, defining environment variables, or
creating a configuration file named docker.yml (sample provided in ansible/contrib/inventory). The order of precedence
is command line args, then the docker.yml file and finally environment variables.
Environment variables:
......................
To connect to a single Docker API the following variables can be defined in the environment to control the connection
options. These are the same environment variables used by the Docker modules.
DOCKER_HOST
The URL or Unix socket path used to connect to the Docker API. Defaults to unix://var/run/docker.sock.
DOCKER_API_VERSION:
The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
by docker-py.
DOCKER_TIMEOUT:
The maximum amount of time in seconds to wait on a response from the API. Defaults to 60 seconds.
DOCKER_TLS:
Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server.
Defaults to False.
DOCKER_TLS_VERIFY:
Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
Default is False
DOCKER_TLS_HOSTNAME:
When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults
to localhost.
DOCKER_CERT_PATH:
Path to the directory containing the client certificate, client key and CA certificate.
DOCKER_SSL_VERSION:
Provide a valid SSL version number. Default value determined by docker-py, which at the time of this writing
was 1.0
In addition to the connection variables there are a couple variables used to control the execution and output of the
script:
DOCKER_CONFIG_FILE
Path to the configuration file. Defaults to ./docker.yml.
DOCKER_PRIVATE_SSH_PORT:
The private port (container port) on which SSH is listening for connections. Defaults to 22.
DOCKER_DEFAULT_IP:
The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'.
Configuration File
..................
Using a configuration file provides a means for defining a set of Docker APIs from which to build an inventory.
The default name of the file is derived from the name of the inventory script. By default the script will look for
basename of the script (i.e. docker) with an extension of '.yml'.
You can also override the default name of the script by defining DOCKER_CONFIG_FILE in the environment.
Here's what you can define in docker.yml:
defaults
Defines a default connection. Defaults will be taken from this and applied to any values not provided
for a host defined in the hosts list.
hosts
If you wish to get inventory from more than one Docker host, define a hosts list.
For the default host and each host in the hosts list define the following attributes:
host:
description: The URL or Unix socket path used to connect to the Docker API.
required: yes
tls:
description: Connect using TLS without verifying the authenticity of the Docker host server.
default: false
required: false
tls_verify:
description: Connect using TLS and verify the authenticity of the Docker host server.
default: false
required: false
cert_path:
description: Path to the client's TLS certificate file.
default: null
required: false
cacert_path:
description: Use a CA certificate when performing server verification by providing the path to a CA certificate file.
default: null
required: false
key_path:
description: Path to the client's TLS key file.
default: null
required: false
version:
description: The Docker API version.
required: false
default: will be supplied by the docker-py module.
timeout:
description: The amount of time in seconds to wait on an API response.
required: false
default: 60
default_ip:
description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface
'0.0.0.0'.
required: false
default: 127.0.0.1
private_ssh_port:
description: The port containers use for SSH
required: false
default: 22
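A minimal docker.yml sketch combining the options above (the hosts and
certificate paths are illustrative, not defaults):
defaults:
  host: unix://var/run/docker.sock
  private_ssh_port: 22
  default_ip: 127.0.0.1
hosts:
  - host: tcp://10.45.5.16:4243
  - host: tcp://deploy.example.com:2376
    tls_verify: true
    cert_path: /etc/docker/cert.pem
    cacert_path: /etc/docker/ca.pem
    key_path: /etc/docker/key.pem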
Examples
--------
# Connect to the Docker API on localhost port 4243 and format the JSON output
DOCKER_HOST=tcp://localhost:4243 ./docker.py --pretty
# Any container's ssh port exposed on 0.0.0.0 will be mapped to
# another IP address (where Ansible will attempt to connect via SSH)
DOCKER_DEFAULT_IP=1.2.3.4 ./docker.py --pretty
# Run as input to a playbook:
ansible-playbook -i ~/projects/ansible/contrib/inventory/docker.py docker_inventory_test.yml
# Simple playbook to invoke with the above example:
- name: Test docker_inventory
hosts: all
connection: local
gather_facts: no
tasks:
- debug: msg="Container - {{ inventory_hostname }}"
'''
import os
import sys
import json
import argparse
import re
import yaml
from collections import defaultdict
# Manipulation of the path is needed because the docker-py
# module is imported by the name docker, and because this file
# is also named docker
for path in [os.getcwd(), '', os.path.dirname(os.path.abspath(__file__))]:
try:
del sys.path[sys.path.index(path)]
except ValueError:
pass
HAS_DOCKER_PY = True
HAS_DOCKER_ERROR = False
try:
from docker import Client
from docker.errors import APIError, TLSParameterError
from docker.tls import TLSConfig
from docker.constants import DEFAULT_TIMEOUT_SECONDS, DEFAULT_DOCKER_API_VERSION
except ImportError as exc:
HAS_DOCKER_ERROR = str(exc)
HAS_DOCKER_PY = False
DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
DEFAULT_TLS = False
DEFAULT_TLS_VERIFY = False
DEFAULT_IP = '127.0.0.1'
DEFAULT_SSH_PORT = '22'
BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1, True]
BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0, False]
DOCKER_ENV_ARGS = dict(
config_file='DOCKER_CONFIG_FILE',
docker_host='DOCKER_HOST',
api_version='DOCKER_API_VERSION',
cert_path='DOCKER_CERT_PATH',
ssl_version='DOCKER_SSL_VERSION',
tls='DOCKER_TLS',
tls_verify='DOCKER_TLS_VERIFY',
timeout='DOCKER_TIMEOUT',
private_ssh_port='DOCKER_DEFAULT_SSH_PORT',
default_ip='DOCKER_DEFAULT_IP',
)
def fail(msg):
sys.stderr.write("%s\n" % msg)
sys.exit(1)
def log(msg, pretty_print=False):
if pretty_print:
print(json.dumps(msg, sort_keys=True, indent=2))
else:
print(msg + u'\n')
class AnsibleDockerClient(Client):
def __init__(self, auth_params, debug):
self.auth_params = auth_params
self.debug = debug
self._connect_params = self._get_connect_params()
try:
super(AnsibleDockerClient, self).__init__(**self._connect_params)
except APIError as exc:
self.fail("Docker API error: %s" % exc)
except Exception as exc:
self.fail("Error connecting: %s" % exc)
def fail(self, msg):
fail(msg)
def log(self, msg, pretty_print=False):
if self.debug:
log(msg, pretty_print)
def _get_tls_config(self, **kwargs):
self.log("get_tls_config:")
for key in kwargs:
self.log(" %s: %s" % (key, kwargs[key]))
try:
tls_config = TLSConfig(**kwargs)
return tls_config
except TLSParameterError as exc:
self.fail("TLS config error: %s" % exc)
def _get_connect_params(self):
auth = self.auth_params
self.log("auth params:")
for key in auth:
self.log(" %s: %s" % (key, auth[key]))
if auth['tls'] or auth['tls_verify']:
auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
if auth['tls'] and auth['cert_path'] and auth['key_path']:
# TLS with certs and no host verification
tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
verify=False,
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls']:
# TLS with no certs and no host verification
tls_config = self._get_tls_config(verify=False,
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
# TLS with certs and host verification
if auth['cacert_path']:
tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
ca_cert=auth['cacert_path'],
verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'])
else:
tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify'] and auth['cacert_path']:
# TLS with cacert only
tls_config = self._get_tls_config(ca_cert=auth['cacert_path'],
assert_hostname=auth['tls_hostname'],
verify=True,
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify']:
# TLS with verify and no certs
tls_config = self._get_tls_config(verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
# No TLS
return dict(base_url=auth['docker_host'],
version=auth['api_version'],
timeout=auth['timeout'])
def _handle_ssl_error(self, error):
match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
if match:
msg = "You asked for verification that Docker host name matches %s. The actual hostname is %s. " \
"Most likely you need to set DOCKER_TLS_HOSTNAME or pass tls_hostname with a value of %s. " \
"You may also use TLS without verification by setting the tls parameter to true." \
% (self.auth_params['tls_hostname'], match.group(1), match.group(1))
self.fail(msg)
self.fail("SSL Exception: %s" % (error))
class EnvArgs(object):
def __init__(self):
self.config_file = None
self.docker_host = None
self.api_version = None
self.cert_path = None
self.ssl_version = None
self.tls = None
self.tls_verify = None
self.tls_hostname = None
self.timeout = None
self.default_ssh_port = None
self.default_ip = None
class DockerInventory(object):
def __init__(self):
self._args = self._parse_cli_args()
self._env_args = self._parse_env_args()
self.groups = defaultdict(list)
self.hostvars = defaultdict(dict)
def run(self):
config_from_file = self._parse_config_file()
if not config_from_file:
config_from_file = dict()
docker_hosts = self.get_hosts(config_from_file)
for host in docker_hosts:
client = AnsibleDockerClient(host, self._args.debug)
self.get_inventory(client, host)
if not self._args.host:
self.groups['docker_hosts'] = [host.get('docker_host') for host in docker_hosts]
self.groups['_meta'] = dict(
hostvars=self.hostvars
)
print(self._json_format_dict(self.groups, pretty_print=self._args.pretty))
else:
print(self._json_format_dict(self.hostvars.get(self._args.host, dict()), pretty_print=self._args.pretty))
sys.exit(0)
def get_inventory(self, client, host):
ssh_port = host.get('default_ssh_port')
default_ip = host.get('default_ip')
hostname = host.get('docker_host')
try:
containers = client.containers(all=True)
except Exception as exc:
self.fail("Error fetching containers for host %s - %s" % (hostname, str(exc)))
for container in containers:
id = container.get('Id')
short_id = id[:13]
try:
name = container.get('Names', list()).pop(0).lstrip('/')
except IndexError:
name = short_id
if not self._args.host or (self._args.host and self._args.host in [name, id, short_id]):
try:
inspect = client.inspect_container(id)
except Exception as exc:
self.fail("Error inspecting container %s - %s" % (name, str(exc)))
running = inspect.get('State', dict()).get('Running')
# Add container to groups
image_name = inspect.get('Config', dict()).get('Image')
if image_name:
self.groups["image_%s" % (image_name)].append(name)
self.groups[id].append(name)
self.groups[name].append(name)
if short_id not in self.groups:
self.groups[short_id].append(name)
self.groups[hostname].append(name)
if running is True:
self.groups['running'].append(name)
else:
self.groups['stopped'].append(name)
# Figure out ssh IP and Port
try:
# Lookup the public facing port Nat'ed to ssh port.
port = client.port(container, ssh_port)[0]
except (IndexError, AttributeError, TypeError):
port = dict()
try:
ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp']
except KeyError:
ip = ''
facts = dict(
ansible_ssh_host=ip,
ansible_ssh_port=port.get('HostPort', int()),
docker_name=name,
docker_short_id=short_id
)
for key in inspect:
fact_key = self._slugify(key)
facts[fact_key] = inspect.get(key)
self.hostvars[name].update(facts)
def _slugify(self, value):
return 'docker_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
def get_hosts(self, config):
'''
Determine the list of docker hosts we need to talk to.
:param config: dictionary read from config file. can be empty.
:return: list of connection dictionaries
'''
hosts = list()
hosts_list = config.get('hosts')
defaults = config.get('defaults', dict())
self.log('defaults:')
self.log(defaults, pretty_print=True)
def_host = defaults.get('host')
def_tls = defaults.get('tls')
def_tls_verify = defaults.get('tls_verify')
def_tls_hostname = defaults.get('tls_hostname')
def_ssl_version = defaults.get('ssl_version')
def_cert_path = defaults.get('cert_path')
def_cacert_path = defaults.get('cacert_path')
def_key_path = defaults.get('key_path')
def_version = defaults.get('version')
def_timeout = defaults.get('timeout')
def_ip = defaults.get('default_ip')
def_ssh_port = defaults.get('private_ssh_port')
if hosts_list:
# use hosts from config file
for host in hosts_list:
docker_host = host.get('host') or def_host or self._args.docker_host or \
self._env_args.docker_host or DEFAULT_DOCKER_HOST
api_version = host.get('version') or def_version or self._args.api_version or \
self._env_args.api_version or DEFAULT_DOCKER_API_VERSION
tls_hostname = host.get('tls_hostname') or def_tls_hostname or self._args.tls_hostname or \
self._env_args.tls_hostname
tls_verify = host.get('tls_verify') or def_tls_verify or self._args.tls_verify or \
self._env_args.tls_verify or DEFAULT_TLS_VERIFY
tls = host.get('tls') or def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS
ssl_version = host.get('ssl_version') or def_ssl_version or self._args.ssl_version or \
self._env_args.ssl_version
cert_path = host.get('cert_path') or def_cert_path or self._args.cert_path or \
self._env_args.cert_path
if cert_path and cert_path == self._env_args.cert_path:
cert_path = os.path.join(cert_path, 'cert.pem')
cacert_path = host.get('cacert_path') or def_cacert_path or self._args.cacert_path or \
self._env_args.cert_path
if cacert_path and cacert_path == self._env_args.cert_path:
cacert_path = os.path.join(cacert_path, 'ca.pem')
key_path = host.get('key_path') or def_key_path or self._args.key_path or \
self._env_args.cert_path
if key_path and key_path == self._env_args.cert_path:
key_path = os.path.join(key_path, 'key.pem')
timeout = host.get('timeout') or def_timeout or self._args.timeout or self._env_args.timeout or \
DEFAULT_TIMEOUT_SECONDS
default_ip = host.get('default_ip') or def_ip or self._args.default_ip_address or \
DEFAULT_IP
default_ssh_port = host.get('private_ssh_port') or def_ssh_port or self._args.private_ssh_port or \
DEFAULT_SSH_PORT
host_dict = dict(
docker_host=docker_host,
api_version=api_version,
tls=tls,
tls_verify=tls_verify,
tls_hostname=tls_hostname,
cert_path=cert_path,
cacert_path=cacert_path,
key_path=key_path,
ssl_version=ssl_version,
timeout=timeout,
default_ip=default_ip,
default_ssh_port=default_ssh_port,
)
hosts.append(host_dict)
else:
# use default definition
docker_host = def_host or self._args.docker_host or self._env_args.docker_host or DEFAULT_DOCKER_HOST
api_version = def_version or self._args.api_version or self._env_args.api_version or \
DEFAULT_DOCKER_API_VERSION
tls_hostname = def_tls_hostname or self._args.tls_hostname or self._env_args.tls_hostname
tls_verify = def_tls_verify or self._args.tls_verify or self._env_args.tls_verify or DEFAULT_TLS_VERIFY
tls = def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS
ssl_version = def_ssl_version or self._args.ssl_version or self._env_args.ssl_version
cert_path = def_cert_path or self._args.cert_path or self._env_args.cert_path
if cert_path and cert_path == self._env_args.cert_path:
cert_path = os.path.join(cert_path, 'cert.pem')
cacert_path = def_cacert_path or self._args.cacert_path or self._env_args.cert_path
if cacert_path and cacert_path == self._env_args.cert_path:
cacert_path = os.path.join(cacert_path, 'ca.pem')
key_path = def_key_path or self._args.key_path or self._env_args.cert_path
if key_path and key_path == self._env_args.cert_path:
key_path = os.path.join(key_path, 'key.pem')
timeout = def_timeout or self._args.timeout or self._env_args.timeout or DEFAULT_TIMEOUT_SECONDS
default_ip = def_ip or self._args.default_ip_address or DEFAULT_IP
default_ssh_port = def_ssh_port or self._args.private_ssh_port or DEFAULT_SSH_PORT
host_dict = dict(
docker_host=docker_host,
api_version=api_version,
tls=tls,
tls_verify=tls_verify,
tls_hostname=tls_hostname,
cert_path=cert_path,
cacert_path=cacert_path,
key_path=key_path,
ssl_version=ssl_version,
timeout=timeout,
default_ip=default_ip,
default_ssh_port=default_ssh_port,
)
hosts.append(host_dict)
self.log("hosts: ")
self.log(hosts, pretty_print=True)
return hosts
def _parse_config_file(self):
config = dict()
config_path = None
if self._args.config_file:
config_path = self._args.config_file
elif self._env_args.config_file:
config_path = self._env_args.config_file
if config_path:
try:
config_file = os.path.abspath(config_path)
except Exception:
config_file = None
if config_file and os.path.exists(config_file):
with open(config_file) as f:
try:
config = yaml.safe_load(f.read())
except Exception as exc:
self.fail("Error: parsing %s - %s" % (config_path, str(exc)))
return config
def log(self, msg, pretty_print=False):
if self._args.debug:
log(msg, pretty_print)
def fail(self, msg):
fail(msg)
def _parse_env_args(self):
args = EnvArgs()
for key, value in DOCKER_ENV_ARGS.items():
if os.environ.get(value):
val = os.environ.get(value)
if val in BOOLEANS_TRUE:
val = True
if val in BOOLEANS_FALSE:
val = False
setattr(args, key, val)
return args
def _parse_cli_args(self):
# Parse command line arguments
basename = os.path.splitext(os.path.basename(__file__))[0]
default_config = basename + '.yml'
parser = argparse.ArgumentParser(
description='Return Ansible inventory for one or more Docker hosts.')
parser.add_argument('--list', action='store_true', default=True,
help='List all containers (default: True)')
parser.add_argument('--debug', action='store_true', default=False,
help='Send debug messages to STDOUT')
parser.add_argument('--host', action='store',
help='Only get information for a specific container.')
parser.add_argument('--pretty', action='store_true', default=False,
                        help='Pretty print JSON output (default: False)')
parser.add_argument('--config-file', action='store', default=default_config,
help="Name of the config file to use. Default is %s" % (default_config))
parser.add_argument('--docker-host', action='store', default=None,
help="The base url or Unix sock path to connect to the docker daemon. Defaults to %s"
% (DEFAULT_DOCKER_HOST))
parser.add_argument('--tls-hostname', action='store', default='localhost',
help="Host name to expect in TLS certs. Defaults to 'localhost'")
parser.add_argument('--api-version', action='store', default=None,
help="Docker daemon API version. Defaults to %s" % (DEFAULT_DOCKER_API_VERSION))
parser.add_argument('--timeout', action='store', default=None,
help="Docker connection timeout in seconds. Defaults to %s"
% (DEFAULT_TIMEOUT_SECONDS))
parser.add_argument('--cacert-path', action='store', default=None,
help="Path to the TLS certificate authority pem file.")
parser.add_argument('--cert-path', action='store', default=None,
help="Path to the TLS certificate pem file.")
parser.add_argument('--key-path', action='store', default=None,
help="Path to the TLS encryption key pem file.")
parser.add_argument('--ssl-version', action='store', default=None,
help="TLS version number")
parser.add_argument('--tls', action='store_true', default=None,
help="Use TLS. Defaults to %s" % (DEFAULT_TLS))
parser.add_argument('--tls-verify', action='store_true', default=None,
help="Verify TLS certificates. Defaults to %s" % (DEFAULT_TLS_VERIFY))
parser.add_argument('--private-ssh-port', action='store', default=None,
help="Default private container SSH Port. Defaults to %s" % (DEFAULT_SSH_PORT))
parser.add_argument('--default-ip-address', action='store', default=None,
help="Default container SSH IP address. Defaults to %s" % (DEFAULT_IP))
return parser.parse_args()
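    # Example invocations (illustrative; the inventory script filename is
    # assumed):
    #
    #   ./docker.py --list --pretty
    #   ./docker.py --host my_container --debug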
def _json_format_dict(self, data, pretty_print=False):
# format inventory data for output
if pretty_print:
return json.dumps(data, sort_keys=True, indent=4)
else:
return json.dumps(data)
def main():
if not HAS_DOCKER_PY:
fail("Failed to import docker-py. Try `pip install docker-py` - %s" % (HAS_DOCKER_ERROR))
DockerInventory().run()
if __name__ == '__main__':
    main()
| gpl-3.0 |
dallaspythondojo/python | Quintana_Jerrod/Assignments/f+sql_projects/email_validation_with_db/mysqlconnection.py | 2 | 2248 | """ import the necessary modules """
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import text
# Create a class that will give us an object that we can use to connect to a database
class MySQLConnection(object):
def __init__(self, app, db):
config = {
            'host': '127.0.0.1',  # matches the address used in the connection string below
            'database': 'email_validation',  # we got db as an argument; the database name is the only value here that changes between projects
            'user': 'root',
            'password': '',  # blank because no password was set for the local MySQL user
            'port': '3306'  # change the port to match the port your SQL server is running on
}
        # this will use the above values to generate the URI used to connect to your sql database
        DATABASE_URI = "mysql://{}:{}@{}:{}/{}".format(config['user'], config['password'], config['host'], config['port'], config['database'])
app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
# establish the connection to database
self.db = SQLAlchemy(app)
# this is the method we will use to query the database
def query_db(self, query, data=None):
result = self.db.session.execute(text(query), data)
if query[0:6].lower() == 'select':
# if the query was a select
# convert the result to a list of dictionaries
list_result = [dict(r) for r in result]
# return the results as a list of dictionaries
return list_result
        elif query[0:6].lower() == 'insert':
            # if the query was an insert, commit the changes and
            # return the id of the row that was inserted
            self.db.session.commit()
            return result.lastrowid
else:
# if the query was an update or delete, return nothing and commit changes
self.db.session.commit()
# This is the module-level function to be called by the user in server.py. Make sure to provide the db name!
# Note: the app and db arguments are passed through unchanged; only the database name configured above varies between projects.
def MySQLConnector(app, db):
return MySQLConnection(app, db)
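# A minimal usage sketch (hypothetical server.py); `app` and the
# 'email_validation' database follow the comments above, while the table
# and column names are made up for illustration:
#
#   from flask import Flask
#   from mysqlconnection import MySQLConnector
#
#   app = Flask(__name__)
#   mysql = MySQLConnector(app, 'email_validation')
#   emails = mysql.query_db("SELECT * FROM emails")
#   new_id = mysql.query_db("INSERT INTO emails (email) VALUES (:email)",
#                           {'email': 'user@example.com'})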
| mit |
rcrowder/nupic | examples/opf/experiments/anomaly/spatial/10field_many_skewed/description.py | 20 | 15713 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
import os

from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
('numericFieldNameB', 'sum'),
('categoryFieldNameC', 'first')],
'hours': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
            # Valid keys are any desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
            # What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'py',
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
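# Worked example (illustrative settings, not the defaults above): with an
# aggregation period of hours=1 and a predictAheadTime of hours=4,
# aggregationDivide() would yield 4.0, so predictionSteps becomes 4 and
# clParams['steps'] is set to '4'.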
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
control = dict(
environment='opfExperiment',
tasks = [
{
# Task label; this label string may be used for diagnostic logging and for
# constructing filenames or directory pathnames for task-specific files, etc.
'taskLabel' : "Anomaly",
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : {
'info': 'test_NoProviders',
'version': 1,
'streams': [
{
'columns': ['*'],
'info': 'my simple dataset',
'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
}
],
# TODO: Aggregation is not supported yet by run_opf_experiment.py
#'aggregation' : config['aggregationInfo']
},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
'taskControl' : {
# Iteration cycle list consisting of opf_task_driver.IterationPhaseSpecXXXXX
# instances.
'iterationCycle' : [
#IterationPhaseSpecLearnOnly(1000),
IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
#IterationPhaseSpecInferOnly(10, inferenceArgs=None),
],
'metrics' : [
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
# Callbacks for experimentation/research (optional)
'callbacks' : {
# Callbacks to be called at the beginning of a task, before model iterations.
# Signature: callback(<reference to OPFExperiment>); returns nothing
# 'setup' : [htmPredictionModelControlEnableSPLearningCb, htmPredictionModelControlEnableTPLearningCb],
# 'setup' : [htmPredictionModelControlDisableTPLearningCb],
'setup' : [],
# Callbacks to be called after every learning/inference iteration
# Signature: callback(<reference to OPFExperiment>); returns nothing
'postIter' : [],
# Callbacks to be called when the experiment task is finished
# Signature: callback(<reference to OPFExperiment>); returns nothing
'finish' : []
}
} # End of taskControl
}, # End of task
]
)
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
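# Usage note (hedged): descriptions like this one are normally consumed by
# NuPIC's experiment runner rather than executed directly, e.g.:
#
#   python scripts/run_opf_experiment.py path/to/this/experiment/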
| agpl-3.0 |
vijayanandau/KnowledgeShare | makahiki/apps/lib/gviz_api/gviz_api.py | 9 | 44805 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts Python data into data for Google Visualization API clients.
This library can be used to create a google.visualization.DataTable usable by
visualizations built on the Google Visualization API. Output formats are raw
JSON, JSON response, JavaScript, CSV, and HTML table.
See http://code.google.com/apis/visualization/ for documentation on the
Google Visualization API.
"""
__author__ = "Amit Weinstein, Misha Seltzer, Jacob Baskin"
import cgi
import cStringIO
import csv
import datetime
try:
import json
except ImportError:
import simplejson as json
import types
class DataTableException(Exception):
"""The general exception object thrown by DataTable."""
pass
class DataTableJSONEncoder(json.JSONEncoder):
"""JSON encoder that handles date/time/datetime objects correctly."""
def __init__(self):
json.JSONEncoder.__init__(self,
separators=(",", ":"),
ensure_ascii=False)
def default(self, o):
if isinstance(o, datetime.datetime):
return "Date(%d,%d,%d,%d,%d,%d)" % (
o.year, o.month - 1, o.day, o.hour, o.minute, o.second)
elif isinstance(o, datetime.date):
return "Date(%d,%d,%d)" % (o.year, o.month - 1, o.day)
elif isinstance(o, datetime.time):
return [o.hour, o.minute, o.second]
else:
      return super(DataTableJSONEncoder, self).default(o)
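# Illustrative (assumed) behavior of the encoder above:
#   DataTableJSONEncoder().encode({"d": datetime.date(2009, 12, 31)})
#   would produce '{"d":"Date(2009,11,31)"}' (note the 0-based month).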
class DataTable(object):
"""Wraps the data to convert to a Google Visualization API DataTable.
Create this object, populate it with data, then call one of the ToJS...
methods to return a string representation of the data in the format described.
You can clear all data from the object to reuse it, but you cannot clear
individual cells, rows, or columns. You also cannot modify the table schema
specified in the class constructor.
You can add new data one or more rows at a time. All data added to an
instantiated DataTable must conform to the schema passed in to __init__().
You can reorder the columns in the output table, and also specify row sorting
order by column. The default column order is according to the original
table_description parameter. Default row sort order is ascending, by column
1 values. For a dictionary, we sort the keys for order.
The data and the table_description are closely tied, as described here:
The table schema is defined in the class constructor's table_description
parameter. The user defines each column using a tuple of
(id[, type[, label[, custom_properties]]]). The default value for type is
string, label is the same as ID if not specified, and custom properties is
an empty dictionary if not specified.
table_description is a dictionary or list, containing one or more column
descriptor tuples, nested dictionaries, and lists. Each dictionary key, list
element, or dictionary element must eventually be defined as
a column description tuple. Here's an example of a dictionary where the key
is a tuple, and the value is a list of two tuples:
{('a', 'number'): [('b', 'number'), ('c', 'string')]}
This flexibility in data entry enables you to build and manipulate your data
in a Python structure that makes sense for your program.
Add data to the table using the same nested design as the table's
table_description, replacing column descriptor tuples with cell data, and
each row is an element in the top level collection. This will be a bit
clearer after you look at the following examples showing the
table_description, matching data, and the resulting table:
Columns as list of tuples [col1, col2, col3]
table_description: [('a', 'number'), ('b', 'string')]
AppendData( [[1, 'z'], [2, 'w'], [4, 'o'], [5, 'k']] )
Table:
a b <--- these are column ids/labels
1 z
2 w
4 o
5 k
Dictionary of columns, where key is a column, and value is a list of
columns {col1: [col2, col3]}
table_description: {('a', 'number'): [('b', 'number'), ('c', 'string')]}
AppendData( data: {1: [2, 'z'], 3: [4, 'w']}
Table:
a b c
1 2 z
3 4 w
Dictionary where key is a column, and the value is itself a dictionary of
columns {col1: {col2, col3}}
table_description: {('a', 'number'): {'b': 'number', 'c': 'string'}}
AppendData( data: {1: {'b': 2, 'c': 'z'}, 3: {'b': 4, 'c': 'w'}}
Table:
a b c
1 2 z
3 4 w
"""
def __init__(self, table_description, data=None, custom_properties=None):
"""Initialize the data table from a table schema and (optionally) data.
See the class documentation for more information on table schema and data
values.
Args:
table_description: A table schema, following one of the formats described
in TableDescriptionParser(). Schemas describe the
column names, data types, and labels. See
TableDescriptionParser() for acceptable formats.
data: Optional. If given, fills the table with the given data. The data
structure must be consistent with schema in table_description. See
the class documentation for more information on acceptable data. You
can add data later by calling AppendData().
custom_properties: Optional. A dictionary from string to string that
goes into the table's custom properties. This can be
later changed by changing self.custom_properties.
Raises:
DataTableException: Raised if the data and the description did not match,
or did not use the supported formats.
"""
self.__columns = self.TableDescriptionParser(table_description)
self.__data = []
self.custom_properties = {}
if custom_properties is not None:
self.custom_properties = custom_properties
if data:
self.LoadData(data)
@staticmethod
def CoerceValue(value, value_type):
"""Coerces a single value into the type expected for its column.
Internal helper method.
Args:
value: The value which should be converted
value_type: One of "string", "number", "boolean", "date", "datetime" or
"timeofday".
Returns:
An item of the Python type appropriate to the given value_type. Strings
are also converted to Unicode using UTF-8 encoding if necessary.
If a tuple is given, it should be in one of the following forms:
- (value, formatted value)
- (value, formatted value, custom properties)
where the formatted value is a string, and custom properties is a
dictionary of the custom properties for this cell.
To specify custom properties without specifying formatted value, one can
pass None as the formatted value.
One can also have a null-valued cell with formatted value and/or custom
properties by specifying None for the value.
This method ignores the custom properties except for checking that it is a
dictionary. The custom properties are handled in the ToJSon and ToJSCode
methods.
The real type of the given value is not strictly checked. For example,
any type can be used for string - as we simply take its str( ) and for
boolean value we just check "if value".
Examples:
CoerceValue(None, "string") returns None
CoerceValue((5, "5$"), "number") returns (5, "5$")
CoerceValue(100, "string") returns "100"
CoerceValue(0, "boolean") returns False
Raises:
DataTableException: The value and type did not match in a not-recoverable
way, for example given value 'abc' for type 'number'.
"""
if isinstance(value, tuple):
# In case of a tuple, we run the same function on the value itself and
# add the formatted value.
if (len(value) not in [2, 3] or
(len(value) == 3 and not isinstance(value[2], dict))):
raise DataTableException("Wrong format for value and formatting - %s." %
str(value))
if not isinstance(value[1], types.StringTypes + (types.NoneType,)):
raise DataTableException("Formatted value is not string, given %s." %
type(value[1]))
js_value = DataTable.CoerceValue(value[0], value_type)
return (js_value,) + value[1:]
t_value = type(value)
if value is None:
return value
if value_type == "boolean":
return bool(value)
elif value_type == "number":
if isinstance(value, (int, long, float)):
return value
raise DataTableException("Wrong type %s when expected number" % t_value)
elif value_type == "string":
if isinstance(value, unicode):
return value
else:
return str(value).decode("utf-8")
elif value_type == "date":
if isinstance(value, datetime.datetime):
return datetime.date(value.year, value.month, value.day)
elif isinstance(value, datetime.date):
return value
else:
raise DataTableException("Wrong type %s when expected date" % t_value)
elif value_type == "timeofday":
if isinstance(value, datetime.datetime):
return datetime.time(value.hour, value.minute, value.second)
elif isinstance(value, datetime.time):
return value
else:
raise DataTableException("Wrong type %s when expected time" % t_value)
elif value_type == "datetime":
if isinstance(value, datetime.datetime):
return value
else:
raise DataTableException("Wrong type %s when expected datetime" %
t_value)
# If we got here, it means the given value_type was not one of the
# supported types.
raise DataTableException("Unsupported type %s" % value_type)
@staticmethod
def EscapeForJSCode(encoder, value):
if value is None:
return "null"
elif isinstance(value, datetime.datetime):
return "new Date(%d,%d,%d,%d,%d,%d)" % (value.year,
value.month - 1, # To match JS
value.day,
value.hour,
value.minute,
value.second)
elif isinstance(value, datetime.date):
return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)
else:
return encoder.encode(value)
@staticmethod
def ToString(value):
if value is None:
return "(empty)"
elif isinstance(value, (datetime.datetime,
datetime.date,
datetime.time)):
return str(value)
elif isinstance(value, unicode):
return value
elif isinstance(value, bool):
return str(value).lower()
else:
return str(value).decode("utf-8")
@staticmethod
def ColumnTypeParser(description):
"""Parses a single column description. Internal helper method.
Args:
description: a column description in the possible formats:
'id'
('id',)
('id', 'type')
('id', 'type', 'label')
('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
Returns:
Dictionary with the following keys: id, label, type, and
custom_properties where:
- If label not given, it equals the id.
- If type not given, string is used by default.
- If custom properties are not given, an empty dictionary is used by
default.
Raises:
DataTableException: The column description did not match the RE, or
unsupported type was passed.
"""
if not description:
raise DataTableException("Description error: empty description given")
if not isinstance(description, (types.StringTypes, tuple)):
raise DataTableException("Description error: expected either string or "
"tuple, got %s." % type(description))
if isinstance(description, types.StringTypes):
description = (description,)
# According to the tuple's length, we fill the keys
# We verify everything is of type string
for elem in description[:3]:
if not isinstance(elem, types.StringTypes):
raise DataTableException("Description error: expected tuple of "
"strings, current element of type %s." %
type(elem))
desc_dict = {"id": description[0],
"label": description[0],
"type": "string",
"custom_properties": {}}
if len(description) > 1:
desc_dict["type"] = description[1].lower()
if len(description) > 2:
desc_dict["label"] = description[2]
if len(description) > 3:
if not isinstance(description[3], dict):
raise DataTableException("Description error: expected custom "
"properties of type dict, current element "
"of type %s." % type(description[3]))
desc_dict["custom_properties"] = description[3]
if len(description) > 4:
raise DataTableException("Description error: tuple of length > 4")
if desc_dict["type"] not in ["string", "number", "boolean",
"date", "datetime", "timeofday"]:
raise DataTableException(
"Description error: unsupported type '%s'" % desc_dict["type"])
return desc_dict
@staticmethod
def TableDescriptionParser(table_description, depth=0):
"""Parses the table_description object for internal use.
Parses the user-submitted table description into an internal format used
by the Python DataTable class. Returns the flat list of parsed columns.
Args:
table_description: A description of the table which should comply
with one of the formats described below.
depth: Optional. The depth of the first level in the current description.
Used by recursive calls to this function.
Returns:
List of columns, where each column represented by a dictionary with the
keys: id, label, type, depth, container which means the following:
- id: the id of the column
- name: The name of the column
- type: The datatype of the elements in this column. Allowed types are
described in ColumnTypeParser().
- depth: The depth of this column in the table description
- container: 'dict', 'iter' or 'scalar' for parsing the format easily.
- custom_properties: The custom properties for this column.
The returned description is flattened regardless of how it was given.
Raises:
DataTableException: Error in a column description or in the description
structure.
Examples:
A column description can be of the following forms:
'id'
('id',)
('id', 'type')
('id', 'type', 'label')
('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
or as a dictionary:
'id': 'type'
'id': ('type',)
'id': ('type', 'label')
'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
If the type is not specified, we treat it as string.
If no specific label is given, the label is simply the id.
If no custom properties are given, we use an empty dictionary.
input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
output: [{'id': 'a', 'label': 'a', 'type': 'date',
'depth': 0, 'container': 'iter', 'custom_properties': {}},
{'id': 'b', 'label': 'b', 'type': 'timeofday',
'depth': 0, 'container': 'iter',
'custom_properties': {'foo': 'bar'}}]
input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
output: [{'id': 'a', 'label': 'a', 'type': 'string',
'depth': 0, 'container': 'dict', 'custom_properties': {}},
{'id': 'b', 'label': 'b', 'type': 'number',
'depth': 1, 'container': 'iter', 'custom_properties': {}},
{'id': 'c', 'label': 'column c', 'type': 'string',
'depth': 1, 'container': 'iter', 'custom_properties': {}}]
input: {('a', 'number', 'column a'): { 'b': 'number', 'c': 'string'}}
output: [{'id': 'a', 'label': 'column a', 'type': 'number',
'depth': 0, 'container': 'dict', 'custom_properties': {}},
{'id': 'b', 'label': 'b', 'type': 'number',
'depth': 1, 'container': 'dict', 'custom_properties': {}},
{'id': 'c', 'label': 'c', 'type': 'string',
'depth': 1, 'container': 'dict', 'custom_properties': {}}]
input: { ('w', 'string', 'word'): ('c', 'number', 'count') }
output: [{'id': 'w', 'label': 'word', 'type': 'string',
'depth': 0, 'container': 'dict', 'custom_properties': {}},
{'id': 'c', 'label': 'count', 'type': 'number',
'depth': 1, 'container': 'scalar', 'custom_properties': {}}]
input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
'container': 'dict', 'custom_properties': {}},
{'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
'container': 'dict', 'custom_properties': {}}
NOTE: there might be ambiguity in the case of a dictionary representation
of a single column. For example, the following description can be parsed
in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
with the id 'a', of type 'b' and the label 'c', or as 2 columns: one named
'a', and the other named 'b' of type 'c'. We choose the first option by
default, and in case the second option is the right one, it is possible to
make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add more info
into the tuple, thus making it look like this: {'a': ('b', 'c', 'b', {})}
-- second 'b' is the label, and {} is the custom properties field.
"""
# For the recursion step, we check for a scalar object (string or tuple)
if isinstance(table_description, (types.StringTypes, tuple)):
parsed_col = DataTable.ColumnTypeParser(table_description)
parsed_col["depth"] = depth
parsed_col["container"] = "scalar"
return [parsed_col]
# Since it is not scalar, table_description must be iterable.
if not hasattr(table_description, "__iter__"):
raise DataTableException("Expected an iterable object, got %s" %
type(table_description))
if not isinstance(table_description, dict):
      # We expect a non-dictionary iterable item.
columns = []
for desc in table_description:
parsed_col = DataTable.ColumnTypeParser(desc)
parsed_col["depth"] = depth
parsed_col["container"] = "iter"
columns.append(parsed_col)
if not columns:
raise DataTableException("Description iterable objects should not"
" be empty.")
return columns
# The other case is a dictionary
if not table_description:
raise DataTableException("Empty dictionaries are not allowed inside"
" description")
    # To differentiate between the two cases (more levels below, or this is
    # the innermost dictionary) we consider the number of keys (more than one
    # key indicates the innermost dictionary) and, in the case of only 1 key,
    # the types of the key and value (if the key is a string and the value is
    # a tuple of 0-3 items, we assume this is the innermost dictionary).
# NOTE: this way of differentiating might create ambiguity. See docs.
if (len(table_description) != 1 or
(isinstance(table_description.keys()[0], types.StringTypes) and
isinstance(table_description.values()[0], tuple) and
len(table_description.values()[0]) < 4)):
      # This is the innermost dictionary. Parsing types.
columns = []
# We sort the items, equivalent to sort the keys since they are unique
for key, value in sorted(table_description.items()):
# We parse the column type as (key, type) or (key, type, label) using
# ColumnTypeParser.
if isinstance(value, tuple):
parsed_col = DataTable.ColumnTypeParser((key,) + value)
else:
parsed_col = DataTable.ColumnTypeParser((key, value))
parsed_col["depth"] = depth
parsed_col["container"] = "dict"
columns.append(parsed_col)
return columns
# This is an outer dictionary, must have at most one key.
parsed_col = DataTable.ColumnTypeParser(table_description.keys()[0])
parsed_col["depth"] = depth
parsed_col["container"] = "dict"
return ([parsed_col] +
DataTable.TableDescriptionParser(table_description.values()[0],
depth=depth + 1))
@property
def columns(self):
"""Returns the parsed table description."""
return self.__columns
def NumberOfRows(self):
"""Returns the number of rows in the current data stored in the table."""
return len(self.__data)
def SetRowsCustomProperties(self, rows, custom_properties):
"""Sets the custom properties for given row(s).
Can accept a single row or an iterable of rows.
Sets the given custom properties for all specified rows.
Args:
rows: The row, or rows, to set the custom properties for.
custom_properties: A string to string dictionary of custom properties to
set for all rows.
"""
if not hasattr(rows, "__iter__"):
rows = [rows]
for row in rows:
self.__data[row] = (self.__data[row][0], custom_properties)
def LoadData(self, data, custom_properties=None):
"""Loads new rows to the data table, clearing existing rows.
May also set the custom_properties for the added rows. The given custom
properties dictionary specifies the dictionary that will be used for *all*
given rows.
Args:
data: The rows that the table will contain.
custom_properties: A dictionary of string to string to set as the custom
properties for all rows.
"""
self.__data = []
self.AppendData(data, custom_properties)
def AppendData(self, data, custom_properties=None):
"""Appends new data to the table.
Data is appended in rows. Data must comply with
the table schema passed in to __init__(). See CoerceValue() for a list
of acceptable data types. See the class documentation for more information
and examples of schema and data values.
Args:
data: The row to add to the table. The data must conform to the table
description format.
custom_properties: A dictionary of string to string, representing the
custom properties to add to all the rows.
Raises:
DataTableException: The data structure does not match the description.
"""
# If the maximal depth is 0, we simply iterate over the data table
# lines and insert them using _InnerAppendData. Otherwise, we simply
# let the _InnerAppendData handle all the levels.
if not self.__columns[-1]["depth"]:
for row in data:
self._InnerAppendData(({}, custom_properties), row, 0)
else:
self._InnerAppendData(({}, custom_properties), data, 0)
def _InnerAppendData(self, prev_col_values, data, col_index):
"""Inner function to assist LoadData."""
# We first check that col_index has not exceeded the columns size
if col_index >= len(self.__columns):
raise DataTableException("The data does not match description, too deep")
# Dealing with the scalar case, the data is the last value.
if self.__columns[col_index]["container"] == "scalar":
prev_col_values[0][self.__columns[col_index]["id"]] = data
self.__data.append(prev_col_values)
return
if self.__columns[col_index]["container"] == "iter":
if not hasattr(data, "__iter__") or isinstance(data, dict):
raise DataTableException("Expected iterable object, got %s" %
type(data))
# We only need to insert the rest of the columns
# If there are less items than expected, we only add what there is.
for value in data:
if col_index >= len(self.__columns):
raise DataTableException("Too many elements given in data")
prev_col_values[0][self.__columns[col_index]["id"]] = value
col_index += 1
self.__data.append(prev_col_values)
return
# We know the current level is a dictionary, we verify the type.
if not isinstance(data, dict):
raise DataTableException("Expected dictionary at current level, got %s" %
type(data))
# We check if this is the last level
if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
# We need to add the keys in the dictionary as they are
for col in self.__columns[col_index:]:
if col["id"] in data:
prev_col_values[0][col["id"]] = data[col["id"]]
self.__data.append(prev_col_values)
return
# We have a dictionary in an inner depth level.
if not data.keys():
# In case this is an empty dictionary, we add a record with the columns
# filled only until this point.
self.__data.append(prev_col_values)
else:
for key in sorted(data):
col_values = dict(prev_col_values[0])
col_values[self.__columns[col_index]["id"]] = key
self._InnerAppendData((col_values, prev_col_values[1]),
data[key], col_index + 1)
def _PreparedData(self, order_by=()):
"""Prepares the data for enumeration - sorting it by order_by.
Args:
order_by: Optional. Specifies the name of the column(s) to sort by, and
(optionally) which direction to sort in. Default sort direction
is asc. Following formats are accepted:
"string_col_name" -- For a single key in default (asc) order.
("string_col_name", "asc|desc") -- For a single key.
[("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
one column, an array of tuples of (col_name, "asc|desc").
Returns:
The data sorted by the keys given.
Raises:
DataTableException: Sort direction not in 'asc' or 'desc'
"""
if not order_by:
return self.__data
proper_sort_keys = []
if isinstance(order_by, types.StringTypes) or (
isinstance(order_by, tuple) and len(order_by) == 2 and
order_by[1].lower() in ["asc", "desc"]):
order_by = (order_by,)
for key in order_by:
if isinstance(key, types.StringTypes):
proper_sort_keys.append((key, 1))
elif (isinstance(key, (list, tuple)) and len(key) == 2 and
key[1].lower() in ("asc", "desc")):
proper_sort_keys.append((key[0], key[1].lower() == "asc" and 1 or -1))
else:
raise DataTableException("Expected tuple with second value: "
"'asc' or 'desc'")
def SortCmpFunc(row1, row2):
"""cmp function for sorted. Compares by keys and 'asc'/'desc' keywords."""
for key, asc_mult in proper_sort_keys:
cmp_result = asc_mult * cmp(row1[0].get(key), row2[0].get(key))
if cmp_result:
return cmp_result
return 0
return sorted(self.__data, cmp=SortCmpFunc)
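  # For example (illustrative): _PreparedData([("b", "desc"), ("a", "asc")])
  # sorts primarily by column "b" descending, breaking ties by "a" ascending.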
def ToJSCode(self, name, columns_order=None, order_by=()):
"""Writes the data table as a JS code string.
This method writes a string of JS code that can be run to
generate a DataTable with the specified data. Typically used for debugging
only.
Args:
name: The name of the table. The name would be used as the DataTable's
variable name in the created JS code.
columns_order: Optional. Specifies the order of columns in the
output table. Specify a list of all column IDs in the order
in which you want the table created.
Note that you must list all column IDs in this parameter,
if you use it.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData.
Returns:
A string of JS code that, when run, generates a DataTable with the given
name and the data stored in the DataTable object.
Example result:
"var tab1 = new google.visualization.DataTable();
tab1.addColumn("string", "a", "a");
tab1.addColumn("number", "b", "b");
tab1.addColumn("boolean", "c", "c");
tab1.addRows(10);
tab1.setCell(0, 0, "a");
tab1.setCell(0, 1, 1, null, {"foo": "bar"});
tab1.setCell(0, 2, true);
...
tab1.setCell(9, 0, "c");
tab1.setCell(9, 1, 3, "3$");
tab1.setCell(9, 2, false);"
Raises:
DataTableException: The data does not match the type.
"""
encoder = DataTableJSONEncoder()
if columns_order is None:
columns_order = [col["id"] for col in self.__columns]
col_dict = dict([(col["id"], col) for col in self.__columns])
# We first create the table with the given name
jscode = "var %s = new google.visualization.DataTable();\n" % name
if self.custom_properties:
jscode += "%s.setTableProperties(%s);\n" % (
name, encoder.encode(self.custom_properties))
# We add the columns to the table
for i, col in enumerate(columns_order):
jscode += "%s.addColumn(%s, %s, %s);\n" % (
name,
encoder.encode(col_dict[col]["type"]),
encoder.encode(col_dict[col]["label"]),
encoder.encode(col_dict[col]["id"]))
if col_dict[col]["custom_properties"]:
jscode += "%s.setColumnProperties(%d, %s);\n" % (
name, i, encoder.encode(col_dict[col]["custom_properties"]))
jscode += "%s.addRows(%d);\n" % (name, len(self.__data))
# We now go over the data and add each row
for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
# We add all the elements of this row by their order
for (j, col) in enumerate(columns_order):
if col not in row or row[col] is None:
continue
value = self.CoerceValue(row[col], col_dict[col]["type"])
if isinstance(value, tuple):
cell_cp = ""
if len(value) == 3:
cell_cp = ", %s" % encoder.encode(row[col][2])
# We have a formatted value or custom property as well
jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
(name, i, j,
self.EscapeForJSCode(encoder, value[0]),
self.EscapeForJSCode(encoder, value[1]), cell_cp))
else:
jscode += "%s.setCell(%d, %d, %s);\n" % (
name, i, j, self.EscapeForJSCode(encoder, value))
if cp:
jscode += "%s.setRowProperties(%d, %s);\n" % (
name, i, encoder.encode(cp))
return jscode
def ToHtml(self, columns_order=None, order_by=()):
"""Writes the data table as an HTML table code string.
Args:
columns_order: Optional. Specifies the order of columns in the
output table. Specify a list of all column IDs in the order
in which you want the table created.
Note that you must list all column IDs in this parameter,
if you use it.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData.
Returns:
An HTML table code string.
Example result (the result is without the newlines):
<html><body><table border="1">
<thead><tr><th>a</th><th>b</th><th>c</th></tr></thead>
<tbody>
<tr><td>1</td><td>"z"</td><td>2</td></tr>
<tr><td>"3$"</td><td>"w"</td><td></td></tr>
</tbody>
</table></body></html>
Raises:
DataTableException: The data does not match the type.
"""
table_template = "<html><body><table border=\"1\">%s</table></body></html>"
columns_template = "<thead><tr>%s</tr></thead>"
rows_template = "<tbody>%s</tbody>"
row_template = "<tr>%s</tr>"
header_cell_template = "<th>%s</th>"
cell_template = "<td>%s</td>"
if columns_order is None:
columns_order = [col["id"] for col in self.__columns]
col_dict = dict([(col["id"], col) for col in self.__columns])
columns_list = []
for col in columns_order:
columns_list.append(header_cell_template %
cgi.escape(col_dict[col]["label"]))
columns_html = columns_template % "".join(columns_list)
rows_list = []
# We now go over the data and add each row
for row, unused_cp in self._PreparedData(order_by):
cells_list = []
# We add all the elements of this row by their order
for col in columns_order:
# For empty string we want empty quotes ("").
value = ""
if col in row and row[col] is not None:
value = self.CoerceValue(row[col], col_dict[col]["type"])
if isinstance(value, tuple):
# We have a formatted value and we're going to use it
cells_list.append(cell_template % cgi.escape(self.ToString(value[1])))
else:
cells_list.append(cell_template % cgi.escape(self.ToString(value)))
rows_list.append(row_template % "".join(cells_list))
rows_html = rows_template % "".join(rows_list)
return table_template % (columns_html + rows_html)
def ToCsv(self, columns_order=None, order_by=(), separator=","):
"""Writes the data table as a CSV string.
Output is encoded in UTF-8 because the Python "csv" module can't handle
Unicode properly according to its documentation.
Args:
columns_order: Optional. Specifies the order of columns in the
output table. Specify a list of all column IDs in the order
in which you want the table created.
Note that you must list all column IDs in this parameter,
if you use it.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData.
separator: Optional. The separator to use between the values.
Returns:
A CSV string representing the table.
Example result:
'a','b','c'
1,'z',2
3,'w',''
Raises:
DataTableException: The data does not match the type.
"""
csv_buffer = cStringIO.StringIO()
writer = csv.writer(csv_buffer, delimiter=separator)
if columns_order is None:
columns_order = [col["id"] for col in self.__columns]
col_dict = dict([(col["id"], col) for col in self.__columns])
writer.writerow([col_dict[col]["label"].encode("utf-8")
for col in columns_order])
# We now go over the data and add each row
for row, unused_cp in self._PreparedData(order_by):
cells_list = []
# We add all the elements of this row by their order
for col in columns_order:
value = ""
if col in row and row[col] is not None:
value = self.CoerceValue(row[col], col_dict[col]["type"])
if isinstance(value, tuple):
# We have a formatted value. Using it only for date/time types.
if col_dict[col]["type"] in ["date", "datetime", "timeofday"]:
cells_list.append(self.ToString(value[1]).encode("utf-8"))
else:
cells_list.append(self.ToString(value[0]).encode("utf-8"))
else:
cells_list.append(self.ToString(value).encode("utf-8"))
writer.writerow(cells_list)
return csv_buffer.getvalue()
def ToTsvExcel(self, columns_order=None, order_by=()):
"""Returns a file in tab-separated-format readable by MS Excel.
Returns a file in UTF-16 little endian encoding, with tabs separating the
values.
Args:
columns_order: Delegated to ToCsv.
order_by: Delegated to ToCsv.
Returns:
A tab-separated little endian UTF16 file representing the table.
"""
return (self.ToCsv(columns_order, order_by, separator="\t")
.decode("utf-8").encode("UTF-16LE"))
def _ToJSonObj(self, columns_order=None, order_by=()):
"""Returns an object suitable to be converted to JSON.
Args:
columns_order: Optional. A list of all column IDs in the order in which
you want them created in the output table. If specified,
all column IDs must be present.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData().
Returns:
A dictionary object for use by ToJSon or ToJSonResponse.
"""
if columns_order is None:
columns_order = [col["id"] for col in self.__columns]
col_dict = dict([(col["id"], col) for col in self.__columns])
# Creating the column JSON objects
col_objs = []
for col_id in columns_order:
col_obj = {"id": col_dict[col_id]["id"],
"label": col_dict[col_id]["label"],
"type": col_dict[col_id]["type"]}
if col_dict[col_id]["custom_properties"]:
col_obj["p"] = col_dict[col_id]["custom_properties"]
col_objs.append(col_obj)
# Creating the rows jsons
row_objs = []
for row, cp in self._PreparedData(order_by):
cell_objs = []
for col in columns_order:
value = self.CoerceValue(row.get(col, None), col_dict[col]["type"])
if value is None:
cell_obj = None
elif isinstance(value, tuple):
cell_obj = {"v": value[0]}
if len(value) > 1 and value[1] is not None:
cell_obj["f"] = value[1]
if len(value) == 3:
cell_obj["p"] = value[2]
else:
cell_obj = {"v": value}
cell_objs.append(cell_obj)
row_obj = {"c": cell_objs}
if cp:
row_obj["p"] = cp
row_objs.append(row_obj)
json_obj = {"cols": col_objs, "rows": row_objs}
if self.custom_properties:
json_obj["p"] = self.custom_properties
return json_obj
def ToJSon(self, columns_order=None, order_by=()):
"""Returns a string that can be used in a JS DataTable constructor.
This method writes a JSON string that can be passed directly into a Google
Visualization API DataTable constructor. Use this output if you are
hosting the visualization HTML on your site, and want to code the data
table in Python. Pass this string into the
google.visualization.DataTable constructor, e.g,:
... on my page that hosts my visualization ...
google.setOnLoadCallback(drawTable);
function drawTable() {
var data = new google.visualization.DataTable(_my_JSon_string, 0.6);
myTable.draw(data);
}
Args:
columns_order: Optional. Specifies the order of columns in the
output table. Specify a list of all column IDs in the order
in which you want the table created.
Note that you must list all column IDs in this parameter,
if you use it.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData().
Returns:
A JSon constructor string to generate a JS DataTable with the data
stored in the DataTable object.
Example result (the result is without the newlines):
{cols: [{id:"a",label:"a",type:"number"},
{id:"b",label:"b",type:"string"},
{id:"c",label:"c",type:"number"}],
rows: [{c:[{v:1},{v:"z"},{v:2}]}, c:{[{v:3,f:"3$"},{v:"w"},{v:null}]}],
p: {'foo': 'bar'}}
Raises:
DataTableException: The data does not match the type.
"""
encoder = DataTableJSONEncoder()
return encoder.encode(
self._ToJSonObj(columns_order, order_by)).encode("utf-8")
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
response_handler="google.visualization.Query.setResponse"):
"""Writes a table as a JSON response that can be returned as-is to a client.
This method writes a JSON response to return to a client in response to a
Google Visualization API query. This string can be processed by the calling
page, and is used to deliver a data table to a visualization hosted on
a different page.
Args:
columns_order: Optional. Passed straight to self.ToJSon().
order_by: Optional. Passed straight to self.ToJSon().
req_id: Optional. The response id, as retrieved by the request.
response_handler: Optional. The response handler, as retrieved by the
request.
Returns:
A JSON response string to be received by JS the visualization Query
object. This response would be translated into a DataTable on the
client side.
Example result (newlines added for readability):
google.visualization.Query.setResponse({
'version':'0.6', 'reqId':'0', 'status':'OK',
'table': {cols: [...], rows: [...]}});
Note: The URL returning this string can be used as a data source by Google
Visualization Gadgets or from JS code.
"""
response_obj = {
"version": "0.6",
"reqId": str(req_id),
"table": self._ToJSonObj(columns_order, order_by),
"status": "ok"
}
encoder = DataTableJSONEncoder()
return "%s(%s);" % (response_handler,
encoder.encode(response_obj).encode("utf-8"))
def ToResponse(self, columns_order=None, order_by=(), tqx=""):
"""Writes the right response according to the request string passed in tqx.
This method parses the tqx request string (format of which is defined in
the documentation for implementing a data source of Google Visualization),
and returns the right response according to the request.
It parses out the "out" parameter of tqx, calls the relevant response
(ToJSonResponse() for "json", ToCsv() for "csv", ToHtml() for "html",
ToTsvExcel() for "tsv-excel") and passes the response function the rest of
the relevant request keys.
Args:
columns_order: Optional. Passed as is to the relevant response function.
order_by: Optional. Passed as is to the relevant response function.
tqx: Optional. The request string as received by HTTP GET. Should be in
the format "key1:value1;key2:value2...". All keys have a default
value, so an empty string will just do the default (which is calling
ToJSonResponse() with no extra parameters).
Returns:
A response string, as returned by the relevant response function.
Raises:
DataTableException: One of the parameters passed in tqx is not supported.
"""
tqx_dict = {}
if tqx:
tqx_dict = dict(opt.split(":") for opt in tqx.split(";"))
if tqx_dict.get("version", "0.6") != "0.6":
raise DataTableException(
"Version (%s) passed by request is not supported."
% tqx_dict["version"])
if tqx_dict.get("out", "json") == "json":
response_handler = tqx_dict.get("responseHandler",
"google.visualization.Query.setResponse")
return self.ToJSonResponse(columns_order, order_by,
req_id=tqx_dict.get("reqId", 0),
response_handler=response_handler)
elif tqx_dict["out"] == "html":
return self.ToHtml(columns_order, order_by)
elif tqx_dict["out"] == "csv":
return self.ToCsv(columns_order, order_by)
elif tqx_dict["out"] == "tsv-excel":
return self.ToTsvExcel(columns_order, order_by)
else:
raise DataTableException(
"'out' parameter: '%s' is not supported" % tqx_dict["out"])
| mit |
xhaggi/xbmc | tools/EventClients/lib/python/ps3/sixaxis.py | 155 | 11070 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2013 Team XBMC
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import time
import sys
import struct
import math
import binascii
from bluetooth import set_l2cap_mtu
SX_SELECT = 1 << 0
SX_L3 = 1 << 1
SX_R3 = 1 << 2
SX_START = 1 << 3
SX_DUP = 1 << 4
SX_DRIGHT = 1 << 5
SX_DDOWN = 1 << 6
SX_DLEFT = 1 << 7
SX_L2 = 1 << 8
SX_R2 = 1 << 9
SX_L1 = 1 << 10
SX_R1 = 1 << 11
SX_TRIANGLE = 1 << 12
SX_CIRCLE = 1 << 13
SX_X = 1 << 14
SX_SQUARE = 1 << 15
SX_POWER = 1 << 16
SX_LSTICK_X = 0
SX_LSTICK_Y = 1
SX_RSTICK_X = 2
SX_RSTICK_Y = 3
# (map, key, amount index, axis)
keymap_sixaxis = {
SX_X : ('XG', 'A', 0, 0),
SX_CIRCLE : ('XG', 'B', 0, 0),
SX_SQUARE : ('XG', 'X', 0, 0),
SX_TRIANGLE : ('XG', 'Y', 0, 0),
SX_DUP : ('XG', 'dpadup', 0, 0),
SX_DDOWN : ('XG', 'dpaddown', 0, 0),
SX_DLEFT : ('XG', 'dpadleft', 0, 0),
SX_DRIGHT : ('XG', 'dpadright', 0, 0),
SX_START : ('XG', 'start', 0, 0),
SX_SELECT : ('XG', 'back', 0, 0),
SX_R1 : ('XG', 'white', 0, 0),
SX_R2 : ('XG', 'rightanalogtrigger', 6, 1),
SX_L2 : ('XG', 'leftanalogtrigger', 5, 1),
SX_L1 : ('XG', 'black', 0, 0),
SX_L3 : ('XG', 'leftthumbbutton', 0, 0),
SX_R3 : ('XG', 'rightthumbbutton', 0, 0),
}
# keyed by axis data index: (map, action for negative direction, action for positive direction)
axismap_sixaxis = {
SX_LSTICK_X : ('XG', 'leftthumbstickleft' , 'leftthumbstickright'),
SX_LSTICK_Y : ('XG', 'leftthumbstickup' , 'leftthumbstickdown'),
SX_RSTICK_X : ('XG', 'rightthumbstickleft', 'rightthumbstickright'),
SX_RSTICK_Y : ('XG', 'rightthumbstickup' , 'rightthumbstickdown'),
}
# to make sure all combination keys are checked first
# we sort the keymap's button codes in reverse order
# this guarantees that any bit-combined button code
# will be processed first
keymap_sixaxis_keys = keymap_sixaxis.keys()
keymap_sixaxis_keys.sort()
keymap_sixaxis_keys.reverse()
def getkeys(bflags):
    keys = []
    for k in keymap_sixaxis_keys:
        if (k & bflags) == k:
            keys.append(k)
            bflags = bflags & ~k
    return keys
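# Illustrative example (uses the SX_* constants defined above): pressing X and
# L1 together gives bflags == SX_X | SX_L1; the descending scan lets any
# multi-bit combination code in the keymap match before its component buttons.
#   getkeys(SX_X | SX_L1)  # -> [SX_X, SX_L1] == [16384, 1024]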
def normalize(val):
upperlimit = 65281
lowerlimit = 2
val_range = upperlimit - lowerlimit
offset = 10000
val = (val + val_range / 2) % val_range
upperlimit -= offset
lowerlimit += offset
if val < lowerlimit:
val = lowerlimit
if val > upperlimit:
val = upperlimit
val = ((float(val) - offset) / (float(upperlimit) -
lowerlimit)) * 65535.0
if val <= 0:
val = 1
return val
def normalize_axis(val, deadzone):
val = float(val) - 127.5
val = val / 127.5
if abs(val) < deadzone:
return 0.0
if val > 0.0:
val = (val - deadzone) / (1.0 - deadzone)
else:
val = (val + deadzone) / (1.0 - deadzone)
return 65536.0 * val
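# Worked example (illustrative): with a 0.30 deadzone, a centered stick byte of
# 127-128 yields 0.0, full deflection (0 or 255) yields -/+65536.0, and values
# just past the deadzone are rescaled across the remaining travel:
#   normalize_axis(255, 0.30)  # -> 65536.0
#   normalize_axis(140, 0.30)  # -> 0.0 (|0.098| < 0.30)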
def normalize_angle(val, valrange):
valrange *= 2
val = val / valrange
if val > 1.0:
val = 1.0
if val < -1.0:
val = -1.0
return (val + 0.5) * 65535.0
def average(array):
val = 0
for i in array:
val += i
return val / len(array)
def smooth(arr, val):
cnt = len(arr)
arr.insert(0, val)
arr.pop(cnt)
return average(arr)
def set_l2cap_mtu2(sock, mtu):
SOL_L2CAP = 6
L2CAP_OPTIONS = 1
s = sock.getsockopt (SOL_L2CAP, L2CAP_OPTIONS, 12)
o = list( struct.unpack ("HHHBBBH", s) )
o[0] = o[1] = mtu
s = struct.pack ("HHHBBBH", *o)
try:
sock.setsockopt (SOL_L2CAP, L2CAP_OPTIONS, s)
except:
print "Warning: Unable to set mtu"
class sixaxis():
def __init__(self, xbmc, control_sock, interrupt_sock):
self.xbmc = xbmc
self.num_samples = 16
self.sumx = [0] * self.num_samples
self.sumy = [0] * self.num_samples
self.sumr = [0] * self.num_samples
self.axis_amount = [0, 0, 0, 0]
self.released = set()
self.pressed = set()
self.pending = set()
self.held = set()
self.psflags = 0
self.psdown = 0
self.mouse_enabled = 0
set_l2cap_mtu2(control_sock, 64)
set_l2cap_mtu2(interrupt_sock, 64)
        time.sleep(0.25) # If we ask too quickly here, it sometimes doesn't start
# sixaxis needs this to enable it
# 0x53 => HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE
control_sock.send("\x53\xf4\x42\x03\x00\x00")
data = control_sock.recv(1)
# This command will turn on the gyro and set the leds
# I wonder if turning on the gyro makes it draw more current??
# it's probably a flag somewhere in the following command
# HID Command: HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_OUTPUT
# HID Report:1
bytes = [0x52, 0x1]
bytes.extend([0x00, 0x00, 0x00])
bytes.extend([0xFF, 0x72])
bytes.extend([0x00, 0x00, 0x00, 0x00])
bytes.extend([0x02]) # 0x02 LED1, 0x04 LED2 ... 0x10 LED4
        # The following sections should set the blink frequency of
        # the leds on the controller, but I've not figured out how.
        # These values were suggested in a mailing list, but no explanation was
        # given for how they should be combined into the 5 bytes per led
#0xFF = 0.5Hz
#0x80 = 1Hz
#0x40 = 2Hz
bytes.extend([0xFF, 0x00, 0x01, 0x00, 0x01]) #LED4 [0xff, 0xff, 0x10, 0x10, 0x10]
bytes.extend([0xFF, 0x00, 0x01, 0x00, 0x01]) #LED3 [0xff, 0x40, 0x08, 0x10, 0x10]
bytes.extend([0xFF, 0x00, 0x01, 0x00, 0x01]) #LED2 [0xff, 0x00, 0x10, 0x30, 0x30]
bytes.extend([0xFF, 0x00, 0x01, 0x00, 0x01]) #LED1 [0xff, 0x00, 0x10, 0x40, 0x10]
bytes.extend([0x00, 0x00, 0x00, 0x00, 0x00])
bytes.extend([0x00, 0x00, 0x00, 0x00, 0x00])
control_sock.send(struct.pack("42B", *bytes))
data = control_sock.recv(1)
def __del__(self):
self.close()
def close(self):
for key in (self.held | self.pressed):
(mapname, action, amount, axis) = keymap_sixaxis[key]
self.xbmc.send_button_state(map=mapname, button=action, amount=0, down=0, axis=axis)
self.held = set()
self.pressed = set()
def process_socket(self, isock):
data = isock.recv(50)
        if data is None:
return False
return self.process_data(data)
def process_data(self, data):
if len(data) < 3:
return False
# make sure this is the correct report
if struct.unpack("BBB", data[0:3]) != (0xa1, 0x01, 0x00):
return False
if len(data) >= 48:
v1 = struct.unpack("h", data[42:44])
v2 = struct.unpack("h", data[44:46])
v3 = struct.unpack("h", data[46:48])
else:
v1 = [0,0]
v2 = [0,0]
v3 = [0,0]
if len(data) >= 50:
v4 = struct.unpack("h", data[48:50])
else:
v4 = [0,0]
ax = float(v1[0])
ay = float(v2[0])
az = float(v3[0])
rz = float(v4[0])
at = math.sqrt(ax*ax + ay*ay + az*az)
bflags = struct.unpack("<I", data[3:7])[0]
if len(data) > 27:
pressure = struct.unpack("BBBBBBBBBBBB", data[15:27])
else:
            pressure = [0,0,0,0,0,0,0,0,0,0,0,0]  # 12 entries, matching the unpacked report
roll = -math.atan2(ax, math.sqrt(ay*ay + az*az))
pitch = math.atan2(ay, math.sqrt(ax*ax + az*az))
pitch -= math.radians(20);
xpos = normalize_angle(roll, math.radians(30))
ypos = normalize_angle(pitch, math.radians(30))
axis = struct.unpack("BBBB", data[7:11])
return self.process_input(bflags, pressure, axis, xpos, ypos)
def process_input(self, bflags, pressure, axis, xpos, ypos):
xval = smooth(self.sumx, xpos)
yval = smooth(self.sumy, ypos)
analog = False
for i in range(4):
config = axismap_sixaxis[i]
self.axis_amount[i] = self.send_singleaxis(axis[i], self.axis_amount[i], config[0], config[1], config[2])
if self.axis_amount[i] != 0:
analog = True
# send the mouse position to xbmc
if self.mouse_enabled == 1:
self.xbmc.send_mouse_position(xval, yval)
if (bflags & SX_POWER) == SX_POWER:
if self.psdown:
if (time.time() - self.psdown) > 5:
for key in (self.held | self.pressed):
(mapname, action, amount, axis) = keymap_sixaxis[key]
self.xbmc.send_button_state(map=mapname, button=action, amount=0, down=0, axis=axis)
raise Exception("PS3 Sixaxis powering off, user request")
else:
self.psdown = time.time()
else:
if self.psdown:
self.mouse_enabled = 1 - self.mouse_enabled
self.psdown = 0
keys = set(getkeys(bflags))
self.released = (self.pressed | self.held) - keys
self.held = (self.pressed | self.held) - self.released
self.pressed = (keys - self.held) & self.pending
self.pending = (keys - self.held)
for key in self.released:
(mapname, action, amount, axis) = keymap_sixaxis[key]
self.xbmc.send_button_state(map=mapname, button=action, amount=0, down=0, axis=axis)
for key in self.held:
(mapname, action, amount, axis) = keymap_sixaxis[key]
if amount > 0:
amount = pressure[amount-1] * 256
self.xbmc.send_button_state(map=mapname, button=action, amount=amount, down=1, axis=axis)
for key in self.pressed:
(mapname, action, amount, axis) = keymap_sixaxis[key]
if amount > 0:
amount = pressure[amount-1] * 256
self.xbmc.send_button_state(map=mapname, button=action, amount=amount, down=1, axis=axis)
if analog or keys or self.mouse_enabled:
return True
else:
return False
def send_singleaxis(self, axis, last_amount, mapname, action_min, action_pos):
amount = normalize_axis(axis, 0.30)
if last_amount < 0:
last_action = action_min
elif last_amount > 0:
last_action = action_pos
else:
last_action = None
if amount < 0:
new_action = action_min
elif amount > 0:
new_action = action_pos
else:
new_action = None
if last_action and new_action != last_action:
self.xbmc.send_button_state(map=mapname, button=last_action, amount=0, axis=1)
if new_action and amount != last_amount:
self.xbmc.send_button_state(map=mapname, button=new_action, amount=abs(amount), axis=1)
return amount
| gpl-2.0 |
simonpatrick/bite-project | deps/gdata-python-client/tests/gdata_tests/blogger_test.py | 128 | 3855 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import unittest
from gdata import test_data
import gdata.blogger
import atom
class BlogEntryTest(unittest.TestCase):
def testBlogEntryFromString(self):
entry = gdata.blogger.BlogEntryFromString(test_data.BLOG_ENTRY)
self.assertEquals(entry.GetBlogName(), 'blogName')
self.assertEquals(entry.GetBlogId(), 'blogID')
self.assertEquals(entry.title.text, 'Lizzy\'s Diary')
def testBlogPostFeedFromString(self):
feed = gdata.blogger.BlogPostFeedFromString(test_data.BLOG_POSTS_FEED)
self.assertEquals(len(feed.entry), 1)
self.assert_(isinstance(feed, gdata.blogger.BlogPostFeed))
self.assert_(isinstance(feed.entry[0], gdata.blogger.BlogPostEntry))
self.assertEquals(feed.entry[0].GetPostId(), 'postID')
self.assertEquals(feed.entry[0].GetBlogId(), 'blogID')
self.assertEquals(feed.entry[0].title.text, 'Quite disagreeable')
def testCommentFeedFromString(self):
feed = gdata.blogger.CommentFeedFromString(test_data.BLOG_COMMENTS_FEED)
self.assertEquals(len(feed.entry), 1)
self.assert_(isinstance(feed, gdata.blogger.CommentFeed))
self.assert_(isinstance(feed.entry[0], gdata.blogger.CommentEntry))
self.assertEquals(feed.entry[0].GetBlogId(), 'blogID')
self.assertEquals(feed.entry[0].GetCommentId(), 'commentID')
self.assertEquals(feed.entry[0].title.text, 'This is my first comment')
self.assertEquals(feed.entry[0].in_reply_to.source,
'http://blogName.blogspot.com/feeds/posts/default/postID')
self.assertEquals(feed.entry[0].in_reply_to.ref,
'tag:blogger.com,1999:blog-blogID.post-postID')
self.assertEquals(feed.entry[0].in_reply_to.href,
'http://blogName.blogspot.com/2007/04/first-post.html')
self.assertEquals(feed.entry[0].in_reply_to.type, 'text/html')
def testIdParsing(self):
entry = gdata.blogger.BlogEntry()
entry.id = atom.Id(
text='tag:blogger.com,1999:user-146606542.blog-4023408167658848')
self.assertEquals(entry.GetBlogId(), '4023408167658848')
entry.id = atom.Id(text='tag:blogger.com,1999:blog-4023408167658848')
self.assertEquals(entry.GetBlogId(), '4023408167658848')
class InReplyToTest(unittest.TestCase):
def testToAndFromString(self):
in_reply_to = gdata.blogger.InReplyTo(href='http://example.com/href',
ref='http://example.com/ref', source='http://example.com/my_post',
type='text/html')
xml_string = str(in_reply_to)
parsed = gdata.blogger.InReplyToFromString(xml_string)
self.assertEquals(parsed.source, in_reply_to.source)
self.assertEquals(parsed.href, in_reply_to.href)
self.assertEquals(parsed.ref, in_reply_to.ref)
self.assertEquals(parsed.type, in_reply_to.type)
class CommentEntryTest(unittest.TestCase):
def testToAndFromString(self):
comment = gdata.blogger.CommentEntry(content=atom.Content(text='Nifty!'),
in_reply_to=gdata.blogger.InReplyTo(
source='http://example.com/my_post'))
parsed = gdata.blogger.CommentEntryFromString(str(comment))
self.assertEquals(parsed.in_reply_to.source, comment.in_reply_to.source)
self.assertEquals(parsed.content.text, comment.content.text)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
bhairavmehta95/flashcard-helper-alexa-skill | venv/lib/python2.7/site-packages/pip/commands/list.py | 339 | 11369 | from __future__ import absolute_import
import json
import logging
import warnings
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from pip._vendor import six
from pip.basecommand import Command
from pip.exceptions import CommandError
from pip.index import PackageFinder
from pip.utils import (
get_installed_distributions, dist_is_editable)
from pip.utils.deprecation import RemovedInPip10Warning
from pip.cmdoptions import make_option_group, index_group
logger = logging.getLogger(__name__)
class ListCommand(Command):
"""
List installed packages, including editables.
Packages are listed in a case-insensitive sorted order.
"""
name = 'list'
usage = """
%prog [options]"""
summary = 'List installed packages.'
def __init__(self, *args, **kw):
super(ListCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-o', '--outdated',
action='store_true',
default=False,
help='List outdated packages')
cmd_opts.add_option(
'-u', '--uptodate',
action='store_true',
default=False,
help='List uptodate packages')
cmd_opts.add_option(
'-e', '--editable',
action='store_true',
default=False,
help='List editable projects.')
cmd_opts.add_option(
'-l', '--local',
action='store_true',
default=False,
help=('If in a virtualenv that has global access, do not list '
'globally-installed packages.'),
)
self.cmd_opts.add_option(
'--user',
dest='user',
action='store_true',
default=False,
help='Only output packages installed in user-site.')
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help=("Include pre-release and development versions. By default, "
"pip only finds stable versions."),
)
cmd_opts.add_option(
'--format',
action='store',
dest='list_format',
choices=('legacy', 'columns', 'freeze', 'json'),
help="Select the output format among: legacy (default), columns, "
"freeze or json.",
)
cmd_opts.add_option(
'--not-required',
action='store_true',
dest='not_required',
help="List packages that are not dependencies of "
"installed packages.",
)
index_opts = make_option_group(index_group, self.parser)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this list command.
"""
return PackageFinder(
find_links=options.find_links,
index_urls=index_urls,
allow_all_prereleases=options.pre,
trusted_hosts=options.trusted_hosts,
process_dependency_links=options.process_dependency_links,
session=session,
)
def run(self, options, args):
if options.allow_external:
warnings.warn(
"--allow-external has been deprecated and will be removed in "
"the future. Due to changes in the repository protocol, it no "
"longer has any effect.",
RemovedInPip10Warning,
)
if options.allow_all_external:
warnings.warn(
"--allow-all-external has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if options.allow_unverified:
warnings.warn(
"--allow-unverified has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if options.list_format is None:
warnings.warn(
"The default format will switch to columns in the future. "
"You can use --format=(legacy|columns) (or define a "
"format=(legacy|columns) in your pip.conf under the [list] "
"section) to disable this warning.",
RemovedInPip10Warning,
)
if options.outdated and options.uptodate:
raise CommandError(
"Options --outdated and --uptodate cannot be combined.")
packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
editables_only=options.editable,
)
if options.outdated:
packages = self.get_outdated(packages, options)
elif options.uptodate:
packages = self.get_uptodate(packages, options)
if options.not_required:
packages = self.get_not_required(packages, options)
self.output_package_listing(packages, options)
def get_outdated(self, packages, options):
return [
dist for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version > dist.parsed_version
]
def get_uptodate(self, packages, options):
return [
dist for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version == dist.parsed_version
]
def get_not_required(self, packages, options):
dep_keys = set()
for dist in packages:
dep_keys.update(requirement.key for requirement in dist.requires())
return set(pkg for pkg in packages if pkg.key not in dep_keys)
def iter_packages_latest_infos(self, packages, options):
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.debug('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
dependency_links = []
for dist in packages:
if dist.has_metadata('dependency_links.txt'):
dependency_links.extend(
dist.get_metadata_lines('dependency_links.txt'),
)
with self._build_session(options) as session:
finder = self._build_package_finder(options, index_urls, session)
finder.add_dependency_links(dependency_links)
for dist in packages:
typ = 'unknown'
all_candidates = finder.find_all_candidates(dist.key)
if not options.pre:
# Remove prereleases
all_candidates = [candidate for candidate in all_candidates
if not candidate.version.is_prerelease]
if not all_candidates:
continue
best_candidate = max(all_candidates,
key=finder._candidate_sort_key)
remote_version = best_candidate.version
if best_candidate.location.is_wheel:
typ = 'wheel'
else:
typ = 'sdist'
# This is dirty but makes the rest of the code much cleaner
dist.latest_version = remote_version
dist.latest_filetype = typ
yield dist
def output_legacy(self, dist):
if dist_is_editable(dist):
return '%s (%s, %s)' % (
dist.project_name,
dist.version,
dist.location,
)
else:
return '%s (%s)' % (dist.project_name, dist.version)
def output_legacy_latest(self, dist):
return '%s - Latest: %s [%s]' % (
self.output_legacy(dist),
dist.latest_version,
dist.latest_filetype,
)
def output_package_listing(self, packages, options):
packages = sorted(
packages,
key=lambda dist: dist.project_name.lower(),
)
if options.list_format == 'columns' and packages:
data, header = format_for_columns(packages, options)
self.output_package_listing_columns(data, header)
elif options.list_format == 'freeze':
for dist in packages:
logger.info("%s==%s", dist.project_name, dist.version)
elif options.list_format == 'json':
logger.info(format_for_json(packages, options))
else: # legacy
for dist in packages:
if options.outdated:
logger.info(self.output_legacy_latest(dist))
else:
logger.info(self.output_legacy(dist))
def output_package_listing_columns(self, data, header):
# insert the header first: we need to know the size of column names
if len(data) > 0:
data.insert(0, header)
pkg_strings, sizes = tabulate(data)
# Create and add a separator.
if len(data) > 0:
pkg_strings.insert(1, " ".join(map(lambda x: '-' * x, sizes)))
for val in pkg_strings:
logger.info(val)
def tabulate(vals):
# From pfmoore on GitHub:
# https://github.com/pypa/pip/issues/3651#issuecomment-216932564
assert len(vals) > 0
sizes = [0] * max(len(x) for x in vals)
for row in vals:
sizes = [max(s, len(str(c))) for s, c in zip_longest(sizes, row)]
result = []
for row in vals:
display = " ".join([str(c).ljust(s) if c is not None else ''
for s, c in zip_longest(sizes, row)])
result.append(display)
return result, sizes
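# Illustrative example (assumed inputs; this is an internal helper, not pip's
# public API): every column is padded to its widest cell, joined by one space.
#   tabulate([["Package", "Version"], ["pip", "9.0.1"]])
#   # -> (["Package Version", "pip     9.0.1  "], [7, 7])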
def format_for_columns(pkgs, options):
"""
Convert the package data into something usable
by output_package_listing_columns.
"""
running_outdated = options.outdated
# Adjust the header for the `pip list --outdated` case.
if running_outdated:
header = ["Package", "Version", "Latest", "Type"]
else:
header = ["Package", "Version"]
data = []
if any(dist_is_editable(x) for x in pkgs):
header.append("Location")
for proj in pkgs:
# if we're working on the 'outdated' list, separate out the
# latest_version and type
row = [proj.project_name, proj.version]
if running_outdated:
row.append(proj.latest_version)
row.append(proj.latest_filetype)
if dist_is_editable(proj):
row.append(proj.location)
data.append(row)
return data, header
def format_for_json(packages, options):
data = []
for dist in packages:
info = {
'name': dist.project_name,
'version': six.text_type(dist.version),
}
if options.outdated:
info['latest_version'] = six.text_type(dist.latest_version)
info['latest_filetype'] = dist.latest_filetype
data.append(info)
return json.dumps(data)
| mit |
danhuss/faker | faker/providers/person/no_NO/__init__.py | 2 | 7066 | from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats = (
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name_male}}-{{first_name_male}} {{last_name}}',
'{{first_name_male}}-{{first_name_male}} {{last_name}}',
'{{first_name_female}}-{{first_name_female}} {{last_name}}',
'{{first_name_female}}-{{first_name_female}} {{last_name}}',
'{{first_name}} {{last_name}}-{{last_name}}',
'{{first_name}} {{last_name}}-{{last_name}}',
'{{prefix}} {{first_name_male}} {{last_name}}',
)
# 100 most common male first names, alphabetically.
# Source: http://www.ssb.no/a/navn/fornavn-menn-100.html
first_names_male = (
'Adrian',
'Alexander',
'Alf',
'Anders',
'Andreas',
'Arild',
'Arne',
'Asbjørn',
'Bjørn',
'Christian',
'Dag',
'Daniel',
'Egil',
'Einar',
'Eirik',
'Eivind',
'Emil',
'Erik',
'Erling',
'Espen',
'Finn',
'Frank',
'Fredrik',
'Frode',
'Geir',
'Gunnar',
'Hans',
'Harald',
'Helge',
'Henrik',
'Håkon',
'Håvard',
'Ivar',
'Jan',
'Jens',
'Joakim',
'Johannes',
'Johan',
'John',
'Jonas',
'Jon',
'Jørgen',
'Karl',
'Kenneth',
'Kim',
'Kjell',
'Kjetil',
'Knut',
'Kåre',
'Kristian',
'Kristoffer',
'Lars',
'Leif',
'Magne',
'Magnus',
'Marius',
'Markus',
'Martin',
'Mathias',
'Morten',
'Nils',
'Odd',
'Ola',
'Olav',
'Ole',
'Per',
'Petter',
'Pål',
'Roar',
'Robert',
'Roger',
'Rolf',
'Roy',
'Rune',
'Sander',
'Sebastian',
'Sigurd',
'Simen',
'Sindre',
'Sondre',
'Steinar',
'Stein',
'Stian',
'Stig',
'Svein',
'Sverre',
'Terje',
'Thomas',
'Thor',
'Tobias',
'Tommy',
'Tom',
'Torbjørn',
'Tore',
'Tor',
'Trond',
'Vegard',
'Vidar',
'Øystein',
'Øyvind',
)
# 100 most common female first names, alphabetically.
# Source: http://www.ssb.no/a/navn/fornavn-kvinner-100.html
first_names_female = (
'Andrea',
'Anette',
'Anita',
'Anna',
'Anne',
'Ann',
'Astrid',
'Aud',
'Bente',
'Berit',
'Bjørg',
'Britt',
'Camilla',
'Cathrine',
'Cecilie',
'Elin',
'Elisabeth',
'Elise',
'Eli',
'Ellen',
'Else',
'Emilie',
'Emma',
'Eva',
'Gerd',
'Grete',
'Grethe',
'Gro',
'Gunn',
'Hanna',
'Hanne',
'Hege',
'Heidi',
'Helene',
'Hilde',
'Ida',
'Ingeborg',
'Inger',
'Ingrid',
'Irene',
'Janne',
'Jenny',
'Jorunn',
'Julie',
'Karen',
'Karin',
'Kari',
'Karoline',
'Kirsten',
'Kjersti',
'Kristine',
'Kristin',
'Laila',
'Lene',
'Linda',
'Line',
'Linn',
'Lise',
'Liv',
'Malin',
'Maren',
'Marianne',
'Maria',
'Marie',
'Mari',
'Marit',
'Marte',
'Martine',
'May',
'Mette',
'Mona',
'Monica',
'Nina',
'Nora',
'Ragnhild',
'Randi',
'Reidun',
'Rita',
'Ruth',
'Sara',
'Sigrid',
'Silje',
'Siri',
'Sissel',
'Siv',
'Sofie',
'Solveig',
'Stine',
'Synnøve',
'Thea',
'Tone',
'Tonje',
'Torill',
'Tove',
'Trine',
'Turid',
'Unni',
'Vilde',
'Wenche',
'Åse',
)
first_names = first_names_male + first_names_female
# 100 most common last names, alphabetically.
# Source: http://www.ssb.no/a/navn/alf/etter100.html
last_names = (
'Aasen',
'Aas',
'Abrahamsen',
'Ahmed',
'Ali',
'Amundsen',
'Andersen',
'Andreassen',
'Andresen',
'Antonsen',
'Arnesen',
'Aune',
'Bakken',
'Bakke',
'Berge',
'Berg',
'Berntsen',
'Bøe',
'Birkeland',
'Brekke',
'Christensen',
'Dahl',
'Danielsen',
'Edvardsen',
'Eide',
'Eliassen',
'Ellingsen',
'Engen',
'Eriksen',
'Evensen',
'Fredriksen',
'Gulbrandsen',
'Gundersen',
'Hagen',
'Halvorsen',
'Hansen',
'Hanssen',
'Haugen',
'Hauge',
'Haugland',
'Haug',
'Helland',
'Henriksen',
'Holm',
'Isaksen',
'Iversen',
'Jacobsen',
'Jakobsen',
'Jensen',
'Jenssen',
'Johannessen',
'Johansen',
'Johnsen',
'Jørgensen',
'Karlsen',
'Knudsen',
'Knutsen',
'Kristensen',
'Kristiansen',
'Kristoffersen',
'Larsen',
'Lien',
'Lie',
'Lunde',
'Lund',
'Madsen',
'Martinsen',
'Mathisen',
'Mikkelsen',
'Moen',
'Moe',
'Myhre',
'Myklebust',
'Nguyen',
'Nielsen',
'Nilsen',
'Næss',
'Nygård',
'Olsen',
'Paulsen',
'Pedersen',
'Pettersen',
'Rasmussen',
'Rønning',
'Ruud',
'Sandvik',
'Simonsen',
'Sivertsen',
'Solberg',
'Solheim',
'Sørensen',
'Sæther',
'Strand',
'Strøm',
'Svendsen',
'Tangen',
'Thomassen',
'Thorsen',
'Tveit',
'Vik',
'Ødegård',
)
prefixes = (
'Dr.', 'Prof.',
)
| mit |
kennethlove/django | tests/regressiontests/comment_tests/tests/comment_form_tests.py | 97 | 3007 | from __future__ import absolute_import
import time
from django.conf import settings
from django.contrib.comments.forms import CommentForm
from django.contrib.comments.models import Comment
from . import CommentTestCase
from ..models import Article
class CommentFormTests(CommentTestCase):
def testInit(self):
f = CommentForm(Article.objects.get(pk=1))
self.assertEqual(f.initial['content_type'], str(Article._meta))
self.assertEqual(f.initial['object_pk'], "1")
self.assertNotEqual(f.initial['security_hash'], None)
self.assertNotEqual(f.initial['timestamp'], None)
def testValidPost(self):
a = Article.objects.get(pk=1)
f = CommentForm(a, data=self.getValidData(a))
self.assertTrue(f.is_valid(), f.errors)
return f
def tamperWithForm(self, **kwargs):
a = Article.objects.get(pk=1)
d = self.getValidData(a)
d.update(kwargs)
f = CommentForm(Article.objects.get(pk=1), data=d)
self.assertFalse(f.is_valid())
return f
def testHoneypotTampering(self):
self.tamperWithForm(honeypot="I am a robot")
def testTimestampTampering(self):
self.tamperWithForm(timestamp=str(time.time() - 28800))
def testSecurityHashTampering(self):
self.tamperWithForm(security_hash="Nobody expects the Spanish Inquisition!")
def testContentTypeTampering(self):
self.tamperWithForm(content_type="auth.user")
def testObjectPKTampering(self):
self.tamperWithForm(object_pk="3")
def testSecurityErrors(self):
f = self.tamperWithForm(honeypot="I am a robot")
self.assertTrue("honeypot" in f.security_errors())
def testGetCommentObject(self):
f = self.testValidPost()
c = f.get_comment_object()
self.assertTrue(isinstance(c, Comment))
self.assertEqual(c.content_object, Article.objects.get(pk=1))
self.assertEqual(c.comment, "This is my comment")
c.save()
self.assertEqual(Comment.objects.count(), 1)
def testProfanities(self):
"""Test COMMENTS_ALLOW_PROFANITIES and PROFANITIES_LIST settings"""
a = Article.objects.get(pk=1)
d = self.getValidData(a)
# Save settings in case other tests need 'em
saved = settings.PROFANITIES_LIST, settings.COMMENTS_ALLOW_PROFANITIES
# Don't wanna swear in the unit tests if we don't have to...
settings.PROFANITIES_LIST = ["rooster"]
# Try with COMMENTS_ALLOW_PROFANITIES off
settings.COMMENTS_ALLOW_PROFANITIES = False
f = CommentForm(a, data=dict(d, comment="What a rooster!"))
self.assertFalse(f.is_valid())
# Now with COMMENTS_ALLOW_PROFANITIES on
settings.COMMENTS_ALLOW_PROFANITIES = True
f = CommentForm(a, data=dict(d, comment="What a rooster!"))
self.assertTrue(f.is_valid())
# Restore settings
settings.PROFANITIES_LIST, settings.COMMENTS_ALLOW_PROFANITIES = saved
| bsd-3-clause |
kirca/OpenUpgrade | addons/l10n_ca/__openerp__.py | 260 | 3087 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Canada - Accounting',
'version': '1.2',
'author': 'Savoir-faire Linux',
'website': 'http://www.savoirfairelinux.com',
'category': 'Localization/Account Charts',
'description': """
This is the module to manage the English and French - Canadian accounting chart in OpenERP.
===========================================================================================
Canadian accounting charts and localizations.
Fiscal positions
----------------
When considering taxes to be applied, it is the province where the delivery occurs that matters.
Therefore we decided to implement the most common case in the fiscal positions: delivery is the
responsibility of the supplier and done at the customer location.
Some examples:
1) You have a customer from another province and you deliver to his location.
On the customer, set the fiscal position to his province.
2) You have a customer from another province. However this customer comes to your location
with their truck to pick up products. On the customer, do not set any fiscal position.
3) An international supplier doesn't charge you any tax. Taxes are charged at customs
by the customs broker. On the supplier, set the fiscal position to International.
4) An international supplier charges you your provincial tax. They are registered with your
provincial government and remit taxes themselves. On the supplier, do not set any fiscal
position.
""",
'depends': [
'base',
'account',
'base_iban',
'base_vat',
'account_chart',
'account_anglo_saxon'
],
'data': [
'account_chart_en.xml',
'account_tax_code_en.xml',
'account_chart_template_en.xml',
'account_tax_en.xml',
'fiscal_templates_en.xml',
'account_chart_fr.xml',
'account_tax_code_fr.xml',
'account_chart_template_fr.xml',
'account_tax_fr.xml',
'fiscal_templates_fr.xml',
'l10n_ca_wizard.xml'
],
'demo': [],
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
matrogers/pylearn2 | pylearn2/models/sparse_autoencoder.py | 37 | 1053 | import theano
import theano.sparse
from theano import tensor
from pylearn2.models.autoencoder import DenoisingAutoencoder
from pylearn2.space import VectorSpace
from theano.sparse.sandbox.sp2 import sampling_dot
from pylearn2.expr.basic import theano_norms
class SparseDenoisingAutoencoder(DenoisingAutoencoder):
"""
Denoising autoencoder working with only sparse inputs and efficient
reconstruction sampling
Parameters
----------
corruptor : WRITEME
nvis : WRITEME
nhid : WRITEME
act_enc : WRITEME
act_dec : WRITEME
tied_weights : WRITEME
irange : WRITEME
rng : WRITEME
References
----------
Y. Dauphin, X. Glorot, Y. Bengio. Large-Scale Learning of Embeddings with
Reconstruction Sampling. In Proceedings of the 28th International
Conference on Machine Learning (ICML 2011).
"""
def __init__(self):
raise NotImplementedError(
            'This class has been deprecated since 2012. '
            'In Feb 2015, all historical code was removed.')
| bsd-3-clause |
glennw/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/msgutil.py | 658 | 7598 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Message related utilities.
Note: request.connection.write/read are used in this module, even though
the mod_python documentation says that they should be used only in connection
handlers. Unfortunately, we have no other options. For example,
request.write/read are not suitable because they don't allow direct raw
bytes writing/reading.
"""
import Queue
import threading
# Export Exception symbols from msgutil for backward compatibility
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import UnsupportedFrameException
# An API for handler to send/receive WebSocket messages.
def close_connection(request):
"""Close connection.
Args:
request: mod_python request.
"""
request.ws_stream.close_connection()
def send_message(request, payload_data, end=True, binary=False):
"""Send a message (or part of a message).
Args:
request: mod_python request.
payload_data: unicode text or str binary to send.
end: True to terminate a message.
False to send payload_data as part of a message that is to be
terminated by next or later send_message call with end=True.
binary: send payload_data as binary frame(s).
Raises:
BadOperationException: when server already terminated.
"""
request.ws_stream.send_message(payload_data, end, binary)
def receive_message(request):
"""Receive a WebSocket frame and return its payload as a text in
unicode or a binary in str.
Args:
request: mod_python request.
Raises:
InvalidFrameException: when client send invalid frame.
UnsupportedFrameException: when client send unsupported frame e.g. some
of reserved bit is set but no extension can
recognize it.
InvalidUTF8Exception: when client send a text frame containing any
invalid UTF-8 string.
ConnectionTerminatedException: when the connection is closed
unexpectedly.
BadOperationException: when client already terminated.
"""
return request.ws_stream.receive_message()
def send_ping(request, body=''):
request.ws_stream.send_ping(body)
class MessageReceiver(threading.Thread):
"""This class receives messages from the client.
This class provides three ways to receive messages: blocking,
non-blocking, and via callback. Callback has the highest precedence.
Note: This class should not be used with the standalone server for wss
because pyOpenSSL used by the server raises a fatal error if the socket
is accessed from multiple threads.
"""
def __init__(self, request, onmessage=None):
"""Construct an instance.
Args:
request: mod_python request.
onmessage: a function to be called when a message is received.
May be None. If not None, the function is called on
another thread. In that case, MessageReceiver.receive
and MessageReceiver.receive_nowait are useless
because they will never return any messages.
"""
threading.Thread.__init__(self)
self._request = request
self._queue = Queue.Queue()
self._onmessage = onmessage
self._stop_requested = False
self.setDaemon(True)
self.start()
def run(self):
try:
while not self._stop_requested:
message = receive_message(self._request)
if self._onmessage:
self._onmessage(message)
else:
self._queue.put(message)
finally:
close_connection(self._request)
def receive(self):
""" Receive a message from the channel, blocking.
Returns:
message as a unicode string.
"""
return self._queue.get()
def receive_nowait(self):
""" Receive a message from the channel, non-blocking.
Returns:
message as a unicode string if available. None otherwise.
"""
try:
message = self._queue.get_nowait()
except Queue.Empty:
message = None
return message
def stop(self):
"""Request to stop this instance.
The instance will be stopped after receiving the next message.
This method may not be very useful, but there is no clean way
in Python to forcefully stop a running thread.
"""
self._stop_requested = True
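# Minimal usage sketch (illustrative; `request` stands for the mod_python
# request object handed to a pywebsocket data-transfer handler):
#
#   def web_socket_transfer_data(request):
#       def on_message(message):
#           send_message(request, u'echo: ' + message)
#       receiver = MessageReceiver(request, onmessage=on_message)
#       # ... do other work; with a callback set, the receiver thread
#       # consumes messages, so receive()/receive_nowait() stay empty.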
class MessageSender(threading.Thread):
"""This class sends messages to the client.
This class provides both synchronous and asynchronous ways to send
messages.
Note: This class should not be used with the standalone server for wss
because pyOpenSSL used by the server raises a fatal error if the socket
is accessed from multiple threads.
"""
def __init__(self, request):
"""Construct an instance.
Args:
request: mod_python request.
"""
threading.Thread.__init__(self)
self._request = request
self._queue = Queue.Queue()
self.setDaemon(True)
self.start()
def run(self):
while True:
message, condition = self._queue.get()
condition.acquire()
send_message(self._request, message)
condition.notify()
condition.release()
def send(self, message):
"""Send a message, blocking."""
condition = threading.Condition()
condition.acquire()
self._queue.put((message, condition))
condition.wait()
def send_nowait(self, message):
"""Send a message, non-blocking."""
self._queue.put((message, threading.Condition()))
# vi:sts=4 sw=4 et
| mpl-2.0 |
StephenWeber/ansible | lib/ansible/plugins/strategy/debug.py | 22 | 5050 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import cmd
import pprint
import sys
from ansible.plugins.strategy.linear import StrategyModule as LinearStrategyModule
from ansible.compat.six.moves import reduce
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class NextAction(object):
""" The next action after an interpreter's exit. """
REDO = 1
CONTINUE = 2
EXIT = 3
def __init__(self, result=EXIT):
self.result = result
class StrategyModule(LinearStrategyModule):
def __init__(self, tqm):
self.curr_tqm = tqm
super(StrategyModule, self).__init__(tqm)
def _queue_task(self, host, task, task_vars, play_context):
self.curr_host = host
self.curr_task = task
self.curr_task_vars = task_vars
self.curr_play_context = play_context
super(StrategyModule, self)._queue_task(host, task, task_vars, play_context)
def _process_pending_results(self, iterator, one_pass=False, max_passes=None):
if not hasattr(self, "curr_host"):
return super(StrategyModule, self)._process_pending_results(iterator, one_pass, max_passes)
prev_host_state = iterator.get_host_state(self.curr_host)
results = super(StrategyModule, self)._process_pending_results(iterator, one_pass)
while self._need_debug(results):
next_action = NextAction()
dbg = Debugger(self, results, next_action)
dbg.cmdloop()
if next_action.result == NextAction.REDO:
# rollback host state
self.curr_tqm.clear_failed_hosts()
iterator._host_states[self.curr_host.name] = prev_host_state
if reduce(lambda total, res : res.is_failed() or total, results, False):
self._tqm._stats.failures[self.curr_host.name] -= 1
elif reduce(lambda total, res : res.is_unreachable() or total, results, False):
self._tqm._stats.dark[self.curr_host.name] -= 1
# redo
super(StrategyModule, self)._queue_task(self.curr_host, self.curr_task, self.curr_task_vars, self.curr_play_context)
results = super(StrategyModule, self)._process_pending_results(iterator, one_pass)
elif next_action.result == NextAction.CONTINUE:
break
elif next_action.result == NextAction.EXIT:
exit(1)
return results
def _need_debug(self, results):
return reduce(lambda total, res : res.is_failed() or res.is_unreachable() or total, results, False)
class Debugger(cmd.Cmd):
prompt = '(debug) ' # debugger
prompt_continuous = '> ' # multiple lines
def __init__(self, strategy_module, results, next_action):
# cmd.Cmd is old-style class
cmd.Cmd.__init__(self)
self.intro = "Debugger invoked"
self.scope = {}
self.scope['task'] = strategy_module.curr_task
self.scope['vars'] = strategy_module.curr_task_vars
self.scope['host'] = strategy_module.curr_host
self.scope['result'] = results[0]._result
self.scope['results'] = results # for debug of this debugger
self.next_action = next_action
def cmdloop(self):
try:
cmd.Cmd.cmdloop(self)
except KeyboardInterrupt:
pass
def do_EOF(self, args):
return self.do_quit(args)
def do_quit(self, args):
display.display('aborted')
self.next_action.result = NextAction.EXIT
return True
do_q = do_quit
def do_continue(self, args):
self.next_action.result = NextAction.CONTINUE
return True
do_c = do_continue
def do_redo(self, args):
self.next_action.result = NextAction.REDO
return True
do_r = do_redo
def evaluate(self, args):
try:
return eval(args, globals(), self.scope)
except:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
def do_p(self, args):
try:
result = self.evaluate(args)
display.display(pprint.pformat(result))
except:
pass
def execute(self, args):
try:
code = compile(args + '\n', '<stdin>', 'single')
exec(code, globals(), self.scope)
except:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
    def default(self, line):
        try:
            self.execute(line)
        except:
            pass
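    # Illustrative session (assumed transcript, shown when a task failure
    # drops into this debugger):
    #
    #   (debug) p task.args              # inspect the failed task's arguments
    #   (debug) task.args['cmd'] = 'echo fixed'
    #   (debug) r                        # redo the task with the patched args
    #   (debug) c                        # or continue past the failure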
| gpl-3.0 |
castedo/celauth | docs/conf.py | 1 | 8133 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
'sphinx.ext.graphviz',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'celauth'
copyright = u'2014, Castedo Ellerman LLC'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.3'
# The full version, including alpha/beta/rc tags.
release = '1.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'celauthdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'celauth.tex', u'celauth Documentation',
u'E. Castedo Ellerman', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'celauth', u'celauth Documentation',
[u'E. Castedo Ellerman'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'celauth', u'celauth Documentation',
u'E. Castedo Ellerman', 'celauth', 'celauth',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| mit |
OpenLocalMap/OpenLocalMap | Python Version/getColumnsClass.py | 1 | 4010 | # OpenLocalMap OpenSource web mapping for local government
# Copyright (C) <2014> <Ben Calnan>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import DBconn
import json
class getColumnsClass:
## live_DB;
totalArray = []
## query
## columnString
def __init__(self):
self.live_DB = DBconn.DBconn("Meta")
    def getPopUpData(self, layerlist):
layerArray = json.loads(layerlist)
for layer in layerArray:
Layername = layer[0]
RecordPK = layer[1]
            #GET ORACLE NAME, FRIENDLYNAME DATACOLUMN NAMES and FRIENDLY NAMES for Layer
            # NOTE: metaschema and metatable are assumed to be defined elsewhere
            # (e.g. module-level configuration); they are not set in this class.
            self.query = "select * from " + metaschema + "." + metatable + " where LAYERNAME = '" + Layername + "'"
self.live_DB.setStid(self.query)
LayerStuff = self.live_DB.getArray()
FriendlyLayer = LayerStuff[0]['FRIENDLYNAME']
Workspace = LayerStuff[0]['WORKSPACE']
Column1 = LayerStuff[0]['DATA1']
Column2 = LayerStuff[0]['DATA2']
Column3 = LayerStuff[0]['DATA3']
Column4 = LayerStuff[0]['DATA4']
Column5 = LayerStuff[0]['DATA5']
Friendly1 = LayerStuff[0]['DATAFRIENDLY1']
Friendly2= LayerStuff[0]['DATAFRIENDLY2']
Friendly3 = LayerStuff[0]['DATAFRIENDLY3']
Friendly4 = LayerStuff[0]['DATAFRIENDLY4']
Friendly5 = LayerStuff[0]['DATAFRIENDLY5']
#BUILD UP COLUMN LIST
if Column1 is not None:
self.columnString = Column1
if LayerStuff[0]['DATA2'] is not None:
self.columnString = self.columnString + ", " + LayerStuff[0]['DATA2']
if LayerStuff[0]['DATA3'] is not None:
self.columnString = self.columnString + ", " + LayerStuff[0]['DATA3']
if LayerStuff[0]['DATA4'] is not None:
self.columnString = self.columnString + ", " + LayerStuff[0]['DATA4']
if LayerStuff[0]['DATA5'] is not None:
self.columnString = self.columnString + ", " + LayerStuff[0]['DATA5']
            #SEARCH ORACLE TABLE for record values using ORACLE NAME AND DATACOLUMN NAMES
#HAVE TO USE MI_PRINX AS STANDARD PK FOR NOW. COULD STORE IN LAYER.
self.query = "select " + self.columnString + " from " + LayerStuff[0]['ORACLENAME'] + " where MI_PRINX = '" + RecordPK + "'"
self.live_DB.setStid(self.query)
RecordStuff = self.live_DB.getArray()
            FinalArray = {}  # dict keyed by friendly column names
FinalArray["LayerName"] = FriendlyLayer
FinalArray["GeoserverLayer"] = Workspace + ":" + Layername
if RecordStuff[0][Column1] is not None:
FinalArray[Friendly1] = RecordStuff[0][Column1]
if RecordStuff[0][Column2] is not None:
FinalArray[Friendly2] = RecordStuff[0][Column2]
if RecordStuff[0][Column3] is not None:
FinalArray[Friendly3] = RecordStuff[0][Column3]
if RecordStuff[0][Column4] is not None:
FinalArray[Friendly4] = RecordStuff[0][Column4]
if RecordStuff[0][Column5] is not None:
FinalArray[Friendly5] = RecordStuff[0][Column5]
#ADD ALL TO ARRAY
self.totalArray.append(FinalArray)
#ENCODE TO JSON
return json.dumps(self.totalArray)
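    # Minimal usage sketch (illustrative; the layer names and record PKs are
    # made up, and module-level metaschema/metatable config is assumed):
    #
    #   cols = getColumnsClass()
    #   popups = cols.getPopUpData(json.dumps([["Roads", "101"], ["Parks", "7"]]))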
| gpl-3.0 |
pierce403/EmpirePanel | lib/modules/situational_awareness/network/powerview/find_localadmin_access.py | 1 | 4198 | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Find-LocalAdminAccess',
'Author': ['@harmj0y'],
'Description': ('Finds machines on the local domain where the current user has '
'local administrator access. Part of PowerView.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'MinPSVersion' : '2',
'Comments': [
'https://github.com/PowerShellMafia/PowerSploit/blob/dev/Recon/'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'ComputerName' : {
'Description' : 'Hosts to enumerate, comma separated.',
'Required' : False,
'Value' : ''
},
'ComputerFilter' : {
'Description' : 'Host filter name to query AD for, wildcards accepted.',
'Required' : False,
'Value' : ''
},
'NoPing' : {
'Description' : "Don't ping each host to ensure it's up before enumerating.",
'Required' : False,
'Value' : ''
},
'Delay' : {
'Description' : 'Delay between enumerating hosts, defaults to 0.',
'Required' : False,
'Value' : ''
},
'Domain' : {
'Description' : 'The domain to use for the query, defaults to the current domain.',
'Required' : False,
'Value' : ''
},
'DomainController' : {
'Description' : 'Domain controller to reflect LDAP queries through.',
'Required' : False,
'Value' : ''
},
'Threads' : {
'Description' : 'The maximum concurrent threads to execute.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
moduleName = self.info["Name"]
# read in the common powerview.ps1 module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
# get just the code needed for the specified function
script = helpers.generate_dynamic_powershell_script(moduleCode, moduleName)
script += moduleName + " "
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
script += ' | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
return script
| bsd-3-clause |
massimo-nocentini/recurrences-unfolding | src/destructuring.py | 2 | 2564 |
from sympy import Wild, Indexed
from contextlib import contextmanager
class DestructuringError(ValueError):
'''
    Represent an error raised when a given term cannot be destructured.
    At present, we provide neither meaningful error messages nor objects
    related to the context in which this exception was raised; moreover, we
    do not distinguish the operator of the term being destructured (Add, Mul, ...).
'''
pass
# kept only to preserve the existing API; delete it once the refactoring is finished.
# A better name could be: "destructuring_monomial_with_coeff_subscripts"
@contextmanager
def bind_Mul_indexed(term, indexed, forbidden_terms=[]):
'''
Destructure `term` against pattern `coeff * f[i j ...]`, binding `coeff`, `i` and `j ...`.
I attempt to destructure the given term respect the `Mul` operator, aiming to isolate
term `indexed`, which should be an instance of `Indexed` class, from a coefficient `coeff`,
which collect everything but `indexed` and, optionally, objects appearing in `forbidden_terms`.
If such destructuring fails, then I raise `DestructuringError`.
Examples
========
>>> from sympy import *
Main track, everything is good:
>>> f, n, k, j = IndexedBase('f'), *symbols('n k j')
>>> term = 3 * f[n,k,j]
>>> with bind_Mul_indexed(term, f) as (coeff, subscripts):
... print('{} * {}'.format(coeff, subscripts))
3 * [n, k, j]
Failure, not a vanilla product:
>>> term = 3 * f[n] + 1
>>> try:
... with bind_Mul_indexed(term, f) as (coeff, subscripts):
... print('{} * {}'.format(coeff, subscripts))
... except DestructuringError:
... print('something else')
something else
Failure, `f` not indexed at all:
>>> term = 3 * f
>>> try:
... with bind_Mul_indexed(term, f) as (coeff, subscripts):
... print('{} * {}'.format(coeff, subscripts))
... except DestructuringError:
... print('something else')
something else
'''
coeff_w, ind_w = Wild('coeff', exclude=[indexed] + forbidden_terms), Wild('ind')
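    # `exclude` keeps the coefficient wildcard from absorbing the indexed term
    # itself (or any forbidden term), so `coeff * indexed[...]` splits cleanly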
matched = term.match(coeff_w * ind_w)
# if no indexing applied then `isinstance(matched[ind_w], IndexedBase)` holds
if (matched
and ind_w in matched
and coeff_w in matched
and isinstance(matched[ind_w], Indexed)):
_, *subscripts = matched[ind_w].args
yield matched[coeff_w], subscripts # do not splice subscripts, give them packed
else:
raise DestructuringError()
| apache-2.0 |
anuruddhal/stratos | components/org.apache.stratos.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/modules/artifactmgt/repository.py | 19 | 1429 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class Repository:
"""
Holds repository information to be used in artifact management
"""
def __init__(self, repo_url, repo_username, repo_password, repo_path, tenant_id, commit_enabled):
self.repo_url = repo_url
""" :type : str """
self.repo_username = repo_username
""" :type : str """
self.repo_password = repo_password
""" :type : str """
self.repo_path = repo_path
""" :type : str """
self.tenant_id = tenant_id
""" :type : int """
""" :type : bool """
self.commit_enabled = commit_enabled
""" :type : bool """ | apache-2.0 |
flotre/sickbeard-vfvo | lib/subliminal/services/podnapisi.py | 54 | 4909 | # -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..exceptions import ServiceError, DownloadFailedError
from ..language import language_set, Language
from ..subtitles import get_subtitle_path, ResultSubtitle
from ..utils import to_unicode
from ..videos import Episode, Movie
from hashlib import md5, sha256
import logging
import xmlrpclib
logger = logging.getLogger("subliminal")
class Podnapisi(ServiceBase):
server_url = 'http://ssp.podnapisi.net:8000'
site_url = 'http://www.podnapisi.net'
api_based = True
    languages = language_set(['ar', 'be', 'bg', 'bs', 'ca', 'cs', 'da', 'de', 'el', 'en',
'es', 'et', 'fa', 'fi', 'fr', 'ga', 'he', 'hi', 'hr', 'hu', 'id',
'is', 'it', 'ja', 'ko', 'lt', 'lv', 'mk', 'ms', 'nl', 'nn', 'pl',
'pt', 'ro', 'ru', 'sk', 'sl', 'sq', 'sr', 'sv', 'th', 'tr', 'uk',
'vi', 'zh', 'es-ar', 'pb'])
language_map = {'jp': Language('jpn'), Language('jpn'): 'jp',
'gr': Language('gre'), Language('gre'): 'gr',
# 'pb': Language('por-BR'), Language('por-BR'): 'pb',
'ag': Language('spa-AR'), Language('spa-AR'): 'ag',
'cyr': Language('srp')}
videos = [Episode, Movie]
require_video = True
def __init__(self, config=None):
super(Podnapisi, self).__init__(config)
self.server = xmlrpclib.ServerProxy(self.server_url)
self.token = None
def init(self):
super(Podnapisi, self).init()
result = self.server.initiate(self.user_agent)
if result['status'] != 200:
raise ServiceError('Initiate failed')
username = 'python_subliminal'
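        # the password is derived from a hard-coded shared secret and the
        # server-supplied nonce: sha256(md5(secret) + nonce)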
password = sha256(md5('XWFXQ6gE5Oe12rv4qxXX').hexdigest() + result['nonce']).hexdigest()
self.token = result['session']
result = self.server.authenticate(self.token, username, password)
if result['status'] != 200:
raise ServiceError('Authenticate failed')
def terminate(self):
super(Podnapisi, self).terminate()
def query(self, filepath, languages, moviehash):
results = self.server.search(self.token, [moviehash])
if results['status'] != 200:
logger.error('Search failed with error code %d' % results['status'])
return []
if not results['results'] or not results['results'][moviehash]['subtitles']:
logger.debug(u'Could not find subtitles for %r with token %s' % (moviehash, self.token))
return []
subtitles = []
for result in results['results'][moviehash]['subtitles']:
language = self.get_language(result['lang'])
if language not in languages:
continue
path = get_subtitle_path(filepath, language, self.config.multi)
subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), result['id'],
release=to_unicode(result['release']), confidence=result['weight'])
subtitles.append(subtitle)
if not subtitles:
return []
# Convert weight to confidence
max_weight = float(max([s.confidence for s in subtitles]))
min_weight = float(min([s.confidence for s in subtitles]))
for subtitle in subtitles:
            if max_weight == min_weight:
                # all weights are equal (including the single-result case);
                # avoid a division by zero in the normalization below
                subtitle.confidence = 1.0
            else:
                subtitle.confidence = (subtitle.confidence - min_weight) / (max_weight - min_weight)
return subtitles
def list_checked(self, video, languages):
results = self.query(video.path, languages, video.hashes['OpenSubtitles'])
return results
def download(self, subtitle):
results = self.server.download(self.token, [subtitle.link])
if results['status'] != 200:
raise DownloadFailedError()
subtitle.link = 'http://www.podnapisi.net/static/podnapisi/' + results['names'][0]['filename']
self.download_file(subtitle.link, subtitle.path)
return subtitle
Service = Podnapisi
| gpl-3.0 |
yining0417/zookeeper | build/zookeeper-3.4.6/contrib/zkpython/src/test/create_test.py | 164 | 4170 | #!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zookeeper, zktestbase, unittest, threading
ZOO_OPEN_ACL_UNSAFE = {"perms":0x1f, "scheme":"world", "id" :"anyone"}
class CreationTest(zktestbase.TestBase):
"""Test whether we can create znodes"""
# to do: startup and teardown via scripts?
def setUp(self):
zktestbase.TestBase.setUp(self)
try:
zookeeper.delete(self.handle, "/zk-python-createtest")
zookeeper.delete(self.handle, "/zk-python-acreatetest")
except:
pass
def test_sync_create(self):
self.assertEqual(self.connected, True)
ret = zookeeper.create(self.handle, "/zk-python-createtest", "nodecontents", [ZOO_OPEN_ACL_UNSAFE], zookeeper.EPHEMERAL)
self.assertEqual(ret, "/zk-python-createtest")
self.assertRaises(zookeeper.NoChildrenForEphemeralsException,
zookeeper.create,
self.handle,
"/zk-python-createtest/invalid-child",
"",
[ZOO_OPEN_ACL_UNSAFE],
zookeeper.EPHEMERAL)
def test_sync_create_existing(self):
self.assertEqual(self.connected, True)
ret = zookeeper.create(self.handle, "/zk-python-createtest-existing", "nodecontents", [ZOO_OPEN_ACL_UNSAFE], zookeeper.EPHEMERAL)
self.assertEqual(ret, "/zk-python-createtest-existing")
self.assertRaises(zookeeper.NodeExistsException,
zookeeper.create,
self.handle,
"/zk-python-createtest-existing",
"nodecontents",
[ZOO_OPEN_ACL_UNSAFE],
zookeeper.EPHEMERAL)
def test_exception_paths(self):
"""
        Make sure common exceptions due to API misuse are correctly propagated
"""
self.assertRaises(zookeeper.BadArgumentsException,
zookeeper.create,
self.handle,
"/zk-python-badargs-test",
"",
[ZOO_OPEN_ACL_UNSAFE],
-1)
self.assertRaises(zookeeper.InvalidACLException,
zookeeper.create,
self.handle,
"/zk-python-invalidacl-test",
"",
ZOO_OPEN_ACL_UNSAFE) # Error - not a list
def test_async_create(self):
self.cv = threading.Condition()
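        # the condition variable lets the test block until the asynchronous
        # completion callback fires (or the 15 second wait below times out)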
def callback(handle, rc, value):
self.cv.acquire()
self.callback_flag = True
self.rc = rc
self.cv.notify()
self.cv.release()
self.assertEqual(self.connected, True, "Not connected!")
self.cv.acquire()
ret = zookeeper.acreate(self.handle, "/zk-python-acreatetest", "nodecontents",
[ZOO_OPEN_ACL_UNSAFE], zookeeper.EPHEMERAL,
callback )
self.assertEqual(ret, zookeeper.OK, "acreate failed")
while not self.callback_flag:
self.cv.wait(15)
self.cv.release()
self.assertEqual(self.callback_flag, True, "acreate timed out")
self.assertEqual(self.rc, zookeeper.OK)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
MWisBest/android_kernel_amazon_bowser-common | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
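# autodict() gives arbitrarily nested dictionaries, so counts accumulate as
# syscalls[comm][pid][id][ret] without creating the intermediate levels first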
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
cchurch/ansible | lib/ansible/module_utils/rax.py | 38 | 12105 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by
# Ansible still belong to the author of the module, and may assign their own
# license to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import re
from uuid import UUID
from ansible.module_utils.six import text_type, binary_type
FINAL_STATUSES = ('ACTIVE', 'ERROR')
VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use',
'error', 'error_deleting')
CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN',
'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN']
CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS',
'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP',
'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']
NON_CALLABLES = (text_type, binary_type, bool, dict, int, list, type(None))
PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"
def rax_slugify(value):
"""Prepend a key with rax_ and normalize the key name"""
return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
def rax_clb_node_to_dict(obj):
"""Function to convert a CLB Node object to a dict"""
if not obj:
return {}
node = obj.to_dict()
node['id'] = obj.id
node['weight'] = obj.weight
return node
def rax_to_dict(obj, obj_type='standard'):
"""Generic function to convert a pyrax object to a dict
obj_type values:
standard
clb
server
"""
instance = {}
for key in dir(obj):
value = getattr(obj, key)
if obj_type == 'clb' and key == 'nodes':
instance[key] = []
for node in value:
instance[key].append(rax_clb_node_to_dict(node))
elif (isinstance(value, list) and len(value) > 0 and
not isinstance(value[0], NON_CALLABLES)):
instance[key] = []
for item in value:
instance[key].append(rax_to_dict(item))
elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
if obj_type == 'server':
if key == 'image':
if not value:
instance['rax_boot_source'] = 'volume'
else:
instance['rax_boot_source'] = 'local'
key = rax_slugify(key)
instance[key] = value
if obj_type == 'server':
for attr in ['id', 'accessIPv4', 'name', 'status']:
instance[attr] = instance.get(rax_slugify(attr))
return instance
def rax_find_bootable_volume(module, rax_module, server, exit=True):
"""Find a servers bootable volume"""
cs = rax_module.cloudservers
cbs = rax_module.cloud_blockstorage
server_id = rax_module.utils.get_id(server)
volumes = cs.volumes.get_server_volumes(server_id)
bootable_volumes = []
for volume in volumes:
vol = cbs.get(volume)
if module.boolean(vol.bootable):
bootable_volumes.append(vol)
if not bootable_volumes:
if exit:
module.fail_json(msg='No bootable volumes could be found for '
'server %s' % server_id)
else:
return False
elif len(bootable_volumes) > 1:
if exit:
module.fail_json(msg='Multiple bootable volumes found for server '
'%s' % server_id)
else:
return False
return bootable_volumes[0]
def rax_find_image(module, rax_module, image, exit=True):
"""Find a server image by ID or Name"""
cs = rax_module.cloudservers
try:
UUID(image)
except ValueError:
try:
image = cs.images.find(human_id=image)
        except (cs.exceptions.NotFound,
cs.exceptions.NoUniqueMatch):
try:
image = cs.images.find(name=image)
except (cs.exceptions.NotFound,
cs.exceptions.NoUniqueMatch):
if exit:
module.fail_json(msg='No matching image found (%s)' %
image)
else:
return False
return rax_module.utils.get_id(image)
def rax_find_volume(module, rax_module, name):
"""Find a Block storage volume by ID or name"""
cbs = rax_module.cloud_blockstorage
try:
UUID(name)
volume = cbs.get(name)
except ValueError:
try:
volume = cbs.find(name=name)
except rax_module.exc.NotFound:
volume = None
except Exception as e:
module.fail_json(msg='%s' % e)
return volume
def rax_find_network(module, rax_module, network):
"""Find a cloud network by ID or name"""
cnw = rax_module.cloud_networks
try:
UUID(network)
except ValueError:
if network.lower() == 'public':
return cnw.get_server_networks(PUBLIC_NET_ID)
elif network.lower() == 'private':
return cnw.get_server_networks(SERVICE_NET_ID)
else:
try:
network_obj = cnw.find_network_by_label(network)
except (rax_module.exceptions.NetworkNotFound,
rax_module.exceptions.NetworkLabelNotUnique):
module.fail_json(msg='No matching network found (%s)' %
network)
else:
return cnw.get_server_networks(network_obj)
else:
return cnw.get_server_networks(network)
def rax_find_server(module, rax_module, server):
"""Find a Cloud Server by ID or name"""
cs = rax_module.cloudservers
try:
UUID(server)
server = cs.servers.get(server)
except ValueError:
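        # not a UUID, so search by name; the name filter is a regex, anchored
        # here to require an exact match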
servers = cs.servers.list(search_opts=dict(name='^%s$' % server))
if not servers:
module.fail_json(msg='No Server was matched by name, '
'try using the Server ID instead')
if len(servers) > 1:
module.fail_json(msg='Multiple servers matched by name, '
'try using the Server ID instead')
# We made it this far, grab the first and hopefully only server
# in the list
server = servers[0]
return server
def rax_find_loadbalancer(module, rax_module, loadbalancer):
"""Find a Cloud Load Balancer by ID or name"""
clb = rax_module.cloud_loadbalancers
try:
found = clb.get(loadbalancer)
except Exception:
found = []
for lb in clb.list():
if loadbalancer == lb.name:
found.append(lb)
if not found:
module.fail_json(msg='No loadbalancer was matched')
if len(found) > 1:
module.fail_json(msg='Multiple loadbalancers matched')
# We made it this far, grab the first and hopefully only item
# in the list
found = found[0]
return found
def rax_argument_spec():
"""Return standard base dictionary used for the argument_spec
argument in AnsibleModule
"""
return dict(
api_key=dict(type='str', aliases=['password'], no_log=True),
auth_endpoint=dict(type='str'),
credentials=dict(type='path', aliases=['creds_file']),
env=dict(type='str'),
identity_type=dict(type='str', default='rackspace'),
region=dict(type='str'),
tenant_id=dict(type='str'),
tenant_name=dict(type='str'),
username=dict(type='str'),
validate_certs=dict(type='bool', aliases=['verify_ssl']),
)
def rax_required_together():
"""Return the default list used for the required_together argument to
AnsibleModule"""
return [['api_key', 'username']]
def setup_rax_module(module, rax_module, region_required=True):
"""Set up pyrax in a standard way for all modules"""
rax_module.USER_AGENT = 'ansible/%s %s' % (module.ansible_version,
rax_module.USER_AGENT)
api_key = module.params.get('api_key')
auth_endpoint = module.params.get('auth_endpoint')
credentials = module.params.get('credentials')
env = module.params.get('env')
identity_type = module.params.get('identity_type')
region = module.params.get('region')
tenant_id = module.params.get('tenant_id')
tenant_name = module.params.get('tenant_name')
username = module.params.get('username')
verify_ssl = module.params.get('validate_certs')
if env is not None:
rax_module.set_environment(env)
rax_module.set_setting('identity_type', identity_type)
if verify_ssl is not None:
rax_module.set_setting('verify_ssl', verify_ssl)
if auth_endpoint is not None:
rax_module.set_setting('auth_endpoint', auth_endpoint)
if tenant_id is not None:
rax_module.set_setting('tenant_id', tenant_id)
if tenant_name is not None:
rax_module.set_setting('tenant_name', tenant_name)
try:
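        # credentials resolve roughly as: explicit module parameter, then
        # environment variable, then the pyrax keyring/settings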
username = username or os.environ.get('RAX_USERNAME')
if not username:
username = rax_module.get_setting('keyring_username')
if username:
api_key = 'USE_KEYRING'
if not api_key:
api_key = os.environ.get('RAX_API_KEY')
credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or
os.environ.get('RAX_CREDS_FILE'))
region = (region or os.environ.get('RAX_REGION') or
rax_module.get_setting('region'))
except KeyError as e:
module.fail_json(msg='Unable to load %s' % e.message)
try:
if api_key and username:
if api_key == 'USE_KEYRING':
rax_module.keyring_auth(username, region=region)
else:
rax_module.set_credentials(username, api_key=api_key,
region=region)
elif credentials:
credentials = os.path.expanduser(credentials)
rax_module.set_credential_file(credentials, region=region)
else:
raise Exception('No credentials supplied!')
except Exception as e:
if e.message:
msg = str(e.message)
else:
msg = repr(e)
module.fail_json(msg=msg)
if region_required and region not in rax_module.regions:
module.fail_json(msg='%s is not a valid region, must be one of: %s' %
(region, ','.join(rax_module.regions)))
return rax_module
| gpl-3.0 |
byakuinss/spark | examples/src/main/python/parquet_inputformat.py | 42 | 2386 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
from pyspark.sql import SparkSession
"""
Read data file users.parquet in local Spark distro:
$ cd $SPARK_HOME
$ export AVRO_PARQUET_JARS=/path/to/parquet-avro-1.5.0.jar
$ ./bin/spark-submit --driver-class-path /path/to/example/jar \\
--jars $AVRO_PARQUET_JARS \\
./examples/src/main/python/parquet_inputformat.py \\
examples/src/main/resources/users.parquet
<...lots of log output...>
{u'favorite_color': None, u'name': u'Alyssa', u'favorite_numbers': [3, 9, 15, 20]}
{u'favorite_color': u'red', u'name': u'Ben', u'favorite_numbers': []}
<...more log output...>
"""
if __name__ == "__main__":
if len(sys.argv) != 2:
print("""
Usage: parquet_inputformat.py <data_file>
Run with example jar:
./bin/spark-submit --driver-class-path /path/to/example/jar \\
/path/to/examples/parquet_inputformat.py <data_file>
Assumes you have Parquet data stored in <data_file>.
""", file=sys.stderr)
exit(-1)
path = sys.argv[1]
spark = SparkSession\
.builder\
.appName("ParquetInputFormat")\
.getOrCreate()
sc = spark.sparkContext
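    # read Parquet through the Hadoop InputFormat API: keys are Void, values
    # are Avro IndexedRecords that the converter class turns into Python dicts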
parquet_rdd = sc.newAPIHadoopFile(
path,
'org.apache.parquet.avro.AvroParquetInputFormat',
'java.lang.Void',
'org.apache.avro.generic.IndexedRecord',
valueConverter='org.apache.spark.examples.pythonconverters.IndexedRecordToJavaConverter')
output = parquet_rdd.map(lambda x: x[1]).collect()
for k in output:
print(k)
spark.stop()
| apache-2.0 |
mcgachey/edx-platform | lms/djangoapps/courseware/features/word_cloud.py | 94 | 1516 | # pylint: disable=missing-docstring
from lettuce import world, step
from common import i_am_registered_for_the_course, section_location, visit_scenario_item
@step('I view the word cloud and it has rendered')
def word_cloud_is_rendered(_step):
assert world.is_css_present('.word_cloud')
@step('the course has a Word Cloud component')
def view_word_cloud(_step):
coursenum = 'test_course'
i_am_registered_for_the_course(_step, coursenum)
add_word_cloud_to_course(coursenum)
visit_scenario_item('SECTION')
@step('I press the Save button')
def press_the_save_button(_step):
button_css = '.input_cloud_section input.save'
world.css_click(button_css)
@step('I see the empty result')
def see_empty_result(_step):
assert world.css_text('.your_words', 0) == ''
@step('I fill inputs')
def fill_inputs(_step):
input_css = '.input_cloud_section .input-cloud'
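    # the first input gets a unique word and the next three share another, so
    # the rendered cloud should report exactly two distinct words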
world.css_fill(input_css, 'text1', 0)
for index in range(1, 4):
world.css_fill('.input_cloud_section .input-cloud', 'text2', index)
@step('I see the result with words count')
def see_result(_step):
strong_css = '.your_words strong'
target_text = set([world.css_text(strong_css, i) for i in range(2)])
assert set(['text1', 'text2']) == target_text
def add_word_cloud_to_course(course):
category = 'word_cloud'
world.ItemFactory.create(parent_location=section_location(course),
category=category,
display_name='Word Cloud')
| agpl-3.0 |
vargax/ejemplos | python/imagine/vafus_pyVissim/listening_server.py | 1 | 1763 | __author__ = 'c.vargas124'
# ------------------------
# Imports
# ------------------------
import socket
import asyncore
import threading
import simulation
# ------------------------
# Constants
# ------------------------
MODULE_NAME = "Listening Server"
# ------------------------
# Classes
# ------------------------
class ServerThread(threading.Thread): # Support class to run the 'Server' in its own thread
def __init__(self, port):
threading.Thread.__init__(self)
self.server = Server(port)
def run(self):
asyncore.loop()
def stop(self):
self.server.stop()
self.join()
class Server(asyncore.dispatcher): # Class in charge of listening for incoming connections
def __init__(self, port):
asyncore.dispatcher.__init__(self)
self.host = socket.gethostname()
self.port = port
        self.clients = []  # list of the active client socket handlers
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((self.host, self.port))
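        # allow up to five pending connections in the accept queue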
self.listen(5)
print "["+MODULE_NAME+"] Listening on {h}:{p}".format(h=self.host, p=self.port)
def handle_accept(self):
pair = self.accept()
if pair is not None:
sock, addr = pair
print "["+MODULE_NAME+"] We got a connection from {a}".format(a=addr)
self.clients.append(simulation.SocketHandler(self, sock, addr))
def stop(self):
self.close()
if len(self.clients) != 0:
print "["+MODULE_NAME+"] Warning: There are "+str(len(self.clients))+" active sockets!"
for client in self.clients:
client.disconnect() | gpl-2.0 |
kawamon/hue | apps/jobsub/src/jobsub/tests.py | 2 | 9979 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import str
from builtins import range
import logging
import json
import time
from nose.tools import assert_true, assert_false, assert_equal, assert_raises
from django.urls import reverse
from nose.plugins.skip import SkipTest
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import grant_access, add_to_group
from desktop.models import Document
from liboozie.oozie_api_tests import OozieServerProvider
from oozie.models import Workflow, Node, Start, Kill, End, Link
from useradmin.models import User
LOG = logging.getLogger(__name__)
class TestJobsubWithHadoop(OozieServerProvider):
def setUp(self):
OozieServerProvider.setup_class()
self.cluster.fs.do_as_user('jobsub_test', self.cluster.fs.create_home_dir, '/user/jobsub_test')
self.cluster.fs.do_as_superuser(self.cluster.fs.chmod, '/user/jobsub_test', 0o777, True) # Hum?
self.client = make_logged_in_client(username='jobsub_test')
self.user = User.objects.get(username='jobsub_test')
# Ensure access to MR folder.
# Need to chmod because jobs are submitted as a
# different user than what was previously used.
for i in range(0, 10):
try:
self.cluster.fs.do_as_superuser(self.cluster.fs.chmod, '/tmp', 0o777, recursive=True)
break
except Exception as e:
# chmod failure likely do to async processing of resource deletion.
# If the directory has improper permissions, should fail later in the test case.
LOG.warning("Received the following exception while change mode attempt %d of /tmp: %s" % (i, str(e)))
time.sleep(1)
self.design = self.create_design()
def tearDown(self):
Workflow.objects.all().delete()
def create_design(self):
response = self.client.post(
reverse('jobsub:jobsub.views.new_design', kwargs={'node_type': 'mapreduce'}),
data={
'name': 'sleep_job',
'description': '',
'node_type': 'mapreduce',
'jar_path': '/user/hue/oozie/workspaces/lib/hadoop-examples.jar',
'prepares': '[]',
'files': '[]',
'archives': '[]',
'job_properties': ('[{\"name\":\"mapred.reduce.tasks\",\"value\":\"1\"},'
'{\"name\":\"mapred.mapper.class\",\"value\":\"org.apache.hadoop.examples.SleepJob\"},'
'{\"name\":\"mapred.reducer.class\",\"value\":\"org.apache.hadoop.examples.SleepJob\"},'
'{\"name\":\"mapred.mapoutput.key.class\",\"value\":\"org.apache.hadoop.io.IntWritable\"},'
'{\"name\":\"mapred.mapoutput.value.class\",\"value\":\"org.apache.hadoop.io.NullWritable\"},'
'{\"name\":\"mapred.output.format.class\",\"value\":\"org.apache.hadoop.mapred.lib.NullOutputFormat\"},'
'{\"name\":\"mapred.input.format.class\",\"value\":\"org.apache.hadoop.examples.SleepJob$SleepInputFormat\"},'
'{\"name\":\"mapred.partitioner.class\",\"value\":\"org.apache.hadoop.examples.SleepJob\"},'
'{\"name\":\"mapred.speculative.execution\",\"value\":\"false\"},'
'{\"name\":\"sleep.job.map.sleep.time\",\"value\":\"0\"},'
'{\"name\":\"sleep.job.reduce.sleep.time\",\"value\":\"${REDUCER_SLEEP_TIME}\"}]')
},
HTTP_X_REQUESTED_WITH='XMLHttpRequest'
)
assert_equal(response.status_code, 200)
return Workflow.objects.all()[0]
def test_new_design(self):
# Ensure the following:
# - creator is owner.
# - workflow name and description are the same as action name and description.
# - workflow has one action.
assert_false(self.design.managed)
assert_equal(4, Node.objects.filter(workflow=self.design).count())
assert_equal(1, Kill.objects.filter(workflow=self.design).count())
assert_equal(1, Start.objects.filter(workflow=self.design).count())
assert_equal(1, End.objects.filter(workflow=self.design).count())
assert_equal(4, Node.objects.filter(workflow=self.design).count())
assert_equal(3, Link.objects.filter(parent__workflow=self.design).count())
def test_save_design(self):
response = self.client.post(
reverse('jobsub:jobsub.views.save_design', kwargs={'design_id': self.design.id}),
data={
'name': 'mapreduce1',
'description': '',
'node_type': 'mapreduce',
'jar_path': '/user/hue/oozie/workspaces/lib/hadoop-examples.jar',
'prepares': '[]',
'files': '[{"name": "test", "dummy": ""}]',
'archives': '[]',
'job_properties': ('[{\"name\":\"mapred.reduce.tasks\",\"value\":\"1\"},'
'{\"name\":\"mapred.mapper.class\",\"value\":\"org.apache.hadoop.examples.SleepJob\"},'
'{\"name\":\"mapred.reducer.class\",\"value\":\"org.apache.hadoop.examples.SleepJob\"},'
'{\"name\":\"mapred.mapoutput.key.class\",\"value\":\"org.apache.hadoop.io.IntWritable\"},'
'{\"name\":\"mapred.mapoutput.value.class\",\"value\":\"org.apache.hadoop.io.NullWritable\"},'
'{\"name\":\"mapred.output.format.class\",\"value\":\"org.apache.hadoop.mapred.lib.NullOutputFormat\"},'
'{\"name\":\"mapred.input.format.class\",\"value\":\"org.apache.hadoop.examples.SleepJob$SleepInputFormat\"},'
'{\"name\":\"mapred.partitioner.class\",\"value\":\"org.apache.hadoop.examples.SleepJob\"},'
'{\"name\":\"mapred.speculative.execution\",\"value\":\"false\"},'
'{\"name\":\"sleep.job.map.sleep.time\",\"value\":\"0\"},'
'{\"name\":\"sleep.job.reduce.sleep.time\",\"value\":\"${REDUCER_SLEEP_TIME}\"}]')
},
HTTP_X_REQUESTED_WITH='XMLHttpRequest'
)
assert_equal(response.status_code, 200)
self.design = Workflow.objects.get(id=self.design.id)
assert_equal(self.design.start.get_child('to').get_full_node().files, '[{"dummy": "", "name": "test"}]')
def test_get_design(self):
response = self.client.get(reverse('jobsub:jobsub.views.get_design',
kwargs={'design_id': self.design.id}),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert_equal(response.status_code, 200)
client_note_me = make_logged_in_client(username='jobsub_test_note_me', is_superuser=False)
grant_access("jobsub_test_note_me", "jobsub_test_note_me", "jobsub")
add_to_group("jobsub_test_note_me")
response = client_note_me.get(reverse('jobsub:jobsub.views.get_design',
kwargs={'design_id': self.design.id}),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert_equal(response.status_code, 500)
data = json.loads(response.content)
assert_true('does not have the permissions required to access document' in data.get('message', ''), response.content)
def test_delete_design(self):
# Trash
n_available = Document.objects.available_docs(Workflow, self.user).count()
n_trashed = Document.objects.trashed_docs(Workflow, self.user).count()
response = self.client.post(reverse('jobsub:jobsub.views.delete_design',
kwargs={'design_id': self.design.id}),
follow=True,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert_equal(response.status_code, 200)
assert_equal(n_available - 1, Document.objects.available_docs(Workflow, self.user).count())
assert_equal(n_trashed + 1, Document.objects.trashed_docs(Workflow, self.user).count())
# Destroy
response = self.client.post(reverse('jobsub:jobsub.views.delete_design',
kwargs={'design_id': self.design.id}) + '?skip_trash',
follow=True,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert_equal(response.status_code, 200)
assert_equal(n_available - 1, Document.objects.available_docs(Workflow, self.user).count())
assert_equal(n_trashed, Document.objects.trashed_docs(Workflow, self.user).count())
def test_clone_design(self):
#@TODO@ Prakash fix this test
raise SkipTest
n_available = Document.objects.available_docs(Workflow, self.user).count()
response = self.client.post(reverse('jobsub:jobsub.views.clone_design',
kwargs={'design_id': self.design.id}),
follow=True,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert_equal(response.status_code, 200)
assert_equal(n_available + 1, Document.objects.available_docs(Workflow, self.user).count())
def test_restore_design(self):
n_available = Document.objects.available_docs(Workflow, self.user).count()
n_trashed = Document.objects.trashed_docs(Workflow, self.user).count()
response = self.client.post(reverse('jobsub:jobsub.views.delete_design',
kwargs={'design_id': self.design.id}),
follow=True,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert_equal(response.status_code, 200)
assert_equal(n_available - 1, Document.objects.available_docs(Workflow, self.user).count())
assert_equal(n_trashed + 1, Document.objects.trashed_docs(Workflow, self.user).count())
response = self.client.post(reverse('jobsub:jobsub.views.restore_design',
kwargs={'design_id': self.design.id}),
follow=True,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert_equal(response.status_code, 200)
assert_equal(n_available, Document.objects.available_docs(Workflow, self.user).count())
assert_equal(n_trashed, Document.objects.trashed_docs(Workflow, self.user).count())
| apache-2.0 |
8191/ansible | lib/ansible/inventory/dir.py | 20 | 4052 | # (c) 2013, Daniel Hokka Zakrisson <daniel@hozac.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
import os
import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.ini import InventoryParser
from ansible.inventory.script import InventoryScript
from ansible import utils
from ansible import errors
class InventoryDirectory(object):
''' Host inventory parser for ansible using a directory of inventories. '''
def __init__(self, filename=C.DEFAULT_HOST_LIST):
self.names = os.listdir(filename)
self.names.sort()
self.directory = filename
self.parsers = []
self.hosts = {}
self.groups = {}
for i in self.names:
if i.endswith("~") or i.endswith(".orig") or i.endswith(".bak"):
continue
if i.endswith(".ini"):
# configuration file for an inventory script
continue
if i.endswith(".retry"):
# this file is generated on a failed playbook and should only be
# used when run specifically
continue
# Skip hidden files
if i.startswith('.') and not i.startswith('./'):
continue
# These are things inside of an inventory basedir
if i in ("host_vars", "group_vars", "vars_plugins"):
continue
fullpath = os.path.join(self.directory, i)
if os.path.isdir(fullpath):
parser = InventoryDirectory(filename=fullpath)
elif utils.is_executable(fullpath):
parser = InventoryScript(filename=fullpath)
else:
parser = InventoryParser(filename=fullpath)
self.parsers.append(parser)
# This takes a lot of code because we can't directly use any of the objects, as they have to blend
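            # groups and hosts are unified by name across all inventory
            # sources; conflicting variables are overwritten by later parsers
            # (files are processed in sorted name order)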
for name, group in parser.groups.iteritems():
if name not in self.groups:
self.groups[name] = group
else:
# group is already there, copy variables
# note: depth numbers on duplicates may be bogus
for k, v in group.get_variables().iteritems():
self.groups[name].set_variable(k, v)
for host in group.get_hosts():
if host.name not in self.hosts:
self.hosts[host.name] = host
else:
# host is already there, copy variables
# note: depth numbers on duplicates may be bogus
for k, v in host.vars.iteritems():
self.hosts[host.name].set_variable(k, v)
self.groups[name].add_host(self.hosts[host.name])
# This needs to be a second loop to ensure all the parent groups exist
for name, group in parser.groups.iteritems():
for ancestor in group.get_ancestors():
self.groups[ancestor.name].add_child_group(self.groups[name])
def get_host_variables(self, host):
""" Gets additional host variables from all inventories """
vars = {}
for i in self.parsers:
vars.update(i.get_host_variables(host))
return vars
| gpl-3.0 |
ewdurbin/sentry | tests/sentry/web/frontend/groups/tests.py | 11 | 6547 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
from datetime import timedelta
from django.core.urlresolvers import reverse
from django.utils import timezone
from exam import fixture
from sentry.models import GroupSeen
from sentry.testutils import TestCase
class GroupDetailsTest(TestCase):
@fixture
def path(self):
return reverse('sentry-group', kwargs={
'organization_slug': self.organization.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
})
def test_simple(self):
self.login_as(self.user)
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/details.html')
assert resp.context['group'] == self.group
assert resp.context['project'] == self.project
assert resp.context['team'] == self.team
assert resp.context['organization'] == self.organization
# ensure we've marked the group as seen
assert GroupSeen.objects.filter(
group=self.group, user=self.user).exists()
class GroupListTest(TestCase):
@fixture
def path(self):
return reverse('sentry-stream', kwargs={
'organization_slug': self.organization.slug,
'project_id': self.project.slug,
})
def setUp(self):
super(GroupListTest, self).setUp()
later = timezone.now()
now = later - timedelta(hours=1)
past = now - timedelta(hours=1)
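        # group2 is older by first_seen but more recently active (last_seen)
        # and more frequent (times_seen), so the date/new/freq sort tests
        # below expect different orderings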
self.group1 = self.create_group(
project=self.project,
last_seen=now,
first_seen=now,
times_seen=5,
)
self.group2 = self.create_group(
project=self.project,
last_seen=later,
first_seen=past,
times_seen=50,
)
def test_does_render(self):
self.login_as(self.user)
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/group_list.html')
assert 'event_list' in resp.context
assert resp.context['project'] == self.project
assert resp.context['team'] == self.team
assert resp.context['organization'] == self.organization
def test_date_sort(self):
self.login_as(self.user)
resp = self.client.get(self.path + '?sort=date')
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/group_list.html')
assert list(resp.context['event_list']) == [self.group2, self.group1]
def test_new_sort(self):
self.login_as(self.user)
resp = self.client.get(self.path + '?sort=new')
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/group_list.html')
print self.group1.score, self.group2.score
assert list(resp.context['event_list']) == [self.group1, self.group2]
def test_freq_sort(self):
self.login_as(self.user)
resp = self.client.get(self.path + '?sort=freq')
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/group_list.html')
assert list(resp.context['event_list']) == [self.group2, self.group1]
class GroupEventListTest(TestCase):
@fixture
def path(self):
return reverse('sentry-group-events', kwargs={
'organization_slug': self.organization.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
})
def test_does_render(self):
event = self.create_event(
event_id='a' * 32, datetime=timezone.now() - timedelta(minutes=1))
event2 = self.create_event(
event_id='b' * 32, datetime=timezone.now())
self.login_as(self.user)
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/event_list.html')
assert resp.context['project'] == self.project
assert resp.context['team'] == self.team
assert resp.context['group'] == self.group
assert resp.context['organization'] == self.organization
event_list = resp.context['event_list']
assert len(event_list) == 2
assert event_list[0] == event2
assert event_list[1] == event
class GroupTagListTest(TestCase):
@fixture
def path(self):
return reverse('sentry-group-tags', kwargs={
'organization_slug': self.organization.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
})
def test_does_render(self):
self.login_as(self.user)
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/tag_list.html')
assert 'tag_list' in resp.context
assert resp.context['project'] == self.project
assert resp.context['team'] == self.team
assert resp.context['group'] == self.group
assert resp.context['organization'] == self.organization
class GroupEventDetailsTest(TestCase):
@fixture
def path(self):
return reverse('sentry-group-event', kwargs={
'organization_slug': self.organization.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
'event_id': self.event.id,
})
def test_does_render(self):
self.login_as(self.user)
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/details.html')
assert resp.context['project'] == self.project
assert resp.context['team'] == self.team
assert resp.context['group'] == self.group
assert resp.context['event'] == self.event
assert resp.context['organization'] == self.organization
class GroupEventJsonTest(TestCase):
@fixture
def path(self):
return reverse('sentry-group-event-json', kwargs={
'organization_slug': self.organization.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
'event_id_or_latest': self.event.id,
})
def test_does_render(self):
self.login_as(self.user)
resp = self.client.get(self.path)
assert resp.status_code == 200
assert resp['Content-Type'] == 'application/json'
data = json.loads(resp.content)
assert data['id'] == self.event.event_id
| bsd-3-clause |
alisidd/tensorflow | tensorflow/python/kernel_tests/random_ops_test.py | 19 | 11746 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.random_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class RandomNormalTest(test.TestCase):
def _Sampler(self, num, mu, sigma, dtype, use_gpu, seed=None):
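    # each call to the returned thunk builds a fresh graph and session, then
    # draws 10 batches of `num` samples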
def func():
with self.test_session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
rng = random_ops.random_normal(
[num], mean=mu, stddev=sigma, dtype=dtype, seed=seed)
ret = np.empty([10, num])
for i in xrange(10):
ret[i, :] = sess.run(rng)
return ret
return func
  # Asserts that different trials (1000 samples per trial) are unlikely
  # to see the same sequence of values. Will catch buggy
  # implementations which use the same random number seed.
def testDistinct(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True)
x = sampler()
y = sampler()
# Number of different samples.
count = (x == y).sum()
if count >= 10:
print("x = ", x)
print("y = ", y)
print("count = ", count)
self.assertTrue(count < 10)
# Checks that the CPU and GPU implementation returns the same results,
# given the same random seed
def testCPUGPUMatch(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
results = {}
for use_gpu in [False, True]:
sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
results[use_gpu] = sampler()
if dt == dtypes.float16:
self.assertAllClose(results[False], results[True], rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
def testSeed(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
self.assertAllEqual(sx(), sy())
def testNoCSE(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
shape = [2, 3, 4]
rnd1 = random_ops.random_normal(shape, 0.0, 1.0, dtypes.float32)
rnd2 = random_ops.random_normal(shape, 0.0, 1.0, dtypes.float32)
diff = rnd2 - rnd1
self.assertTrue(np.linalg.norm(diff.eval()) > 0.1)
class TruncatedNormalTest(test.TestCase):
def _Sampler(self, num, mu, sigma, dtype, use_gpu, seed=None):
def func():
with self.test_session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
rng = random_ops.truncated_normal(
[num], mean=mu, stddev=sigma, dtype=dtype, seed=seed)
ret = np.empty([10, num])
for i in xrange(10):
ret[i, :] = sess.run(rng)
return ret
return func
  # Asserts that different trials (1000 samples per trial) are unlikely
  # to see the same sequence of values. Will catch buggy
  # implementations which use the same random number seed.
def testDistinct(self):
# NOTE: TruncatedNormal on GPU is not supported.
if not test.is_gpu_available():
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=False)
x = sampler()
y = sampler()
# Number of different samples.
count = (x == y).sum()
if count >= 10:
print("x = ", x)
print("y = ", y)
print("count = ", count)
self.assertTrue(count < 10)
# Checks that the CPU and GPU implementation returns the same results,
# given the same random seed
def testCPUGPUMatch(self):
# Skip the test if there is no GPU.
if not test.is_gpu_available():
return
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
results = {}
for use_gpu in [False, True]:
# We need a particular larger number of samples to test multiple rounds
# on GPU
sampler = self._Sampler(
200000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
results[use_gpu] = sampler()
if dt == dtypes.float16:
self.assertAllClose(results[False], results[True], rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
def testSeed(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
self.assertAllEqual(sx(), sy())
# The effective standard deviation of truncated normal is 85% of the
# requested one.
def testStdDev(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
stddev = 3.0
sampler = self._Sampler(100000, 0.0, stddev, dt, use_gpu=True)
x = sampler()
print("std(x)", np.std(x), abs(np.std(x) / stddev - 0.85))
self.assertTrue(abs(np.std(x) / stddev - 0.85) < 0.04)
def testLargeShape(self):
with self.test_session(use_gpu=True):
v = variables.Variable(
array_ops.zeros(dtype=dtypes.float32, shape=[2**33, 1]))
n = random_ops.truncated_normal(v.shape)
self.assertEqual([8589934592, 1], n.shape.as_list())
def testNoCSE(self):
with self.test_session(use_gpu=True):
shape = [2, 3, 4]
rnd1 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32)
rnd2 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32)
diff = rnd2 - rnd1
self.assertTrue(np.linalg.norm(diff.eval()) > 0.1)
class RandomUniformTest(test.TestCase):
def _Sampler(self, num, minv, maxv, dtype, use_gpu, seed=None):
def func():
with self.test_session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
rng = random_ops.random_uniform(
[num], minval=minv, maxval=maxv, dtype=dtype, seed=seed)
ret = np.empty([10, num])
for i in xrange(10):
ret[i, :] = sess.run(rng)
return ret
return func
def testRange(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64:
sampler = self._Sampler(1000, minv=-2, maxv=8, dtype=dt, use_gpu=True)
x = sampler()
self.assertTrue(-2 <= np.min(x))
self.assertTrue(np.max(x) < 8)
  # Asserts that different trials (1000 samples per trial) are unlikely
  # to see the same sequence of values. Will catch buggy
  # implementations which use the same random number seed.
def testDistinct(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64:
maxv = 1.0 if dt.is_floating else 1 << 30
sampler = self._Sampler(1000, minv=0, maxv=maxv, dtype=dt, use_gpu=True)
x = sampler()
y = sampler()
count = (x == y).sum()
count_limit = 50 if dt == dtypes.float16 else 10
if count >= count_limit:
print("x = ", x)
print("y = ", y)
print("count = ", count)
self.assertTrue(count < count_limit)
# Check that uniform ints actually follow a uniform distribution.
def testUniformInts(self):
minv = -2
maxv = 15
n = 100000
p = 1 / (maxv - minv)
# The counts should follow an (n, p) binomial distribution.
mean = p * n
std = np.sqrt(n * p * (1 - p))
for dt in dtypes.int32, dtypes.int64:
# Use a fixed seed here to make the test deterministic.
# Without the fixed seed, the 5 * std bound will (very rarely) fail.
sampler = self._Sampler(
n // 10, minv=minv, maxv=maxv, dtype=dt, use_gpu=True, seed=17)
x = sampler().ravel()
self.assertEqual(x.shape, (n,))
counts, _ = np.histogram(x, bins=maxv - minv)
self.assertEqual(counts.shape, (maxv - minv,))
self.assertEqual(counts.sum(), n)
error = np.abs(counts - mean)
self.assertLess(error.max(), 5 * std)
# Checks that the CPU and GPU implementation returns the same results,
# given the same random seed
def testCPUGPUMatch(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64:
maxv = 1.0 if dt.is_floating else 17
results = {}
for use_gpu in False, True:
sampler = self._Sampler(
1000, minv=0, maxv=maxv, dtype=dt, use_gpu=use_gpu, seed=12345)
results[use_gpu] = sampler()
self.assertAllEqual(results[False], results[True])
def testSeed(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64:
for seed in [345, 2**100, -2**100]:
sx = self._Sampler(1000, 0, 17, dtype=dt, use_gpu=True, seed=seed)
sy = self._Sampler(1000, 0, 17, dtype=dt, use_gpu=True, seed=seed)
self.assertAllEqual(sx(), sy())
def testNoCSE(self):
shape = [2, 3, 4]
for dtype in dtypes.float16, dtypes.float32, dtypes.int32:
with self.test_session(use_gpu=True):
rnd1 = random_ops.random_uniform(shape, 0, 17, dtype=dtype)
rnd2 = random_ops.random_uniform(shape, 0, 17, dtype=dtype)
diff = (rnd2 - rnd1).eval()
self.assertTrue(np.linalg.norm(diff) > 0.1)
class RandomShapeTest(test.TestCase):
def testTruncatedNormal(self):
# Fully known shape.
rnd1 = random_ops.truncated_normal([1, 2, 3])
self.assertEqual([1, 2, 3], rnd1.get_shape())
# Partially known shape.
rnd2 = random_ops.truncated_normal(
array_ops.placeholder(
dtypes.int32, shape=(3,)))
self.assertEqual([None, None, None], rnd2.get_shape().as_list())
# Unknown shape.
rnd3 = random_ops.truncated_normal(array_ops.placeholder(dtypes.int32))
self.assertIs(None, rnd3.get_shape().ndims)
def testRandomNormal(self):
# Fully known shape.
rnd1 = random_ops.random_normal([1, 2, 3])
self.assertEqual([1, 2, 3], rnd1.get_shape())
# Partially known shape.
rnd2 = random_ops.random_normal(
array_ops.placeholder(
dtypes.int32, shape=(3,)))
self.assertEqual([None, None, None], rnd2.get_shape().as_list())
# Unknown shape.
rnd3 = random_ops.random_normal(array_ops.placeholder(dtypes.int32))
self.assertIs(None, rnd3.get_shape().ndims)
def testRandomUniform(self):
# Fully known shape.
rnd1 = random_ops.random_uniform([1, 2, 3])
self.assertEqual([1, 2, 3], rnd1.get_shape())
# Partially known shape.
rnd2 = random_ops.random_uniform(
array_ops.placeholder(
dtypes.int32, shape=(3,)))
self.assertEqual([None, None, None], rnd2.get_shape().as_list())
# Unknown shape.
rnd3 = random_ops.random_uniform(array_ops.placeholder(dtypes.int32))
self.assertIs(None, rnd3.get_shape().ndims)
if __name__ == "__main__":
test.main()
| apache-2.0 |
saydulk/django | tests/model_regress/models.py | 281 | 2293 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
CHOICES = (
(1, 'first'),
(2, 'second'),
)
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=100, default='Default headline')
pub_date = models.DateTimeField()
status = models.IntegerField(blank=True, null=True, choices=CHOICES)
misc_data = models.CharField(max_length=100, blank=True)
article_text = models.TextField()
class Meta:
ordering = ('pub_date', 'headline')
# A utf-8 verbose name (Ångström's Articles) to test that such names are valid.
verbose_name = "\xc3\x85ngstr\xc3\xb6m's Articles"
def __str__(self):
return self.headline
class Movie(models.Model):
# Test models with non-default primary keys / AutoFields #5218
movie_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=60)
class Party(models.Model):
when = models.DateField(null=True)
class Event(models.Model):
when = models.DateTimeField()
@python_2_unicode_compatible
class Department(models.Model):
id = models.PositiveIntegerField(primary_key=True)
name = models.CharField(max_length=200)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Worker(models.Model):
department = models.ForeignKey(Department, models.CASCADE)
name = models.CharField(max_length=200)
def __str__(self):
return self.name
@python_2_unicode_compatible
class BrokenUnicodeMethod(models.Model):
name = models.CharField(max_length=7)
def __str__(self):
# Intentionally broken (invalid start byte in byte string).
return b'Name\xff: %s'.decode() % self.name
class NonAutoPK(models.Model):
name = models.CharField(max_length=10, primary_key=True)
# Chained foreign keys with to_field produce incorrect query #18432
class Model1(models.Model):
pkey = models.IntegerField(unique=True, db_index=True)
class Model2(models.Model):
model1 = models.ForeignKey(Model1, models.CASCADE, unique=True, to_field='pkey')
class Model3(models.Model):
model2 = models.ForeignKey(Model2, models.CASCADE, unique=True, to_field='model1')
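# A hypothetical usage sketch (not part of this models module) of the
# chained to_field traversal that #18432 covers; the generated SQL should
# join on the to_field columns (pkey / model1) rather than the implicit
# primary keys:
#
#     Model3.objects.filter(model2__model1__pkey=1)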
| bsd-3-clause |
patrickstocklin/chattR | lib/python2.7/site-packages/django/contrib/admin/migrations/0001_initial.py | 142 | 1657 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.admin.models
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '__first__'),
]
operations = [
migrations.CreateModel(
name='LogEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('action_time', models.DateTimeField(auto_now=True, verbose_name='action time')),
('object_id', models.TextField(null=True, verbose_name='object id', blank=True)),
('object_repr', models.CharField(max_length=200, verbose_name='object repr')),
('action_flag', models.PositiveSmallIntegerField(verbose_name='action flag')),
('change_message', models.TextField(verbose_name='change message', blank=True)),
('content_type', models.ForeignKey(to_field='id', blank=True, to='contenttypes.ContentType', null=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-action_time',),
'db_table': 'django_admin_log',
'verbose_name': 'log entry',
'verbose_name_plural': 'log entries',
},
bases=(models.Model,),
managers=[
('objects', django.contrib.admin.models.LogEntryManager()),
],
),
]
| gpl-2.0 |
noroot/zulip | puppet/zulip_internal/files/postgresql/pg_backup_and_purge.py | 114 | 1575 | #!/usr/bin/python
import subprocess
import sys
import logging
import dateutil.parser
import pytz
from datetime import datetime, timedelta
logging.basicConfig(format="%(asctime)s %(levelname)s: %(message)s")
logger = logging.getLogger(__name__)
def run(args, dry_run=False):
if dry_run:
print "Would have run: " + " ".join(args)
return ""
p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode:
logger.error("Could not invoke %s\nstdout: %s\nstderror: %s"
% (args[0], stdout, stderr))
sys.exit(1)
return stdout
# Only run if we're the master
if run(['psql', '-t', '-c', 'select pg_is_in_recovery()']).strip() != 'f':
sys.exit(0)
run(['env-wal-e', 'backup-push', '/var/lib/postgresql/9.1/main'])
now = datetime.now(tz=pytz.utc)
with open('/var/lib/nagios_state/last_postgres_backup', 'w') as f:
f.write(now.isoformat())
f.write("\n")
backups = {}
lines = run(['env-wal-e', 'backup-list']).split("\n")
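# Each data line of the `backup-list` output is assumed to hold four
# whitespace-separated fields: the backup name, a timestamp that
# dateutil can parse, and two trailing fields this script discards.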
for line in lines[1:]:
if line:
backup_name, date, _, _ = line.split()
backups[dateutil.parser.parse(date)] = backup_name
one_month_ago = now - timedelta(days=30)
for date in sorted(backups.keys(), reverse=True):
if date < one_month_ago:
run(['env-wal-e', 'delete', '--confirm', 'before', backups[date]])
# Because we're going from most recent to least recent, we
# only have to do one delete operation
break
| apache-2.0 |
agilgur5/LTLMoP | src/lib/handlers/Hexapod/HexapodSensorHandler.py | 7 | 3498 | """
================================================================================
HexapodSensorHandler.py - The Hexapod's Sensor Handler
================================================================================
"""
import time
import logging
import globalConfig
import lib.handlers.handlerTemplates as handlerTemplates
class HexapodSensorHandler(handlerTemplates.SensorHandler):
def __init__(self, executor, shared_data):
"""
Sensor handler for hexapod
"""
# get serial port of hexapod
try:
self.hexapodSer = shared_data["hexapodSer"]
except KeyError:
logging.exception("Couldn't connect to Hexapod")
exit(-1)
def _sendCommand(self, cmd):
"""
Send locomotion command ``cmd`` to the robot
"""
self.hexapodSer.write(cmd)
time.sleep(0.1)
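# The Arduino firmware is assumed to frame each reply with the sync
# bytes 0x22 0x23; scan byte by byte until that pair arrives, after
# which the two-byte sensor payload can be read off the serial port.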
byte0 = '\xFF'
byte1 = byte0
while not (byte0 == '\x22' and byte1 == '\x23'): #loop until the two consecutive sync bytes are seen
byte0 = byte1 #shift the previous byte along
byte1 = self.hexapodSer.read() #and read the next candidate byte
if not byte1: #a timed-out read means sending has failed
logging.debug('failed')
return -1
reading = self.hexapodSer.read(2) #read analog value from force sensors
return reading
def _checkLeft(self):
"""
check left force sensor
"""
orientation = False
while not orientation:
y = self._sendCommand('k') #send command to Arduino to make reading from force sensor
if y == -1: #if no reading, continue to try reading
continue
if y[0] == 'l': #if char 'l' is retrieved, next byte retrieved is reading from left force sensor
orientation = True
leftSensor = y[1]
return ord(leftSensor) #return integer value of left sensor
def _checkRight(self):
"""
check right force sensor
"""
orientation = False
while not orientation:
y = self._sendCommand('l') #send command to Arduino to make reading from force sensor
if y == -1: #if no reading, continue to try reading
continue
if y[0] == 'r': #if char 'r' is retrieved, next byte retrieved is reading from right force sensor
orientation = True
rightSensor = y[1]
return ord(rightSensor) #return integer value of right sensor
def isObjectDetect(self, thresh, initial = False):
"""
Uses the force sensor on the hexapod to detect objects in gripper
thresh (int): The threshold for amount of force (default=50)
"""
if initial:
return False
else:
# read in values from force sensor and compare to assigned threshold
# if any side of the sensor reads value larger then the threshold
# we consider there is an item in the gripper
if (self._checkRight() > thresh) or (self._checkLeft() > thresh):
return True
else:
return False
| gpl-3.0 |