| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
asridharan/dcos | gen/__init__.py | 6 | 26674 | """Helps build config packages for installer-specific templates.
Takes in a bunch of configuration files, as well as functions to calculate the values/strings which
need to be put into the configuration.
Operates strictly:
- All parameters are strings. All things calculated / derived are strings.
- Every given parameter must map to some real config option.
- Every config option must be given only once.
- Defaults can be overridden. If no default is given, the parameter must be specified
- empty string is not the same as "not specified"
"""
import importlib.machinery
import json
import logging as log
import os
import os.path
import pprint
import textwrap
from copy import copy, deepcopy
from tempfile import TemporaryDirectory
from typing import List
import yaml
import gen.calc
import gen.internals
import gen.template
from gen.exceptions import ValidationError
from pkgpanda import PackageId
from pkgpanda.util import hash_checkout, json_prettyprint, load_string, make_tar, split_by_token, write_json, write_yaml
# List of all roles all templates should have.
role_names = {"master", "slave", "slave_public"}
role_template = '/etc/mesosphere/roles/{}'
CLOUDCONFIG_KEYS = {'coreos', 'runcmd', 'apt_sources', 'root', 'mounts', 'disk_setup', 'fs_setup', 'bootcmd'}
PACKAGE_KEYS = {'package', 'root'}
def stringify_configuration(configuration: dict):
"""Create a stringified version of the complete installer configuration
to send to gen.generate()"""
gen_config = {}
for key, value in configuration.items():
        if isinstance(value, (list, dict)):
            log.debug("Caught %s for genconf configuration, transforming to JSON string: %s", type(value), value)
            value = json.dumps(value)
        elif isinstance(value, bool):
            value = 'true' if value else 'false'
elif isinstance(value, int):
log.debug("Caught int for genconf configuration, transforming to string: %s", value)
value = str(value)
elif isinstance(value, str):
pass
else:
log.error("Invalid type for value of %s in config. Got %s, only can handle list, dict, "
"int, bool, and str", key, type(value))
            raise Exception("Invalid type {} for config value {}".format(type(value), key))
gen_config[key] = value
log.debug('Stringified configuration: \n{}'.format(gen_config))
return gen_config
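# Illustrative sketch of the stringification (hypothetical config values, not
# taken from the original source):
#   stringify_configuration({'num_masters': 3, 'oauth_enabled': False,
#                            'resolvers': ['8.8.8.8', '8.8.4.4']})
#   -> {'num_masters': '3', 'oauth_enabled': 'false',
#       'resolvers': '["8.8.8.8", "8.8.4.4"]'}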
def add_roles(cloudconfig, roles):
for role in roles:
cloudconfig['write_files'].append({
"path": role_template.format(role),
"content": ""})
return cloudconfig
def add_units(cloudconfig, services, cloud_init_implementation='coreos'):
'''
Takes a list of services in the format of CoreOS cloud-init 'units' and
injects into cloudconfig a transformed version appropriate for the
cloud_init_implementation. See:
https://coreos.com/os/docs/latest/cloud-config.html for the CoreOS 'units'
specification. See: https://cloudinit.readthedocs.io/en/latest/index.html
for the Canonical implementation.
Parameters:
* cloudconfig is a dict
* services is a list of dicts
* cloud_init_implementation is a string: 'coreos' or 'canonical'
'''
if cloud_init_implementation == 'canonical':
cloudconfig.setdefault('write_files', [])
cloudconfig.setdefault('runcmd', [])
for unit in services:
unit_name = unit['name']
if 'content' in unit:
write_files_entry = {'path': '/etc/systemd/system/{}'.format(unit_name),
'content': unit['content'],
'permissions': '0644'}
cloudconfig['write_files'].append(write_files_entry)
if 'enable' in unit and unit['enable']:
runcmd_entry = ['systemctl', 'enable', unit_name]
cloudconfig['runcmd'].append(runcmd_entry)
if 'command' in unit:
opts = []
if 'no_block' in unit and unit['no_block']:
opts.append('--no-block')
if unit['command'] in ['start', 'stop', 'reload', 'restart', 'try-restart', 'reload-or-restart',
'reload-or-try-restart']:
runcmd_entry = ['systemctl'] + opts + [unit['command'], unit_name]
else:
raise Exception("Unsupported unit command: {}".format(unit['command']))
cloudconfig['runcmd'].append(runcmd_entry)
elif cloud_init_implementation == 'coreos':
cloudconfig.setdefault('coreos', {}).setdefault('units', [])
cloudconfig['coreos']['units'] += services
else:
raise Exception("Parameter value '{}' is invalid for cloud_init_implementation".format(
cloud_init_implementation))
return cloudconfig
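# Illustrative sketch (assumed unit definition) of the 'canonical' transform:
#   add_units({}, [{'name': 'dcos-setup.service', 'content': '[Unit]...',
#                   'enable': True, 'command': 'start'}], 'canonical')
#   appends a write_files entry for /etc/systemd/system/dcos-setup.service and
#   the runcmd entries ['systemctl', 'enable', 'dcos-setup.service'] and
#   ['systemctl', 'start', 'dcos-setup.service'] to the cloudconfig dict.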
# For converting util -> a namespace only.
class Bunch(object):
def __init__(self, adict):
self.__dict__.update(adict)
def render_cloudconfig(data):
return "#cloud-config\n" + render_yaml(data)
utils = Bunch({
"role_template": role_template,
"add_roles": add_roles,
"role_names": role_names,
"add_services": None,
"add_units": add_units,
"render_cloudconfig": render_cloudconfig
})
def render_yaml(data):
return yaml.dump(data, default_style='|', default_flow_style=False)
# Recursively merge two python dictionaries.
# If both base and additions contain the same key, that key's value is merged
# when both values are dictionaries, lists, or sets.
# This is unlike the python dict.update() method, which just overwrites
# matching keys.
def merge_dictionaries(base, additions):
base_copy = base.copy()
for k, v in additions.items():
try:
if k not in base:
base_copy[k] = v
continue
if isinstance(v, dict) and isinstance(base_copy[k], dict):
base_copy[k] = merge_dictionaries(base_copy.get(k, dict()), v)
continue
# Append arrays
if isinstance(v, list) and isinstance(base_copy[k], list):
base_copy[k].extend(v)
continue
# Merge sets
if isinstance(v, set) and isinstance(base_copy[k], set):
base_copy[k] |= v
continue
# Unknown types
raise ValueError("Can't merge type {} into type {}".format(type(v), type(base_copy[k])))
except ValueError as ex:
raise ValueError("{} inside key {}".format(ex, k)) from ex
return base_copy
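# Illustrative sketch (hypothetical values):
#   merge_dictionaries({'a': {'x': 1}, 'b': [1]}, {'a': {'y': 2}, 'b': [2]})
#   -> {'a': {'x': 1, 'y': 2}, 'b': [1, 2]}
#   merge_dictionaries({'a': 1}, {'a': 'one'})  # raises ValueError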
def load_templates(template_dict):
result = dict()
for name, template_list in template_dict.items():
result_list = list()
for template_name in template_list:
result_list.append(gen.template.parse_resources(template_name))
extra_filename = "gen_extra/" + template_name
if os.path.exists(extra_filename):
result_list.append(gen.template.parse_str(
load_string(extra_filename)))
result[name] = result_list
return result
# Render the Jinja/YAML into YAML, then load the YAML and merge it to make the
# final configuration files.
def render_templates(template_dict, arguments):
rendered_templates = dict()
templates = load_templates(template_dict)
    for name, template_list in templates.items():
        full_template = None
        for template in template_list:
            rendered_template = template.render(arguments)
            # If not yaml, just treat opaquely.
            if not name.endswith('.yaml'):
                # No merging support currently.
                assert len(template_list) == 1
                full_template = rendered_template
                continue
template_data = yaml.safe_load(rendered_template)
if full_template:
full_template = merge_dictionaries(full_template, template_data)
else:
full_template = template_data
rendered_templates[name] = full_template
return rendered_templates
# Collect the un-bound / un-set variables from all the given templates to build
# the schema / configuration target. The templates and their structure serve
# as the schema for what configuration a user must provide.
def target_from_templates(template_dict):
# NOTE: the individual yaml template targets are merged into one target
# since we never want to target just one template at a time for now (they
# all merge into one config package).
target = gen.internals.Target()
templates = load_templates(template_dict)
for template_list in templates.values():
for template in template_list:
target += template.target_from_ast()
return [target]
def write_to_non_taken(base_filename, json_data):
    number = 0
    filename = base_filename
    while os.path.exists(filename):
        number += 1
        filename = base_filename + '.{}'.format(number)
    write_json(filename, json_data)
    return filename
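# Illustrative: if 'out.json' and 'out.json.1' already exist, the data lands in
# 'out.json.2' and that filename is returned (hypothetical filenames).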
def do_gen_package(config, package_filename):
# Generate the specific dcos-config package.
# Version will be setup-{sha1 of contents}
# Forcibly set umask so that os.makedirs() always makes directories with
# uniform permissions
os.umask(0o000)
with TemporaryDirectory("gen_tmp_pkg") as tmpdir:
# Only contains package, root
assert config.keys() == {"package"}
# Write out the individual files
for file_info in config["package"]:
assert file_info.keys() <= {"path", "content", "permissions"}
if file_info['path'].startswith('/'):
path = tmpdir + file_info['path']
else:
path = tmpdir + '/' + file_info['path']
try:
if os.path.dirname(path):
os.makedirs(os.path.dirname(path), mode=0o755)
except FileExistsError:
pass
with open(path, 'w') as f:
f.write(file_info['content'])
# the file has special mode defined, handle that.
if 'permissions' in file_info:
assert isinstance(file_info['permissions'], str)
os.chmod(path, int(file_info['permissions'], 8))
else:
os.chmod(path, 0o644)
# Ensure the output directory exists
if os.path.dirname(package_filename):
os.makedirs(os.path.dirname(package_filename), exist_ok=True)
# Make the package top level directory readable by users other than the owner (root).
os.chmod(tmpdir, 0o755)
make_tar(package_filename, tmpdir)
log.info("Package filename: %s", package_filename)
def render_late_content(content, late_values):
def _dereference_placeholders(parts):
for part, is_placeholder in parts:
if is_placeholder:
if part not in late_values:
log.debug('Found placeholder for unknown value "{}" in late config: {}'.format(part, repr(content)))
raise Exception('Bad late config file: Found placeholder for unknown value "{}"'.format(part))
yield late_values[part]
else:
yield part
return ''.join(_dereference_placeholders(split_by_token(
gen.internals.LATE_BIND_PLACEHOLDER_START,
gen.internals.LATE_BIND_PLACEHOLDER_END,
content,
strip_token_decoration=True,
)))
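# Illustrative sketch using the gen.internals delimiters symbolically (their
# literal values live in gen.internals and are assumed here):
#   content = 'ip: ' + LATE_BIND_PLACEHOLDER_START + 'node_ip' + LATE_BIND_PLACEHOLDER_END
#   render_late_content(content, {'node_ip': '10.0.0.1'})  # -> 'ip: 10.0.0.1'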
def _late_bind_placeholder_in(string_):
return gen.internals.LATE_BIND_PLACEHOLDER_START in string_ or gen.internals.LATE_BIND_PLACEHOLDER_END in string_
def resolve_late_package(config, late_values):
resolved_config = {
'package': [
{k: render_late_content(v, late_values) if k == 'content' else v for k, v in file_info.items()}
for file_info in config['package']
]
}
assert not any(
_late_bind_placeholder_in(v) for file_info in resolved_config['package'] for v in file_info.values()
), 'Resolved late package must not contain late value placeholder: {}'.format(resolved_config)
return resolved_config
def extract_files_containing_late_variables(start_files):
found_files = []
left_files = []
for file_info in deepcopy(start_files):
assert not any(_late_bind_placeholder_in(v) for k, v in file_info.items() if k != 'content'), (
'File info must not contain late config placeholder in fields other than content: {}'.format(file_info)
)
if _late_bind_placeholder_in(file_info['content']):
found_files.append(file_info)
else:
left_files.append(file_info)
# All files still belong somewhere
assert len(found_files) + len(left_files) == len(start_files)
return found_files, left_files
# Validate all arguments passed in actually correspond to parameters to
# prevent human typo errors.
# This includes all possible sub scopes (including config for things you don't use is fine).
def flatten_parameters(scoped_parameters):
flat = copy(scoped_parameters.get('variables', set()))
for name, possible_values in scoped_parameters.get('sub_scopes', dict()).items():
flat.add(name)
for sub_scope in possible_values.values():
flat |= flatten_parameters(sub_scope)
return flat
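# Illustrative sketch (hypothetical scope structure):
#   flatten_parameters({
#       'variables': {'master_list'},
#       'sub_scopes': {'provider': {'aws': {'variables': {'region'}}}}})
#   -> {'master_list', 'provider', 'region'}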
def validate_all_arguments_match_parameters(parameters, setters, arguments):
errors = dict()
# Gather all possible parameters from templates as well as setter parameters.
all_parameters = flatten_parameters(parameters)
for setter_list in setters.values():
for setter in setter_list:
all_parameters |= setter.parameters
all_parameters.add(setter.name)
all_parameters |= {name for name, value in setter.conditions}
# Check every argument is in the set of parameters.
for argument in arguments:
if argument not in all_parameters:
errors[argument] = 'Argument {} given but not in possible parameters {}'.format(argument, all_parameters)
if len(errors):
raise ValidationError(errors, set())
def validate(
arguments,
extra_templates=list(),
extra_sources=list()):
sources, targets, _ = get_dcosconfig_source_target_and_templates(arguments, extra_templates, extra_sources)
return gen.internals.resolve_configuration(sources, targets).status_dict
def user_arguments_to_source(user_arguments) -> gen.internals.Source:
"""Convert all user arguments to be a gen.internals.Source"""
# Make sure all user provided arguments are strings.
# TODO(cmaloney): Loosen this restriction / allow arbitrary types as long
# as they all have a gen specific string form.
gen.internals.validate_arguments_strings(user_arguments)
user_source = gen.internals.Source(is_user=True)
for name, value in user_arguments.items():
user_source.add_must(name, value)
return user_source
# TODO(cmaloney): This function should dissolve away like the ssh one did and just become a big
# static dictionary, or be passed in / constructed on the fly at the various template callsites.
def get_dcosconfig_source_target_and_templates(
user_arguments: dict,
extra_templates: List[str],
extra_sources: List[gen.internals.Source]):
log.info("Generating configuration files...")
# TODO(cmaloney): Make these all just defined by the base calc.py
config_package_names = ['dcos-config', 'dcos-metadata']
template_filenames = ['dcos-config.yaml', 'cloud-config.yaml', 'dcos-metadata.yaml', 'dcos-services.yaml']
# TODO(cmaloney): Check there are no duplicates between templates and extra_template_files
template_filenames += extra_templates
# Re-arrange templates to be indexed by common name. Only allow multiple for one key if the key
# is yaml (ends in .yaml).
templates = dict()
for filename in template_filenames:
key = os.path.basename(filename)
templates.setdefault(key, list())
templates[key].append(filename)
if len(templates[key]) > 1 and not key.endswith('.yaml'):
raise Exception(
"Internal Error: Only know how to merge YAML templates at this point in time. "
"Can't merge template {} in template_list {}".format(filename, templates[key]))
targets = target_from_templates(templates)
base_source = gen.internals.Source(is_user=False)
base_source.add_entry(gen.calc.entry, replace_existing=False)
# Allow overriding calculators with a `gen_extra/calc.py` if it exists
if os.path.exists('gen_extra/calc.py'):
mod = importlib.machinery.SourceFileLoader('gen_extra.calc', 'gen_extra/calc.py').load_module()
base_source.add_entry(mod.entry, replace_existing=True)
def add_builtin(name, value):
base_source.add_must(name, json_prettyprint(value))
sources = [base_source, user_arguments_to_source(user_arguments)] + extra_sources
# TODO(cmaloney): Hash the contents of all the templates rather than using the list of filenames
# since the filenames might not live in this git repo, or may be locally modified.
add_builtin('template_filenames', template_filenames)
add_builtin('config_package_names', list(config_package_names))
# TODO(cmaloney): user_arguments needs to be a temporary_str since we need to only include used
# arguments inside of it.
add_builtin('user_arguments', user_arguments)
# Add a builtin for expanded_config, so that we won't get unset argument errors. The temporary
# value will get replaced with the set of all arguments once calculation is complete
temporary_str = 'DO NOT USE THIS AS AN ARGUMENT TO OTHER ARGUMENTS. IT IS TEMPORARY'
add_builtin('expanded_config', temporary_str)
    # Note: must come last so that the hash of the "base_source" this is being added to contains
    # all the variables but this one.
add_builtin('sources_id', hash_checkout([hash_checkout(source.make_id()) for source in sources]))
return sources, targets, templates
def build_late_package(late_files, config_id, provider):
if not late_files:
return None
    # Add an empty pkginfo.json to the late package after validating there
# isn't already one.
for file_info in late_files:
assert file_info['path'] != '/pkginfo.json'
assert file_info['path'].startswith('/')
late_files.append({
"path": "/pkginfo.json",
"content": "{}"})
return {
'package': late_files,
'name': 'dcos-provider-{}-{}--setup'.format(config_id, provider)
}
def validate_and_raise(sources, targets):
# TODO(cmaloney): Make it so we only get out the dcosconfig target arguments not all the config target arguments.
resolver = gen.internals.resolve_configuration(sources, targets)
status = resolver.status_dict
if status['status'] == 'errors':
raise ValidationError(errors=status['errors'], unset=status['unset'])
return resolver
def get_late_variables(resolver, sources):
    # Gather the late variables. The presence of late variables changes
    # whether or not a late package is created.
late_variables = dict()
# TODO(branden): Get the late vars and expressions from resolver.late
for source in sources:
for setter_list in source.setters.values():
for setter in setter_list:
if not setter.is_late:
continue
if setter.name not in resolver.late:
continue
# Skip late vars that aren't referenced by config.
if not resolver.arguments[setter.name].is_finalized:
continue
# Validate a late variable should only have one source.
assert setter.name not in late_variables
late_variables[setter.name] = setter.late_expression
log.debug('Late variables:\n{}'.format(pprint.pformat(late_variables)))
return late_variables
def get_final_arguments(resolver):
return {k: v.value for k, v in resolver.arguments.items() if v.is_finalized}
def generate(
arguments,
extra_templates=list(),
extra_sources=list(),
extra_targets=list()):
# To maintain the old API where we passed arguments rather than the new name.
user_arguments = arguments
arguments = None
sources, targets, templates = get_dcosconfig_source_target_and_templates(
user_arguments, extra_templates, extra_sources)
resolver = validate_and_raise(sources, targets + extra_targets)
argument_dict = get_final_arguments(resolver)
late_variables = get_late_variables(resolver, sources)
# expanded_config is a special result which contains all other arguments. It has to come after
# the calculation of all the other arguments so it can be filled with everything which was
# calculated. Can't be calculated because that would have an infinite recursion problem (the set
# of all arguments would want to include itself).
    # Explicitly / manually set it up so that it'll fit where we want it.
# TODO(cmaloney): Make this late-bound by gen.internals
argument_dict['expanded_config'] = textwrap.indent(
json_prettyprint(
{k: v for k, v in argument_dict.items() if not v.startswith(gen.internals.LATE_BIND_PLACEHOLDER_START)}
),
prefix=' ' * 3,
)
log.debug("Final arguments:" + json_prettyprint(argument_dict))
# Fill in the template parameters
# TODO(cmaloney): render_templates should ideally take the template targets.
rendered_templates = render_templates(templates, argument_dict)
# Validate there aren't any unexpected top level directives in any of the files
# (likely indicates a misspelling)
for name, template in rendered_templates.items():
if name == 'dcos-services.yaml': # yaml list of the service files
assert isinstance(template, list)
elif name == 'cloud-config.yaml':
assert template.keys() <= CLOUDCONFIG_KEYS, template.keys()
elif isinstance(template, str): # Not a yaml template
pass
else: # yaml template file
log.debug("validating template file %s", name)
assert template.keys() <= PACKAGE_KEYS, template.keys()
# Find all files which contain late bind variables and turn them into a "late bind package"
# TODO(cmaloney): check there are no late bound variables in cloud-config.yaml
late_files, regular_files = extract_files_containing_late_variables(
rendered_templates['dcos-config.yaml']['package'])
# put the regular files right back
rendered_templates['dcos-config.yaml'] = {'package': regular_files}
def make_package_filename(package_id, extension):
return 'packages/{0}/{1}{2}'.format(
package_id.name,
repr(package_id),
extension)
# Render all the cluster packages
cluster_package_info = {}
# Prepare late binding config, if any.
late_package = build_late_package(late_files, argument_dict['config_id'], argument_dict['provider'])
if late_variables:
# Render the late binding package. This package will be downloaded onto
# each cluster node during bootstrap and rendered into the final config
# using the values from the late config file.
late_package_id = PackageId(late_package['name'])
late_package_filename = make_package_filename(late_package_id, '.dcos_config')
os.makedirs(os.path.dirname(late_package_filename), mode=0o755)
write_yaml(late_package_filename, {'package': late_package['package']}, default_flow_style=False)
log.info('Package filename: {}'.format(late_package_filename))
# Add the late config file to cloud config. The expressions in
# late_variables will be resolved by the service handling the cloud
# config (e.g. Amazon CloudFormation). The rendered late config file
# on a cluster node's filesystem will contain the final values.
rendered_templates['cloud-config.yaml']['root'].append({
'path': '/etc/mesosphere/setup-flags/late-config.yaml',
'permissions': '0644',
'owner': 'root',
# TODO(cmaloney): don't prettyprint to save bytes.
# NOTE: Use yaml here simply to make avoiding painful escaping and
# unescaping easier.
'content': render_yaml({
'late_bound_package_id': late_package['name'],
'bound_values': late_variables
})})
# Collect metadata for cluster packages.
for package_id_str in json.loads(argument_dict['cluster_packages']):
package_id = PackageId(package_id_str)
package_filename = make_package_filename(package_id, '.tar.xz')
cluster_package_info[package_id.name] = {
'id': package_id_str,
'filename': package_filename
}
# Render config packages.
config_package_ids = json.loads(argument_dict['config_package_ids'])
for package_id_str in config_package_ids:
package_id = PackageId(package_id_str)
do_gen_package(rendered_templates[package_id.name + '.yaml'], cluster_package_info[package_id.name]['filename'])
# Convert cloud-config to just contain write_files rather than root
cc = rendered_templates['cloud-config.yaml']
# Shouldn't contain any packages. Providers should pull what they need to
# late bind out of other packages via cc_package_file.
assert 'package' not in cc
cc_root = cc.pop('root', [])
# Make sure write_files exists.
assert 'write_files' not in cc
cc['write_files'] = []
# Do the transform
for item in cc_root:
assert item['path'].startswith('/')
cc['write_files'].append(item)
rendered_templates['cloud-config.yaml'] = cc
# Add in the add_services util. Done here instead of the initial
# map since we need to bind in parameters
def add_services(cloudconfig, cloud_init_implementation):
return add_units(cloudconfig, rendered_templates['dcos-services.yaml'], cloud_init_implementation)
utils.add_services = add_services
return Bunch({
'arguments': argument_dict,
'cluster_packages': cluster_package_info,
'config_package_ids': config_package_ids,
'late_package_id': late_package['name'] if late_package else None,
'templates': rendered_templates,
'utils': utils
})
| apache-2.0 |
mcus/SickRage | lib/sqlalchemy/orm/identity.py | 78 | 7021 | # orm/identity.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import weakref
from . import attributes
from .. import util
class IdentityMap(dict):
def __init__(self):
self._modified = set()
self._wr = weakref.ref(self)
def replace(self, state):
raise NotImplementedError()
def add(self, state):
raise NotImplementedError()
def update(self, dict):
raise NotImplementedError("IdentityMap uses add() to insert data")
def clear(self):
raise NotImplementedError("IdentityMap uses remove() to remove data")
def _manage_incoming_state(self, state):
state._instance_dict = self._wr
if state.modified:
self._modified.add(state)
def _manage_removed_state(self, state):
del state._instance_dict
self._modified.discard(state)
def _dirty_states(self):
return self._modified
def check_modified(self):
"""return True if any InstanceStates present have been marked
as 'modified'.
"""
return bool(self._modified)
def has_key(self, key):
return key in self
def popitem(self):
raise NotImplementedError("IdentityMap uses remove() to remove data")
def pop(self, key, *args):
raise NotImplementedError("IdentityMap uses remove() to remove data")
def setdefault(self, key, default=None):
raise NotImplementedError("IdentityMap uses add() to insert data")
def copy(self):
raise NotImplementedError()
def __setitem__(self, key, value):
raise NotImplementedError("IdentityMap uses add() to insert data")
def __delitem__(self, key):
raise NotImplementedError("IdentityMap uses remove() to remove data")
class WeakInstanceDict(IdentityMap):
def __init__(self):
IdentityMap.__init__(self)
def __getitem__(self, key):
state = dict.__getitem__(self, key)
o = state.obj()
if o is None:
raise KeyError(key)
return o
def __contains__(self, key):
try:
if dict.__contains__(self, key):
state = dict.__getitem__(self, key)
o = state.obj()
else:
return False
except KeyError:
return False
else:
return o is not None
def contains_state(self, state):
return dict.get(self, state.key) is state
def replace(self, state):
if dict.__contains__(self, state.key):
existing = dict.__getitem__(self, state.key)
if existing is not state:
self._manage_removed_state(existing)
else:
return
dict.__setitem__(self, state.key, state)
self._manage_incoming_state(state)
def add(self, state):
key = state.key
# inline of self.__contains__
if dict.__contains__(self, key):
try:
existing_state = dict.__getitem__(self, key)
if existing_state is not state:
o = existing_state.obj()
if o is not None:
raise AssertionError(
"A conflicting state is already "
"present in the identity map for key %r"
% (key, ))
else:
return
except KeyError:
pass
dict.__setitem__(self, key, state)
self._manage_incoming_state(state)
def get(self, key, default=None):
state = dict.get(self, key, default)
if state is default:
return default
o = state.obj()
if o is None:
return default
return o
def _items(self):
values = self.all_states()
result = []
for state in values:
value = state.obj()
if value is not None:
result.append((state.key, value))
return result
def _values(self):
values = self.all_states()
result = []
for state in values:
value = state.obj()
if value is not None:
result.append(value)
return result
if util.py2k:
items = _items
values = _values
def iteritems(self):
return iter(self.items())
def itervalues(self):
return iter(self.values())
else:
def items(self):
return iter(self._items())
def values(self):
return iter(self._values())
def all_states(self):
if util.py2k:
return dict.values(self)
else:
return list(dict.values(self))
def discard(self, state):
st = dict.get(self, state.key, None)
if st is state:
dict.pop(self, state.key, None)
self._manage_removed_state(state)
def prune(self):
return 0
class StrongInstanceDict(IdentityMap):
def all_states(self):
return [attributes.instance_state(o) for o in self.values()]
def contains_state(self, state):
return (
state.key in self and
attributes.instance_state(self[state.key]) is state)
def replace(self, state):
if dict.__contains__(self, state.key):
existing = dict.__getitem__(self, state.key)
existing = attributes.instance_state(existing)
if existing is not state:
self._manage_removed_state(existing)
else:
return
dict.__setitem__(self, state.key, state.obj())
self._manage_incoming_state(state)
def add(self, state):
if state.key in self:
if attributes.instance_state(dict.__getitem__(self,
state.key)) is not state:
raise AssertionError('A conflicting state is already '
'present in the identity map for key %r'
% (state.key, ))
else:
dict.__setitem__(self, state.key, state.obj())
self._manage_incoming_state(state)
def discard(self, state):
obj = dict.get(self, state.key, None)
if obj is not None:
st = attributes.instance_state(obj)
if st is state:
dict.pop(self, state.key, None)
self._manage_removed_state(state)
def prune(self):
"""prune unreferenced, non-dirty states."""
ref_count = len(self)
dirty = [s.obj() for s in self.all_states() if s.modified]
# work around http://bugs.python.org/issue6149
keepers = weakref.WeakValueDictionary()
keepers.update(self)
dict.clear(self)
dict.update(self, keepers)
self.modified = bool(dirty)
return ref_count - len(self)
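# Illustrative behaviour (hypothetical counts): with 10 states held, of which 3
# are still externally referenced or dirty, prune() rebuilds the dict around
# those 3 survivors and returns 7.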
| gpl-3.0 |
grdlok/UStar-dl | src/youtube_dl/extractor/viki.py | 35 | 3332 | from __future__ import unicode_literals
import re
from ..utils import (
ExtractorError,
unescapeHTML,
unified_strdate,
US_RATINGS,
)
from .subtitles import SubtitlesInfoExtractor
class VikiIE(SubtitlesInfoExtractor):
IE_NAME = 'viki'
_VALID_URL = r'^https?://(?:www\.)?viki\.com/videos/(?P<id>[0-9]+v)'
_TEST = {
'url': 'http://www.viki.com/videos/1023585v-heirs-episode-14',
'md5': 'a21454021c2646f5433514177e2caa5f',
'info_dict': {
'id': '1023585v',
'ext': 'mp4',
'title': 'Heirs Episode 14',
'uploader': 'SBS',
'description': 'md5:c4b17b9626dd4b143dcc4d855ba3474e',
'upload_date': '20131121',
'age_limit': 13,
},
'skip': 'Blocked in the US',
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group(1)
webpage = self._download_webpage(url, video_id)
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
uploader_m = re.search(
r'<strong>Broadcast Network: </strong>\s*([^<]*)<', webpage)
if uploader_m is None:
uploader = None
else:
uploader = uploader_m.group(1).strip()
rating_str = self._html_search_regex(
r'<strong>Rating: </strong>\s*([^<]*)<', webpage,
'rating information', default='').strip()
age_limit = US_RATINGS.get(rating_str)
info_url = 'http://www.viki.com/player5_fragment/%s?action=show&controller=videos' % video_id
info_webpage = self._download_webpage(
info_url, video_id, note='Downloading info page')
if re.match(r'\s*<div\s+class="video-error', info_webpage):
raise ExtractorError(
'Video %s is blocked from your location.' % video_id,
expected=True)
video_url = self._html_search_regex(
r'<source[^>]+src="([^"]+)"', info_webpage, 'video URL')
upload_date_str = self._html_search_regex(
r'"created_at":"([^"]+)"', info_webpage, 'upload date')
upload_date = (
unified_strdate(upload_date_str)
if upload_date_str is not None
else None
)
# subtitles
video_subtitles = self.extract_subtitles(video_id, info_webpage)
if self._downloader.params.get('listsubtitles', False):
self._list_available_subtitles(video_id, info_webpage)
return
return {
'id': video_id,
'title': title,
'url': video_url,
'description': description,
'thumbnail': thumbnail,
'age_limit': age_limit,
'uploader': uploader,
'subtitles': video_subtitles,
'upload_date': upload_date,
}
def _get_available_subtitles(self, video_id, info_webpage):
res = {}
for sturl_html in re.findall(r'<track src="([^"]+)"/>', info_webpage):
sturl = unescapeHTML(sturl_html)
m = re.search(r'/(?P<lang>[a-z]+)\.vtt', sturl)
if not m:
continue
res[m.group('lang')] = sturl
return res
| unlicense |
Rosiak/librenms | poller-wrapper.py | 17 | 13997 | #! /usr/bin/env python2
"""
poller-wrapper A small tool which wraps around the poller and tries to
guide the polling process with a more modern approach with a
Queue and workers
Author: Job Snijders <job.snijders@atrato.com>
Date: Jan 2013
Usage: This program accepts one command line argument: the number of threads
that should run simultaneously. If no argument is given it will assume
a default of 16 threads.
Ubuntu Linux: apt-get install python-mysqldb
FreeBSD: cd /usr/ports/*/py-MySQLdb && make install clean
Tested on: Python 2.7.3 / PHP 5.3.10-1ubuntu3.4 / Ubuntu 12.04 LTS
License: To the extent possible under law, Job Snijders has waived all
copyright and related or neighboring rights to this script.
This script has been put into the Public Domain. This work is
published from: The Netherlands.
"""
try:
import json
import os
import Queue
import subprocess
import sys
import threading
import time
from optparse import OptionParser
except:
print "ERROR: missing one or more of the following python modules:"
print "threading, Queue, sys, subprocess, time, os, json"
sys.exit(2)
try:
import MySQLdb
except:
print "ERROR: missing the mysql python module:"
print "On ubuntu: apt-get install python-mysqldb"
print "On FreeBSD: cd /usr/ports/*/py-MySQLdb && make install clean"
sys.exit(2)
"""
Fetch configuration details from the config_to_json.php script
"""
ob_install_dir = os.path.dirname(os.path.realpath(__file__))
config_file = ob_install_dir + '/config.php'
def get_config_data():
config_cmd = ['/usr/bin/env', 'php', '%s/config_to_json.php' % ob_install_dir]
try:
proc = subprocess.Popen(config_cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
except:
print "ERROR: Could not execute: %s" % config_cmd
sys.exit(2)
return proc.communicate()[0]
try:
with open(config_file) as f:
pass
except IOError as e:
print "ERROR: Oh dear... %s does not seem readable" % config_file
sys.exit(2)
try:
config = json.loads(get_config_data())
except:
print "ERROR: Could not load or parse configuration, are PATHs correct?"
sys.exit(2)
poller_path = config['install_dir'] + '/poller.php'
log_dir = config['log_dir']
db_username = config['db_user']
db_password = config['db_pass']
db_port = int(config['db_port'])
if config['db_socket']:
db_server = config['db_host']
db_socket = config['db_socket']
else:
db_server = config['db_host']
db_socket = None
db_dbname = config['db_name']
def db_open():
try:
if db_socket:
db = MySQLdb.connect(host=db_server, unix_socket=db_socket, user=db_username, passwd=db_password, db=db_dbname)
else:
db = MySQLdb.connect(host=db_server, port=db_port, user=db_username, passwd=db_password, db=db_dbname)
return db
except:
print "ERROR: Could not connect to MySQL database!"
sys.exit(2)
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC1
if 'distributed_poller_group' in config:
poller_group = str(config['distributed_poller_group'])
else:
poller_group = False
def memc_alive():
try:
global memc
key = str(uuid.uuid4())
memc.set('poller.ping.' + key, key, 60)
if memc.get('poller.ping.' + key) == key:
memc.delete('poller.ping.' + key)
return True
else:
return False
except:
return False
def memc_touch(key, time):
try:
global memc
val = memc.get(key)
memc.set(key, val, time)
except:
pass
def get_time_tag(step):
ts = int(time.time())
return ts - ts % step
if 'rrd' in config and 'step' in config['rrd']:
step = config['rrd']['step']
else:
step = 300
if ('distributed_poller' in config and
'distributed_poller_memcached_host' in config and
'distributed_poller_memcached_port' in config and
config['distributed_poller']):
time_tag = str(get_time_tag(step))
master_tag = "poller.master." + time_tag
nodes_tag = "poller.nodes." + time_tag
try:
import memcache
import uuid
memc = memcache.Client([config['distributed_poller_memcached_host'] + ':' +
str(config['distributed_poller_memcached_port'])])
if str(memc.get(master_tag)) == config['distributed_poller_name']:
print "This system is already joined as the poller master."
sys.exit(2)
if memc_alive():
if memc.get(master_tag) is None:
print "Registered as Master"
memc.set(master_tag, config['distributed_poller_name'], 10)
memc.set(nodes_tag, 0, step)
IsNode = False
else:
print "Registered as Node joining Master %s" % memc.get(master_tag)
IsNode = True
memc.incr(nodes_tag)
distpoll = True
else:
print "Could not connect to memcached, disabling distributed poller."
distpoll = False
IsNode = False
except SystemExit:
raise
except ImportError:
print "ERROR: missing memcache python module:"
print "On deb systems: apt-get install python-memcache"
print "On other systems: easy_install python-memcached"
print "Disabling distributed poller."
distpoll = False
else:
distpoll = False
# EOC1
s_time = time.time()
real_duration = 0
per_device_duration = {}
polled_devices = 0
"""
Take the amount of threads we want to run in parallel from the commandline
if None are given or the argument was garbage, fall back to default of 16
"""
usage = "usage: %prog [options] <workers> (Default: 16 (Do not set too high)"
description = "Spawn multiple poller.php processes in parallel."
parser = OptionParser(usage=usage, description=description)
parser.add_option('-d', '--debug', action='store_true', default=False,
help="Enable debug output. WARNING: Leaving this enabled will consume a lot of disk space.")
(options, args) = parser.parse_args()
debug = options.debug
try:
amount_of_workers = int(args[0])
except (IndexError, ValueError):
amount_of_workers = 16
devices_list = []
"""
This query specifically orders the results by the last_polled_timetaken variable.
This way, we put the devices likely to be slow at the top of the queue,
thus increasing our chances of completing _all_ the work in exactly the time it takes to
poll the slowest device! cool stuff, eh?
"""
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC2
if poller_group is not False:
query = "select device_id from devices where poller_group IN(" + poller_group + ") and disabled = 0 order by last_polled_timetaken desc"
else:
query = "select device_id from devices where disabled = 0 order by last_polled_timetaken desc"
# EOC2
db = db_open()
cursor = db.cursor()
cursor.execute(query)
devices = cursor.fetchall()
for row in devices:
devices_list.append(int(row[0]))
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC3
if distpoll and not IsNode:
query = "select max(device_id),min(device_id) from devices"
cursor.execute(query)
devices = cursor.fetchall()
maxlocks = devices[0][0]
minlocks = devices[0][1]
# EOC3
db.close()
"""
A separate queue and a single worker for printing information to the screen prevents
the good old joke:
Some people, when confronted with a problem, think,
"I know, I'll use threads," and then two they hav erpoblesms.
"""
def printworker():
nodeso = 0
while True:
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC4
global IsNode
global distpoll
if distpoll:
if not IsNode:
memc_touch(master_tag, 10)
nodes = memc.get(nodes_tag)
if nodes is None and not memc_alive():
print "WARNING: Lost Memcached. Taking over all devices. Nodes will quit shortly."
distpoll = False
nodes = nodeso
                if nodes != nodeso:
print "INFO: %s Node(s) Total" % (nodes)
nodeso = nodes
else:
memc_touch(nodes_tag, 10)
try:
worker_id, device_id, elapsed_time = print_queue.get(False)
except:
pass
try:
time.sleep(1)
except:
pass
continue
else:
worker_id, device_id, elapsed_time = print_queue.get()
# EOC4
global real_duration
global per_device_duration
global polled_devices
real_duration += elapsed_time
per_device_duration[device_id] = elapsed_time
polled_devices += 1
if elapsed_time < step:
print "INFO: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time)
else:
print "WARNING: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time)
print_queue.task_done()
"""
This function will fork off single instances of the poller.php process, record
how long it takes, and push the resulting reports to the printer queue
"""
def poll_worker():
while True:
device_id = poll_queue.get()
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC5
if not distpoll or memc.get('poller.device.%s.%s'% (device_id, time_tag)) is None:
if distpoll:
result = memc.add('poller.device.%s.%s'% (device_id, time_tag), config['distributed_poller_name'], step)
if not result:
print "This device (%s) appears to be being polled by another poller" % (device_id)
poll_queue.task_done()
continue
if not memc_alive() and IsNode:
print "Lost Memcached, Not polling Device %s as Node. Master will poll it." % device_id
poll_queue.task_done()
continue
# EOC5
try:
start_time = time.time()
output = "-d >> %s/poll_device_%s.log" % (log_dir, device_id) if debug else ">> /dev/null"
command = "/usr/bin/env php %s -h %s %s 2>&1" % (poller_path, device_id, output)
subprocess.check_call(command, shell=True)
elapsed_time = int(time.time() - start_time)
print_queue.put([threading.current_thread().name, device_id, elapsed_time])
except (KeyboardInterrupt, SystemExit):
raise
except:
pass
poll_queue.task_done()
poll_queue = Queue.Queue()
print_queue = Queue.Queue()
print "INFO: starting the poller at %s with %s threads, slowest devices first" % (time.strftime("%Y-%m-%d %H:%M:%S"),
amount_of_workers)
for device_id in devices_list:
poll_queue.put(device_id)
for i in range(amount_of_workers):
t = threading.Thread(target=poll_worker)
t.setDaemon(True)
t.start()
p = threading.Thread(target=printworker)
p.setDaemon(True)
p.start()
try:
poll_queue.join()
print_queue.join()
except (KeyboardInterrupt, SystemExit):
raise
total_time = int(time.time() - s_time)
print "INFO: poller-wrapper polled %s devices in %s seconds with %s workers" % (polled_devices, total_time, amount_of_workers)
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC6
if distpoll or memc_alive():
master = memc.get(master_tag)
if master == config['distributed_poller_name'] and not IsNode:
print "Wait for all poller-nodes to finish"
nodes = memc.get(nodes_tag)
        while nodes is not None and nodes > 0:
try:
time.sleep(1)
nodes = memc.get(nodes_tag)
except:
pass
print "Clearing Locks for %s" % time_tag
x = minlocks
while x <= maxlocks:
res = memc.delete('poller.device.%s.%s' % (x, time_tag))
x += 1
print "%s Locks Cleared" % x
print "Clearing Nodes"
memc.delete(master_tag)
memc.delete(nodes_tag)
else:
memc.decr(nodes_tag)
print "Finished %.3fs after interval start." % (time.time() - int(time_tag))
# EOC6
show_stopper = False
db = db_open()
cursor = db.cursor()
query = "update pollers set last_polled=NOW(), devices='%d', time_taken='%d' where poller_name='%s'" % (polled_devices,
total_time, config['distributed_poller_name'])
response = cursor.execute(query)
if response == 1:
db.commit()
else:
query = "insert into pollers set poller_name='%s', last_polled=NOW(), devices='%d', time_taken='%d'" % (
config['distributed_poller_name'], polled_devices, total_time)
cursor.execute(query)
db.commit()
db.close()
if total_time > step:
print "WARNING: the process took more than %s seconds to finish, you need faster hardware or more threads" % step
print "INFO: in sequential style polling the elapsed time would have been: %s seconds" % real_duration
for device in per_device_duration:
if per_device_duration[device] > step:
print "WARNING: device %s is taking too long: %s seconds" % (device, per_device_duration[device])
show_stopper = True
if show_stopper:
print "ERROR: Some devices are taking more than %s seconds, the script cannot recommend you what to do." % step
else:
recommend = int(total_time / step * amount_of_workers + 1)
print "WARNING: Consider setting a minimum of %d threads. (This does not constitute professional advice!)" % recommend
sys.exit(2)
| gpl-3.0 |
alexdebrie/moto | moto/sns/responses.py | 8 | 25957 | from __future__ import unicode_literals
import json
from moto.core.responses import BaseResponse
from moto.core.utils import camelcase_to_underscores
from .models import sns_backends
class SNSResponse(BaseResponse):
@property
def backend(self):
return sns_backends[self.region]
@property
def request_json(self):
return 'JSON' in self.querystring.get('ContentType', [])
def _get_attributes(self):
attributes = self._get_list_prefix('Attributes.entry')
return dict(
(attribute['key'], attribute['value'])
for attribute
in attributes
)
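    # Illustrative: a querystring like Attributes.entry.1.key=Enabled,
    # Attributes.entry.1.value=true is assumed to arrive here as
    # [{'key': 'Enabled', 'value': 'true'}] and becomes {'Enabled': 'true'}.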
def create_topic(self):
name = self._get_param('Name')
topic = self.backend.create_topic(name)
if self.request_json:
return json.dumps({
'CreateTopicResponse': {
'CreateTopicResult': {
'TopicArn': topic.arn,
},
'ResponseMetadata': {
'RequestId': 'a8dec8b3-33a4-11df-8963-01868b7c937a',
}
}
})
template = self.response_template(CREATE_TOPIC_TEMPLATE)
return template.render(topic=topic)
def list_topics(self):
next_token = self._get_param('NextToken')
topics, next_token = self.backend.list_topics(next_token=next_token)
if self.request_json:
return json.dumps({
'ListTopicsResponse': {
'ListTopicsResult': {
'Topics': [{'TopicArn': topic.arn} for topic in topics],
'NextToken': next_token,
}
},
'ResponseMetadata': {
'RequestId': 'a8dec8b3-33a4-11df-8963-01868b7c937a',
}
})
template = self.response_template(LIST_TOPICS_TEMPLATE)
return template.render(topics=topics, next_token=next_token)
def delete_topic(self):
topic_arn = self._get_param('TopicArn')
self.backend.delete_topic(topic_arn)
if self.request_json:
return json.dumps({
'DeleteTopicResponse': {
'ResponseMetadata': {
'RequestId': 'a8dec8b3-33a4-11df-8963-01868b7c937a',
}
}
})
template = self.response_template(DELETE_TOPIC_TEMPLATE)
return template.render()
def get_topic_attributes(self):
topic_arn = self._get_param('TopicArn')
topic = self.backend.get_topic(topic_arn)
if self.request_json:
return json.dumps({
"GetTopicAttributesResponse": {
"GetTopicAttributesResult": {
"Attributes": {
"Owner": topic.account_id,
"Policy": topic.policy,
"TopicArn": topic.arn,
"DisplayName": topic.display_name,
"SubscriptionsPending": topic.subscriptions_pending,
"SubscriptionsConfirmed": topic.subscriptions_confimed,
"SubscriptionsDeleted": topic.subscriptions_deleted,
"DeliveryPolicy": topic.delivery_policy,
"EffectiveDeliveryPolicy": topic.effective_delivery_policy,
}
},
"ResponseMetadata": {
"RequestId": "057f074c-33a7-11df-9540-99d0768312d3"
}
}
})
template = self.response_template(GET_TOPIC_ATTRIBUTES_TEMPLATE)
return template.render(topic=topic)
def set_topic_attributes(self):
topic_arn = self._get_param('TopicArn')
attribute_name = self._get_param('AttributeName')
attribute_name = camelcase_to_underscores(attribute_name)
attribute_value = self._get_param('AttributeValue')
self.backend.set_topic_attribute(topic_arn, attribute_name, attribute_value)
if self.request_json:
return json.dumps({
"SetTopicAttributesResponse": {
"ResponseMetadata": {
"RequestId": "a8763b99-33a7-11df-a9b7-05d48da6f042"
}
}
})
template = self.response_template(SET_TOPIC_ATTRIBUTES_TEMPLATE)
return template.render()
def subscribe(self):
topic_arn = self._get_param('TopicArn')
endpoint = self._get_param('Endpoint')
protocol = self._get_param('Protocol')
subscription = self.backend.subscribe(topic_arn, endpoint, protocol)
if self.request_json:
return json.dumps({
"SubscribeResponse": {
"SubscribeResult": {
"SubscriptionArn": subscription.arn,
},
"ResponseMetadata": {
"RequestId": "a8763b99-33a7-11df-a9b7-05d48da6f042"
}
}
})
template = self.response_template(SUBSCRIBE_TEMPLATE)
return template.render(subscription=subscription)
def unsubscribe(self):
subscription_arn = self._get_param('SubscriptionArn')
self.backend.unsubscribe(subscription_arn)
if self.request_json:
return json.dumps({
"UnsubscribeResponse": {
"ResponseMetadata": {
"RequestId": "a8763b99-33a7-11df-a9b7-05d48da6f042"
}
}
})
template = self.response_template(UNSUBSCRIBE_TEMPLATE)
return template.render()
def list_subscriptions(self):
next_token = self._get_param('NextToken')
subscriptions, next_token = self.backend.list_subscriptions(next_token=next_token)
if self.request_json:
return json.dumps({
"ListSubscriptionsResponse": {
"ListSubscriptionsResult": {
"Subscriptions": [{
"TopicArn": subscription.topic.arn,
"Protocol": subscription.protocol,
"SubscriptionArn": subscription.arn,
"Owner": subscription.topic.account_id,
"Endpoint": subscription.endpoint,
} for subscription in subscriptions],
'NextToken': next_token,
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
template = self.response_template(LIST_SUBSCRIPTIONS_TEMPLATE)
return template.render(subscriptions=subscriptions,
next_token=next_token)
def list_subscriptions_by_topic(self):
topic_arn = self._get_param('TopicArn')
next_token = self._get_param('NextToken')
subscriptions, next_token = self.backend.list_subscriptions(topic_arn, next_token=next_token)
if self.request_json:
return json.dumps({
"ListSubscriptionsByTopicResponse": {
"ListSubscriptionsByTopicResult": {
"Subscriptions": [{
"TopicArn": subscription.topic.arn,
"Protocol": subscription.protocol,
"SubscriptionArn": subscription.arn,
"Owner": subscription.topic.account_id,
"Endpoint": subscription.endpoint,
} for subscription in subscriptions],
'NextToken': next_token,
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
template = self.response_template(LIST_SUBSCRIPTIONS_BY_TOPIC_TEMPLATE)
return template.render(subscriptions=subscriptions,
next_token=next_token)
def publish(self):
target_arn = self._get_param('TargetArn')
topic_arn = self._get_param('TopicArn')
arn = target_arn if target_arn else topic_arn
message = self._get_param('Message')
message_id = self.backend.publish(arn, message)
if self.request_json:
return json.dumps({
"PublishResponse": {
"PublishResult": {
"MessageId": message_id,
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
template = self.response_template(PUBLISH_TEMPLATE)
return template.render(message_id=message_id)
def create_platform_application(self):
name = self._get_param('Name')
platform = self._get_param('Platform')
attributes = self._get_attributes()
platform_application = self.backend.create_platform_application(self.region, name, platform, attributes)
if self.request_json:
return json.dumps({
"CreatePlatformApplicationResponse": {
"CreatePlatformApplicationResult": {
"PlatformApplicationArn": platform_application.arn,
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937b",
}
}
})
template = self.response_template(CREATE_PLATFORM_APPLICATION_TEMPLATE)
return template.render(platform_application=platform_application)
def get_platform_application_attributes(self):
arn = self._get_param('PlatformApplicationArn')
application = self.backend.get_application(arn)
if self.request_json:
return json.dumps({
"GetPlatformApplicationAttributesResponse": {
"GetPlatformApplicationAttributesResult": {
"Attributes": application.attributes,
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937f",
}
}
})
template = self.response_template(GET_PLATFORM_APPLICATION_ATTRIBUTES_TEMPLATE)
return template.render(application=application)
def set_platform_application_attributes(self):
arn = self._get_param('PlatformApplicationArn')
attributes = self._get_attributes()
self.backend.set_application_attributes(arn, attributes)
if self.request_json:
return json.dumps({
"SetPlatformApplicationAttributesResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-12df-8963-01868b7c937f",
}
}
})
template = self.response_template(SET_PLATFORM_APPLICATION_ATTRIBUTES_TEMPLATE)
return template.render()
def list_platform_applications(self):
applications = self.backend.list_platform_applications()
if self.request_json:
return json.dumps({
"ListPlatformApplicationsResponse": {
"ListPlatformApplicationsResult": {
"PlatformApplications": [{
"PlatformApplicationArn": application.arn,
"attributes": application.attributes,
} for application in applications],
"NextToken": None
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937c",
}
}
})
template = self.response_template(LIST_PLATFORM_APPLICATIONS_TEMPLATE)
return template.render(applications=applications)
def delete_platform_application(self):
platform_arn = self._get_param('PlatformApplicationArn')
self.backend.delete_platform_application(platform_arn)
if self.request_json:
return json.dumps({
"DeletePlatformApplicationResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937e",
}
}
})
template = self.response_template(DELETE_PLATFORM_APPLICATION_TEMPLATE)
return template.render()
def create_platform_endpoint(self):
application_arn = self._get_param('PlatformApplicationArn')
application = self.backend.get_application(application_arn)
custom_user_data = self._get_param('CustomUserData')
token = self._get_param('Token')
attributes = self._get_attributes()
platform_endpoint = self.backend.create_platform_endpoint(
self.region, application, custom_user_data, token, attributes)
if self.request_json:
return json.dumps({
"CreatePlatformEndpointResponse": {
"CreatePlatformEndpointResult": {
"EndpointArn": platform_endpoint.arn,
},
"ResponseMetadata": {
"RequestId": "384ac68d-3779-11df-8963-01868b7c937b",
}
}
})
template = self.response_template(CREATE_PLATFORM_ENDPOINT_TEMPLATE)
return template.render(platform_endpoint=platform_endpoint)
def list_endpoints_by_platform_application(self):
application_arn = self._get_param('PlatformApplicationArn')
endpoints = self.backend.list_endpoints_by_platform_application(application_arn)
if self.request_json:
return json.dumps({
"ListEndpointsByPlatformApplicationResponse": {
"ListEndpointsByPlatformApplicationResult": {
"Endpoints": [
{
"Attributes": endpoint.attributes,
"EndpointArn": endpoint.arn,
} for endpoint in endpoints
],
"NextToken": None
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
template = self.response_template(LIST_ENDPOINTS_BY_PLATFORM_APPLICATION_TEMPLATE)
return template.render(endpoints=endpoints)
def get_endpoint_attributes(self):
arn = self._get_param('EndpointArn')
endpoint = self.backend.get_endpoint(arn)
if self.request_json:
return json.dumps({
"GetEndpointAttributesResponse": {
"GetEndpointAttributesResult": {
"Attributes": endpoint.attributes,
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937f",
}
}
})
template = self.response_template(GET_ENDPOINT_ATTRIBUTES_TEMPLATE)
return template.render(endpoint=endpoint)
def set_endpoint_attributes(self):
arn = self._get_param('EndpointArn')
attributes = self._get_attributes()
self.backend.set_endpoint_attributes(arn, attributes)
if self.request_json:
return json.dumps({
"SetEndpointAttributesResponse": {
"ResponseMetadata": {
"RequestId": "384bc68d-3775-12df-8963-01868b7c937f",
}
}
})
template = self.response_template(SET_ENDPOINT_ATTRIBUTES_TEMPLATE)
return template.render()
CREATE_TOPIC_TEMPLATE = """<CreateTopicResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<CreateTopicResult>
<TopicArn>{{ topic.arn }}</TopicArn>
</CreateTopicResult>
<ResponseMetadata>
<RequestId>a8dec8b3-33a4-11df-8963-01868b7c937a</RequestId>
</ResponseMetadata>
</CreateTopicResponse>"""
LIST_TOPICS_TEMPLATE = """<ListTopicsResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<ListTopicsResult>
<Topics>
{% for topic in topics %}
<member>
<TopicArn>{{ topic.arn }}</TopicArn>
</member>
{% endfor %}
</Topics>
{% if next_token %}
<NextToken>{{ next_token }}</NextToken>
{% endif %}
</ListTopicsResult>
<ResponseMetadata>
<RequestId>3f1478c7-33a9-11df-9540-99d0768312d3</RequestId>
</ResponseMetadata>
</ListTopicsResponse>"""
DELETE_TOPIC_TEMPLATE = """<DeleteTopicResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<ResponseMetadata>
<RequestId>f3aa9ac9-3c3d-11df-8235-9dab105e9c32</RequestId>
</ResponseMetadata>
</DeleteTopicResponse>"""
GET_TOPIC_ATTRIBUTES_TEMPLATE = """<GetTopicAttributesResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<GetTopicAttributesResult>
<Attributes>
<entry>
<key>Owner</key>
<value>{{ topic.account_id }}</value>
</entry>
<entry>
<key>Policy</key>
<value>{{ topic.policy }}</value>
</entry>
<entry>
<key>TopicArn</key>
<value>{{ topic.arn }}</value>
</entry>
<entry>
<key>DisplayName</key>
<value>{{ topic.display_name }}</value>
</entry>
<entry>
<key>SubscriptionsPending</key>
<value>{{ topic.subscriptions_pending }}</value>
</entry>
<entry>
<key>SubscriptionsConfirmed</key>
<value>{{ topic.subscriptions_confimed }}</value>
</entry>
<entry>
<key>SubscriptionsDeleted</key>
<value>{{ topic.subscriptions_deleted }}</value>
</entry>
<entry>
<key>DeliveryPolicy</key>
<value>{{ topic.delivery_policy }}</value>
</entry>
<entry>
<key>EffectiveDeliveryPolicy</key>
<value>{{ topic.effective_delivery_policy }}</value>
</entry>
</Attributes>
</GetTopicAttributesResult>
<ResponseMetadata>
<RequestId>057f074c-33a7-11df-9540-99d0768312d3</RequestId>
</ResponseMetadata>
</GetTopicAttributesResponse>"""
SET_TOPIC_ATTRIBUTES_TEMPLATE = """<SetTopicAttributesResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<ResponseMetadata>
<RequestId>a8763b99-33a7-11df-a9b7-05d48da6f042</RequestId>
</ResponseMetadata>
</SetTopicAttributesResponse>"""
CREATE_PLATFORM_APPLICATION_TEMPLATE = """<CreatePlatformApplicationResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<CreatePlatformApplicationResult>
<PlatformApplicationArn>{{ platform_application.arn }}</PlatformApplicationArn>
</CreatePlatformApplicationResult>
<ResponseMetadata>
<RequestId>b6f0e78b-e9d4-5a0e-b973-adc04e8a4ff9</RequestId>
</ResponseMetadata>
</CreatePlatformApplicationResponse>"""
CREATE_PLATFORM_ENDPOINT_TEMPLATE = """<CreatePlatformEndpointResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<CreatePlatformEndpointResult>
<EndpointArn>{{ platform_endpoint.arn }}</EndpointArn>
</CreatePlatformEndpointResult>
<ResponseMetadata>
<RequestId>6613341d-3e15-53f7-bf3c-7e56994ba278</RequestId>
</ResponseMetadata>
</CreatePlatformEndpointResponse>"""
LIST_PLATFORM_APPLICATIONS_TEMPLATE = """<ListPlatformApplicationsResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<ListPlatformApplicationsResult>
<PlatformApplications>
{% for application in applications %}
<member>
<PlatformApplicationArn>{{ application.arn }}</PlatformApplicationArn>
<Attributes>
{% for attribute in application.attributes %}
<entry>
<key>{{ attribute }}</key>
<value>{{ application.attributes[attribute] }}</value>
</entry>
{% endfor %}
</Attributes>
</member>
{% endfor %}
</PlatformApplications>
</ListPlatformApplicationsResult>
<ResponseMetadata>
<RequestId>315a335e-85d8-52df-9349-791283cbb529</RequestId>
</ResponseMetadata>
</ListPlatformApplicationsResponse>"""
DELETE_PLATFORM_APPLICATION_TEMPLATE = """<DeletePlatformApplicationResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<ResponseMetadata>
<RequestId>097dac18-7a77-5823-a8dd-e65476dcb037</RequestId>
</ResponseMetadata>
</DeletePlatformApplicationResponse>"""
GET_ENDPOINT_ATTRIBUTES_TEMPLATE = """<GetEndpointAttributesResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<GetEndpointAttributesResult>
<Attributes>
{% for attribute in endpoint.attributes %}
<entry>
<key>{{ attribute }}</key>
<value>{{ endpoint.attributes[attribute] }}</value>
</entry>
{% endfor %}
</Attributes>
</GetEndpointAttributesResult>
<ResponseMetadata>
<RequestId>6c725a19-a142-5b77-94f9-1055a9ea04e7</RequestId>
</ResponseMetadata>
</GetEndpointAttributesResponse>"""
LIST_ENDPOINTS_BY_PLATFORM_APPLICATION_TEMPLATE = """<ListEndpointsByPlatformApplicationResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<ListEndpointsByPlatformApplicationResult>
<Endpoints>
{% for endpoint in endpoints %}
<member>
<EndpointArn>{{ endpoint.arn }}</EndpointArn>
<Attributes>
{% for attribute in endpoint.attributes %}
<entry>
<key>{{ attribute }}</key>
<value>{{ endpoint.attributes[attribute] }}</value>
</entry>
{% endfor %}
</Attributes>
</member>
{% endfor %}
</Endpoints>
</ListEndpointsByPlatformApplicationResult>
<ResponseMetadata>
<RequestId>9a48768c-dac8-5a60-aec0-3cc27ea08d96</RequestId>
</ResponseMetadata>
</ListEndpointsByPlatformApplicationResponse>"""
GET_PLATFORM_APPLICATION_ATTRIBUTES_TEMPLATE = """<GetPlatformApplicationAttributesResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<GetPlatformApplicationAttributesResult>
<Attributes>
{% for attribute in application.attributes %}
<entry>
<key>{{ attribute }}</key>
<value>{{ application.attributes[attribute] }}</value>
</entry>
{% endfor %}
</Attributes>
</GetPlatformApplicationAttributesResult>
<ResponseMetadata>
<RequestId>74848df2-87f6-55ed-890c-c7be80442462</RequestId>
</ResponseMetadata>
</GetPlatformApplicationAttributesResponse>"""
PUBLISH_TEMPLATE = """<PublishResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<PublishResult>
<MessageId>{{ message_id }}</MessageId>
</PublishResult>
<ResponseMetadata>
<RequestId>f187a3c1-376f-11df-8963-01868b7c937a</RequestId>
</ResponseMetadata>
</PublishResponse>"""
SET_ENDPOINT_ATTRIBUTES_TEMPLATE = """<SetEndpointAttributesResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<ResponseMetadata>
<RequestId>2fe0bfc7-3e85-5ee5-a9e2-f58b35e85f6a</RequestId>
</ResponseMetadata>
</SetEndpointAttributesResponse>"""
SET_PLATFORM_APPLICATION_ATTRIBUTES_TEMPLATE = """<SetPlatformApplicationAttributesResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<ResponseMetadata>
<RequestId>cf577bcc-b3dc-5463-88f1-3180b9412395</RequestId>
</ResponseMetadata>
</SetPlatformApplicationAttributesResponse>"""
SUBSCRIBE_TEMPLATE = """<SubscribeResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<SubscribeResult>
<SubscriptionArn>{{ subscription.arn }}</SubscriptionArn>
</SubscribeResult>
<ResponseMetadata>
<RequestId>c4407779-24a4-56fa-982c-3d927f93a775</RequestId>
</ResponseMetadata>
</SubscribeResponse>"""
UNSUBSCRIBE_TEMPLATE = """<UnsubscribeResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<ResponseMetadata>
<RequestId>18e0ac39-3776-11df-84c0-b93cc1666b84</RequestId>
</ResponseMetadata>
</UnsubscribeResponse>"""
LIST_SUBSCRIPTIONS_TEMPLATE = """<ListSubscriptionsResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<ListSubscriptionsResult>
<Subscriptions>
{% for subscription in subscriptions %}
<member>
<TopicArn>{{ subscription.topic.arn }}</TopicArn>
<Protocol>{{ subscription.protocol }}</Protocol>
<SubscriptionArn>{{ subscription.arn }}</SubscriptionArn>
<Owner>{{ subscription.account_id }}</Owner>
<Endpoint>{{ subscription.endpoint }}</Endpoint>
</member>
{% endfor %}
</Subscriptions>
{% if next_token %}
<NextToken>{{ next_token }}</NextToken>
{% endif %}
</ListSubscriptionsResult>
<ResponseMetadata>
<RequestId>384ac68d-3775-11df-8963-01868b7c937a</RequestId>
</ResponseMetadata>
</ListSubscriptionsResponse>"""
LIST_SUBSCRIPTIONS_BY_TOPIC_TEMPLATE = """<ListSubscriptionsByTopicResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<ListSubscriptionsByTopicResult>
<Subscriptions>
{% for subscription in subscriptions %}
<member>
<TopicArn>{{ subscription.topic.arn }}</TopicArn>
<Protocol>{{ subscription.protocol }}</Protocol>
<SubscriptionArn>{{ subscription.arn }}</SubscriptionArn>
<Owner>{{ subscription.account_id }}</Owner>
<Endpoint>{{ subscription.endpoint }}</Endpoint>
</member>
{% endfor %}
</Subscriptions>
{% if next_token %}
<NextToken>{{ next_token }}</NextToken>
{% endif %}
</ListSubscriptionsByTopicResult>
<ResponseMetadata>
<RequestId>384ac68d-3775-11df-8963-01868b7c937a</RequestId>
</ResponseMetadata>
</ListSubscriptionsByTopicResponse>"""
| apache-2.0 |
ostroproject/meta-iotqa | lib/oeqa/runtime/nodejs/soletta_platform_service_upstream.py | 3 | 5862 | import os
import sys
import shutil
import subprocess
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.decorators import tag
from get_source import get_test_module_repo
CONST_PATH = os.path.dirname(os.path.realpath(__file__))
def copy_test_files(self):
'''
Copy all files related to testing to the target device.
@fn copy_test_files
@param self
'''
self.local_repo_path = '/tmp/soletta'
self.repo_test_dir = os.path.join(CONST_PATH, 'files')
self.target_path = '/usr/lib/node_modules/'
os.chdir(self.repo_test_dir)
os.mkdir('soletta-tests')
copy_list = ['bindings/nodejs/', 'node_modules',
'lowlevel.js', 'index.js', 'package.json']
for single_file in copy_list:
single_file_path = os.path.join(self.local_repo_path, single_file)
if os.path.isfile(single_file_path):
shutil.copyfile(os.path.join(self.local_repo_path, single_file),
os.path.join(self.repo_test_dir, 'soletta-tests',
single_file))
elif os.path.isdir(single_file_path):
shutil.copytree(os.path.join(self.local_repo_path, single_file),
os.path.join(self.repo_test_dir, 'soletta-tests',
single_file))
os.system('cp %s/solettaplatform/getresult.js \
%s/soletta-tests/bindings/nodejs/tests' %
(self.repo_test_dir, self.repo_test_dir)
)
compact_cmd = 'tar -cf soletta-tests.tar soletta-tests'
os.system(compact_cmd)
cpstatus = self.target.copy_to(
os.path.join(
self.repo_test_dir,
'soletta-tests.tar'
),
self.target_path
)
if cpstatus[0] != 0:
sys.stderr.write(
'\nFailed to copy soletta-tests to the target device'
)
sys.exit(1)
self.target.run('cd /usr/lib/node_modules; tar -xf soletta-tests.tar')
cpstatus1 = self.target.copy_to(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'soletta_platform_service_upstream_parser_log.py'),
os.path.join(
self.target_path,
'soletta-tests/bindings/nodejs/'
)
)
if cpstatus1[0] != 0:
sys.stderr.write(
'\nFailed to copy soletta_platform_service_upstream_parser_log.py \
to the target device'
)
sys.exit(1)
cpstatus2 = self.target.copy_to(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'update_setup_suite_js.py'),
os.path.join(
self.target_path,
'soletta-tests/bindings/nodejs'
)
)
if cpstatus2[0] != 0:
sys.stderr.write(
'\nFailed to copy update_setup_suite_js.py to the target device'
)
sys.exit(1)
@tag(TestType='FVT', FeatureID='IOTOS-1157')
class solettaplatformServiceApiTest(oeRuntimeTest):
'''
@class solettaplatformServiceApiTest
Back up setup.js and suite.js, then update them for testing
'''
def setUp(self):
'''
Copy all files related to testing to the target device.
@fn setUp
@param self
'''
# Download the repository of soletta
sys.stdout.write('\nDownloading the repository of soletta...')
sys.stdout.flush()
soletta_url = 'https://github.com/solettaproject/soletta.git'
get_test_module_repo(soletta_url, 'soletta')
sys.stdout.write('\nCopying necessary files to target device...')
sys.stdout.flush()
# Copy all files related to testing to device
copy_test_files(self)
sys.stdout.write(
'\nCopy all files related to testing to target device done!'
)
sys.stdout.flush()
# Update setup.js and suite.js
self.target.run('python %s/soletta-tests/bindings/nodejs/update_setup_suite_js.py' %
self.target_path
)
@tag(CasesNumber=1)
def test_sol_platform_service_api(self):
'''
Execute the soletta upstream test cases.
@fn test_sol_platform_service_api
@param self
'''
sys.stdout.write(
'\nExecuting soletta upstream test cases...'
)
sys.stdout.flush()
run_grunt_cmd = ''.join([
'cd ',
self.target_path,
'soletta-tests/bindings/nodejs; node tests/suite.js'
])
format_result_cmd = ''.join([
'python ',
self.target_path,
'soletta-tests/bindings/nodejs/soletta_platform_service_upstream_parser_log.py'
])
(status, output) = self.target.run(run_grunt_cmd)
sys.stdout.write('\r' + ' ' * 78 + '\r')
sys.stdout.write(''.join(['\n', output]))
sys.stdout.flush()
(status, output) = self.target.run(format_result_cmd)
sys.stdout.write('\r' + ' ' * 78 + '\r')
sys.stderr.write(''.join(['\n', output, '\n']))
sys.stdout.flush()
def tearDown(self):
'''
Clean work: remove all the files downloaded on host and
copied to the target device during the test.
@fn tearDown
@param self
'''
sys.stdout.write("\nClean test files on host")
sys.stdout.flush()
os.system('rm -rf %s/soletta-tests %s/soletta-tests.tar' %
(self.repo_test_dir, self.repo_test_dir)
)
sys.stdout.write("\nClean test files on device")
sys.stdout.flush()
self.target.run('rm -rf /usr/lib/node_modules/soletta-tests.tar')
self.target.run('rm -rf /usr/lib/node_modules/soletta-tests')
sys.stdout.write('\nClean all files related to testing done!!\n')
sys.stdout.flush()
##
# @}
# @}
##
| mit |
alfonsokim/nupic | tests/unit/nupic/encoders/scalarspace_test.py | 10 | 1768 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for scalar space encoder"""
import unittest2 as unittest
from nupic.encoders.scalar_space import ScalarSpaceEncoder, DeltaEncoder
class ScalarSpaceEncoderTest(unittest.TestCase):
'''Unit tests for ScalarSpaceEncoder class'''
def testScalarSpaceEncoder(self):
"""scalar space encoder"""
# use of forced=True is not recommended, but used in the example for readability; see scalar.py
sse = ScalarSpaceEncoder(1, 1, 2, False, 2, 1, 1, None, 0, False, "delta",
forced=True)
self.assertTrue(isinstance(sse, DeltaEncoder))
sse = ScalarSpaceEncoder(1, 1, 2, False, 2, 1, 1, None, 0, False, "absolute",
forced=True)
self.assertFalse(isinstance(sse, DeltaEncoder))
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
edgarRd/incubator-airflow | airflow/ti_deps/deps/base_ti_dep.py | 15 | 5920 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import namedtuple
from airflow.utils.db import provide_session
class BaseTIDep(object):
"""
Abstract base class for dependencies that must be satisfied in order for task
instances to run. For example, a task that can only run if a certain number of its
upstream tasks succeed. This is an abstract class and must be subclassed to be used.
"""
# Whether this dependency can be ignored by the context in which it is
# evaluated. Needed because some dependencies should never be ignorable,
# regardless of context.
IGNOREABLE = False
# Whether this dependency is not a global task instance dependency but specific
# to some tasks (e.g. depends_on_past is not specified by all tasks).
IS_TASK_DEP = False
def __init__(self):
pass
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return hash(type(self))
def __repr__(self):
return "<TIDep({self.name})>".format(self=self)
@property
def name(self):
"""
The human-readable name for the dependency. Use the classname as the default name
if this method is not overridden in the subclass.
"""
return getattr(self, 'NAME', self.__class__.__name__)
def _get_dep_statuses(self, ti, session, dep_context=None):
"""
Abstract method that returns an iterable of TIDepStatus objects that describe
whether the given task instance has this dependency met.
For example a subclass could return an iterable of TIDepStatus objects, each one
representing if each of the passed in task's upstream tasks succeeded or not.
:param ti: the task instance to get the dependency status for
:type ti: TaskInstance
:param session: database session
:type session: Session
:param dep_context: the context for which this dependency should be evaluated for
:type dep_context: DepContext
"""
raise NotImplementedError
@provide_session
def get_dep_statuses(self, ti, session, dep_context=None):
"""
Wrapper around the private _get_dep_statuses method that contains some global
checks for all dependencies.
:param ti: the task instance to get the dependency status for
:type ti: TaskInstance
:param session: database session
:type session: Session
:param dep_context: the context for which this dependency should be evaluated for
:type dep_context: DepContext
"""
# this avoids a circular dependency
from airflow.ti_deps.dep_context import DepContext
if dep_context is None:
dep_context = DepContext()
if self.IGNOREABLE and dep_context.ignore_all_deps:
yield self._passing_status(
reason="Context specified all dependencies should be ignored.")
return
if self.IS_TASK_DEP and dep_context.ignore_task_deps:
yield self._passing_status(
reason="Context specified all task dependencies should be ignored.")
return
for dep_status in self._get_dep_statuses(ti, session, dep_context):
yield dep_status
@provide_session
def is_met(self, ti, session, dep_context=None):
"""
Returns whether or not this dependency is met for a given task instance. A
dependency is considered met if all of the dependency statuses it reports are
passing.
:param ti: the task instance to see if this dependency is met for
:type ti: TaskInstance
:param session: database session
:type session: Session
:param dep_context: The context this dependency is being checked under that stores
state that can be used by this dependency.
:type dep_context: BaseDepContext
"""
return all(status.passed for status in
self.get_dep_statuses(ti, session, dep_context))
@provide_session
def get_failure_reasons(self, ti, session, dep_context=None):
"""
Returns an iterable of strings that explain why this dependency wasn't met.
:param ti: the task instance to see if this dependency is met for
:type ti: TaskInstance
:param session: database session
:type session: Session
:param dep_context: The context this dependency is being checked under that stores
state that can be used by this dependency.
:type dep_context: BaseDepContext
"""
for dep_status in self.get_dep_statuses(ti, session, dep_context):
if not dep_status.passed:
yield dep_status.reason
def _failing_status(self, reason=''):
return TIDepStatus(self.name, False, reason)
def _passing_status(self, reason=''):
return TIDepStatus(self.name, True, reason)
# Dependency status for a specific task instance indicating whether or not the task
# instance passed the dependency.
TIDepStatus = namedtuple('TIDepStatus', ['dep_name', 'passed', 'reason'])
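# Illustrative sketch (not part of Airflow): a minimal concrete dependency
# demonstrating the _get_dep_statuses contract. The class name and reason
# text are hypothetical; real implementations live in airflow/ti_deps/deps/.
class _ExampleAlwaysMetDep(BaseTIDep):
    NAME = "Example Always-Met Dependency"
    IGNOREABLE = True

    def _get_dep_statuses(self, ti, session, dep_context=None):
        # A real subclass would inspect ``ti`` and ``session`` here.
        yield self._passing_status(reason="This dependency is always met.")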
| apache-2.0 |
johnobrien/PyPractice | pipeg/tabulator3.py | 4 | 2716 | #!/usr/bin/env python3
# Copyright © 2012-13 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version. It is provided for
# educational purposes and is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import sys
if sys.version_info[:2] < (3, 2):
from xml.sax.saxutils import escape
else:
from html import escape
WINNERS = ("Nikolai Andrianov", "Matt Biondi", "Bjørn Dæhlie",
"Birgit Fischer", "Sawao Kato", "Larisa Latynina", "Carl Lewis",
"Michael Phelps", "Mark Spitz", "Jenny Thompson")
def main():
htmlLayout = Layout(html_tabulator)
for rows in range(2, 6):
print(htmlLayout.tabulate(rows, WINNERS))
textLayout = Layout(text_tabulator)
for rows in range(2, 6):
print(textLayout.tabulate(rows, WINNERS))
class Layout:
def __init__(self, tabulator):
self.tabulator = tabulator
def tabulate(self, rows, items):
return self.tabulator(rows, items)
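# Any callable with the signature (rows, items) -> str can serve as the
# tabulator strategy, so layouts are composed rather than subclassed, e.g.
# (illustrative): Layout(text_tabulator).tabulate(3, WINNERS).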
def html_tabulator(rows, items):
columns, remainder = divmod(len(items), rows)
if remainder:
columns += 1
column = 0
table = ['<table border="1">\n']
for item in items:
if column == 0:
table.append("<tr>")
table.append("<td>{}</td>".format(escape(str(item))))
column += 1
if column == columns:
table.append("</tr>\n")
column %= columns
if table[-1][-1] != "\n":
table.append("</tr>\n")
table.append("</table>\n")
return "".join(table)
def text_tabulator(rows, items):
columns, remainder = divmod(len(items), rows)
if remainder:
columns += 1
remainder = (rows * columns) - len(items)
if remainder == columns:
remainder = 0
column = columnWidth = 0
for item in items:
columnWidth = max(columnWidth, len(item))
columnDivider = ("-" * (columnWidth + 2)) + "+"
divider = "+" + (columnDivider * columns) + "\n"
table = [divider]
for item in items + (("",) * remainder):
if column == 0:
table.append("|")
table.append(" {:<{}} |".format(item, columnWidth))
column += 1
if column == columns:
table.append("\n")
column %= columns
table.append(divider)
return "".join(table)
if __name__ == "__main__":
main()
| mit |
cloudera/hue | desktop/libs/hadoop/src/hadoop/fs/fsutils.py | 2 | 3099 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import stat as stat_module
logger = logging.getLogger(__name__)
def do_overwrite_save(fs, path, data):
def copy_data(path_dest):
try:
fs.create(path_dest, overwrite=False, data=data)
logging.info("Wrote to " + path_dest)
except Exception as e:
# An error occurred in writing, we should clean up
# the tmp file if it exists, before re-raising
try:
fs.remove(path_dest, skip_trash=True)
except:
logger.exception('failed to remove %s' % path_dest)
raise e
_do_overwrite(fs, path, copy_data)
def remove_header(fs, path):
def copy_data(path_dest):
fs.copyfile(path, path_dest, skip_header=True)
_do_overwrite(fs, path, copy_data)
def _do_overwrite(fs, path, copy_data):
"""
Atomically (best-effort) save the specified data to the given path
on the filesystem.
"""
# TODO(todd) Should probably do an advisory permissions check here to
# see if we're likely to fail (eg make sure we own the file
# and can write to the dir)
# First write somewhat-kinda-atomically to a staging file
# so that if we fail, we don't clobber the old one
path_dest = path + "._hue_new"
# Copy the data to destination
copy_data(path_dest)
# Try to match the permissions and ownership of the old file
cur_stats = fs.stats(path)
try:
fs.do_as_superuser(fs.chmod, path_dest, stat_module.S_IMODE(cur_stats['mode']))
except:
logging.exception("Could not chmod new file %s to match old file %s" % (path_dest, path))
# but not the end of the world - keep going
try:
fs.do_as_superuser(fs.chown, path_dest, cur_stats['user'], cur_stats['group'])
except:
logging.exception("Could not chown new file %s to match old file %s" % (path_dest, path))
# but not the end of the world - keep going
# Now delete the old - nothing we can do here to recover
fs.remove(path, skip_trash=True)
# Now move the new one into place
# If this fails, then we have no reason to assume
# we can do anything to recover, since we know the
# destination shouldn't already exist (we just deleted it above)
fs.rename(path_dest, path)
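def _example_overwrite(fs):
    """Illustrative sketch only (not used by Hue): atomically replace a
    file's contents while preserving its mode and ownership. The path and
    data are hypothetical; ``fs`` is any filesystem object exposing the
    API used above."""
    do_overwrite_save(fs, '/user/demo/data.csv', 'col_a,col_b\n1,2\n')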
| apache-2.0 |
archerjd/modularcombat_v3.0.0 | 3.0.0/src/thirdparty/protobuf-2.3.0/gtest/test/gtest_xml_output_unittest.py | 306 | 9711 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import errno
import os
import sys
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_FLAG = "--gtest_output"
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_"
SUPPORTS_STACK_TRACES = False
if SUPPORTS_STACK_TRACES:
STACK_TRACE_TEMPLATE = "\nStack trace:\n*"
else:
STACK_TRACE_TEMPLATE = ""
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="15" failures="4" disabled="2" errors="0" time="*" name="AllTests">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="Fails" status="run" time="*" classname="FailedTest">
<failure message="Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" time="*" classname="MixedResultTest">
<failure message="Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
<failure message="Value of: 3
Expected: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest">
<failure message="Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest">
<failure message="Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
<testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/>
<testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/>
<testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
<testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/>
</testsuite>
</testsuites>""" % {'stack': STACK_TRACE_TEMPLATE}
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*" name="AllTests">
</testsuites>"""
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
"""
Unit test for Google Test's XML output functionality.
"""
def testNonEmptyXmlOutput(self):
"""
Runs a test program that generates non-empty XML output, and
verifies that the output matches the expected XML document.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)
def testEmptyXmlOutput(self):
"""
Runs a test program that generates empty XML output, and
verifies that the output matches the expected XML document.
"""
self._TestXmlOutput("gtest_no_test_unittest",
EXPECTED_EMPTY_XML, 0)
def testDefaultOutputFile(self):
"""
Confirms that Google Test produces an XML output file with the expected
default name if no name is explicitly specified.
"""
output_file = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_DEFAULT_OUTPUT_FILE)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
"gtest_no_test_unittest")
try:
os.remove(output_file)
except OSError, e:
if e.errno != errno.ENOENT:
raise
p = gtest_test_utils.Subprocess(
[gtest_prog_path, "%s=xml" % GTEST_OUTPUT_FLAG],
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
self.assert_(os.path.isfile(output_file))
def testSuppressedXmlOutput(self):
"""
Tests that no XML file is generated if the default XML listener is
shut down before RUN_ALL_TESTS is invoked.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_PROGRAM_NAME + "out.xml")
if os.path.isfile(xml_path):
os.remove(xml_path)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)
command = [gtest_prog_path,
"%s=xml:%s" % (GTEST_OUTPUT_FLAG, xml_path),
"--shut_down_xml"]
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
self.assert_(False,
"%s was killed by signal %d" % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(1, p.exit_code,
"'%s' exited with code %s, which doesn't match "
"the expected exit code %s."
% (command, p.exit_code, 1))
self.assert_(not os.path.isfile(xml_path))
def _TestXmlOutput(self, gtest_prog_name, expected_xml, expected_exit_code):
"""
Asserts that the XML document generated by running the program
gtest_prog_name matches expected_xml, a string containing another
XML document. Furthermore, the program's exit code must be
expected_exit_code.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
gtest_prog_name + "out.xml")
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
command = [gtest_prog_path, "%s=xml:%s" % (GTEST_OUTPUT_FLAG, xml_path)]
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
self.assert_(False,
"%s was killed by signal %d" % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(expected_exit_code, p.exit_code,
"'%s' exited with code %s, which doesn't match "
"the expected exit code %s."
% (command, p.exit_code, expected_exit_code))
expected = minidom.parseString(expected_xml)
actual = minidom.parse(xml_path)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == '__main__':
os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
gtest_test_utils.Main()
| artistic-2.0 |
yekexuan/shadowsocks | shadowsocks/crypto/table.py | 1044 | 8108 | # !/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import string
import struct
import hashlib
__all__ = ['ciphers']
cached_tables = {}
if hasattr(string, 'maketrans'):
maketrans = string.maketrans
translate = string.translate
else:
maketrans = bytes.maketrans
translate = bytes.translate
def get_table(key):
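# Derive a deterministic 256-byte permutation from the key: hash the key
# with MD5, unpack the first eight bytes as integer ``a`` (``b`` is unused),
# then repeatedly stable-sort the identity table with a key-dependent
# comparison so the same password always yields the same table.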
m = hashlib.md5()
m.update(key)
s = m.digest()
a, b = struct.unpack('<QQ', s)
table = maketrans(b'', b'')
table = [table[i: i + 1] for i in range(len(table))]
for i in range(1, 1024):
table.sort(key=lambda x: int(a % (ord(x) + i)))
return table
def init_table(key):
if key not in cached_tables:
encrypt_table = b''.join(get_table(key))
decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
cached_tables[key] = [encrypt_table, decrypt_table]
return cached_tables[key]
class TableCipher(object):
def __init__(self, cipher_name, key, iv, op):
self._encrypt_table, self._decrypt_table = init_table(key)
self._op = op
def update(self, data):
if self._op:
return translate(data, self._encrypt_table)
else:
return translate(data, self._decrypt_table)
ciphers = {
'table': (0, 0, TableCipher)
}
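def _example_roundtrip():
    """Illustrative sketch only (not part of shadowsocks): encryption and
    decryption with the same key are inverses."""
    cipher = TableCipher('table', b'secret', b'', 1)    # op=1 -> encrypt
    decipher = TableCipher('table', b'secret', b'', 0)  # op=0 -> decrypt
    assert decipher.update(cipher.update(b'hello')) == b'hello'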
def test_table_result():
from shadowsocks.common import ord
target1 = [
[60, 53, 84, 138, 217, 94, 88, 23, 39, 242, 219, 35, 12, 157, 165, 181,
255, 143, 83, 247, 162, 16, 31, 209, 190, 171, 115, 65, 38, 41, 21,
245, 236, 46, 121, 62, 166, 233, 44, 154, 153, 145, 230, 49, 128, 216,
173, 29, 241, 119, 64, 229, 194, 103, 131, 110, 26, 197, 218, 59, 204,
56, 27, 34, 141, 221, 149, 239, 192, 195, 24, 155, 170, 183, 11, 254,
213, 37, 137, 226, 75, 203, 55, 19, 72, 248, 22, 129, 33, 175, 178,
10, 198, 71, 77, 36, 113, 167, 48, 2, 117, 140, 142, 66, 199, 232,
243, 32, 123, 54, 51, 82, 57, 177, 87, 251, 150, 196, 133, 5, 253,
130, 8, 184, 14, 152, 231, 3, 186, 159, 76, 89, 228, 205, 156, 96,
163, 146, 18, 91, 132, 85, 80, 109, 172, 176, 105, 13, 50, 235, 127,
0, 189, 95, 98, 136, 250, 200, 108, 179, 211, 214, 106, 168, 78, 79,
74, 210, 30, 73, 201, 151, 208, 114, 101, 174, 92, 52, 120, 240, 15,
169, 220, 182, 81, 224, 43, 185, 40, 99, 180, 17, 212, 158, 42, 90, 9,
191, 45, 6, 25, 4, 222, 67, 126, 1, 116, 124, 206, 69, 61, 7, 68, 97,
202, 63, 244, 20, 28, 58, 93, 134, 104, 144, 227, 147, 102, 118, 135,
148, 47, 238, 86, 112, 122, 70, 107, 215, 100, 139, 223, 225, 164,
237, 111, 125, 207, 160, 187, 246, 234, 161, 188, 193, 249, 252],
[151, 205, 99, 127, 201, 119, 199, 211, 122, 196, 91, 74, 12, 147, 124,
180, 21, 191, 138, 83, 217, 30, 86, 7, 70, 200, 56, 62, 218, 47, 168,
22, 107, 88, 63, 11, 95, 77, 28, 8, 188, 29, 194, 186, 38, 198, 33,
230, 98, 43, 148, 110, 177, 1, 109, 82, 61, 112, 219, 59, 0, 210, 35,
215, 50, 27, 103, 203, 212, 209, 235, 93, 84, 169, 166, 80, 130, 94,
164, 165, 142, 184, 111, 18, 2, 141, 232, 114, 6, 131, 195, 139, 176,
220, 5, 153, 135, 213, 154, 189, 238, 174, 226, 53, 222, 146, 162,
236, 158, 143, 55, 244, 233, 96, 173, 26, 206, 100, 227, 49, 178, 34,
234, 108, 207, 245, 204, 150, 44, 87, 121, 54, 140, 118, 221, 228,
155, 78, 3, 239, 101, 64, 102, 17, 223, 41, 137, 225, 229, 66, 116,
171, 125, 40, 39, 71, 134, 13, 193, 129, 247, 251, 20, 136, 242, 14,
36, 97, 163, 181, 72, 25, 144, 46, 175, 89, 145, 113, 90, 159, 190,
15, 183, 73, 123, 187, 128, 248, 252, 152, 24, 197, 68, 253, 52, 69,
117, 57, 92, 104, 157, 170, 214, 81, 60, 133, 208, 246, 172, 23, 167,
160, 192, 76, 161, 237, 45, 4, 58, 10, 182, 65, 202, 240, 185, 241,
79, 224, 132, 51, 42, 126, 105, 37, 250, 149, 32, 243, 231, 67, 179,
48, 9, 106, 216, 31, 249, 19, 85, 254, 156, 115, 255, 120, 75, 16]]
target2 = [
[124, 30, 170, 247, 27, 127, 224, 59, 13, 22, 196, 76, 72, 154, 32,
209, 4, 2, 131, 62, 101, 51, 230, 9, 166, 11, 99, 80, 208, 112, 36,
248, 81, 102, 130, 88, 218, 38, 168, 15, 241, 228, 167, 117, 158, 41,
10, 180, 194, 50, 204, 243, 246, 251, 29, 198, 219, 210, 195, 21, 54,
91, 203, 221, 70, 57, 183, 17, 147, 49, 133, 65, 77, 55, 202, 122,
162, 169, 188, 200, 190, 125, 63, 244, 96, 31, 107, 106, 74, 143, 116,
148, 78, 46, 1, 137, 150, 110, 181, 56, 95, 139, 58, 3, 231, 66, 165,
142, 242, 43, 192, 157, 89, 175, 109, 220, 128, 0, 178, 42, 255, 20,
214, 185, 83, 160, 253, 7, 23, 92, 111, 153, 26, 226, 33, 176, 144,
18, 216, 212, 28, 151, 71, 206, 222, 182, 8, 174, 205, 201, 152, 240,
155, 108, 223, 104, 239, 98, 164, 211, 184, 34, 193, 14, 114, 187, 40,
254, 12, 67, 93, 217, 6, 94, 16, 19, 82, 86, 245, 24, 197, 134, 132,
138, 229, 121, 5, 235, 238, 85, 47, 103, 113, 179, 69, 250, 45, 135,
156, 25, 61, 75, 44, 146, 189, 84, 207, 172, 119, 53, 123, 186, 120,
171, 68, 227, 145, 136, 100, 90, 48, 79, 159, 149, 39, 213, 236, 126,
52, 60, 225, 199, 105, 73, 233, 252, 118, 215, 35, 115, 64, 37, 97,
129, 161, 177, 87, 237, 141, 173, 191, 163, 140, 234, 232, 249],
[117, 94, 17, 103, 16, 186, 172, 127, 146, 23, 46, 25, 168, 8, 163, 39,
174, 67, 137, 175, 121, 59, 9, 128, 179, 199, 132, 4, 140, 54, 1, 85,
14, 134, 161, 238, 30, 241, 37, 224, 166, 45, 119, 109, 202, 196, 93,
190, 220, 69, 49, 21, 228, 209, 60, 73, 99, 65, 102, 7, 229, 200, 19,
82, 240, 71, 105, 169, 214, 194, 64, 142, 12, 233, 88, 201, 11, 72,
92, 221, 27, 32, 176, 124, 205, 189, 177, 246, 35, 112, 219, 61, 129,
170, 173, 100, 84, 242, 157, 26, 218, 20, 33, 191, 155, 232, 87, 86,
153, 114, 97, 130, 29, 192, 164, 239, 90, 43, 236, 208, 212, 185, 75,
210, 0, 81, 227, 5, 116, 243, 34, 18, 182, 70, 181, 197, 217, 95, 183,
101, 252, 248, 107, 89, 136, 216, 203, 68, 91, 223, 96, 141, 150, 131,
13, 152, 198, 111, 44, 222, 125, 244, 76, 251, 158, 106, 24, 42, 38,
77, 2, 213, 207, 249, 147, 113, 135, 245, 118, 193, 47, 98, 145, 66,
160, 123, 211, 165, 78, 204, 80, 250, 110, 162, 48, 58, 10, 180, 55,
231, 79, 149, 74, 62, 50, 148, 143, 206, 28, 15, 57, 159, 139, 225,
122, 237, 138, 171, 36, 56, 115, 63, 144, 154, 6, 230, 133, 215, 41,
184, 22, 104, 254, 234, 253, 187, 226, 247, 188, 156, 151, 40, 108,
51, 83, 178, 52, 3, 31, 255, 195, 53, 235, 126, 167, 120]]
encrypt_table = b''.join(get_table(b'foobar!'))
decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
for i in range(0, 256):
assert (target1[0][i] == ord(encrypt_table[i]))
assert (target1[1][i] == ord(decrypt_table[i]))
encrypt_table = b''.join(get_table(b'barfoo!'))
decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
for i in range(0, 256):
assert (target2[0][i] == ord(encrypt_table[i]))
assert (target2[1][i] == ord(decrypt_table[i]))
def test_encryption():
from shadowsocks.crypto import util
cipher = TableCipher('table', b'test', b'', 1)
decipher = TableCipher('table', b'test', b'', 0)
util.run_cipher(cipher, decipher)
if __name__ == '__main__':
test_table_result()
test_encryption()
| apache-2.0 |
behnam/python-unidecode | unidecode/x013.py | 252 | 4247 | data = (
'ja', # 0x00
'ju', # 0x01
'ji', # 0x02
'jaa', # 0x03
'jee', # 0x04
'je', # 0x05
'jo', # 0x06
'jwa', # 0x07
'ga', # 0x08
'gu', # 0x09
'gi', # 0x0a
'gaa', # 0x0b
'gee', # 0x0c
'ge', # 0x0d
'go', # 0x0e
'[?]', # 0x0f
'gwa', # 0x10
'[?]', # 0x11
'gwi', # 0x12
'gwaa', # 0x13
'gwee', # 0x14
'gwe', # 0x15
'[?]', # 0x16
'[?]', # 0x17
'gga', # 0x18
'ggu', # 0x19
'ggi', # 0x1a
'ggaa', # 0x1b
'ggee', # 0x1c
'gge', # 0x1d
'ggo', # 0x1e
'[?]', # 0x1f
'tha', # 0x20
'thu', # 0x21
'thi', # 0x22
'thaa', # 0x23
'thee', # 0x24
'the', # 0x25
'tho', # 0x26
'thwa', # 0x27
'cha', # 0x28
'chu', # 0x29
'chi', # 0x2a
'chaa', # 0x2b
'chee', # 0x2c
'che', # 0x2d
'cho', # 0x2e
'chwa', # 0x2f
'pha', # 0x30
'phu', # 0x31
'phi', # 0x32
'phaa', # 0x33
'phee', # 0x34
'phe', # 0x35
'pho', # 0x36
'phwa', # 0x37
'tsa', # 0x38
'tsu', # 0x39
'tsi', # 0x3a
'tsaa', # 0x3b
'tsee', # 0x3c
'tse', # 0x3d
'tso', # 0x3e
'tswa', # 0x3f
'tza', # 0x40
'tzu', # 0x41
'tzi', # 0x42
'tzaa', # 0x43
'tzee', # 0x44
'tze', # 0x45
'tzo', # 0x46
'[?]', # 0x47
'fa', # 0x48
'fu', # 0x49
'fi', # 0x4a
'faa', # 0x4b
'fee', # 0x4c
'fe', # 0x4d
'fo', # 0x4e
'fwa', # 0x4f
'pa', # 0x50
'pu', # 0x51
'pi', # 0x52
'paa', # 0x53
'pee', # 0x54
'pe', # 0x55
'po', # 0x56
'pwa', # 0x57
'rya', # 0x58
'mya', # 0x59
'fya', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
' ', # 0x61
'.', # 0x62
',', # 0x63
';', # 0x64
':', # 0x65
':: ', # 0x66
'?', # 0x67
'//', # 0x68
'1', # 0x69
'2', # 0x6a
'3', # 0x6b
'4', # 0x6c
'5', # 0x6d
'6', # 0x6e
'7', # 0x6f
'8', # 0x70
'9', # 0x71
'10+', # 0x72
'20+', # 0x73
'30+', # 0x74
'40+', # 0x75
'50+', # 0x76
'60+', # 0x77
'70+', # 0x78
'80+', # 0x79
'90+', # 0x7a
'100+', # 0x7b
'10,000+', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'[?]', # 0x82
'[?]', # 0x83
'[?]', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'[?]', # 0x87
'[?]', # 0x88
'[?]', # 0x89
'[?]', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'[?]', # 0x90
'[?]', # 0x91
'[?]', # 0x92
'[?]', # 0x93
'[?]', # 0x94
'[?]', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'[?]', # 0x99
'[?]', # 0x9a
'[?]', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'a', # 0xa0
'e', # 0xa1
'i', # 0xa2
'o', # 0xa3
'u', # 0xa4
'v', # 0xa5
'ga', # 0xa6
'ka', # 0xa7
'ge', # 0xa8
'gi', # 0xa9
'go', # 0xaa
'gu', # 0xab
'gv', # 0xac
'ha', # 0xad
'he', # 0xae
'hi', # 0xaf
'ho', # 0xb0
'hu', # 0xb1
'hv', # 0xb2
'la', # 0xb3
'le', # 0xb4
'li', # 0xb5
'lo', # 0xb6
'lu', # 0xb7
'lv', # 0xb8
'ma', # 0xb9
'me', # 0xba
'mi', # 0xbb
'mo', # 0xbc
'mu', # 0xbd
'na', # 0xbe
'hna', # 0xbf
'nah', # 0xc0
'ne', # 0xc1
'ni', # 0xc2
'no', # 0xc3
'nu', # 0xc4
'nv', # 0xc5
'qua', # 0xc6
'que', # 0xc7
'qui', # 0xc8
'quo', # 0xc9
'quu', # 0xca
'quv', # 0xcb
'sa', # 0xcc
's', # 0xcd
'se', # 0xce
'si', # 0xcf
'so', # 0xd0
'su', # 0xd1
'sv', # 0xd2
'da', # 0xd3
'ta', # 0xd4
'de', # 0xd5
'te', # 0xd6
'di', # 0xd7
'ti', # 0xd8
'do', # 0xd9
'du', # 0xda
'dv', # 0xdb
'dla', # 0xdc
'tla', # 0xdd
'tle', # 0xde
'tli', # 0xdf
'tlo', # 0xe0
'tlu', # 0xe1
'tlv', # 0xe2
'tsa', # 0xe3
'tse', # 0xe4
'tsi', # 0xe5
'tso', # 0xe6
'tsu', # 0xe7
'tsv', # 0xe8
'wa', # 0xe9
'we', # 0xea
'wi', # 0xeb
'wo', # 0xec
'wu', # 0xed
'wv', # 0xee
'ya', # 0xef
'ye', # 0xf0
'yi', # 0xf1
'yo', # 0xf2
'yu', # 0xf3
'yv', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
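# Illustrative usage (not part of this module): unidecode resolves a code
# point to its offset within this table, so U+13A0 CHEROKEE LETTER A hits
# entry 0xa0 above and transliterates to 'a'.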
| gpl-2.0 |
Juniper/nova | nova/scheduler/filters/trusted_filter.py | 2 | 9200 | # Copyright (c) 2012 Intel, Inc.
# Copyright (c) 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Filter to add support for Trusted Computing Pools (EXPERIMENTAL).
Filter that only schedules tasks on a host if the integrity (trust)
of that host matches the trust requested in the ``extra_specs`` for the
flavor. The ``extra_specs`` will contain a key/value pair where the
key is ``trust:trusted_host``. The value of this pair (``trusted``/``untrusted``) must
match the integrity of that host (obtained from the Attestation
service) before the task can be scheduled on that host.
Note that the parameters to control access to the Attestation Service
are in the ``nova.conf`` file in a separate ``trusted_computing`` section. For example,
the config file will look something like:
[DEFAULT]
debug=True
...
[trusted_computing]
attestation_server=attester.mynetwork.com
Details on the specific parameters can be found in the file
``trust_attest.py``.
Details on setting up and using an Attestation Service can be found at
the Open Attestation project at:
https://github.com/OpenAttestation/OpenAttestation
"""
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import requests
import nova.conf
from nova import context
from nova.i18n import _LW
from nova import objects
from nova.scheduler import filters
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class AttestationService(object):
# Provide access wrapper to attestation server to get integrity report.
def __init__(self):
self.api_url = CONF.trusted_computing.attestation_api_url
self.host = CONF.trusted_computing.attestation_server
self.port = CONF.trusted_computing.attestation_port
self.auth_blob = CONF.trusted_computing.attestation_auth_blob
self.key_file = None
self.cert_file = None
self.ca_file = CONF.trusted_computing.attestation_server_ca_file
self.request_count = 100
# Disable verification when insecure SSL is explicitly requested;
# otherwise verify against the CA file if one was given, falling back
# to the default certificate check.
if CONF.trusted_computing.attestation_insecure_ssl:
    self.verify = False
else:
    self.verify = self.ca_file or True
self.cert = (self.cert_file, self.key_file)
def _do_request(self, method, action_url, body, headers):
# Connects to the server and issues a request.
# :returns: result data
# :raises: IOError if the request fails
action_url = "https://%s:%d%s/%s" % (self.host, self.port,
self.api_url, action_url)
try:
res = requests.request(method, action_url, data=body,
headers=headers, cert=self.cert,
verify=self.verify)
status_code = res.status_code
if status_code in (requests.codes.OK,
requests.codes.CREATED,
requests.codes.ACCEPTED,
requests.codes.NO_CONTENT):
try:
return requests.codes.OK, jsonutils.loads(res.text)
except (TypeError, ValueError):
return requests.codes.OK, res.text
return status_code, None
except requests.exceptions.RequestException:
return IOError, None
def _request(self, cmd, subcmd, hosts):
body = {}
body['count'] = len(hosts)
body['hosts'] = hosts
cooked = jsonutils.dumps(body)
headers = {}
headers['content-type'] = 'application/json'
headers['Accept'] = 'application/json'
if self.auth_blob:
headers['x-auth-blob'] = self.auth_blob
status, res = self._do_request(cmd, subcmd, cooked, headers)
return status, res
def do_attestation(self, hosts):
"""Attests compute nodes through OAT service.
:param hosts: hosts list to be attested
:returns: dictionary for trust level and validate time
"""
result = None
status, data = self._request("POST", "PollHosts", hosts)
if data is not None:
result = data.get('hosts')
return result
class ComputeAttestationCache(object):
"""Cache for compute node attestation
Caches each compute node's trust level for some time; once an entry
is out of date, the OAT service is polled to refresh it.
The OAT service may have a cache of its own; its cache validity time
should be set shorter than this filter's cache validity time.
"""
def __init__(self):
self.attestservice = AttestationService()
self.compute_nodes = {}
admin = context.get_admin_context()
# Fetch compute node list to initialize the compute_nodes,
# so that we don't need poll OAT service one by one for each
# host in the first round that scheduler invokes us.
computes = objects.ComputeNodeList.get_all(admin)
for compute in computes:
host = compute.hypervisor_hostname
self._init_cache_entry(host)
def _cache_valid(self, host):
cachevalid = False
if host in self.compute_nodes:
node_stats = self.compute_nodes.get(host)
if not timeutils.is_older_than(
node_stats['vtime'],
CONF.trusted_computing.attestation_auth_timeout):
cachevalid = True
return cachevalid
def _init_cache_entry(self, host):
self.compute_nodes[host] = {
'trust_lvl': 'unknown',
'vtime': timeutils.normalize_time(
timeutils.parse_isotime("1970-01-01T00:00:00Z"))}
def _invalidate_caches(self):
for host in self.compute_nodes:
self._init_cache_entry(host)
def _update_cache_entry(self, state):
entry = {}
host = state['host_name']
entry['trust_lvl'] = state['trust_lvl']
try:
# Normalize as naive object to interoperate with utcnow().
entry['vtime'] = timeutils.normalize_time(
timeutils.parse_isotime(state['vtime']))
except ValueError:
try:
# Mt. Wilson does not necessarily return an ISO8601 formatted
# `vtime`, so we should try to parse it as a string formatted
# datetime.
vtime = timeutils.parse_strtime(state['vtime'], fmt="%c")
entry['vtime'] = timeutils.normalize_time(vtime)
except ValueError:
# Mark the system as un-trusted if get invalid vtime.
entry['trust_lvl'] = 'unknown'
entry['vtime'] = timeutils.utcnow()
self.compute_nodes[host] = entry
def _update_cache(self):
self._invalidate_caches()
states = self.attestservice.do_attestation(
list(self.compute_nodes.keys()))
if states is None:
return
for state in states:
self._update_cache_entry(state)
def get_host_attestation(self, host):
"""Check host's trust level."""
if host not in self.compute_nodes:
self._init_cache_entry(host)
if not self._cache_valid(host):
self._update_cache()
level = self.compute_nodes.get(host).get('trust_lvl')
return level
class ComputeAttestation(object):
def __init__(self):
self.caches = ComputeAttestationCache()
def is_trusted(self, host, trust):
level = self.caches.get_host_attestation(host)
return trust == level
class TrustedFilter(filters.BaseHostFilter):
"""Trusted filter to support Trusted Compute Pools."""
def __init__(self):
self.compute_attestation = ComputeAttestation()
msg = _LW('The TrustedFilter is deprecated as it has been marked '
'experimental for some time with no tests. It will be '
'removed in the 17.0.0 Queens release.')
versionutils.report_deprecated_feature(LOG, msg)
# The hosts the instances are running on doesn't change within a request
run_filter_once_per_request = True
def host_passes(self, host_state, spec_obj):
instance_type = spec_obj.flavor
extra = (instance_type.extra_specs
if 'extra_specs' in instance_type else {})
trust = extra.get('trust:trusted_host')
host = host_state.nodename
if trust:
return self.compute_attestation.is_trusted(host, trust)
return True
| apache-2.0 |
DefyVentures/edx-platform | lms/djangoapps/dashboard/management/commands/tests/test_git_add_course.py | 101 | 8567 | """
Provide tests for git_add_course management command.
"""
import logging
import os
import shutil
import StringIO
import subprocess
import unittest
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test.utils import override_settings
from opaque_keys.edx.locations import SlashSeparatedCourseKey
import dashboard.git_import as git_import
from dashboard.git_import import GitImportError
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
TEST_MONGODB_LOG = {
'host': MONGO_HOST,
'port': MONGO_PORT_NUM,
'user': '',
'password': '',
'db': 'test_xlog',
}
FEATURES_WITH_SSL_AUTH = settings.FEATURES.copy()
FEATURES_WITH_SSL_AUTH['AUTH_USE_CERTIFICATES'] = True
@override_settings(MONGODB_LOG=TEST_MONGODB_LOG)
@unittest.skipUnless(settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'),
"ENABLE_SYSADMIN_DASHBOARD not set")
class TestGitAddCourse(ModuleStoreTestCase):
"""
Tests the git_add_course management command for proper functions.
"""
TEST_REPO = 'https://github.com/mitocw/edx4edx_lite.git'
TEST_COURSE = 'MITx/edx4edx/edx4edx'
TEST_BRANCH = 'testing_do_not_delete'
TEST_BRANCH_COURSE = SlashSeparatedCourseKey('MITx', 'edx4edx_branch', 'edx4edx')
GIT_REPO_DIR = getattr(settings, 'GIT_REPO_DIR')
def assertCommandFailureRegexp(self, regex, *args):
"""
Convenience function for testing command failures
"""
with self.assertRaises(SystemExit):
with self.assertRaisesRegexp(CommandError, regex):
call_command('git_add_course', *args,
stderr=StringIO.StringIO())
def test_command_args(self):
"""
Validate argument checking
"""
self.assertCommandFailureRegexp(
'This script requires at least one argument, the git URL')
self.assertCommandFailureRegexp(
'Expected no more than three arguments; recieved 4',  # sic: regex must match the command's own message
'blah', 'blah', 'blah', 'blah')
self.assertCommandFailureRegexp(
'Repo was not added, check log output for details',
'blah')
# Test successful import from command
if not os.path.isdir(self.GIT_REPO_DIR):
os.mkdir(self.GIT_REPO_DIR)
self.addCleanup(shutil.rmtree, self.GIT_REPO_DIR)
# Make a course dir that will be replaced with a symlink
# while we are at it.
if not os.path.isdir(self.GIT_REPO_DIR / 'edx4edx'):
os.mkdir(self.GIT_REPO_DIR / 'edx4edx')
call_command('git_add_course', self.TEST_REPO,
self.GIT_REPO_DIR / 'edx4edx_lite')
# Test with all three args (branch)
call_command('git_add_course', self.TEST_REPO,
self.GIT_REPO_DIR / 'edx4edx_lite',
self.TEST_BRANCH)
def test_add_repo(self):
"""
Various exit path tests for test_add_repo
"""
with self.assertRaisesRegexp(GitImportError, GitImportError.NO_DIR):
git_import.add_repo(self.TEST_REPO, None, None)
os.mkdir(self.GIT_REPO_DIR)
self.addCleanup(shutil.rmtree, self.GIT_REPO_DIR)
with self.assertRaisesRegexp(GitImportError, GitImportError.URL_BAD):
git_import.add_repo('foo', None, None)
with self.assertRaisesRegexp(GitImportError, GitImportError.CANNOT_PULL):
git_import.add_repo('file:///foobar.git', None, None)
# Test git repo that exists, but is "broken"
bare_repo = os.path.abspath('{0}/{1}'.format(settings.TEST_ROOT, 'bare.git'))
os.mkdir(bare_repo)
self.addCleanup(shutil.rmtree, bare_repo)
subprocess.check_output(['git', '--bare', 'init', ], stderr=subprocess.STDOUT,
cwd=bare_repo)
with self.assertRaisesRegexp(GitImportError, GitImportError.BAD_REPO):
git_import.add_repo('file://{0}'.format(bare_repo), None, None)
def test_detached_repo(self):
"""
Test repo that is in detached head state.
"""
repo_dir = self.GIT_REPO_DIR
# Test successful import from command
try:
os.mkdir(repo_dir)
except OSError:
pass
self.addCleanup(shutil.rmtree, repo_dir)
git_import.add_repo(self.TEST_REPO, repo_dir / 'edx4edx_lite', None)
subprocess.check_output(['git', 'checkout', 'HEAD~2', ],
stderr=subprocess.STDOUT,
cwd=repo_dir / 'edx4edx_lite')
with self.assertRaisesRegexp(GitImportError, GitImportError.CANNOT_PULL):
git_import.add_repo(self.TEST_REPO, repo_dir / 'edx4edx_lite', None)
def test_branching(self):
"""
Exercise branching code of import
"""
repo_dir = self.GIT_REPO_DIR
# Test successful import from command
if not os.path.isdir(repo_dir):
os.mkdir(repo_dir)
self.addCleanup(shutil.rmtree, repo_dir)
# Checkout non existent branch
with self.assertRaisesRegexp(GitImportError, GitImportError.REMOTE_BRANCH_MISSING):
git_import.add_repo(self.TEST_REPO, repo_dir / 'edx4edx_lite', 'asdfasdfasdf')
# Checkout new branch
git_import.add_repo(self.TEST_REPO,
repo_dir / 'edx4edx_lite',
self.TEST_BRANCH)
def_ms = modulestore()
# Validate that it is different than master
self.assertIsNotNone(def_ms.get_course(self.TEST_BRANCH_COURSE))
# Attempt to check out the same branch again to validate branch choosing
# works
git_import.add_repo(self.TEST_REPO,
repo_dir / 'edx4edx_lite',
self.TEST_BRANCH)
# Delete to test branching back to master
def_ms.delete_course(self.TEST_BRANCH_COURSE, ModuleStoreEnum.UserID.test)
self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))
git_import.add_repo(self.TEST_REPO,
repo_dir / 'edx4edx_lite',
'master')
self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))
self.assertIsNotNone(def_ms.get_course(SlashSeparatedCourseKey.from_deprecated_string(self.TEST_COURSE)))
def test_branch_exceptions(self):
"""
This will create conditions that exercise the bad paths in the switch_branch function.
"""
# create bare repo that we can mess with and attempt an import
bare_repo = os.path.abspath('{0}/{1}'.format(settings.TEST_ROOT, 'bare.git'))
os.mkdir(bare_repo)
self.addCleanup(shutil.rmtree, bare_repo)
subprocess.check_output(['git', '--bare', 'init', ], stderr=subprocess.STDOUT,
cwd=bare_repo)
# Build repo dir
repo_dir = self.GIT_REPO_DIR
if not os.path.isdir(repo_dir):
os.mkdir(repo_dir)
self.addCleanup(shutil.rmtree, repo_dir)
rdir = '{0}/bare'.format(repo_dir)
with self.assertRaisesRegexp(GitImportError, GitImportError.BAD_REPO):
git_import.add_repo('file://{0}'.format(bare_repo), None, None)
# Get logger for checking strings in logs
output = StringIO.StringIO()
test_log_handler = logging.StreamHandler(output)
test_log_handler.setLevel(logging.DEBUG)
glog = git_import.log
glog.addHandler(test_log_handler)
# Move remote so fetch fails
shutil.move(bare_repo, '{0}/not_bare.git'.format(settings.TEST_ROOT))
try:
git_import.switch_branch('master', rdir)
except GitImportError:
self.assertIn('Unable to fetch remote', output.getvalue())
shutil.move('{0}/not_bare.git'.format(settings.TEST_ROOT), bare_repo)
output.truncate(0)
# Replace origin with a different remote
subprocess.check_output(
['git', 'remote', 'rename', 'origin', 'blah', ],
stderr=subprocess.STDOUT, cwd=rdir
)
with self.assertRaises(GitImportError):
git_import.switch_branch('master', rdir)
self.assertIn('Getting a list of remote branches failed', output.getvalue())
| agpl-3.0 |
kaloix/home-sensor | server.py | 2 | 21927 | #!/usr/bin/env python3
import collections
import configparser
import contextlib
import csv
import datetime
import itertools
import json
import locale
import logging
import queue
import shutil
import time
import dateutil.rrule
import matplotlib.dates
import matplotlib.pyplot
import pysolar
import pytz
import api
import notify
import utility
ALLOWED_DOWNTIME = datetime.timedelta(minutes=30)
COLOR_CYCLE = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
DATA_DIR = 'data/'
INTERVAL = 60
PAUSE_WARN_FAILURE = 30 * 24 * 60 * 60
PAUSE_WARN_VALUE = 24 * 60 * 60
PLOT_INTERVAL = 10 * 60
RECORD_DAYS = 7
SUMMARY_DAYS = 183
TIMEZONE = pytz.timezone('Europe/Berlin')
WEB_DIR = '/home/kaloix/html/sensor/'
config = configparser.ConfigParser()
groups = collections.defaultdict(collections.OrderedDict)
inbox = queue.Queue()
now = datetime.datetime.now(tz=datetime.timezone.utc)
Record = collections.namedtuple('Record', 'timestamp value')
Summary = collections.namedtuple('Summary', 'date minimum maximum')
Uptime = collections.namedtuple('Uptime', 'date value')
def main():
global now
utility.logging_config()
locale.setlocale(locale.LC_ALL, 'de_DE.UTF-8')
config.read('config.ini')
with open('sensor.json') as json_file:
sensor_json = json_file.read()
devices = json.loads(sensor_json,
object_pairs_hook=collections.OrderedDict)
for device in devices:
for kind, attr in device['output'].items():
if kind == 'temperature':
groups[attr['group']][attr['name']] = Temperature(
attr['low'],
attr['high'],
attr['name'],
device['input']['interval'],
attr['fail-notify'])
elif kind == 'switch':
groups[attr['group']][attr['name']] = Switch(
attr['name'],
device['input']['interval'],
attr['fail-notify'])
with website(), api.ApiServer(accept_record), \
notify.MailSender(
config['email']['source_address'],
config['email']['admin_address'],
config['email']['user_address'],
config['email'].getboolean('enable_email')) as mail:
while True:
# get new record
start = time.perf_counter()
now = datetime.datetime.now(tz=datetime.timezone.utc)
record_counter = int()
with contextlib.suppress(queue.Empty):
while True:
group, name, record = inbox.get(block=False)
groups[group][name].save(record)
record_counter += 1
# update content
for group, series_dict in groups.items():
for series in series_dict.values():
if series.error:
mail.queue(series.error, PAUSE_WARN_FAILURE)
if series.warning:
mail.queue(series.warning, PAUSE_WARN_VALUE)
detail_html(group, series_dict.values())
with contextlib.suppress(utility.CallDenied):
make_plots()
mail.send_all()
# log processing
utility.memory_check()
logging.info('updated website in {:.3f}s, {} new records'.format(
time.perf_counter() - start, record_counter))
time.sleep(INTERVAL)
@contextlib.contextmanager
def website():
shutil.copy('static/favicon.png', WEB_DIR)
shutil.copy('static/htaccess', WEB_DIR + '.htaccess')
shutil.copy('static/index.html', WEB_DIR)
try:
yield
finally:
logging.info('disable website')
shutil.copy('static/htaccess_maintenance', WEB_DIR + '.htaccess')
def accept_record(group, name, timestamp, value):
timestamp = datetime.datetime.fromtimestamp(int(timestamp),
tz=datetime.timezone.utc)
logging.info('{}: {} / {}'.format(name, timestamp, value))
filename = '{}/{}_{}.csv'.format(DATA_DIR, name,
timestamp.astimezone(TIMEZONE).year)
with open(filename, mode='a', newline='') as csv_file:
writer = csv.writer(csv_file)
writer.writerow((int(timestamp.timestamp()), value))
inbox.put((group, name, Record(timestamp, value)))
def detail_html(group, series_list):
text = list()
text.append('<ul>')
for series in series_list:
text.append('<li>{}</li>'.format(series))
text.append('</ul>')
values = '\n'.join(text)
filename = '{}{}.html'.format(WEB_DIR, group)
with open(filename, mode='w') as html_file:
html_file.write(values)
@utility.allow_every_x_seconds(PLOT_INTERVAL)
def make_plots():
for group, series_dict in groups.items():
# FIXME svg backend has memory leak in matplotlib 1.4.3
plot_history(series_dict.values(), '{}{}.png'.format(WEB_DIR, group))
def _nighttime(count, date_time):
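    # Yields (sunset, sunrise) night intervals for the `count` nights before
    # date_time; the coordinates below are fixed, presumably the sensor site.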
date_time -= datetime.timedelta(days=count)
sun_change = list()
for c in range(0, count + 1):
date_time += datetime.timedelta(days=1)
sun_change.extend(pysolar.util.get_sunrise_sunset(
49.2, 11.08, date_time))
sun_change = sun_change[1:-1]
for r in range(0, count):
yield sun_change[2 * r], sun_change[2 * r + 1]
def _plot_records(series_list, days):
color_iter = iter(COLOR_CYCLE)
for series in series_list:
color = next(color_iter)
if type(series) is Temperature:
parts = list()
for record in series.day if days == 1 else series.records:
if (not parts or record.timestamp - parts[-1][-1].timestamp >
ALLOWED_DOWNTIME):
parts.append(list())
parts[-1].append(record)
for part in parts:
timestamps, values = zip(*part)
matplotlib.pyplot.plot(timestamps, values, label=series.name,
linewidth=2, color=color, zorder=3)
elif type(series) is Switch:
for start, end in series.segments(series.records):
matplotlib.pyplot.axvspan(start, end, label=series.name,
color=color, alpha=0.5, zorder=1)
for sunset, sunrise in _nighttime(days + 1, now):
matplotlib.pyplot.axvspan(sunset, sunrise, label='Nacht', hatch='//',
facecolor='0.9', edgecolor='0.8', zorder=0)
matplotlib.pyplot.xlim(now - datetime.timedelta(days), now)
matplotlib.pyplot.ylabel('Temperatur °C')
ax = matplotlib.pyplot.gca() # FIXME not available in mplrc 1.4.3
ax.yaxis.tick_right()
ax.yaxis.set_label_position('right')
def _plot_summary(series_list):
ax1 = matplotlib.pyplot.gca() # FIXME not available in mplrc 1.4.3
ax2 = ax1.twinx()
color_iter = iter(COLOR_CYCLE)
switch = False
for series in series_list:
color = next(color_iter)
if type(series) is Temperature:
parts = list()
for summary in series.summary:
if (not parts or summary.date - parts[-1][-1].date >
datetime.timedelta(days=7)):
parts.append(list())
parts[-1].append(summary)
for part in parts:
dates, mins, maxs = zip(*part)
ax1.fill_between(dates, mins, maxs, label=series.name,
color=color, alpha=0.5, interpolate=True,
zorder=0)
elif type(series) is Switch:
switch = True
dates, values = zip(*series.summary)
ax2.plot(dates, values, color=color,
marker='o', linestyle='', zorder=1)
today = now.astimezone(TIMEZONE).date()
matplotlib.pyplot.xlim(today - datetime.timedelta(days=SUMMARY_DAYS),
today)
ax1.set_ylabel('Temperatur °C')
ax1.yaxis.tick_right()
ax1.yaxis.set_label_position('right')
if switch:
ax2.set_ylabel('Laufzeit h')
ax2.yaxis.tick_left()
ax2.yaxis.set_label_position('left')
ax2.grid(False)
else:
ax2.set_visible(False)
def plot_history(series_list, file):
fig = matplotlib.pyplot.figure(figsize=(12, 7))
# last week
ax = matplotlib.pyplot.subplot(312)
_plot_records(series_list, RECORD_DAYS)
frame_start = now - datetime.timedelta(days=RECORD_DAYS)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%a.'))
ax.xaxis.set_ticks(_day_locator(frame_start, now, TIMEZONE))
ax.xaxis.set_ticks(_hour_locator(frame_start, now, 6, TIMEZONE),
minor=True)
handles, labels = ax.get_legend_handles_labels()
# last day
ax = matplotlib.pyplot.subplot(311)
_plot_records(series_list, 1)
matplotlib.pyplot.legend(
handles=list(collections.OrderedDict(zip(labels, handles)).values()),
loc='lower left', bbox_to_anchor=(0, 1), ncol=5, frameon=False)
frame_start = now - datetime.timedelta(days=1)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H'))
ax.xaxis.set_ticks(_hour_locator(frame_start, now, 2, TIMEZONE))
ax.xaxis.set_minor_locator(matplotlib.dates.HourLocator())
# summary
ax = matplotlib.pyplot.subplot(313)
_plot_summary(series_list)
frame_start = now - datetime.timedelta(days=SUMMARY_DAYS)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%b.'))
ax.xaxis.set_ticks(_month_locator(frame_start, now, TIMEZONE))
ax.xaxis.set_ticks(_week_locator(frame_start, now, TIMEZONE), minor=True)
# save file
matplotlib.pyplot.savefig(file, bbox_inches='tight')
matplotlib.pyplot.close()
# matplotlib.dates.RRuleLocator is bugged at dst transitions
# http://matplotlib.org/api/dates_api.html#matplotlib.dates.RRuleLocator
# https://github.com/matplotlib/matplotlib/issues/2737/
# https://github.com/dateutil/dateutil/issues/102
def _month_locator(start, end, tz):
lower = start.astimezone(tz).date().replace(day=1)
upper = end.astimezone(tz).date()
rule = dateutil.rrule.rrule(dateutil.rrule.MONTHLY,
dtstart=lower, until=upper)
return [tz.localize(dt) for dt in rule if start <= tz.localize(dt) <= end]
def _week_locator(start, end, tz):
lower = start.astimezone(tz).date()
upper = end.astimezone(tz).date()
rule = dateutil.rrule.rrule(dateutil.rrule.WEEKLY,
byweekday=dateutil.rrule.MO,
dtstart=lower, until=upper)
return [tz.localize(dt) for dt in rule if start <= tz.localize(dt) <= end]
def _day_locator(start, end, tz):
lower = start.astimezone(tz).date()
upper = end.astimezone(tz).date()
rule = dateutil.rrule.rrule(dateutil.rrule.DAILY,
dtstart=lower, until=upper)
return [tz.localize(dt) for dt in rule if start <= tz.localize(dt) <= end]
def _hour_locator(start, end, step, tz):
lower = start.astimezone(tz).date()
upper = end.astimezone(tz).replace(tzinfo=None)
rule = dateutil.rrule.rrule(dateutil.rrule.HOURLY,
byhour=range(0, 24, step),
dtstart=lower, until=upper)
return [tz.localize(dt) for dt in rule if start <= tz.localize(dt) <= end]
def _universal_parser(value):
if value == 'False':
return False
elif value == 'True':
return True
else:
return float(value)
def _format_timedelta(td):
ret = list()
hours = td.days * 24 + td.seconds // 3600
if hours:
ret.append(str(hours))
ret.append('Stunde' if hours == 1 else 'Stunden')
minutes = (td.seconds // 60) % 60
ret.append(str(minutes))
ret.append('Minute' if minutes == 1 else 'Minuten')
return ' '.join(ret)
def _format_timestamp(ts):
ts = ts.astimezone(TIMEZONE)
local_now = now.astimezone(TIMEZONE)
if ts.date() == local_now.date():
return 'um {:%H:%M} Uhr'.format(ts)
if local_now.date() - ts.date() == datetime.timedelta(days=1):
return 'gestern um {:%H:%M} Uhr'.format(ts)
if local_now.date() - ts.date() < datetime.timedelta(days=7):
return 'am {:%A um %H:%M} Uhr'.format(ts)
if ts.year == local_now.year:
return 'am {:%d. %B um %H:%M} Uhr'.format(ts)
return 'am {:%d. %B %Y um %H:%M} Uhr'.format(ts)
def _format_temperature(record, low, high):
if not record:
return 'Keine Daten empfangen'
text = '{:.1f} °C {}'.format(record.value,
_format_timestamp(record.timestamp))
if low <= record.value <= high:
return text
return '<mark>{}</mark>'.format(text)
def _format_switch(record):
if not record:
return 'Keine Daten empfangen'
return '{} {}'.format('Ein' if record.value else 'Aus',
_format_timestamp(record.timestamp))
class Series(object):
text = None
def __init__(self, name, interval, fail_notify):
self.name = name
self.interval = datetime.timedelta(seconds=interval)
self.notify = fail_notify
self.fail_status = False
self.fail_counter = int()
self.records = collections.deque()
self.summary = collections.deque()
self._read(now.year - 1)
self._read(now.year)
self._clear()
def __str__(self):
ret = list()
first, *lines = self.text
lines.append('Aktualisierung alle {}'.format(
_format_timedelta(self.interval)))
ret.append('<strong>{}</strong>'.format(first))
ret.append('<ul>')
for line in lines:
ret.append('<li>{}</li>'.format(line))
ret.append('</ul>')
return '\n'.join(ret)
def _append(self, record):
if self.records and record.timestamp <= self.records[-1].timestamp:
raise OlderThanPreviousError('{}: previous {}, new {}'.format(
self.name, self.records[-1].timestamp.timestamp(),
record.timestamp.timestamp()))
self.records.append(record)
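        # Compact runs of identical values: when the last three records hold
        # the same value within the allowed downtime window, the middle one
        # adds nothing to the plot and is dropped.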
if (len(self.records) >= 3 and self.records[-3].value ==
self.records[-2].value == self.records[-1].value and
self.records[-1].timestamp - self.records[-3].timestamp <
ALLOWED_DOWNTIME):
del self.records[-2]
def _clear(self):
while (self.records and self.records[0].timestamp < now -
datetime.timedelta(RECORD_DAYS)):
self.records.popleft()
while (self.summary and self.summary[0].date < (now -
datetime.timedelta(SUMMARY_DAYS)).astimezone(TIMEZONE).date()):
self.summary.popleft()
def _read(self, year):
filename = '{}/{}_{}.csv'.format(DATA_DIR, self.name, year)
try:
with open(filename, newline='') as csv_file:
for row in csv.reader(csv_file):
timestamp = datetime.datetime.fromtimestamp(
int(row[0]), tz=datetime.timezone.utc)
value = _universal_parser(row[1])
record = Record(timestamp, value)
try:
self._append(record)
except OlderThanPreviousError:
# FIXME: remove this except, instead don't save invalid data
continue
self._summarize(record)
except OSError:
pass
@property
def current(self):
if (self.records and now - self.records[-1].timestamp <=
ALLOWED_DOWNTIME):
return self.records[-1]
else:
return None
@property
def error(self):
if not self.notify:
return None
if self.current:
self.fail_status = False
return None
if not self.fail_status:
self.fail_status = True
self.fail_counter += 1
return 'Messpunkt "{}" liefert keine Daten. (#{})'.format(
self.name, self.fail_counter)
@property
def day(self):
min_time = now - datetime.timedelta(days=1)
start = len(self.records)
while start > 0 and self.records[start - 1].timestamp >= min_time:
start -= 1
return itertools.islice(self.records, start, None)
def save(self, record):
try:
self._append(record)
except OlderThanPreviousError as err:
logging.warning('ignore {}'.format(err))
return
self._summarize(record)
self._clear()
class Temperature(Series):
def __init__(self, low, high, *args):
self.low = low
self.high = high
self.date = datetime.date.min
self.today = None
super().__init__(*args)
@classmethod
def minmax(cls, records):
minimum = maximum = None
for record in records:
if not minimum or record.value <= minimum.value:
minimum = record
if not maximum or record.value >= maximum.value:
maximum = record
return minimum, maximum
def _summarize(self, record):
date = record.timestamp.astimezone(TIMEZONE).date()
if date > self.date:
if self.today:
self.summary.append(Summary(self.date,
min(self.today), max(self.today)))
self.date = date
self.today = list()
self.today.append(record.value)
@property
def text(self):
minimum, maximum = self.minmax(self.records)
minimum_d, maximum_d = self.minmax(self.day)
yield '{}: {}'.format(
self.name, _format_temperature(self.current, self.low, self.high))
if minimum_d:
yield 'Letzte 24 Stunden: ▼ {} / ▲ {}'.format(
_format_temperature(minimum_d, self.low, self.high),
_format_temperature(maximum_d, self.low, self.high))
if minimum:
yield 'Letzte 7 Tage: ▼ {} / ▲ {}'.format(
_format_temperature(minimum, self.low, self.high),
_format_temperature(maximum, self.low, self.high))
yield 'Warnbereich unter {:.0f} °C und über {:.0f} °C'.format(
self.low, self.high)
@property
def warning(self):
current = self.current
if not current:
return None
if current.value < self.low:
return 'Messpunkt "{}" unter {} °C.'.format(self.name, self.low)
if current.value > self.high:
return 'Messpunkt "{}" über {} °C.'.format(self.name, self.high)
return None
class Switch(Series):
def __init__(self, *args):
self.date = None
super().__init__(*args)
@classmethod
def uptime(cls, segments):
total = datetime.timedelta()
for start, stop in segments:
total += stop - start
return total
@classmethod
def segments(cls, records):
expect = True
for timestamp, value in records:
# assume false during downtime
if not expect and timestamp - running > ALLOWED_DOWNTIME:
expect = True
yield start, running
if value:
running = timestamp
# identify segments
if expect != value:
continue
if expect:
expect = False
start = timestamp
else:
expect = True
yield start, timestamp
if not expect:
yield start, running
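    # Illustrative behaviour of segments() (timestamps abbreviated): records
    # [(t0, True), (t1, True), (t2, False)] yield one on-segment (t0, t2);
    # a series still switched on at the end is closed at its last True sample.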
def _summarize(self, record): # TODO record.value not used
date = record.timestamp.astimezone(TIMEZONE).date()
if not self.date:
self.date = date
return
if date <= self.date:
return
lower = datetime.datetime.combine(self.date, datetime.time.min)
lower = TIMEZONE.localize(lower)
upper = datetime.datetime.combine(
self.date + datetime.timedelta(days=1),
datetime.time.min)
upper = TIMEZONE.localize(upper)
total = datetime.timedelta()
for start, end in self.segments(self.records):
if end <= lower or start >= upper:
continue
if start < lower:
start = lower
if end > upper:
end = upper
total += end - start
hours = total / datetime.timedelta(hours=1)
self.summary.append(Uptime(self.date, hours))
self.date = date
@property
def text(self):
last_false = last_true = None
for record in reversed(self.records):
if record.value:
if not last_true:
last_true = record
elif not last_false:
last_false = record
if last_false and last_true:
break
current = self.current
yield '{}: {}'.format(self.name, _format_switch(current))
if last_true and (not current or not current.value):
yield 'Zuletzt {}'.format(_format_switch(last_true))
if last_false and (not current or current.value):
yield 'Zuletzt {}'.format(_format_switch(last_false))
yield 'Letzte 24 Stunden: Einschaltdauer {}'.format(
_format_timedelta(self.uptime(self.segments(self.day))))
yield 'Letzte 7 Tage: Einschaltdauer {}'.format(
_format_timedelta(self.uptime(self.segments(self.records))))
@property
def warning(self):
return None
class OlderThanPreviousError(Exception):
pass
if __name__ == "__main__":
main()
| gpl-3.0 |
k0ste/ansible | lib/ansible/modules/package_facts.py | 6 | 14498 | #!/usr/bin/python
# (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# most of it copied from AWX's scan_packages module
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
module: package_facts
short_description: package information as facts
description:
- Return information about installed packages as facts
options:
manager:
description:
      - The package manager used by the system, so the module knows how to query the package information.
- Since 2.8 this is a list and can support multiple package managers per system.
- The 'portage' and 'pkg' options were added in version 2.8.
default: ['auto']
choices: ['auto', 'rpm', 'apt', 'portage', 'pkg', 'pacman']
required: False
type: list
strategy:
description:
- This option controls how the module queries the package managers on the system.
C(first) means it will return only information for the first supported package manager available.
C(all) will return information for all supported and available package managers on the system.
choices: ['first', 'all']
default: 'first'
version_added: "2.8"
version_added: "2.5"
requirements:
- For 'portage' support it requires the C(qlist) utility, which is part of 'app-portage/portage-utils'.
- For Debian-based systems C(python-apt) package must be installed on targeted hosts.
author:
- Matthew Jones (@matburt)
- Brian Coca (@bcoca)
- Adam Miller (@maxamillion)
'''
EXAMPLES = '''
- name: Gather the rpm package facts
package_facts:
manager: auto
- name: Print the rpm package facts
debug:
var: ansible_facts.packages
- name: Check whether a package called foobar is installed
debug:
msg: "{{ ansible_facts.packages['foobar'] | length }} versions of foobar are installed!"
when: "'foobar' in ansible_facts.packages"
'''
RETURN = '''
ansible_facts:
description: facts to add to ansible_facts
returned: always
type: complex
contains:
packages:
description:
- Maps the package name to a non-empty list of dicts with package information.
- Every dict in the list corresponds to one installed version of the package.
- The fields described below are present for all package managers. Depending on the
package manager, there might be more fields for a package.
returned: when operating system level package manager is specified or auto detected manager
type: dict
contains:
name:
description: The package's name.
returned: always
type: str
version:
description: The package's version.
returned: always
type: str
source:
description: Where information on the package came from.
returned: always
type: str
sample: |-
{
"packages": {
"kernel": [
{
"name": "kernel",
"source": "rpm",
"version": "3.10.0",
...
},
{
"name": "kernel",
"source": "rpm",
"version": "3.10.0",
...
},
...
],
"kernel-tools": [
{
"name": "kernel-tools",
"source": "rpm",
"version": "3.10.0",
...
}
],
...
}
}
sample_rpm:
{
"packages": {
"kernel": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.26.2.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.16.1.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.10.2.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.21.1.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
"kernel-tools": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel-tools",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
"kernel-tools-libs": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel-tools-libs",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
}
}
sample_deb:
{
"packages": {
"libbz2-1.0": [
{
"version": "1.0.6-5",
"source": "apt",
"arch": "amd64",
"name": "libbz2-1.0"
}
],
"patch": [
{
"version": "2.7.1-4ubuntu1",
"source": "apt",
"arch": "amd64",
"name": "patch"
}
],
}
}
'''
import re
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.facts.packages import LibMgr, CLIMgr, get_all_pkg_managers
class RPM(LibMgr):
LIB = 'rpm'
def list_installed(self):
return self._lib.TransactionSet().dbMatch()
def get_package_details(self, package):
return dict(name=package[self._lib.RPMTAG_NAME],
version=package[self._lib.RPMTAG_VERSION],
release=package[self._lib.RPMTAG_RELEASE],
epoch=package[self._lib.RPMTAG_EPOCH],
arch=package[self._lib.RPMTAG_ARCH],)
def is_available(self):
        '''We expect the Python bindings to be installed; warn if they are missing while the rpm CLI is present.'''
we_have_lib = super(RPM, self).is_available()
try:
get_bin_path('rpm')
if not we_have_lib:
module.warn('Found "rpm" but %s' % (missing_required_lib('rpm')))
except ValueError:
pass
return we_have_lib
class APT(LibMgr):
LIB = 'apt'
def __init__(self):
self._cache = None
super(APT, self).__init__()
@property
def pkg_cache(self):
if self._cache is not None:
return self._cache
self._cache = self._lib.Cache()
return self._cache
def is_available(self):
        '''We expect the Python bindings to be installed; if apt/apt-get/aptitude is present, warn about the missing bindings.'''
we_have_lib = super(APT, self).is_available()
if not we_have_lib:
for exe in ('apt', 'apt-get', 'aptitude'):
try:
get_bin_path(exe)
except ValueError:
continue
else:
module.warn('Found "%s" but %s' % (exe, missing_required_lib('apt')))
break
return we_have_lib
def list_installed(self):
# Store the cache to avoid running pkg_cache() for each item in the comprehension, which is very slow
cache = self.pkg_cache
return [pk for pk in cache.keys() if cache[pk].is_installed]
def get_package_details(self, package):
ac_pkg = self.pkg_cache[package].installed
return dict(name=package, version=ac_pkg.version, arch=ac_pkg.architecture, category=ac_pkg.section, origin=ac_pkg.origins[0].origin)
class PACMAN(CLIMgr):
CLI = 'pacman'
def list_installed(self):
rc, out, err = module.run_command([self._cli, '-Qi'], environ_update=dict(LC_ALL='C'))
if rc != 0 or err:
raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
return out.split("\n\n")[:-1]
def get_package_details(self, package):
# parse values of details that might extend over several lines
raw_pkg_details = {}
last_detail = None
for line in package.splitlines():
m = re.match(r"([\w ]*[\w]) +: (.*)", line)
if m:
last_detail = m.group(1)
raw_pkg_details[last_detail] = m.group(2)
else:
# append value to previous detail
raw_pkg_details[last_detail] = raw_pkg_details[last_detail] + " " + line.lstrip()
provides = None
if raw_pkg_details['Provides'] != 'None':
provides = [
p.split('=')[0]
for p in raw_pkg_details['Provides'].split(' ')
]
return {
'name': raw_pkg_details['Name'],
'version': raw_pkg_details['Version'],
'arch': raw_pkg_details['Architecture'],
'provides': provides,
}
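    # Illustrative `pacman -Qi` fragment handled above (values wrapped onto a
    # continuation line have no "key :" prefix):
    #
    #   Name            : bash
    #   Version         : 5.1.016-1
    #   Provides        : sh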
class PKG(CLIMgr):
CLI = 'pkg'
atoms = ['name', 'version', 'origin', 'installed', 'automatic', 'arch', 'category', 'prefix', 'vital']
def list_installed(self):
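        # The query string below expands to "%n\t%v\t%R\t%t\t%a\t%q\t%o\t%p\t%V",
        # producing one tab-separated line per package in self.atoms order.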
rc, out, err = module.run_command([self._cli, 'query', "%%%s" % '\t%'.join(['n', 'v', 'R', 't', 'a', 'q', 'o', 'p', 'V'])])
if rc != 0 or err:
raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
return out.splitlines()
def get_package_details(self, package):
pkg = dict(zip(self.atoms, package.split('\t')))
if 'arch' in pkg:
try:
pkg['arch'] = pkg['arch'].split(':')[2]
except IndexError:
pass
if 'automatic' in pkg:
pkg['automatic'] = bool(int(pkg['automatic']))
if 'category' in pkg:
pkg['category'] = pkg['category'].split('/', 1)[0]
if 'version' in pkg:
if ',' in pkg['version']:
pkg['version'], pkg['port_epoch'] = pkg['version'].split(',', 1)
else:
pkg['port_epoch'] = 0
if '_' in pkg['version']:
pkg['version'], pkg['revision'] = pkg['version'].split('_', 1)
else:
pkg['revision'] = '0'
if 'vital' in pkg:
pkg['vital'] = bool(int(pkg['vital']))
return pkg
class PORTAGE(CLIMgr):
CLI = 'qlist'
atoms = ['category', 'name', 'version', 'ebuild_revision', 'slots', 'prefixes', 'sufixes']
def list_installed(self):
rc, out, err = module.run_command(' '.join([self._cli, '-Iv', '|', 'xargs', '-n', '1024', 'qatom']), use_unsafe_shell=True)
if rc != 0:
raise RuntimeError("Unable to list packages rc=%s : %s" % (rc, to_native(err)))
return out.splitlines()
def get_package_details(self, package):
return dict(zip(self.atoms, package.split()))
def main():
# get supported pkg managers
PKG_MANAGERS = get_all_pkg_managers()
PKG_MANAGER_NAMES = [x.lower() for x in PKG_MANAGERS.keys()]
# start work
global module
module = AnsibleModule(argument_spec=dict(manager={'type': 'list', 'default': ['auto']},
strategy={'choices': ['first', 'all'], 'default': 'first'}),
supports_check_mode=True)
packages = {}
results = {'ansible_facts': {}}
managers = [x.lower() for x in module.params['manager']]
strategy = module.params['strategy']
if 'auto' in managers:
# keep order from user, we do dedupe below
managers.extend(PKG_MANAGER_NAMES)
managers.remove('auto')
unsupported = set(managers).difference(PKG_MANAGER_NAMES)
if unsupported:
if 'auto' in module.params['manager']:
msg = 'Could not auto detect a usable package manager, check warnings for details.'
else:
msg = 'Unsupported package managers requested: %s' % (', '.join(unsupported))
module.fail_json(msg=msg)
found = 0
seen = set()
for pkgmgr in managers:
if found and strategy == 'first':
break
# dedupe as per above
if pkgmgr in seen:
continue
seen.add(pkgmgr)
try:
try:
# manager throws exception on init (calls self.test) if not usable.
manager = PKG_MANAGERS[pkgmgr]()
if manager.is_available():
found += 1
packages.update(manager.get_packages())
except Exception as e:
if pkgmgr in module.params['manager']:
module.warn('Requested package manager %s was not usable by this module: %s' % (pkgmgr, to_text(e)))
continue
except Exception as e:
if pkgmgr in module.params['manager']:
module.warn('Failed to retrieve packages with %s: %s' % (pkgmgr, to_text(e)))
if found == 0:
msg = ('Could not detect a supported package manager from the following list: %s, '
'or the required Python library is not installed. Check warnings for details.' % managers)
module.fail_json(msg=msg)
# Set the facts, this will override the facts in ansible_facts that might exist from previous runs
# when using operating system level or distribution package managers
results['ansible_facts']['packages'] = packages
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
pcameron/javafuse-read-only | fs/nullfs/mx4j/tools/jython/mx4j.py | 7 | 1961 | """ Copyright (C) MX4J.
All rights reserved.
This software is distributed under the terms of the MX4J License version 1.0.
See the terms of the MX4J License in the documentation provided with this software.
author <a href="mailto:tibu@users.sourceforge.net">Carlos Quiroz</a>
version $Revision: 1.3 $
"""
from javax.management import *
from javax.management.monitor import *
from javax.management.timer import *
from javax.management.loading import *
from javax.management.relation import *
from javax.management.modelmbean import *
class OperationProxy:
def __init__(self, objectname, operation):
self.objectname = objectname
self.operation = operation
def invoke(self, **kw):
server.invoke(self.objectname, self.operation, None, None)
class proxy:
def __init__(self, objectname):
self.__dict__["objectname"] = objectname
info = server.getMBeanInfo(objectname)
for o in info.operations:
self.__dict__[o.name] = OperationProxy(objectname, o.name).invoke
def __getattr__(self, name):
return server.getAttribute(self.objectname, name)
def __setattr__(self, name, value):
from javax.management import Attribute
return server.setAttribute(self.objectname, Attribute(name, value))
def __repr__(self):
return "Proxy of MBean: %s " % (self.__dict__["objectname"], )
def invoke(self, name, arguments=None, types=None):
return server.invoke(self.objectname, name, arguments, types)
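# Illustrative use only (the object name below is assumed, not guaranteed to
# be registered): wrap an MBean so attributes read and write like plain
# Python attributes and operations become callables.
#
#   mem = proxy(ObjectName('java.lang:type=Memory'))
#   print mem.HeapMemoryUsage
#   mem.invoke('gc')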
def mbeans(query=None):
"""
Returns a list of all the available MBeans in the server. The optional
query parameter will filter the list by objectname
"""
return server.getQueryMBeans(ObjectName(query), None)
def instances(classname, query=None):
"""
Returns a list of all the available MBeans in the server which are instances
of classname. It accepts a query parameter to filter by objectname
"""
    return [x for x in mbeans(query) if server.isInstanceOf(x.getObjectName(), classname)]
| gpl-3.0 |
blois/AndroidSDKCloneMin | ndk/prebuilt/linux-x86_64/lib/python2.7/wsgiref/util.py | 247 | 5576 | """Miscellaneous WSGI-related Utilities"""
import posixpath
__all__ = [
'FileWrapper', 'guess_scheme', 'application_uri', 'request_uri',
'shift_path_info', 'setup_testing_defaults',
]
class FileWrapper:
"""Wrapper to convert file-like objects to iterables"""
def __init__(self, filelike, blksize=8192):
self.filelike = filelike
self.blksize = blksize
if hasattr(filelike,'close'):
self.close = filelike.close
def __getitem__(self,key):
data = self.filelike.read(self.blksize)
if data:
return data
raise IndexError
def __iter__(self):
return self
def next(self):
data = self.filelike.read(self.blksize)
if data:
return data
raise StopIteration
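# Illustrative use only (file path assumed): return a response body that a
# WSGI server can iterate over in 8 KiB chunks; servers call .close() on it.
#
#   def app(environ, start_response):
#       start_response('200 OK', [('Content-Type', 'text/plain')])
#       return FileWrapper(open('/tmp/data.txt', 'rb'))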
def guess_scheme(environ):
"""Return a guess for whether 'wsgi.url_scheme' should be 'http' or 'https'
"""
if environ.get("HTTPS") in ('yes','on','1'):
return 'https'
else:
return 'http'
def application_uri(environ):
"""Return the application's base URI (no PATH_INFO or QUERY_STRING)"""
url = environ['wsgi.url_scheme']+'://'
from urllib import quote
if environ.get('HTTP_HOST'):
url += environ['HTTP_HOST']
else:
url += environ['SERVER_NAME']
if environ['wsgi.url_scheme'] == 'https':
if environ['SERVER_PORT'] != '443':
url += ':' + environ['SERVER_PORT']
else:
if environ['SERVER_PORT'] != '80':
url += ':' + environ['SERVER_PORT']
url += quote(environ.get('SCRIPT_NAME') or '/')
return url
def request_uri(environ, include_query=1):
"""Return the full request URI, optionally including the query string"""
url = application_uri(environ)
from urllib import quote
path_info = quote(environ.get('PATH_INFO',''),safe='/;=,')
if not environ.get('SCRIPT_NAME'):
url += path_info[1:]
else:
url += path_info
if include_query and environ.get('QUERY_STRING'):
url += '?' + environ['QUERY_STRING']
return url
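# Worked example (environ values assumed): with wsgi.url_scheme 'http',
# HTTP_HOST 'example.com', SCRIPT_NAME '/app', PATH_INFO '/x' and
# QUERY_STRING 'a=1', request_uri(environ) yields
# 'http://example.com/app/x?a=1'.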
def shift_path_info(environ):
"""Shift a name from PATH_INFO to SCRIPT_NAME, returning it
If there are no remaining path segments in PATH_INFO, return None.
Note: 'environ' is modified in-place; use a copy if you need to keep
the original PATH_INFO or SCRIPT_NAME.
Note: when PATH_INFO is just a '/', this returns '' and appends a trailing
'/' to SCRIPT_NAME, even though empty path segments are normally ignored,
and SCRIPT_NAME doesn't normally end in a '/'. This is intentional
behavior, to ensure that an application can tell the difference between
'/x' and '/x/' when traversing to objects.
"""
path_info = environ.get('PATH_INFO','')
if not path_info:
return None
path_parts = path_info.split('/')
path_parts[1:-1] = [p for p in path_parts[1:-1] if p and p != '.']
name = path_parts[1]
del path_parts[1]
script_name = environ.get('SCRIPT_NAME','')
script_name = posixpath.normpath(script_name+'/'+name)
if script_name.endswith('/'):
script_name = script_name[:-1]
if not name and not script_name.endswith('/'):
script_name += '/'
environ['SCRIPT_NAME'] = script_name
environ['PATH_INFO'] = '/'.join(path_parts)
# Special case: '/.' on PATH_INFO doesn't get stripped,
# because we don't strip the last element of PATH_INFO
# if there's only one path part left. Instead of fixing this
# above, we fix it here so that PATH_INFO gets normalized to
# an empty string in the environ.
if name=='.':
name = None
return name
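# Worked example: starting from SCRIPT_NAME '/app' and PATH_INFO '/a/b',
# shift_path_info(environ) returns 'a' and leaves SCRIPT_NAME '/app/a'
# and PATH_INFO '/b'.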
def setup_testing_defaults(environ):
"""Update 'environ' with trivial defaults for testing purposes
This adds various parameters required for WSGI, including HTTP_HOST,
SERVER_NAME, SERVER_PORT, REQUEST_METHOD, SCRIPT_NAME, PATH_INFO,
and all of the wsgi.* variables. It only supplies default values,
and does not replace any existing settings for these variables.
This routine is intended to make it easier for unit tests of WSGI
servers and applications to set up dummy environments. It should *not*
be used by actual WSGI servers or applications, since the data is fake!
"""
environ.setdefault('SERVER_NAME','127.0.0.1')
environ.setdefault('SERVER_PROTOCOL','HTTP/1.0')
environ.setdefault('HTTP_HOST',environ['SERVER_NAME'])
environ.setdefault('REQUEST_METHOD','GET')
if 'SCRIPT_NAME' not in environ and 'PATH_INFO' not in environ:
environ.setdefault('SCRIPT_NAME','')
environ.setdefault('PATH_INFO','/')
environ.setdefault('wsgi.version', (1,0))
environ.setdefault('wsgi.run_once', 0)
environ.setdefault('wsgi.multithread', 0)
environ.setdefault('wsgi.multiprocess', 0)
from StringIO import StringIO
environ.setdefault('wsgi.input', StringIO(""))
environ.setdefault('wsgi.errors', StringIO())
environ.setdefault('wsgi.url_scheme',guess_scheme(environ))
if environ['wsgi.url_scheme']=='http':
environ.setdefault('SERVER_PORT', '80')
elif environ['wsgi.url_scheme']=='https':
environ.setdefault('SERVER_PORT', '443')
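# Sketch of intended use in a test (assertion values follow from the
# defaults above):
#
#   environ = {'REQUEST_METHOD': 'POST'}
#   setup_testing_defaults(environ)
#   assert environ['REQUEST_METHOD'] == 'POST'  # existing keys are kept
#   assert environ['PATH_INFO'] == '/'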
_hoppish = {
'connection':1, 'keep-alive':1, 'proxy-authenticate':1,
'proxy-authorization':1, 'te':1, 'trailers':1, 'transfer-encoding':1,
'upgrade':1
}.__contains__
def is_hop_by_hop(header_name):
"""Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
return _hoppish(header_name.lower())
| apache-2.0 |
AsgerPetersen/QGIS | python/plugins/processing/gui/ScriptEdit.py | 8 | 7606 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ScriptEdit.py
---------------------
Date : April 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'April 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtCore import Qt, QSettings
from qgis.PyQt.QtGui import QFont, QColor, QKeySequence
from qgis.PyQt.QtWidgets import QShortcut
from qgis.core import QgsApplication
from qgis.PyQt.Qsci import QsciScintilla, QsciLexerPython, QsciAPIs
from processing.gui.LexerR import LexerR
class ScriptEdit(QsciScintilla):
LEXER_PYTHON = 0
LEXER_R = 1
def __init__(self, parent=None):
QsciScintilla.__init__(self, parent)
self.lexer = None
self.api = None
self.lexerType = -1
self.setCommonOptions()
self.initShortcuts()
def setCommonOptions(self):
# Enable non-ASCII characters
self.setUtf8(True)
# Default font
font = QFont()
font.setFamily('Courier')
font.setFixedPitch(True)
font.setPointSize(20)
self.setFont(font)
self.setMarginsFont(font)
self.initLexer()
self.setBraceMatching(QsciScintilla.SloppyBraceMatch)
self.setWrapMode(QsciScintilla.WrapWord)
self.setWrapVisualFlags(QsciScintilla.WrapFlagByText,
QsciScintilla.WrapFlagNone, 4)
self.setSelectionForegroundColor(QColor('#2e3436'))
self.setSelectionBackgroundColor(QColor('#babdb6'))
# Show line numbers
self.setMarginWidth(1, '000')
self.setMarginLineNumbers(1, True)
self.setMarginsForegroundColor(QColor('#2e3436'))
self.setMarginsBackgroundColor(QColor('#babdb6'))
# Highlight current line
self.setCaretLineVisible(True)
self.setCaretLineBackgroundColor(QColor('#d3d7cf'))
# Folding
self.setFolding(QsciScintilla.BoxedTreeFoldStyle)
self.setFoldMarginColors(QColor('#d3d7cf'), QColor('#d3d7cf'))
# Mark column 80 with vertical line
self.setEdgeMode(QsciScintilla.EdgeLine)
self.setEdgeColumn(80)
self.setEdgeColor(QColor('#eeeeec'))
# Indentation
self.setAutoIndent(True)
self.setIndentationsUseTabs(False)
self.setIndentationWidth(4)
self.setTabIndents(True)
self.setBackspaceUnindents(True)
self.setTabWidth(4)
# Autocomletion
self.setAutoCompletionThreshold(2)
self.setAutoCompletionSource(QsciScintilla.AcsAPIs)
self.setFonts(10)
def setFonts(self, size):
# Load font from Python console settings
settings = QSettings()
fontName = settings.value('pythonConsole/fontfamilytext', 'Monospace')
fontSize = int(settings.value('pythonConsole/fontsize', size))
self.defaultFont = QFont(fontName)
self.defaultFont.setFixedPitch(True)
self.defaultFont.setPointSize(fontSize)
self.defaultFont.setStyleHint(QFont.TypeWriter)
self.defaultFont.setStretch(QFont.SemiCondensed)
self.defaultFont.setLetterSpacing(QFont.PercentageSpacing, 87.0)
self.defaultFont.setBold(False)
self.boldFont = QFont(self.defaultFont)
self.boldFont.setBold(True)
self.italicFont = QFont(self.defaultFont)
self.italicFont.setItalic(True)
self.setFont(self.defaultFont)
self.setMarginsFont(self.defaultFont)
def initShortcuts(self):
(ctrl, shift) = (self.SCMOD_CTRL << 16, self.SCMOD_SHIFT << 16)
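        # Scintilla encodes a key binding as keycode + (modifier << 16),
        # which is why the modifier masks are pre-shifted here.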
# Disable some shortcuts
self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('D') + ctrl)
self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('L') + ctrl)
self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('L') + ctrl
+ shift)
self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('T') + ctrl)
#self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord("Z") + ctrl)
#self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord("Y") + ctrl)
# Use Ctrl+Space for autocompletion
self.shortcutAutocomplete = QShortcut(QKeySequence(Qt.CTRL
+ Qt.Key_Space), self)
self.shortcutAutocomplete.setContext(Qt.WidgetShortcut)
self.shortcutAutocomplete.activated.connect(self.autoComplete)
def autoComplete(self):
self.autoCompleteFromAll()
def setLexerType(self, lexerType):
self.lexerType = lexerType
self.initLexer()
def initLexer(self):
if self.lexerType == self.LEXER_PYTHON:
self.lexer = QsciLexerPython()
colorDefault = QColor('#2e3436')
colorComment = QColor('#c00')
colorCommentBlock = QColor('#3465a4')
colorNumber = QColor('#4e9a06')
colorType = QColor('#4e9a06')
colorKeyword = QColor('#204a87')
colorString = QColor('#ce5c00')
self.lexer.setDefaultFont(self.defaultFont)
self.lexer.setDefaultColor(colorDefault)
self.lexer.setColor(colorComment, 1)
self.lexer.setColor(colorNumber, 2)
self.lexer.setColor(colorString, 3)
self.lexer.setColor(colorString, 4)
self.lexer.setColor(colorKeyword, 5)
self.lexer.setColor(colorString, 6)
self.lexer.setColor(colorString, 7)
self.lexer.setColor(colorType, 8)
self.lexer.setColor(colorCommentBlock, 12)
self.lexer.setColor(colorString, 15)
self.lexer.setFont(self.italicFont, 1)
self.lexer.setFont(self.boldFont, 5)
self.lexer.setFont(self.boldFont, 8)
self.lexer.setFont(self.italicFont, 12)
self.api = QsciAPIs(self.lexer)
settings = QSettings()
useDefaultAPI = bool(settings.value('pythonConsole/preloadAPI',
True))
if useDefaultAPI:
# Load QGIS API shipped with Python console
self.api.loadPrepared(
os.path.join(QgsApplication.pkgDataPath(),
'python', 'qsci_apis', 'pyqgis.pap'))
else:
# Load user-defined API files
apiPaths = settings.value('pythonConsole/userAPI', [])
for path in apiPaths:
self.api.load(path)
self.api.prepare()
self.lexer.setAPIs(self.api)
elif self.lexerType == self.LEXER_R:
# R lexer
self.lexer = LexerR()
self.setLexer(self.lexer)
| gpl-2.0 |
msabramo/ansible | lib/ansible/utils/module_docs_fragments/vmware.py | 149 | 1471 | # (c) 2016, Charles Paul <cpaul@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Parameters for VMware modules
DOCUMENTATION = '''
options:
hostname:
description:
- The hostname or IP address of the vSphere vCenter.
required: True
username:
description:
- The username of the vSphere vCenter.
required: True
aliases: ['user', 'admin']
password:
description:
- The password of the vSphere vCenter.
required: True
aliases: ['pass', 'pwd']
validate_certs:
description:
- Allows connection when SSL certificates are not valid. Set to
false when certificates are not trusted.
required: False
default: 'True'
choices: ['True', 'False']
'''
| gpl-3.0 |
huard/scipy-work | scipy/special/setup.py | 1 | 2409 | #!/usr/bin/env python
import os
import sys
from os.path import join
from distutils.sysconfig import get_python_inc
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('special', parent_package, top_path)
define_macros = []
if sys.platform=='win32':
# define_macros.append(('NOINFINITIES',None))
# define_macros.append(('NONANS',None))
define_macros.append(('_USE_MATH_DEFINES',None))
# C libraries
config.add_library('sc_c_misc',sources=[join('c_misc','*.c')])
config.add_library('sc_cephes',sources=[join('cephes','*.c')],
include_dirs=[get_python_inc()],
macros=define_macros)
# Fortran libraries
config.add_library('sc_mach',sources=[join('mach','*.f')],
config_fc={'noopt':(__file__,1)})
config.add_library('sc_toms',sources=[join('amos','*.f')])
config.add_library('sc_amos',sources=[join('toms','*.f')])
config.add_library('sc_cdf',sources=[join('cdflib','*.f')])
config.add_library('sc_specfun',sources=[join('specfun','*.f')])
# Extension _cephes
sources = ['_cephesmodule.c', 'amos_wrappers.c', 'specfun_wrappers.c',
'toms_wrappers.c','cdf_wrappers.c','ufunc_extras.c']
config.add_extension('_cephes', sources=sources,
libraries=['sc_amos','sc_toms','sc_c_misc','sc_cephes','sc_mach',
'sc_cdf', 'sc_specfun'],
depends=["ufunc_extras.h", "cephes.h",
"amos_wrappers.h", "toms_wrappers.h",
"cdf_wrappers.h", "specfun_wrappers.h",
"c_misc/misc.h", "cephes_doc.h",
"cephes/mconf.h", "cephes/cephes_names.h"],
define_macros = define_macros
)
# Extension specfun
config.add_extension('specfun',
sources=['specfun.pyf'],
f2py_options=['--no-wrap-functions'],
define_macros=[],
libraries=['sc_specfun'])
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
windedge/odoo | addons/point_of_sale/report/pos_payment_report.py | 380 | 3549 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class pos_payment_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(pos_payment_report, self).__init__(cr, uid, name, context=context)
self.total = 0.0
self.localcontext.update({
'time': time,
'pos_payment': self._pos_payment,
'pos_payment_total':self._pos_payment_total,
})
def _pos_payment(self, obj):
self.total = 0
data={}
sql = """ select id from pos_order where id = %d"""%(obj.id)
self.cr.execute(sql)
if self.cr.fetchone():
self.cr.execute ("select pt.name,pp.default_code as code,pol.qty,pu.name as uom,pol.discount,pol.price_unit, " \
"(pol.price_unit * pol.qty * (1 - (pol.discount) / 100.0)) as total " \
"from pos_order as po,pos_order_line as pol,product_product as pp,product_template as pt, product_uom as pu " \
"where pt.id=pp.product_tmpl_id and pp.id=pol.product_id and po.id = pol.order_id and pu.id=pt.uom_id " \
"and po.state IN ('paid','invoiced') and to_char(date_trunc('day',po.date_order),'YYYY-MM-DD')::date = current_date and po.id=%d"%(obj.id))
data=self.cr.dictfetchall()
else:
self.cr.execute ("select pt.name,pp.default_code as code,pol.qty,pu.name as uom,pol.discount,pol.price_unit, " \
"(pol.price_unit * pol.qty * (1 - (pol.discount) / 100.0)) as total " \
"from pos_order as po,pos_order_line as pol,product_product as pp,product_template as pt, product_uom as pu " \
"where pt.id=pp.product_tmpl_id and pp.id=pol.product_id and po.id = pol.order_id and pu.id=pt.uom_id " \
"and po.state IN ('paid','invoiced') and to_char(date_trunc('day',po.date_order),'YYYY-MM-DD')::date = current_date")
data=self.cr.dictfetchall()
for d in data:
self.total += d['price_unit'] * d['qty']
return data
def _pos_payment_total(self, o):
return self.total
class report_pos_payment(osv.AbstractModel):
_name = 'report.point_of_sale.report_payment'
_inherit = 'report.abstract_report'
_template = 'point_of_sale.report_payment'
_wrapped_report_class = pos_payment_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
shakamunyi/neutron-dvr | neutron/tests/unit/extensions/extensionattribute.py | 8 | 3259 | # Copyright 2013 VMware, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Kaiwei Fan, VMware, Inc
#
import abc
from neutron.api import extensions
from neutron.api.v2 import base
from neutron import manager
from neutron import quota
# Attribute Map
RESOURCE_ATTRIBUTE_MAP = {
'ext_test_resources': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string': None},
'is_visible': True},
}
}
class Extensionattribute(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "Extension Test Resource"
@classmethod
def get_alias(cls):
return "ext-obj-test"
@classmethod
def get_description(cls):
return "Extension Test Resource"
@classmethod
def get_namespace(cls):
return ""
@classmethod
def get_updated(cls):
return "2013-02-05T10:00:00-00:00"
def update_attributes_map(self, attributes):
super(Extensionattribute, self).update_attributes_map(
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
exts = []
plugin = manager.NeutronManager.get_plugin()
resource_name = 'ext_test_resource'
collection_name = resource_name + "s"
params = RESOURCE_ATTRIBUTE_MAP.get(collection_name, dict())
quota.QUOTAS.register_resource_by_name(resource_name)
controller = base.create_resource(collection_name,
resource_name,
plugin, params,
member_actions={})
ex = extensions.ResourceExtension(collection_name,
controller,
member_actions={})
exts.append(ex)
return exts
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
class ExtensionObjectTestPluginBase(object):
@abc.abstractmethod
def create_ext_test_resource(self, context, router):
pass
@abc.abstractmethod
def get_ext_test_resource(self, context, id, fields=None):
pass
| apache-2.0 |
BTA-BATA/electrum-bta-master | plugins/btchipwallet.py | 1 | 23740 | from PyQt4.Qt import QApplication, QMessageBox, QDialog, QInputDialog, QLineEdit, QVBoxLayout, QLabel, QThread, SIGNAL
import PyQt4.QtCore as QtCore
from binascii import unhexlify
from binascii import hexlify
from struct import pack,unpack
from sys import stderr
from time import sleep
from base64 import b64encode, b64decode
import electrum_bta as electrum
from electrum_bta_gui.qt.password_dialog import make_password_dialog, run_password_dialog
from electrum_bta.account import BIP32_Account
from electrum_bta.bitcoin import EncodeBase58Check, DecodeBase58Check, public_key_to_bc_address, bc_address_to_hash_160, hash_160_to_bc_address
from electrum_bta.i18n import _
from electrum_bta.plugins import BasePlugin, hook
from electrum_bta.transaction import deserialize
from electrum_bta.wallet import BIP32_HD_Wallet, BIP32_Wallet
from electrum_bta.util import format_satoshis_plain, print_error, print_msg
import hashlib
import threading
def setAlternateCoinVersions(self, regular, p2sh):
apdu = [ self.BTCHIP_CLA, 0x14, 0x00, 0x00, 0x02, regular, p2sh ]
self.dongle.exchange(bytearray(apdu))
try:
from btchip.btchipComm import getDongle, DongleWait
from btchip.btchip import btchip
from btchip.btchipUtils import compress_public_key,format_transaction, get_regular_input_script
from btchip.bitcoinTransaction import bitcoinTransaction
from btchip.btchipPersoWizard import StartBTChipPersoDialog
from btchip.btchipFirmwareWizard import checkFirmware, updateFirmware
from btchip.btchipException import BTChipException
btchip.setAlternateCoinVersions = setAlternateCoinVersions
BTCHIP = True
BTCHIP_DEBUG = False
except ImportError:
BTCHIP = False
class Plugin(BasePlugin):
def __init__(self, gui, name):
BasePlugin.__init__(self, gui, name)
self._is_available = self._init()
self.wallet = None
self.handler = None
def constructor(self, s):
return BTChipWallet(s)
def _init(self):
return BTCHIP
def is_available(self):
if not self._is_available:
return False
if not self.wallet:
return False
if self.wallet.storage.get('wallet_type') != 'btchip':
return False
return True
def set_enabled(self, enabled):
self.wallet.storage.put('use_' + self.name, enabled)
def is_enabled(self):
if not self.is_available():
return False
if self.wallet.has_seed():
return False
return True
def btchip_is_connected(self):
try:
self.wallet.get_client().getFirmwareVersion()
except:
return False
return True
@hook
def cmdline_load_wallet(self, wallet):
self.wallet = wallet
self.wallet.plugin = self
if self.handler is None:
self.handler = BTChipCmdLineHandler()
@hook
def load_wallet(self, wallet, window):
self.wallet = wallet
self.wallet.plugin = self
self.window = window
if self.handler is None:
self.handler = BTChipQTHandler(self.window.app)
if self.btchip_is_connected():
if not self.wallet.check_proper_device():
QMessageBox.information(self.window, _('Error'), _("This wallet does not match your BTChip device"), _('OK'))
self.wallet.force_watching_only = True
else:
QMessageBox.information(self.window, _('Error'), _("BTChip device not detected.\nContinuing in watching-only mode."), _('OK'))
self.wallet.force_watching_only = True
@hook
def installwizard_restore(self, wizard, storage):
if storage.get('wallet_type') != 'btchip':
return
wallet = BTChipWallet(storage)
try:
wallet.create_main_account(None)
except BaseException as e:
QMessageBox.information(None, _('Error'), str(e), _('OK'))
return
return wallet
@hook
def sign_tx(self, tx):
tx.error = None
try:
self.wallet.sign_transaction(tx, None)
except Exception as e:
tx.error = str(e)
class BTChipWallet(BIP32_HD_Wallet):
wallet_type = 'btchip'
root_derivation = "m/44'/2'"
def __init__(self, storage):
BIP32_HD_Wallet.__init__(self, storage)
self.transport = None
self.client = None
self.mpk = None
self.device_checked = False
self.signing = False
self.force_watching_only = False
def give_error(self, message, clear_client = False):
print_error(message)
if not self.signing:
QMessageBox.warning(QDialog(), _('Warning'), _(message), _('OK'))
else:
self.signing = False
if clear_client and self.client is not None:
self.client.bad = True
self.device_checked = False
raise Exception(message)
def get_action(self):
if not self.accounts:
return 'create_accounts'
def can_sign_xpubkey(self, x_pubkey):
xpub, sequence = BIP32_Account.parse_xpubkey(x_pubkey)
return xpub in self.master_public_keys.values()
def can_create_accounts(self):
return False
def synchronize(self):
# synchronize existing accounts
BIP32_Wallet.synchronize(self)
# no further accounts for the moment
def can_change_password(self):
return False
def is_watching_only(self):
return self.force_watching_only
def get_client(self, noPin=False):
if not BTCHIP:
self.give_error('please install github.com/btchip/btchip-python')
aborted = False
if not self.client or self.client.bad:
try:
d = getDongle(BTCHIP_DEBUG)
self.client = btchip(d)
self.client.handler = self.plugin.handler
ver = self.client.getFirmwareVersion()
firmware = ver['version'].split(".")
self.canAlternateCoinVersions = (ver['specialVersion'] >= 0x20 and
map(int, firmware) >= [1, 0, 1])
if not checkFirmware(firmware):
d.close()
try:
updateFirmware()
except Exception, e:
aborted = True
raise e
d = getDongle(BTCHIP_DEBUG)
self.client = btchip(d)
try:
self.client.getOperationMode()
except BTChipException, e:
if (e.sw == 0x6985):
d.close()
dialog = StartBTChipPersoDialog()
dialog.exec_()
# Then fetch the reference again as it was invalidated
d = getDongle(BTCHIP_DEBUG)
self.client = btchip(d)
else:
raise e
if not noPin:
# Immediately prompts for the PIN
remaining_attempts = self.client.getVerifyPinRemainingAttempts()
                    if remaining_attempts != 1:
msg = "Enter your BTChip PIN - remaining attempts : " + str(remaining_attempts)
else:
msg = "Enter your BTChip PIN - WARNING : LAST ATTEMPT. If the PIN is not correct, the dongle will be wiped."
confirmed, p, pin = self.password_dialog(msg)
if not confirmed:
aborted = True
raise Exception('Aborted by user - please unplug the dongle and plug it again before retrying')
pin = pin.encode()
self.client.verifyPin(pin)
if self.canAlternateCoinVersions:
self.client.setAlternateCoinVersions(48, 5)
except BTChipException, e:
try:
self.client.dongle.close()
except:
pass
self.client = None
if (e.sw == 0x6faa):
raise Exception("Dongle is temporarily locked - please unplug it and replug it again")
if ((e.sw & 0xFFF0) == 0x63c0):
raise Exception("Invalid PIN - please unplug the dongle and plug it again before retrying")
raise e
except Exception, e:
try:
self.client.dongle.close()
except:
pass
self.client = None
if not aborted:
raise Exception("Could not connect to your BTChip dongle. Please verify access permissions, PIN, or unplug the dongle and plug it again")
else:
raise e
self.client.bad = False
self.device_checked = False
self.proper_device = False
return self.client
def address_id(self, address):
account_id, (change, address_index) = self.get_address_index(address)
return "44'/2'/%s'/%d/%d" % (account_id, change, address_index)
def create_main_account(self, password):
self.create_account('Main account', None) #name, empty password
def derive_xkeys(self, root, derivation, password):
derivation = derivation.replace(self.root_name,"44'/2'/")
xpub = self.get_public_key(derivation)
return xpub, None
def get_private_key(self, address, password):
return []
def get_public_key(self, bip32_path):
# S-L-O-W - we don't handle the fingerprint directly, so compute it manually from the previous node
# This only happens once so it's bearable
self.get_client() # prompt for the PIN before displaying the dialog if necessary
self.plugin.handler.show_message("Computing master public key")
try:
splitPath = bip32_path.split('/')
fingerprint = 0
if len(splitPath) > 1:
prevPath = "/".join(splitPath[0:len(splitPath) - 1])
nodeData = self.get_client().getWalletPublicKey(prevPath)
publicKey = compress_public_key(nodeData['publicKey'])
h = hashlib.new('ripemd160')
h.update(hashlib.sha256(publicKey).digest())
fingerprint = unpack(">I", h.digest()[0:4])[0]
nodeData = self.get_client().getWalletPublicKey(bip32_path)
publicKey = compress_public_key(nodeData['publicKey'])
depth = len(splitPath)
lastChild = splitPath[len(splitPath) - 1].split('\'')
if len(lastChild) == 1:
childnum = int(lastChild[0])
else:
childnum = 0x80000000 | int(lastChild[0])
xpub = "0488B21E".decode('hex') + chr(depth) + self.i4b(fingerprint) + self.i4b(childnum) + str(nodeData['chainCode']) + str(publicKey)
except Exception, e:
self.give_error(e, True)
finally:
self.plugin.handler.stop()
return EncodeBase58Check(xpub)
def get_master_public_key(self):
try:
if not self.mpk:
self.mpk = self.get_public_key("44'/2'")
return self.mpk
except Exception, e:
self.give_error(e, True)
def i4b(self, x):
return pack('>I', x)
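    # e.g. i4b(1) yields '\x00\x00\x00\x01', a 4-byte big-endian
    # string under Python 2.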
def add_keypairs(self, tx, keypairs, password):
#do nothing - no priv keys available
pass
def decrypt_message(self, pubkey, message, password):
self.give_error("Not supported")
def sign_message(self, address, message, password):
use2FA = False
self.signing = True
self.get_client() # prompt for the PIN before displaying the dialog if necessary
if not self.check_proper_device():
self.give_error('Wrong device or password')
address_path = self.address_id(address)
self.plugin.handler.show_message("Signing message ...")
try:
info = self.get_client().signMessagePrepare(address_path, message)
pin = ""
if info['confirmationNeeded']:
# TODO : handle different confirmation types. For the time being only supports keyboard 2FA
use2FA = True
confirmed, p, pin = self.password_dialog()
if not confirmed:
raise Exception('Aborted by user')
pin = pin.encode()
self.client.bad = True
self.device_checked = False
self.get_client(True)
signature = self.get_client().signMessageSign(pin)
except BTChipException, e:
if e.sw == 0x6a80:
self.give_error("Unfortunately, this message cannot be signed by BTChip. Only alphanumerical messages shorter than 140 characters are supported. Please remove any extra characters (tab, carriage return) and retry.")
else:
self.give_error(e, True)
except Exception, e:
self.give_error(e, True)
finally:
self.plugin.handler.stop()
self.client.bad = use2FA
self.signing = False
# Parse the ASN.1 signature
rLength = signature[3]
r = signature[4 : 4 + rLength]
sLength = signature[4 + rLength + 1]
s = signature[4 + rLength + 2:]
if rLength == 33:
r = r[1:]
if sLength == 33:
s = s[1:]
r = str(r)
s = str(s)
        # And convert it to the compact Bitcoin signed-message format:
        # one header byte (27 + 4 for a compressed key, plus the recovery
        # id taken from the low bit of the sign byte), followed by r and s
return b64encode(chr(27 + 4 + (signature[0] & 0x01)) + r + s)
def sign_transaction(self, tx, password):
if tx.is_complete():
return
#if tx.error:
# raise BaseException(tx.error)
self.signing = True
inputs = []
inputsPaths = []
pubKeys = []
trustedInputs = []
redeemScripts = []
signatures = []
preparedTrustedInputs = []
changePath = ""
changeAmount = None
output = None
outputAmount = None
use2FA = False
pin = ""
rawTx = tx.serialize()
# Fetch inputs of the transaction to sign
for txinput in tx.inputs:
if ('is_coinbase' in txinput and txinput['is_coinbase']):
self.give_error("Coinbase not supported") # should never happen
inputs.append([ self.transactions[txinput['prevout_hash']].raw,
txinput['prevout_n'] ])
address = txinput['address']
inputsPaths.append(self.address_id(address))
pubKeys.append(self.get_public_keys(address))
# Recognize outputs - only one output and one change is authorized
if len(tx.outputs) > 2: # should never happen
self.give_error("Transaction with more than 2 outputs not supported")
for type, address, amount in tx.outputs:
assert type == 'address'
if self.is_change(address):
changePath = self.address_id(address)
changeAmount = amount
else:
                if output is not None: # should never happen
self.give_error("Multiple outputs with no change not supported")
output = address
if not self.canAlternateCoinVersions:
v, h = bc_address_to_hash_160(address)
if v == 48:
output = hash_160_to_bc_address(h, 0)
outputAmount = amount
self.get_client() # prompt for the PIN before displaying the dialog if necessary
if not self.check_proper_device():
self.give_error('Wrong device or password')
self.plugin.handler.show_message("Signing Transaction ...")
try:
# Get trusted inputs from the original transactions
for utxo in inputs:
txtmp = bitcoinTransaction(bytearray(utxo[0].decode('hex')))
trustedInputs.append(self.get_client().getTrustedInput(txtmp, utxo[1]))
# TODO : Support P2SH later
redeemScripts.append(txtmp.outputs[utxo[1]].script)
# Sign all inputs
firstTransaction = True
inputIndex = 0
while inputIndex < len(inputs):
self.get_client().startUntrustedTransaction(firstTransaction, inputIndex,
trustedInputs, redeemScripts[inputIndex])
outputData = self.get_client().finalizeInput(output, format_satoshis_plain(outputAmount),
format_satoshis_plain(self.get_tx_fee(tx)), changePath, bytearray(rawTx.decode('hex')))
if firstTransaction:
transactionOutput = outputData['outputData']
if outputData['confirmationNeeded']:
# TODO : handle different confirmation types. For the time being only supports keyboard 2FA
self.plugin.handler.stop()
if 'keycardData' in outputData:
pin2 = ""
for keycardIndex in range(len(outputData['keycardData'])):
msg = "Do not enter your device PIN here !\r\n\r\n" + \
"Your BTChip wants to talk to you and tell you a unique second factor code.\r\n" + \
"For this to work, please match the character between stars of the output address using your security card\r\n\r\n" + \
"Output address : "
for index in range(len(output)):
if index == outputData['keycardData'][keycardIndex]:
msg = msg + "*" + output[index] + "*"
else:
msg = msg + output[index]
msg = msg + "\r\n"
confirmed, p, pin = self.password_dialog(msg)
if not confirmed:
raise Exception('Aborted by user')
try:
pin2 = pin2 + chr(int(pin[0], 16))
except:
raise Exception('Invalid PIN character')
pin = pin2
else:
use2FA = True
confirmed, p, pin = self.password_dialog()
if not confirmed:
raise Exception('Aborted by user')
pin = pin.encode()
self.client.bad = True
self.device_checked = False
self.get_client(True)
self.plugin.handler.show_message("Signing ...")
else:
# Sign input with the provided PIN
inputSignature = self.get_client().untrustedHashSign(inputsPaths[inputIndex],
pin)
inputSignature[0] = 0x30 # force for 1.4.9+
signatures.append(inputSignature)
inputIndex = inputIndex + 1
firstTransaction = False
except Exception, e:
self.give_error(e, True)
finally:
self.plugin.handler.stop()
# Reformat transaction
inputIndex = 0
while inputIndex < len(inputs):
# TODO : Support P2SH later
inputScript = get_regular_input_script(signatures[inputIndex], pubKeys[inputIndex][0].decode('hex'))
preparedTrustedInputs.append([ trustedInputs[inputIndex]['value'], inputScript ])
inputIndex = inputIndex + 1
updatedTransaction = format_transaction(transactionOutput, preparedTrustedInputs)
updatedTransaction = hexlify(updatedTransaction)
tx.update(updatedTransaction)
self.client.bad = use2FA
self.signing = False
def check_proper_device(self):
pubKey = DecodeBase58Check(self.master_public_keys["x/0'"])[45:]
if not self.device_checked:
self.plugin.handler.show_message("Checking device")
try:
nodeData = self.get_client().getWalletPublicKey("44'/2'/0'")
except Exception, e:
self.give_error(e, True)
finally:
self.plugin.handler.stop()
pubKeyDevice = compress_public_key(nodeData['publicKey'])
self.device_checked = True
if pubKey != pubKeyDevice:
self.proper_device = False
else:
self.proper_device = True
return self.proper_device
def password_dialog(self, msg=None):
if not msg:
msg = _("Do not enter your device PIN here !\r\n\r\n" \
"Your BTChip wants to talk to you and tell you a unique second factor code.\r\n" \
"For this to work, please open a text editor (on a different computer / device if you believe this computer is compromised) and put your cursor into it, unplug your BTChip and plug it back in.\r\n" \
"It should show itself to your computer as a keyboard and output the second factor along with a summary of the transaction it is signing into the text-editor.\r\n\r\n" \
"Check that summary and then enter the second factor code here.\r\n" \
"Before clicking OK, re-plug the device once more (unplug it and plug it again if you read the second factor code on the same computer)")
response = self.plugin.handler.prompt_auth(msg)
if response is None:
return False, None, None
return True, response, response
class BTChipQTHandler:
def __init__(self, win):
self.win = win
self.win.connect(win, SIGNAL('btchip_done'), self.dialog_stop)
self.win.connect(win, SIGNAL('message_dialog'), self.message_dialog)
self.win.connect(win, SIGNAL('auth_dialog'), self.auth_dialog)
self.done = threading.Event()
def stop(self):
self.win.emit(SIGNAL('btchip_done'))
def show_message(self, msg):
self.message = msg
self.win.emit(SIGNAL('message_dialog'))
def prompt_auth(self, msg):
self.done.clear()
self.message = msg
self.win.emit(SIGNAL('auth_dialog'))
self.done.wait()
return self.response
def auth_dialog(self):
response = QInputDialog.getText(None, "BTChip Authentication", self.message, QLineEdit.Password)
if not response[1]:
self.response = None
else:
self.response = str(response[0])
self.done.set()
def message_dialog(self):
self.d = QDialog()
self.d.setModal(1)
self.d.setWindowTitle('BTChip')
self.d.setWindowFlags(self.d.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
l = QLabel(self.message)
vbox = QVBoxLayout(self.d)
vbox.addWidget(l)
self.d.show()
def dialog_stop(self):
if self.d is not None:
self.d.hide()
self.d = None
class BTChipCmdLineHandler:
def stop(self):
pass
def show_message(self, msg):
print_msg(msg)
def prompt_auth(self, msg):
import getpass
print_msg(msg)
response = getpass.getpass('')
if len(response) == 0:
return None
return response
| gpl-3.0 |
adhish20/TwitterWithCassandra | twiss/lib/python2.7/site-packages/pip/_vendor/requests/models.py | 410 | 29176 | # -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import datetime
from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .packages.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, to_native_string)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring)
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
json_dumps = json.dumps
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
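    # A minimal sketch of the expected behaviour (hypothetical data):
    # _encode_params({'k': ['v1', 'v2']}) yields 'k=v1&k=v2', while plain
    # strings and file-like objects are returned unchanged.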
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
else:
fdata = fp.read()
rf = RequestField(name=k, data=fdata,
filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
:param json: json for the body to attach to the request (if data is not specified).
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None,
url=None,
headers=None,
files=None,
data=None,
params=None,
auth=None,
cookies=None,
hooks=None,
json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None,
json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = self.method.upper()
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
        #: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/kennethreitz/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
"Perhaps you meant http://{0}?".format(
to_native_string(url, 'utf8')))
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# Only want to apply IDNA to the hostname
try:
host = host.encode('idna').decode('utf-8')
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
if headers:
self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items())
else:
self.headers = CaseInsensitiveDict()
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
length = None
if json is not None:
content_type = 'application/json'
body = json_dumps(json)
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, dict))
])
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length is not None:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data and json is None:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2)
self.headers['Content-Length'] = builtin_str(body.tell())
body.seek(0, 0)
elif body is not None:
l = super_len(body)
if l:
self.headers['Content-Length'] = builtin_str(l)
elif (self.method not in ('GET', 'HEAD')) and (self.headers.get('Content-Length') is None):
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand."""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content',
'status_code',
'headers',
'url',
'history',
'encoding',
'reason',
'cookies',
'elapsed',
'request',
]
def __init__(self):
super(Response, self).__init__()
self._content = False
self._content_consumed = False
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return dict(
(attr, getattr(self, attr, None))
for attr in self.__attrs__
)
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __nonzero__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanant versions of redirect"""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library"""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
try:
# Special case for urllib3.
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
except AttributeError:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
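    # A minimal usage sketch (assuming `r` is a streamed Response and
    # `process` is a hypothetical callback):
    #   for line in r.iter_lines():
    #       if line:
    #           process(line)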
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
try:
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
except AttributeError:
self._content = None
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
"""
if not self.encoding and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return json.loads(self.content.decode(encoding), **kwargs)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return json.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if 400 <= self.status_code < 500:
http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)
elif 500 <= self.status_code < 600:
http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
return self.raw.release_conn()
| mit |
cmouse/buildbot | master/buildbot/process/users/users.py | 3 | 4287 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
from binascii import hexlify
from hashlib import sha1
from twisted.internet import defer
from twisted.python import log
from buildbot.util import bytes2unicode
from buildbot.util import unicode2bytes
# TODO: fossil comes from a plugin. We should have an API that plugins could use to
# register allowed user types.
srcs = ['git', 'svn', 'hg', 'cvs', 'darcs', 'bzr', 'fossil']
salt_len = 8
def createUserObject(master, author, src=None):
"""
Take a Change author and source and translate them into a User Object,
storing the user in master.db, or returning None if the src is not
specified.
@param master: link to Buildmaster for database operations
@type master: master.Buildmaster instance
    @param author: Change author if string or Authz instance
    @type author: string or www.authz instance
@param src: source from which the User Object will be created
@type src: string
"""
if not src:
log.msg("No vcs information found, unable to create User Object")
return defer.succeed(None)
if src in srcs:
usdict = dict(identifier=author, attr_type=src, attr_data=author)
else:
log.msg("Unrecognized source argument: {}".format(src))
return defer.succeed(None)
return master.db.users.findUserByAttr(
identifier=usdict['identifier'],
attr_type=usdict['attr_type'],
attr_data=usdict['attr_data'])
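# A usage sketch (assuming `master` is a live BuildMaster instance):
#   d = createUserObject(master, 'Jane Doe <jane@example.com>', src='git')
# The returned Deferred fires with the uid of the found or created user.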
def _extractContact(usdict, contact_types, uid):
if usdict:
for type in contact_types:
contact = usdict.get(type)
if contact:
break
else:
contact = None
if contact is None:
log.msg(format="Unable to find any of %(contact_types)r for uid: %(uid)r",
contact_types=contact_types, uid=uid)
return contact
def getUserContact(master, contact_types, uid):
"""
This is a simple getter function that returns a user attribute
that matches the contact_types argument, or returns None if no
uid/match is found.
@param master: BuildMaster used to query the database
@type master: BuildMaster instance
@param contact_types: list of contact attributes to look for in
in a given user, such as 'email' or 'nick'
@type contact_types: list of strings
@param uid: user that is searched for the contact_types match
@type uid: integer
@returns: string of contact information or None via deferred
"""
d = master.db.users.getUser(uid)
d.addCallback(_extractContact, contact_types, uid)
return d
def encrypt(passwd):
"""
Encrypts the incoming password after adding some salt to store
it in the database.
@param passwd: password portion of user credentials
@type passwd: string
@returns: encrypted/salted string
"""
m = sha1()
salt = hexlify(os.urandom(salt_len))
m.update(unicode2bytes(passwd) + salt)
crypted = bytes2unicode(salt) + m.hexdigest()
return crypted
def check_passwd(guess, passwd):
"""
Tests to see if the guess, after salting and hashing, matches the
passwd from the database.
@param guess: incoming password trying to be used for authentication
@param passwd: already encrypted password from the database
@returns: boolean
"""
m = sha1()
    salt = passwd[:salt_len * 2]  # salt_len * 2 because hexlify() doubles the length
m.update(unicode2bytes(guess) + unicode2bytes(salt))
crypted_guess = bytes2unicode(salt) + m.hexdigest()
return (crypted_guess == bytes2unicode(passwd))
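# A minimal round-trip sketch of the two helpers above:
#   crypted = encrypt('hunter2')
#   assert check_passwd('hunter2', crypted)
#   assert not check_passwd('wrong guess', crypted)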
| gpl-2.0 |
openstack/neutron | neutron/plugins/ml2/drivers/linuxbridge/agent/common/utils.py | 5 | 1186 | # Copyright 2012 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron_lib import constants as n_const
from oslo_log import log
from neutron.plugins.ml2.drivers.linuxbridge.agent.common import constants
LOG = log.getLogger(__name__)
def get_tap_device_name(interface_id):
"""Convert port ID into device name format expected by linux bridge."""
if not interface_id:
LOG.warning("Invalid Interface ID, will lead to incorrect "
"tap device name")
tap_device_name = (n_const.TAP_DEVICE_PREFIX +
interface_id[:constants.RESOURCE_ID_LENGTH])
return tap_device_name
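# Illustrative sketch (hypothetical port id), assuming the usual 'tap'
# prefix and a RESOURCE_ID_LENGTH of 11:
#   get_tap_device_name('123e4567-e89b-12d3-a456-426614174000')
# returns 'tap123e4567-e8'.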
| apache-2.0 |
Sylrob434/CouchPotatoServer | libs/rsa/_version133.py | 177 | 11274 | """RSA module
Module for calculating large primes, and RSA encryption, decryption,
signing and verification. Includes generating public and private keys.
WARNING: this code implements the mathematics of RSA. It is not suitable for
real-world secure cryptography purposes. It has not been reviewed by a security
expert. It does not include padding of data. There are many ways in which the
output of this module, when used without any modification, can be successfully
attacked.
"""
__author__ = "Sybren Stuvel, Marloes de Boer and Ivo Tamboer"
__date__ = "2010-02-05"
__version__ = '1.3.3'
# NOTE: Python's modulo can return negative numbers. We compensate for
# this behaviour using the abs() function
from cPickle import dumps, loads
import base64
import math
import os
import random
import sys
import types
import zlib
from rsa._compat import byte
# Display a warning that this insecure version is imported.
import warnings
warnings.warn('Insecure version of the RSA module is imported as %s, be careful'
% __name__)
def gcd(p, q):
"""Returns the greatest common divisor of p and q
>>> gcd(42, 6)
6
"""
if p<q: return gcd(q, p)
if q == 0: return p
return gcd(q, abs(p%q))
def bytes2int(bytes):
"""Converts a list of bytes or a string to an integer
    >>> (128*256 + 64)*256 + 15
8405007
>>> l = [128, 64, 15]
>>> bytes2int(l)
8405007
"""
if not (type(bytes) is types.ListType or type(bytes) is types.StringType):
raise TypeError("You must pass a string or a list")
# Convert byte stream to integer
integer = 0
for byte in bytes:
integer *= 256
if type(byte) is types.StringType: byte = ord(byte)
integer += byte
return integer
def int2bytes(number):
"""Converts a number to a string of bytes
>>> bytes2int(int2bytes(123456789))
123456789
"""
if not (type(number) is types.LongType or type(number) is types.IntType):
raise TypeError("You must pass a long or an int")
string = ""
while number > 0:
string = "%s%s" % (byte(number & 0xFF), string)
number /= 256
return string
def fast_exponentiation(a, p, n):
"""Calculates r = a^p mod n
"""
result = a % n
remainders = []
while p != 1:
remainders.append(p & 1)
p = p >> 1
while remainders:
rem = remainders.pop()
result = ((a ** rem) * result ** 2) % n
return result
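# Sanity sketch: for p >= 1 this agrees with Python's built-in
# three-argument pow, e.g. fast_exponentiation(4, 13, 497) == pow(4, 13, 497) == 445.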
def read_random_int(nbits):
"""Reads a random integer of approximately nbits bits rounded up
to whole bytes"""
nbytes = ceil(nbits/8.)
randomdata = os.urandom(nbytes)
return bytes2int(randomdata)
def ceil(x):
"""ceil(x) -> int(math.ceil(x))"""
return int(math.ceil(x))
def randint(minvalue, maxvalue):
"""Returns a random integer x with minvalue <= x <= maxvalue"""
# Safety - get a lot of random data even if the range is fairly
# small
min_nbits = 32
# The range of the random numbers we need to generate
range = maxvalue - minvalue
# Which is this number of bytes
rangebytes = ceil(math.log(range, 2) / 8.)
# Convert to bits, but make sure it's always at least min_nbits*2
rangebits = max(rangebytes * 8, min_nbits * 2)
# Take a random number of bits between min_nbits and rangebits
nbits = random.randint(min_nbits, rangebits)
return (read_random_int(nbits) % range) + minvalue
def fermat_little_theorem(p):
"""Returns 1 if p may be prime, and something else if p definitely
is not prime"""
a = randint(1, p-1)
return fast_exponentiation(a, p-1, p)
def jacobi(a, b):
"""Calculates the value of the Jacobi symbol (a/b)
"""
if a % b == 0:
return 0
result = 1
while a > 1:
if a & 1:
if ((a-1)*(b-1) >> 2) & 1:
result = -result
b, a = a, b % a
else:
if ((b ** 2 - 1) >> 3) & 1:
result = -result
a = a >> 1
return result
def jacobi_witness(x, n):
"""Returns False if n is an Euler pseudo-prime with base x, and
True otherwise.
"""
j = jacobi(x, n) % n
f = fast_exponentiation(x, (n-1)/2, n)
if j == f: return False
return True
def randomized_primality_testing(n, k):
"""Calculates whether n is composite (which is always correct) or
prime (which is incorrect with error probability 2**-k)
    Returns False if the number is composite, and True if it's
probably prime.
"""
q = 0.5 # Property of the jacobi_witness function
# t = int(math.ceil(k / math.log(1/q, 2)))
t = ceil(k / math.log(1/q, 2))
for i in range(t+1):
x = randint(1, n-1)
if jacobi_witness(x, n): return False
return True
def is_prime(number):
"""Returns True if the number is prime, and False otherwise.
>>> is_prime(42)
0
>>> is_prime(41)
1
"""
"""
if not fermat_little_theorem(number) == 1:
# Not prime, according to Fermat's little theorem
return False
"""
if randomized_primality_testing(number, 5):
# Prime, according to Jacobi
return True
# Not prime
return False
def getprime(nbits):
"""Returns a prime number of max. 'math.ceil(nbits/8)*8' bits. In
other words: nbits is rounded up to whole bytes.
>>> p = getprime(8)
>>> is_prime(p-1)
0
>>> is_prime(p)
1
>>> is_prime(p+1)
0
"""
nbytes = int(math.ceil(nbits/8.))
while True:
integer = read_random_int(nbits)
# Make sure it's odd
integer |= 1
# Test for primeness
if is_prime(integer): break
# Retry if not prime
return integer
def are_relatively_prime(a, b):
"""Returns True if a and b are relatively prime, and False if they
are not.
>>> are_relatively_prime(2, 3)
1
>>> are_relatively_prime(2, 4)
0
"""
d = gcd(a, b)
return (d == 1)
def find_p_q(nbits):
"""Returns a tuple of two different primes of nbits bits"""
p = getprime(nbits)
while True:
q = getprime(nbits)
if not q == p: break
return (p, q)
def extended_euclid_gcd(a, b):
"""Returns a tuple (d, i, j) such that d = gcd(a, b) = ia + jb
"""
if b == 0:
return (a, 1, 0)
q = abs(a % b)
r = long(a / b)
(d, k, l) = extended_euclid_gcd(b, q)
return (d, l, k - l*r)
# Main function: calculate encryption and decryption keys
def calculate_keys(p, q, nbits):
"""Calculates an encryption and a decryption key for p and q, and
returns them as a tuple (e, d)"""
n = p * q
phi_n = (p-1) * (q-1)
while True:
# Make sure e has enough bits so we ensure "wrapping" through
# modulo n
e = getprime(max(8, nbits/2))
if are_relatively_prime(e, n) and are_relatively_prime(e, phi_n): break
(d, i, j) = extended_euclid_gcd(e, phi_n)
if not d == 1:
raise Exception("e (%d) and phi_n (%d) are not relatively prime" % (e, phi_n))
if not (e * i) % phi_n == 1:
raise Exception("e (%d) and i (%d) are not mult. inv. modulo phi_n (%d)" % (e, i, phi_n))
return (e, i)
def gen_keys(nbits):
"""Generate RSA keys of nbits bits. Returns (p, q, e, d).
Note: this can take a long time, depending on the key size.
"""
while True:
(p, q) = find_p_q(nbits)
(e, d) = calculate_keys(p, q, nbits)
# For some reason, d is sometimes negative. We don't know how
# to fix it (yet), so we keep trying until everything is shiny
if d > 0: break
return (p, q, e, d)
def gen_pubpriv_keys(nbits):
"""Generates public and private keys, and returns them as (pub,
priv).
The public key consists of a dict {e: ..., , n: ....). The private
key consists of a dict {d: ...., p: ...., q: ....).
"""
(p, q, e, d) = gen_keys(nbits)
return ( {'e': e, 'n': p*q}, {'d': d, 'p': p, 'q': q} )
def encrypt_int(message, ekey, n):
"""Encrypts a message using encryption key 'ekey', working modulo
n"""
if type(message) is types.IntType:
return encrypt_int(long(message), ekey, n)
if not type(message) is types.LongType:
raise TypeError("You must pass a long or an int")
if message > 0 and \
math.floor(math.log(message, 2)) > math.floor(math.log(n, 2)):
raise OverflowError("The message is too long")
return fast_exponentiation(message, ekey, n)
def decrypt_int(cyphertext, dkey, n):
"""Decrypts a cypher text using the decryption key 'dkey', working
modulo n"""
return encrypt_int(cyphertext, dkey, n)
def sign_int(message, dkey, n):
"""Signs 'message' using key 'dkey', working modulo n"""
return decrypt_int(message, dkey, n)
def verify_int(signed, ekey, n):
"""verifies 'signed' using key 'ekey', working modulo n"""
return encrypt_int(signed, ekey, n)
def picklechops(chops):
"""Pickles and base64encodes it's argument chops"""
value = zlib.compress(dumps(chops))
encoded = base64.encodestring(value)
return encoded.strip()
def unpicklechops(string):
"""base64decodes and unpickes it's argument string into chops"""
return loads(zlib.decompress(base64.decodestring(string)))
def chopstring(message, key, n, funcref):
"""Splits 'message' into chops that are at most as long as n,
converts these into integers, and calls funcref(integer, key, n)
for each chop.
Used by 'encrypt' and 'sign'.
"""
msglen = len(message)
mbits = msglen * 8
nbits = int(math.floor(math.log(n, 2)))
nbytes = nbits / 8
blocks = msglen / nbytes
if msglen % nbytes > 0:
blocks += 1
cypher = []
for bindex in range(blocks):
offset = bindex * nbytes
block = message[offset:offset+nbytes]
value = bytes2int(block)
cypher.append(funcref(value, key, n))
return picklechops(cypher)
def gluechops(chops, key, n, funcref):
"""Glues chops back together into a string. calls
funcref(integer, key, n) for each chop.
Used by 'decrypt' and 'verify'.
"""
message = ""
chops = unpicklechops(chops)
for cpart in chops:
mpart = funcref(cpart, key, n)
message += int2bytes(mpart)
return message
def encrypt(message, key):
"""Encrypts a string 'message' with the public key 'key'"""
return chopstring(message, key['e'], key['n'], encrypt_int)
def sign(message, key):
"""Signs a string 'message' with the private key 'key'"""
return chopstring(message, key['d'], key['p']*key['q'], decrypt_int)
def decrypt(cypher, key):
"""Decrypts a cypher with the private key 'key'"""
return gluechops(cypher, key['d'], key['p']*key['q'], decrypt_int)
def verify(cypher, key):
"""Verifies a cypher with the public key 'key'"""
return gluechops(cypher, key['e'], key['n'], encrypt_int)
# Do doctest if we're not imported
if __name__ == "__main__":
import doctest
doctest.testmod()
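    # A small self-check on top of the doctests (slow, and insecure by
    # design, as the module warning above explains):
    pub, priv = gen_pubpriv_keys(64)
    assert decrypt(encrypt("hello", pub), priv) == "hello"
    assert verify(sign("hello", priv), pub) == "hello"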
__all__ = ["gen_pubpriv_keys", "encrypt", "decrypt", "sign", "verify"]
| gpl-3.0 |
menegon/geonode | scripts/cloud/demo_site.py | 22 | 2749 | import json
import jenkins
import sys
from optparse import OptionParser
from six.moves.urllib.request import Request
JENKINS_IP = 'http://52.7.139.177/'
GEONODE_DEMO_DOMAIN = 'demo.geonode.org' # should match the jenkins configuration
NODE_LIST = 'computer/api/json' # jenkins api backend
GEONODE_DEMO_JOB = 'geonode-aws' # jenkins job name for demo site
class DemoGeonode(object):
"""
    This class allows interaction with the Jenkins APIs to do several tasks.
    For a more detailed guide on how to use self.j see
https://python-jenkins.readthedocs.org/en/latest/api.html
"""
def __init__(self, username, token):
self.j = jenkins.Jenkins(JENKINS_IP, username, token)
def redeployDemo(self):
"""Delete the jenkins node on which runs the amazon VM,
this will both shutdown the VM and create a new one with a fresh geonode instance"""
nodes_data = json.loads(self.j.jenkins_open(Request(self.j.server + NODE_LIST)))
demo_node = self.getDemoNode(nodes_data)
if demo_node is not None:
self.deleteNode(demo_node)
self.buildJob(GEONODE_DEMO_JOB)
print 're-deploy complete!'
else:
            print 'No demo.geonode.org node found on jenkins'
def getDemoNode(self, nodes_data):
"""Commodity method to get the correct jenkins node name,
the name is composed by 'demo.geonode.org' and the VM id"""
demo_node = None
for node in nodes_data['computer']:
if GEONODE_DEMO_DOMAIN in node['displayName']:
demo_node = node['displayName']
return demo_node
def deleteNode(self, node_name):
"""Delete the jenkins node and shutdown the amazon VM"""
print 'Deleting demo node'
self.j.delete_node(node_name)
print 'Deletion requested'
def buildJob(self, job):
"""Trigger a job build"""
print 'Building %s job' % job
self.j.build_job(job)
print 'Build requested'
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-u", "--username", dest="username",
help="jenkins username")
parser.add_option("-t", "--token", dest="token",
help="jenkins access token")
(options, args) = parser.parse_args()
if options.username is not None and options.token is not None:
task = sys.argv[-1]
demo = DemoGeonode(options.username, options.token)
if task == 'redeploy-demo-site':
demo.redeployDemo()
elif task == 'build-demo-job':
demo.buildJob(GEONODE_DEMO_JOB)
else:
print 'Command not found'
else:
print 'username and access token are both required'
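# Example invocation (hypothetical credentials):
#   python demo_site.py -u jenkins_user -t API_TOKEN redeploy-demo-site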
| gpl-3.0 |
viniciusfk9/LearningFlask | crimemap/crimemap.py | 1 | 1644 | import datetime
import json
import string
import dateparser
from flask import Flask
from flask import render_template
from flask import request
import dbconfig
if dbconfig.test:
from mockdbhelper import MockDBHelper as DBHelper
else:
from dbhelper import DBHelper
app = Flask(__name__)
DB = DBHelper()
categories = ['mugging', 'break-in']
@app.route("/")
def home(error_message=None):
crimes = DB.get_all_crimes()
crimes = json.dumps(crimes)
return render_template("home.html", crimes=crimes, categories=categories,
error_message=error_message)
@app.route("/submitcrime", methods=['POST'])
def submit_crime():
category = request.form.get("category")
if category not in categories:
return home()
date = format_date(request.form.get("date"))
if not date:
return home("Invalid date. Please use yyyy-mm-dd format")
try:
latitude = float(request.form.get("latitude"))
longitude = float(request.form.get("longitude"))
except ValueError:
return home()
description = sanitize_string(request.form.get("description"))
DB.add_crime(category, date, latitude, longitude, description)
return home()
def format_date(user_date):
date = dateparser.parse(user_date)
try:
return datetime.datetime.strftime(date, "%Y-%m-%d")
except (ValueError, TypeError) as _:
return None
def sanitize_string(user_input):
white_list = string.letters + string.digits + " !?$.,;:-'()&"
return filter(lambda x: x in white_list, user_input)
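# Illustrative sketch: sanitize_string("Stolen <b>bike</b>!") drops the
# non-whitelisted '<', '>' and '/' characters and returns "Stolen bbikeb!".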
if __name__ == '__main__':
app.run(port=5000, debug=True)
| gpl-3.0 |
matsumoto-r/synciga | src/tools/gyp/test/ninja/normalize-paths-win/gyptest-normalize-paths.py | 180 | 1272 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure paths are normalized with VS macros properly expanded on Windows.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['ninja'])
test.run_gyp('normalize-paths.gyp')
# We can't use existence tests because any case will pass, so we check the
# contents of ninja files directly since that's what we're most concerned
# with anyway.
subninja = open(test.built_file_path('obj/some_target.ninja')).read()
if '$!product_dir' in subninja:
test.fail_test()
if 'out\\Default' in subninja:
test.fail_test()
second = open(test.built_file_path('obj/second.ninja')).read()
if ('..\\..\\things\\AnotherName.exe' in second or
'AnotherName.exe' not in second):
test.fail_test()
action = open(test.built_file_path('obj/action.ninja')).read()
if '..\\..\\out\\Default' in action:
test.fail_test()
if '..\\..\\SomethingElse' in action or 'SomethingElse' not in action:
test.fail_test()
if '..\\..\\SomeOtherInput' in action or 'SomeOtherInput' not in action:
test.fail_test()
test.pass_test()
| bsd-3-clause |
40223234/40223234 | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/base.py | 603 | 4652 | #!/usr/bin/env python
## https://bitbucket.org/pygame/pygame/raw/2383b8ab0e2273bc83c545ab9c18fee1f3459c64/pygame/base.py
'''Pygame core routines
Contains the core routines that are used by the rest of the
pygame modules. Its routines are merged directly into the pygame
namespace. This mainly includes the auto-initialization `init` and
`quit` routines.
There is a small module named `locals` that also gets merged into
this namespace. This contains all the constants needed by pygame.
Object constructors also get placed into this namespace, you can
call functions like `Rect` and `Surface` to create objects of
that type. As a convenience, you can import the members of
pygame.locals directly into your module's namespace with::
from pygame.locals import *
Most of the pygame examples do this if you'd like to take a look.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import atexit
import sys
#import SDL
_quitfunctions = []
class error(RuntimeError):
pass
def init():
'''Autoinitialize all imported pygame modules.
Initialize all imported pygame modules. Includes pygame modules
that are not part of the base modules (like font and image).
It does not raise exceptions, but instead silently counts which
modules have failed to init. The return argument contains a count
of the number of modules initialized, and the number of modules
that failed to initialize.
You can always initialize the modules you want by hand. The
modules that need it have an `init` and `quit` routine built in,
which you can call directly. They also have a `get_init` routine
which you can use to doublecheck the initialization. Note that
the manual `init` routines will raise an exception on error. Be
aware that most platforms require the display module to be
initialized before others. This `init` will handle that for you,
but if you initialize by hand, be aware of this constraint.
    As with the manual `init` routines, it is safe to call this
    `init` as often as you like.
:rtype: int, int
:return: (count_passed, count_failed)
'''
success = 0
fail = 0
#SDL.SDL_Init(SDL.SDL_INIT_EVENTTHREAD | SDL.SDL_INIT_TIMER)
if _video_autoinit():
success += 1
else:
fail += 1
for mod in sys.modules.values():
if hasattr(mod, '__PYGAMEinit__') and callable(mod.__PYGAMEinit__):
try:
mod.__PYGAMEinit__()
success += 1
except:
fail += 1
return success, fail
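# A small usage sketch: callers typically unpack the counts, e.g.
#   passed, failed = init()
# and decide for themselves whether failed > 0 is fatal.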
def register_quit(func):
'''Routine to call when pygame quits.
The given callback routine will be called when pygame is
quitting. Quit callbacks are served on a 'last in, first out'
basis.
'''
_quitfunctions.append(func)
def _video_autoquit():
if SDL.SDL_WasInit(SDL.SDL_INIT_VIDEO):
SDL.SDL_QuitSubSystem(SDL.SDL_INIT_VIDEO)
def _video_autoinit():
return 1
#if not SDL.SDL_WasInit(SDL.SDL_INIT_VIDEO):
# SDL.SDL_InitSubSystem(SDL.SDL_INIT_VIDEO)
# SDL.SDL_EnableUNICODE(1)
#return 1
def _atexit_quit():
while _quitfunctions:
func = _quitfunctions.pop()
func()
_video_autoquit()
#SDL.SDL_Quit()
def get_sdl_version():
'''Get the version of the linked SDL runtime.
:rtype: int, int, int
:return: major, minor, patch
'''
#v = SDL.SDL_Linked_Version()
#return v.major, v.minor, v.patch
return None, None, None
def quit():
'''Uninitialize all pygame modules.
Uninitialize all pygame modules that have been initialized. Even
if you initialized the module by hand, this `quit` will
uninitialize it for you.
All the pygame modules are uninitialized automatically when your
    program exits, so you will usually not need this routine. If your
    program plans to keep running after it is done with pygame, then this
    would be a good time to make this call.
'''
_atexit_quit()
def get_error():
'''Get current error message.
SDL maintains an internal current error message. This message is
usually given to you when an SDL related exception occurs, but
sometimes you may want to call this directly yourself.
:rtype: str
'''
#return SDL.SDL_GetError()
return ''
def _rgba_from_obj(obj):
if not type(obj) in (tuple, list):
return None
if len(obj) == 1:
return _rgba_from_obj(obj[0])
elif len(obj) == 3:
return (int(obj[0]), int(obj[1]), int(obj[2]), 255)
elif len(obj) == 4:
return obj
else:
return None
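# Worked examples for _rgba_from_obj (illustrative, not part of the
# original module):
#   >>> _rgba_from_obj((255, 0, 0))     # RGB gains an opaque alpha
#   (255, 0, 0, 255)
#   >>> _rgba_from_obj([(10, 20, 30)])  # one-element sequences unwrap
#   (10, 20, 30, 255)
#   >>> _rgba_from_obj((1, 2, 3, 4))    # RGBA passes through unchanged
#   (1, 2, 3, 4)
#   >>> _rgba_from_obj('red')           # anything else yields None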
atexit.register(_atexit_quit)
| gpl-3.0 |
mjirayu/sit_academy | lms/djangoapps/instructor/management/commands/openended_stats.py | 86 | 5361 | """
Command to get statistics about open ended problems.
"""
import csv
import time
from django.core.management.base import BaseCommand
from optparse import make_option
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from courseware.courses import get_course
from courseware.models import StudentModule
from student.models import anonymous_id_for_user, CourseEnrollment
from instructor.utils import get_module_for_student
class Command(BaseCommand):
"""
Command to get statistics about open ended problems.
"""
help = "Usage: openended_stats <course_id> <problem_location> --task-number=<task_number>\n"
option_list = BaseCommand.option_list + (
make_option('--task-number',
type='int', default=0,
help="Task number to get statistics about."),
)
def handle(self, *args, **options):
"""Handler for command."""
task_number = options['task_number']
if len(args) == 2:
course_id = SlashSeparatedCourseKey.from_deprecated_string(args[0])
usage_key = course_id.make_usage_key_from_deprecated_string(args[1])
else:
print self.help
return
try:
course = get_course(course_id)
except ValueError as err:
print err
return
descriptor = modulestore().get_item(usage_key, depth=0)
if descriptor is None:
print "Location {0} not found in course".format(usage_key)
return
try:
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
print "Total students enrolled in {0}: {1}".format(course_id, enrolled_students.count())
calculate_task_statistics(enrolled_students, course, usage_key, task_number)
except KeyboardInterrupt:
print "\nOperation Cancelled"
def calculate_task_statistics(students, course, location, task_number, write_to_file=True):
"""Print stats of students."""
stats = {
OpenEndedChild.INITIAL: 0,
OpenEndedChild.ASSESSING: 0,
OpenEndedChild.POST_ASSESSMENT: 0,
OpenEndedChild.DONE: 0
}
students_with_saved_answers = []
students_with_ungraded_submissions = [] # pylint: disable=invalid-name
students_with_graded_submissions = [] # pylint: disable=invalid-name
students_with_no_state = []
student_modules = StudentModule.objects.filter(module_state_key=location, student__in=students).order_by('student')
print "Total student modules: {0}".format(student_modules.count())
for index, student_module in enumerate(student_modules):
if index % 100 == 0:
print "--- {0} students processed ---".format(index)
student = student_module.student
print "{0}:{1}".format(student.id, student.username)
module = get_module_for_student(student, location, course=course)
if module is None:
print " WARNING: No state found"
students_with_no_state.append(student)
continue
latest_task = module.child_module.get_task_number(task_number)
if latest_task is None:
print " No task state found"
students_with_no_state.append(student)
continue
task_state = latest_task.child_state
stats[task_state] += 1
print " State: {0}".format(task_state)
if task_state == OpenEndedChild.INITIAL:
if latest_task.stored_answer is not None:
students_with_saved_answers.append(student)
elif task_state == OpenEndedChild.ASSESSING:
students_with_ungraded_submissions.append(student)
elif task_state == OpenEndedChild.POST_ASSESSMENT or task_state == OpenEndedChild.DONE:
students_with_graded_submissions.append(student)
print "----------------------------------"
print "Time: {0}".format(time.strftime("%Y %b %d %H:%M:%S +0000", time.gmtime()))
print "Course: {0}".format(course.id)
print "Location: {0}".format(location)
print "No state: {0}".format(len(students_with_no_state))
print "Initial State: {0}".format(stats[OpenEndedChild.INITIAL] - len(students_with_saved_answers))
print "Saved answers: {0}".format(len(students_with_saved_answers))
print "Submitted answers: {0}".format(stats[OpenEndedChild.ASSESSING])
print "Received grades: {0}".format(stats[OpenEndedChild.POST_ASSESSMENT] + stats[OpenEndedChild.DONE])
print "----------------------------------"
if write_to_file:
filename = "stats.{0}.{1}".format(location.course, location.name)
time_stamp = time.strftime("%Y%m%d-%H%M%S")
with open('{0}.{1}.csv'.format(filename, time_stamp), 'wb') as csv_file:
writer = csv.writer(csv_file, delimiter=' ', quoting=csv.QUOTE_MINIMAL)
for student in students_with_ungraded_submissions:
writer.writerow(("ungraded", student.id, anonymous_id_for_user(student, None), student.username))
for student in students_with_graded_submissions:
writer.writerow(("graded", student.id, anonymous_id_for_user(student, None), student.username))
return stats
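# Example invocation of this management command (hypothetical course id and
# problem location, shown only to illustrate the expected argument format):
#   python manage.py openended_stats MITx/6.002x/2012_Fall \
#       i4x://MITx/6.002x/combinedopenended/Problem1 --task-number=0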
| agpl-3.0 |
alanjw/GreenOpenERP-Win-X86 | python/Lib/markupbase.py | 9 | 15039 | """Shared support for scanning document type declarations in HTML and XHTML.
This module is used as a foundation for the HTMLParser and sgmllib
modules (indirectly, for htmllib as well). It has no documented
public API and should not be used directly.
"""
import re
_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
_commentclose = re.compile(r'--\s*>')
_markedsectionclose = re.compile(r']\s*]\s*>')
# An analysis of the MS-Word extensions is available at
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
_msmarkedsectionclose = re.compile(r']\s*>')
del re
class ParserBase:
"""Parser base class which provides some common support methods used
by the SGML/HTML and XHTML parsers."""
def __init__(self):
if self.__class__ is ParserBase:
raise RuntimeError(
"markupbase.ParserBase must be subclassed")
def error(self, message):
raise NotImplementedError(
"subclasses of ParserBase must override error()")
def reset(self):
self.lineno = 1
self.offset = 0
def getpos(self):
"""Return current line number and offset."""
return self.lineno, self.offset
# Internal -- update line number and offset. This should be
# called for each piece of data exactly once, in order -- in other
# words the concatenation of all the input strings to this
# function should be exactly the entire input.
def updatepos(self, i, j):
if i >= j:
return j
rawdata = self.rawdata
nlines = rawdata.count("\n", i, j)
if nlines:
self.lineno = self.lineno + nlines
pos = rawdata.rindex("\n", i, j) # Should not fail
self.offset = j-(pos+1)
else:
self.offset = self.offset + j-i
return j
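    # Worked example (illustrative): with rawdata = 'ab\ncd', updatepos(0, 4)
    # counts one newline in rawdata[0:4], so lineno goes from 1 to 2 and
    # offset becomes 4 - (2 + 1) = 1, i.e. column 1 on the new line.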
_decl_otherchars = ''
# Internal -- parse declaration (for use by subclasses).
def parse_declaration(self, i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
# ISO 8879:1986, however, has more complex
# declaration syntax for elements in <!...>, including:
# --comment--
# [marked section]
# name in the following list: ENTITY, DOCTYPE, ELEMENT,
# ATTLIST, NOTATION, SHORTREF, USEMAP,
# LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
rawdata = self.rawdata
j = i + 2
assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
if rawdata[j:j+1] == ">":
# the empty comment <!>
return j + 1
if rawdata[j:j+1] in ("-", ""):
# Start of comment followed by buffer boundary,
# or just a buffer boundary.
return -1
# A simple, practical version could look like: ((name|stringlit) S*) + '>'
n = len(rawdata)
if rawdata[j:j+2] == '--': #comment
# Locate --.*-- as the body of the comment
return self.parse_comment(i)
elif rawdata[j] == '[': #marked section
# Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
# Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
# Note that this is extended by Microsoft Office "Save as Web" function
# to include [if...] and [endif].
return self.parse_marked_section(i)
else: #all other declaration elements
decltype, j = self._scan_name(j, i)
if j < 0:
return j
if decltype == "doctype":
self._decl_otherchars = ''
while j < n:
c = rawdata[j]
if c == ">":
# end of declaration syntax
data = rawdata[i+2:j]
if decltype == "doctype":
self.handle_decl(data)
else:
# According to the HTML5 specs sections "8.2.4.44 Bogus
# comment state" and "8.2.4.45 Markup declaration open
# state", a comment token should be emitted.
# Calling unknown_decl provides more flexibility though.
self.unknown_decl(data)
return j + 1
if c in "\"'":
m = _declstringlit_match(rawdata, j)
if not m:
return -1 # incomplete
j = m.end()
elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
name, j = self._scan_name(j, i)
elif c in self._decl_otherchars:
j = j + 1
elif c == "[":
# this could be handled in a separate doctype parser
if decltype == "doctype":
j = self._parse_doctype_subset(j + 1, i)
elif decltype in ("attlist", "linktype", "link", "element"):
# must tolerate []'d groups in a content model in an element declaration
# also in data attribute specifications of attlist declaration
# also link type declaration subsets in linktype declarations
# also link attribute specification lists in link declarations
self.error("unsupported '[' char in %s declaration" % decltype)
else:
self.error("unexpected '[' char in declaration")
else:
self.error(
"unexpected %r char in declaration" % rawdata[j])
if j < 0:
return j
return -1 # incomplete
# Internal -- parse a marked section
# Override this to handle MS-word extension syntax <![if word]>content<![endif]>
def parse_marked_section(self, i, report=1):
rawdata= self.rawdata
assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
sectName, j = self._scan_name( i+3, i )
if j < 0:
return j
if sectName in ("temp", "cdata", "ignore", "include", "rcdata"):
# look for standard ]]> ending
match= _markedsectionclose.search(rawdata, i+3)
elif sectName in ("if", "else", "endif"):
# look for MS Office ]> ending
match= _msmarkedsectionclose.search(rawdata, i+3)
else:
self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
if not match:
return -1
if report:
j = match.start(0)
self.unknown_decl(rawdata[i+3: j])
return match.end(0)
# Internal -- parse comment, return length or -1 if not terminated
def parse_comment(self, i, report=1):
rawdata = self.rawdata
if rawdata[i:i+4] != '<!--':
self.error('unexpected call to parse_comment()')
match = _commentclose.search(rawdata, i+4)
if not match:
return -1
if report:
j = match.start(0)
self.handle_comment(rawdata[i+4: j])
return match.end(0)
# Internal -- scan past the internal subset in a <!DOCTYPE declaration,
# returning the index just past any whitespace following the trailing ']'.
def _parse_doctype_subset(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
j = i
while j < n:
c = rawdata[j]
if c == "<":
s = rawdata[j:j+2]
if s == "<":
# end of buffer; incomplete
return -1
if s != "<!":
self.updatepos(declstartpos, j + 1)
self.error("unexpected char in internal subset (in %r)" % s)
if (j + 2) == n:
# end of buffer; incomplete
return -1
if (j + 4) > n:
# end of buffer; incomplete
return -1
if rawdata[j:j+4] == "<!--":
j = self.parse_comment(j, report=0)
if j < 0:
return j
continue
name, j = self._scan_name(j + 2, declstartpos)
if j == -1:
return -1
if name not in ("attlist", "element", "entity", "notation"):
self.updatepos(declstartpos, j + 2)
self.error(
"unknown declaration %r in internal subset" % name)
# handle the individual names
meth = getattr(self, "_parse_doctype_" + name)
j = meth(j, declstartpos)
if j < 0:
return j
elif c == "%":
# parameter entity reference
if (j + 1) == n:
# end of buffer; incomplete
return -1
s, j = self._scan_name(j + 1, declstartpos)
if j < 0:
return j
if rawdata[j] == ";":
j = j + 1
elif c == "]":
j = j + 1
while j < n and rawdata[j].isspace():
j = j + 1
if j < n:
if rawdata[j] == ">":
return j
self.updatepos(declstartpos, j)
self.error("unexpected char after internal subset")
else:
return -1
elif c.isspace():
j = j + 1
else:
self.updatepos(declstartpos, j)
self.error("unexpected char %r in internal subset" % c)
# end of buffer reached
return -1
# Internal -- scan past <!ELEMENT declarations
def _parse_doctype_element(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j == -1:
return -1
# style content model; just skip until '>'
rawdata = self.rawdata
if '>' in rawdata[j:]:
return rawdata.find(">", j) + 1
return -1
# Internal -- scan past <!ATTLIST declarations
def _parse_doctype_attlist(self, i, declstartpos):
rawdata = self.rawdata
name, j = self._scan_name(i, declstartpos)
c = rawdata[j:j+1]
if c == "":
return -1
if c == ">":
return j + 1
while 1:
# scan a series of attribute descriptions; simplified:
# name type [value] [#constraint]
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
c = rawdata[j:j+1]
if c == "":
return -1
if c == "(":
# an enumerated type; look for ')'
if ")" in rawdata[j:]:
j = rawdata.find(")", j) + 1
else:
return -1
while rawdata[j:j+1].isspace():
j = j + 1
if not rawdata[j:]:
# end of buffer, incomplete
return -1
else:
name, j = self._scan_name(j, declstartpos)
c = rawdata[j:j+1]
if not c:
return -1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if m:
j = m.end()
else:
return -1
c = rawdata[j:j+1]
if not c:
return -1
if c == "#":
if rawdata[j:] == "#":
# end of buffer
return -1
name, j = self._scan_name(j + 1, declstartpos)
if j < 0:
return j
c = rawdata[j:j+1]
if not c:
return -1
if c == '>':
# all done
return j + 1
# Internal -- scan past <!NOTATION declarations
def _parse_doctype_notation(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j < 0:
return j
rawdata = self.rawdata
while 1:
c = rawdata[j:j+1]
if not c:
# end of buffer; incomplete
return -1
if c == '>':
return j + 1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if not m:
return -1
j = m.end()
else:
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
# Internal -- scan past <!ENTITY declarations
def _parse_doctype_entity(self, i, declstartpos):
rawdata = self.rawdata
if rawdata[i:i+1] == "%":
j = i + 1
while 1:
c = rawdata[j:j+1]
if not c:
return -1
if c.isspace():
j = j + 1
else:
break
else:
j = i
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
while 1:
c = self.rawdata[j:j+1]
if not c:
return -1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if m:
j = m.end()
else:
return -1 # incomplete
elif c == ">":
return j + 1
else:
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
    # Internal -- scan a name token; return the token and the new position,
    # or (None, -1) if we've reached the end of the buffer.
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = _declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.updatepos(declstartpos, i)
self.error("expected name token at %r"
% rawdata[declstartpos:declstartpos+20])
# To be overridden -- handlers for unknown objects
def unknown_decl(self, data):
pass
| agpl-3.0 |
johanneswilm/eha-nutsurv-django | nutsurv/dashboard/serializers.py | 2 | 4788 | from rest_framework import serializers
from django.contrib.auth.models import User
from rest_framework_gis.serializers import GeoModelSerializer
from .models import Alert, HouseholdSurveyJSON, TeamMember, HouseholdMember
class JSONSerializerField(serializers.Field):
""" Serializer for JSONField -- required to make field writable"""
def to_internal_value(self, data):
return data
def to_representation(self, value):
return value
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ['url', 'username', 'email']
class SimpleUserSerializer(UserSerializer):
class Meta:
model = User
fields = ['username', 'email']
class HouseholdMemberSerializer(serializers.HyperlinkedModelSerializer):
extra_questions = JSONSerializerField()
class Meta:
model = HouseholdMember
fields = [
'index',
'first_name',
'gender',
'muac',
'birthdate',
'weight',
'height',
'height_type',
'extra_questions',
'household_survey',
'edema',
]
class SimpleHouseholdMemberSerializer(HouseholdMemberSerializer):
class Meta:
model = HouseholdMember
fields = [
'index',
'first_name',
'gender',
'muac',
'birthdate',
'weight',
'height',
'height_type',
'extra_questions',
'edema',
]
class HouseholdSurveyJSONSerializer(serializers.HyperlinkedModelSerializer, GeoModelSerializer):
members = SimpleHouseholdMemberSerializer(many=True, read_only=False)
def create(self, validated_data):
family_members = validated_data.pop('members', [])
instance = super(HouseholdSurveyJSONSerializer, self).create(validated_data)
validated_data['members'] = family_members
self.update(instance, validated_data)
return instance
def update(self, instance, validated_data):
family_members = validated_data.pop('members', [])
super(HouseholdSurveyJSONSerializer, self).update(instance, validated_data)
instance.members.all().delete()
new_family = [HouseholdMember(household_survey=instance, **family_member)
for family_member in family_members]
HouseholdMember.objects.bulk_create(new_family)
return instance
class Meta:
model = HouseholdSurveyJSON
geo_field = "location"
fields = (
'url',
'uuid',
'household_number',
'members',
'team_lead',
'team_assistant',
'team_anthropometrist',
'first_admin_level',
'second_admin_level',
'cluster',
'cluster_name',
'cluster_population',
'cluster_segment_population',
'start_time',
'end_time',
'location',
)
class TeamMemberSerializer(serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='teammember-detail')
mobile = serializers.CharField(required=False)
last_survey = HouseholdSurveyJSONSerializer(many=False, read_only=True)
class Meta:
model = TeamMember
fields = ['url',
'id',
'first_name',
'last_name',
'gender',
'birth_year',
'mobile',
'email',
'last_survey',
]
class SimpleTeamMemberSerializer(TeamMemberSerializer):
class Meta:
model = TeamMember
fields = ['url',
'id',
'first_name',
'last_name',
'gender',
'birth_year',
'mobile',
'email',
]
class AlertSerializer(serializers.HyperlinkedModelSerializer):
team_lead = SimpleTeamMemberSerializer(many=False, read_only=True)
class Meta:
model = Alert
fields = (
'url',
'id',
# fields
'category',
'archived',
'created',
'completed',
'team_lead',
'survey',
# TODO fields still in json
'cluster_id',
'location',
'type',
'survey_id',
)
class SurveyMapSerializer(serializers.ModelSerializer):
class Meta:
model = HouseholdSurveyJSON
fields = (
'location',
'team_lead',
'cluster'
)
| agpl-3.0 |
MyAOSP/external_chromium_org | chrome/app/theme/PRESUBMIT.py | 121 | 1455 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium theme resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl/git cl, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
def CheckChangeOnUpload(input_api, output_api):
return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return _CommonChecks(input_api, output_api)
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
resources = input_api.os_path.join(input_api.PresubmitLocalPath(),
'../../../ui/resources')
# List of paths with their associated scale factor. This is used to verify
# that the images modified in one are the correct scale of the other.
path_scales = [
[(100, 'default_100_percent/'), (200, 'default_200_percent/')],
]
import sys
old_path = sys.path
try:
sys.path = [resources] + old_path
from resource_check import resource_scale_factors
for paths in path_scales:
results.extend(resource_scale_factors.ResourceScaleFactors(
input_api, output_api, paths).RunChecks())
finally:
sys.path = old_path
return results
| bsd-3-clause |
TraderZed/greygardens | node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py | 1407 | 47697 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module helps emulate Visual Studio 2008 behavior on top of other
build systems, primarily ninja.
"""
import os
import re
import subprocess
import sys
from gyp.common import OrderedSet
import gyp.MSVSUtil
import gyp.MSVSVersion
windows_quoter_regex = re.compile(r'(\\*)"')
def QuoteForRspFile(arg):
"""Quote a command line argument so that it appears as one argument when
processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
Windows programs)."""
# See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment
# threads. This is actually the quoting rules for CommandLineToArgvW, not
# for the shell, because the shell doesn't do anything in Windows. This
# works more or less because most programs (including the compiler, etc.)
# use that function to handle command line arguments.
# For a literal quote, CommandLineToArgvW requires 2n+1 backslashes
# preceding it, and results in n backslashes + the quote. So we substitute
# in 2* what we match, +1 more, plus the quote.
arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)
# %'s also need to be doubled otherwise they're interpreted as batch
# positional arguments. Also make sure to escape the % so that they're
# passed literally through escaping so they can be singled to just the
# original %. Otherwise, trying to pass the literal representation that
# looks like an environment variable to the shell (e.g. %PATH%) would fail.
arg = arg.replace('%', '%%')
# These commands are used in rsp files, so no escaping for the shell (via ^)
# is necessary.
# Finally, wrap the whole thing in quotes so that the above quote rule
# applies and whitespace isn't a word break.
return '"' + arg + '"'
def EncodeRspFileList(args):
"""Process a list of arguments using QuoteCmdExeArgument."""
# Note that the first argument is assumed to be the command. Don't add
# quotes around it because then built-ins like 'echo', etc. won't work.
# Take care to normpath only the path in the case of 'call ../x.bat' because
# otherwise the whole thing is incorrectly interpreted as a path and not
# normalized correctly.
if not args: return ''
if args[0].startswith('call '):
call, program = args[0].split(' ', 1)
program = call + ' ' + os.path.normpath(program)
else:
program = os.path.normpath(args[0])
return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
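# Illustrative example: the command itself stays unquoted so built-ins keep
# working, while every argument is quoted individually:
#   >>> EncodeRspFileList(['cl.exe', '/c', 'foo bar.cc'])
#   'cl.exe "/c" "foo bar.cc"'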
def _GenericRetrieve(root, default, path):
"""Given a list of dictionary keys |path| and a tree of dicts |root|, find
value at path, or return |default| if any of the path doesn't exist."""
if not root:
return default
if not path:
return root
return _GenericRetrieve(root.get(path[0]), default, path[1:])
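# Worked examples (illustrative):
#   >>> _GenericRetrieve({'a': {'b': 1}}, 0, ['a', 'b'])  # full path exists
#   1
#   >>> _GenericRetrieve({'a': {'b': 1}}, 0, ['a', 'x'])  # missing leaf
#   0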
def _AddPrefix(element, prefix):
"""Add |prefix| to |element| or each subelement if element is iterable."""
if element is None:
return element
# Note, not Iterable because we don't want to handle strings like that.
if isinstance(element, list) or isinstance(element, tuple):
return [prefix + e for e in element]
else:
return prefix + element
def _DoRemapping(element, map):
"""If |element| then remap it through |map|. If |element| is iterable then
each item will be remapped. Any elements not found will be removed."""
if map is not None and element is not None:
if not callable(map):
map = map.get # Assume it's a dict, otherwise a callable to do the remap.
if isinstance(element, list) or isinstance(element, tuple):
element = filter(None, [map(elem) for elem in element])
else:
element = map(element)
return element
def _AppendOrReturn(append, element):
"""If |append| is None, simply return |element|. If |append| is not None,
then add |element| to it, adding each item in |element| if it's a list or
tuple."""
if append is not None and element is not None:
if isinstance(element, list) or isinstance(element, tuple):
append.extend(element)
else:
append.append(element)
else:
return element
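# Worked examples for the three helpers above (illustrative):
#   >>> _AddPrefix(['foo', 'bar'], '/I')        # prefix each element
#   ['/Ifoo', '/Ibar']
#   >>> _DoRemapping('true', {'true': '/GL'})   # dict-based remapping
#   '/GL'
#   >>> flags = []
#   >>> _AppendOrReturn(flags, ['/O2', '/GF'])  # extends in place, returns None
#   >>> flags
#   ['/O2', '/GF']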
def _FindDirectXInstallation():
"""Try to find an installation location for the DirectX SDK. Check for the
standard environment variable, and if that doesn't exist, try to find
via the registry. May return None if not found in either location."""
# Return previously calculated value, if there is one
if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
return _FindDirectXInstallation.dxsdk_dir
dxsdk_dir = os.environ.get('DXSDK_DIR')
if not dxsdk_dir:
# Setup params to pass to and attempt to launch reg.exe.
cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in p.communicate()[0].splitlines():
if 'InstallPath' in line:
dxsdk_dir = line.split(' ')[3] + "\\"
# Cache return value
_FindDirectXInstallation.dxsdk_dir = dxsdk_dir
return dxsdk_dir
def GetGlobalVSMacroEnv(vs_version):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents. Returns all variables that are independent of the target."""
env = {}
# '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when
# Visual Studio is actually installed.
if vs_version.Path():
env['$(VSInstallDir)'] = vs_version.Path()
env['$(VCInstallDir)'] = os.path.join(vs_version.Path(), 'VC') + '\\'
# Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
# set. This happens when the SDK is sync'd via src-internal, rather than
# by typical end-user installation of the SDK. If it's not set, we don't
# want to leave the unexpanded variable in the path, so simply strip it.
dxsdk_dir = _FindDirectXInstallation()
env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else ''
# Try to find an installation location for the Windows DDK by checking
# the WDK_DIR environment variable, may be None.
env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
return env
def ExtractSharedMSVSSystemIncludes(configs, generator_flags):
"""Finds msvs_system_include_dirs that are common to all targets, removes
them from all targets, and returns an OrderedSet containing them."""
all_system_includes = OrderedSet(
configs[0].get('msvs_system_include_dirs', []))
for config in configs[1:]:
system_includes = config.get('msvs_system_include_dirs', [])
all_system_includes = all_system_includes & OrderedSet(system_includes)
if not all_system_includes:
return None
# Expand macros in all_system_includes.
env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags))
expanded_system_includes = OrderedSet([ExpandMacros(include, env)
for include in all_system_includes])
if any(['$' in include for include in expanded_system_includes]):
# Some path relies on target-specific variables, bail.
return None
# Remove system includes shared by all targets from the targets.
for config in configs:
includes = config.get('msvs_system_include_dirs', [])
if includes: # Don't insert a msvs_system_include_dirs key if not needed.
# This must check the unexpanded includes list:
new_includes = [i for i in includes if i not in all_system_includes]
config['msvs_system_include_dirs'] = new_includes
return expanded_system_includes
class MsvsSettings(object):
"""A class that understands the gyp 'msvs_...' values (especially the
  msvs_settings field). They largely correspond to the VS2008 IDE DOM. This
class helps map those settings to command line options."""
def __init__(self, spec, generator_flags):
self.spec = spec
self.vs_version = GetVSVersion(generator_flags)
supported_fields = [
('msvs_configuration_attributes', dict),
('msvs_settings', dict),
('msvs_system_include_dirs', list),
('msvs_disabled_warnings', list),
('msvs_precompiled_header', str),
('msvs_precompiled_source', str),
('msvs_configuration_platform', str),
('msvs_target_platform', str),
]
configs = spec['configurations']
for field, default in supported_fields:
setattr(self, field, {})
for configname, config in configs.iteritems():
getattr(self, field)[configname] = config.get(field, default())
self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])
unsupported_fields = [
'msvs_prebuild',
'msvs_postbuild',
]
unsupported = []
for field in unsupported_fields:
for config in configs.values():
if field in config:
unsupported += ["%s not supported (target %s)." %
(field, spec['target_name'])]
if unsupported:
raise Exception('\n'.join(unsupported))
def GetExtension(self):
"""Returns the extension for the target, with no leading dot.
Uses 'product_extension' if specified, otherwise uses MSVS defaults based on
the target type.
"""
ext = self.spec.get('product_extension', None)
if ext:
return ext
return gyp.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '')
def GetVSMacroEnv(self, base_to_build=None, config=None):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents."""
target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64'
target_name = self.spec.get('product_prefix', '') + \
self.spec.get('product_name', self.spec['target_name'])
target_dir = base_to_build + '\\' if base_to_build else ''
target_ext = '.' + self.GetExtension()
target_file_name = target_name + target_ext
replacements = {
'$(InputName)': '${root}',
'$(InputPath)': '${source}',
'$(IntDir)': '$!INTERMEDIATE_DIR',
'$(OutDir)\\': target_dir,
'$(PlatformName)': target_platform,
'$(ProjectDir)\\': '',
'$(ProjectName)': self.spec['target_name'],
'$(TargetDir)\\': target_dir,
'$(TargetExt)': target_ext,
'$(TargetFileName)': target_file_name,
'$(TargetName)': target_name,
'$(TargetPath)': os.path.join(target_dir, target_file_name),
}
replacements.update(GetGlobalVSMacroEnv(self.vs_version))
return replacements
def ConvertVSMacros(self, s, base_to_build=None, config=None):
"""Convert from VS macro names to something equivalent."""
env = self.GetVSMacroEnv(base_to_build, config=config)
return ExpandMacros(s, env)
def AdjustLibraries(self, libraries):
"""Strip -l from library if it's specified with that."""
libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]
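  # Worked example (illustrative): given ['-lfoo', 'bar.lib', 'baz'] this
  # returns ['foo.lib', 'bar.lib', 'baz.lib'] -- '-l' prefixes are stripped
  # and a '.lib' suffix is ensured on every entry.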
def _GetAndMunge(self, field, path, default, prefix, append, map):
"""Retrieve a value from |field| at |path| or return |default|. If
|append| is specified, and the item is found, it will be appended to that
object instead of returned. If |map| is specified, results will be
remapped through |map| before being returned or appended."""
result = _GenericRetrieve(field, default, path)
result = _DoRemapping(result, map)
result = _AddPrefix(result, prefix)
return _AppendOrReturn(append, result)
class _GetWrapper(object):
def __init__(self, parent, field, base_path, append=None):
self.parent = parent
self.field = field
self.base_path = [base_path]
self.append = append
def __call__(self, name, map=None, prefix='', default=None):
return self.parent._GetAndMunge(self.field, self.base_path + [name],
default=default, prefix=prefix, append=self.append, map=map)
def GetArch(self, config):
"""Get architecture based on msvs_configuration_platform and
msvs_target_platform. Returns either 'x86' or 'x64'."""
configuration_platform = self.msvs_configuration_platform.get(config, '')
platform = self.msvs_target_platform.get(config, '')
if not platform: # If no specific override, use the configuration's.
platform = configuration_platform
# Map from platform to architecture.
return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')
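  # Illustrative mapping (assuming no target-specific override): a platform
  # of 'Win32' yields 'x86', 'x64' yields 'x64', and anything unset or
  # unrecognized falls back to 'x86'.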
def _TargetConfig(self, config):
"""Returns the target-specific configuration."""
    # There are two levels of architecture/platform specification in VS. The
# first level is globally for the configuration (this is what we consider
# "the" config at the gyp level, which will be something like 'Debug' or
# 'Release_x64'), and a second target-specific configuration, which is an
# override for the global one. |config| is remapped here to take into
# account the local target-specific overrides to the global configuration.
arch = self.GetArch(config)
if arch == 'x64' and not config.endswith('_x64'):
config += '_x64'
if arch == 'x86' and config.endswith('_x64'):
config = config.rsplit('_', 1)[0]
return config
def _Setting(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_settings."""
return self._GetAndMunge(
self.msvs_settings[config], path, default, prefix, append, map)
def _ConfigAttrib(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_configuration_attributes."""
return self._GetAndMunge(
self.msvs_configuration_attributes[config],
path, default, prefix, append, map)
def AdjustIncludeDirs(self, include_dirs, config):
"""Updates include_dirs to expand VS specific paths, and adds the system
include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def AdjustMidlIncludeDirs(self, midl_include_dirs, config):
"""Updates midl_include_dirs to expand VS specific paths, and adds the
system include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = midl_include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def GetComputedDefines(self, config):
"""Returns the set of defines that are injected to the defines list based
on other VS settings."""
config = self._TargetConfig(config)
defines = []
if self._ConfigAttrib(['CharacterSet'], config) == '1':
defines.extend(('_UNICODE', 'UNICODE'))
if self._ConfigAttrib(['CharacterSet'], config) == '2':
defines.append('_MBCS')
defines.extend(self._Setting(
('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
return defines
def GetCompilerPdbName(self, config, expand_special):
"""Get the pdb file name that should be used for compiler invocations, or
None if there's no explicit name specified."""
config = self._TargetConfig(config)
pdbname = self._Setting(
('VCCLCompilerTool', 'ProgramDataBaseFileName'), config)
if pdbname:
pdbname = expand_special(self.ConvertVSMacros(pdbname))
return pdbname
def GetMapFileName(self, config, expand_special):
"""Gets the explicitly overriden map file name for a target or returns None
if it's not set."""
config = self._TargetConfig(config)
map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config)
if map_file:
map_file = expand_special(self.ConvertVSMacros(map_file, config=config))
return map_file
def GetOutputName(self, config, expand_special):
"""Gets the explicitly overridden output name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
type = self.spec['type']
root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
# TODO(scottmg): Handle OutputDirectory without OutputFile.
output_file = self._Setting((root, 'OutputFile'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetPDBName(self, config, expand_special, default):
"""Gets the explicitly overridden pdb name for a target or returns
default if it's not overridden, or if no pdb will be generated."""
config = self._TargetConfig(config)
output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
generate_debug_info = self._Setting(
('VCLinkerTool', 'GenerateDebugInformation'), config)
if generate_debug_info == 'true':
if output_file:
return expand_special(self.ConvertVSMacros(output_file, config=config))
else:
return default
else:
return None
def GetNoImportLibrary(self, config):
"""If NoImportLibrary: true, ninja will not expect the output to include
an import library."""
config = self._TargetConfig(config)
noimplib = self._Setting(('NoImportLibrary',), config)
return noimplib == 'true'
def GetAsmflags(self, config):
"""Returns the flags that need to be added to ml invocations."""
config = self._TargetConfig(config)
asmflags = []
safeseh = self._Setting(('MASM', 'UseSafeExceptionHandlers'), config)
if safeseh == 'true':
asmflags.append('/safeseh')
return asmflags
def GetCflags(self, config):
"""Returns the flags that need to be added to .c and .cc compilations."""
config = self._TargetConfig(config)
cflags = []
cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
cl = self._GetWrapper(self, self.msvs_settings[config],
'VCCLCompilerTool', append=cflags)
cl('Optimization',
map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2')
cl('InlineFunctionExpansion', prefix='/Ob')
cl('DisableSpecificWarnings', prefix='/wd')
cl('StringPooling', map={'true': '/GF'})
cl('EnableFiberSafeOptimizations', map={'true': '/GT'})
cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi')
cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
cl('FloatingPointModel',
map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:',
default='0')
cl('CompileAsManaged', map={'false': '', 'true': '/clr'})
cl('WholeProgramOptimization', map={'true': '/GL'})
cl('WarningLevel', prefix='/W')
cl('WarnAsError', map={'true': '/WX'})
cl('CallingConvention',
map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G')
cl('DebugInformationFormat',
map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
cl('MinimalRebuild', map={'true': '/Gm'})
cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
cl('RuntimeLibrary',
map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
cl('DefaultCharIsUnsigned', map={'true': '/J'})
cl('TreatWChar_tAsBuiltInType',
map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t')
cl('EnablePREfast', map={'true': '/analyze'})
cl('AdditionalOptions', prefix='')
cl('EnableEnhancedInstructionSet',
map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'},
prefix='/arch:')
cflags.extend(['/FI' + f for f in self._Setting(
('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])])
if self.vs_version.short_name in ('2013', '2013e', '2015'):
# New flag required in 2013 to maintain previous PDB behavior.
cflags.append('/FS')
# ninja handles parallelism by itself, don't have the compiler do it too.
cflags = filter(lambda x: not x.startswith('/MP'), cflags)
return cflags
def _GetPchFlags(self, config, extension):
"""Get the flags to be added to the cflags for precompiled header support.
"""
config = self._TargetConfig(config)
# The PCH is only built once by a particular source file. Usage of PCH must
# only be for the same language (i.e. C vs. C++), so only include the pch
# flags when the language matches.
if self.msvs_precompiled_header[config]:
source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
if _LanguageMatchesForPch(source_ext, extension):
pch = os.path.split(self.msvs_precompiled_header[config])[1]
return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch']
return []
def GetCflagsC(self, config):
"""Returns the flags that need to be added to .c compilations."""
config = self._TargetConfig(config)
return self._GetPchFlags(config, '.c')
def GetCflagsCC(self, config):
"""Returns the flags that need to be added to .cc compilations."""
config = self._TargetConfig(config)
return ['/TP'] + self._GetPchFlags(config, '.cc')
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
"""Get and normalize the list of paths in AdditionalLibraryDirectories
setting."""
config = self._TargetConfig(config)
libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
config, default=[])
libpaths = [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
for p in libpaths]
return ['/LIBPATH:"' + p + '"' for p in libpaths]
def GetLibFlags(self, config, gyp_to_build_path):
"""Returns the flags that need to be added to lib commands."""
config = self._TargetConfig(config)
libflags = []
lib = self._GetWrapper(self, self.msvs_settings[config],
'VCLibrarianTool', append=libflags)
libflags.extend(self._GetAdditionalLibraryDirectories(
'VCLibrarianTool', config, gyp_to_build_path))
lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})
lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
lib('AdditionalOptions')
return libflags
def GetDefFile(self, gyp_to_build_path):
"""Returns the .def file from sources, if any. Otherwise returns None."""
spec = self.spec
if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
return gyp_to_build_path(def_files[0])
elif len(def_files) > 1:
raise Exception("Multiple .def files")
return None
def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path):
""".def files get implicitly converted to a ModuleDefinitionFile for the
linker in the VS generator. Emulate that behaviour here."""
def_file = self.GetDefFile(gyp_to_build_path)
if def_file:
ldflags.append('/DEF:"%s"' % def_file)
def GetPGDName(self, config, expand_special):
"""Gets the explicitly overridden pgd name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
output_file = self._Setting(
('VCLinkerTool', 'ProfileGuidedDatabase'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetLdflags(self, config, gyp_to_build_path, expand_special,
manifest_base_name, output_name, is_executable, build_dir):
"""Returns the flags that need to be added to link commands, and the
manifest files."""
config = self._TargetConfig(config)
ldflags = []
ld = self._GetWrapper(self, self.msvs_settings[config],
'VCLinkerTool', append=ldflags)
self._GetDefFileAsLdflags(ldflags, gyp_to_build_path)
ld('GenerateDebugInformation', map={'true': '/DEBUG'})
ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
ldflags.extend(self._GetAdditionalLibraryDirectories(
'VCLinkerTool', config, gyp_to_build_path))
ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
ld('TreatLinkerWarningAsErrors', prefix='/WX',
map={'true': '', 'false': ':NO'})
out = self.GetOutputName(config, expand_special)
if out:
ldflags.append('/OUT:' + out)
pdb = self.GetPDBName(config, expand_special, output_name + '.pdb')
if pdb:
ldflags.append('/PDB:' + pdb)
pgd = self.GetPGDName(config, expand_special)
if pgd:
ldflags.append('/PGD:' + pgd)
map_file = self.GetMapFileName(config, expand_special)
ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file
else '/MAP'})
ld('MapExports', map={'true': '/MAPINFO:EXPORTS'})
ld('AdditionalOptions', prefix='')
minimum_required_version = self._Setting(
('VCLinkerTool', 'MinimumRequiredVersion'), config, default='')
if minimum_required_version:
minimum_required_version = ',' + minimum_required_version
ld('SubSystem',
map={'1': 'CONSOLE%s' % minimum_required_version,
'2': 'WINDOWS%s' % minimum_required_version},
prefix='/SUBSYSTEM:')
stack_reserve_size = self._Setting(
('VCLinkerTool', 'StackReserveSize'), config, default='')
if stack_reserve_size:
stack_commit_size = self._Setting(
('VCLinkerTool', 'StackCommitSize'), config, default='')
if stack_commit_size:
stack_commit_size = ',' + stack_commit_size
ldflags.append('/STACK:%s%s' % (stack_reserve_size, stack_commit_size))
ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE')
ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
ld('BaseAddress', prefix='/BASE:')
ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
ld('RandomizedBaseAddress',
map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
ld('DataExecutionPrevention',
map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
ld('ForceSymbolReferences', prefix='/INCLUDE:')
ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
ld('LinkTimeCodeGeneration',
map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE',
'4': ':PGUPDATE'},
prefix='/LTCG')
ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
ld('EntryPointSymbol', prefix='/ENTRY:')
ld('Profile', map={'true': '/PROFILE'})
ld('LargeAddressAware',
map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE')
# TODO(scottmg): This should sort of be somewhere else (not really a flag).
ld('AdditionalDependencies', prefix='')
if self.GetArch(config) == 'x86':
safeseh_default = 'true'
else:
safeseh_default = None
ld('ImageHasSafeExceptionHandlers',
map={'false': ':NO', 'true': ''}, prefix='/SAFESEH',
default=safeseh_default)
# If the base address is not specifically controlled, DYNAMICBASE should
# be on by default.
base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED',
ldflags)
if not base_flags:
ldflags.append('/DYNAMICBASE')
# If the NXCOMPAT flag has not been specified, default to on. Despite the
# documentation that says this only defaults to on when the subsystem is
# Vista or greater (which applies to the linker), the IDE defaults it on
# unless it's explicitly off.
if not filter(lambda x: 'NXCOMPAT' in x, ldflags):
ldflags.append('/NXCOMPAT')
have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags)
manifest_flags, intermediate_manifest, manifest_files = \
self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path,
is_executable and not have_def_file, build_dir)
ldflags.extend(manifest_flags)
return ldflags, intermediate_manifest, manifest_files
def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
allow_isolation, build_dir):
"""Returns a 3-tuple:
- the set of flags that need to be added to the link to generate
a default manifest
- the intermediate manifest that the linker will generate that should be
used to assert it doesn't add anything to the merged one.
- the list of all the manifest files to be merged by the manifest tool and
included into the link."""
generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
config,
default='true')
if generate_manifest != 'true':
# This means not only that the linker should not generate the intermediate
# manifest but also that the manifest tool should do nothing even when
# additional manifests are specified.
return ['/MANIFEST:NO'], [], []
output_name = name + '.intermediate.manifest'
flags = [
'/MANIFEST',
'/ManifestFile:' + output_name,
]
# Instead of using the MANIFESTUAC flags, we generate a .manifest to
# include into the list of manifests. This allows us to avoid the need to
# do two passes during linking. The /MANIFEST flag and /ManifestFile are
# still used, and the intermediate manifest is used to assert that the
# final manifest we get from merging all the additional manifest files
# (plus the one we generate here) isn't modified by merging the
# intermediate into it.
# Always NO, because we generate a manifest file that has what we want.
flags.append('/MANIFESTUAC:NO')
config = self._TargetConfig(config)
enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
default='true')
manifest_files = []
generated_manifest_outer = \
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
"</assembly>"
if enable_uac == 'true':
execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
config, default='0')
execution_level_map = {
'0': 'asInvoker',
'1': 'highestAvailable',
'2': 'requireAdministrator'
}
ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
default='false')
inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level='%s' uiAccess='%s' />
</requestedPrivileges>
</security>
</trustInfo>''' % (execution_level_map[execution_level], ui_access)
else:
inner = ''
generated_manifest_contents = generated_manifest_outer % inner
generated_name = name + '.generated.manifest'
# Need to join with the build_dir here as we're writing it during
# generation time, but we return the un-joined version because the build
# will occur in that directory. We only write the file if the contents
# have changed so that simply regenerating the project files doesn't
# cause a relink.
build_dir_generated_name = os.path.join(build_dir, generated_name)
gyp.common.EnsureDirExists(build_dir_generated_name)
f = gyp.common.WriteOnDiff(build_dir_generated_name)
f.write(generated_manifest_contents)
f.close()
manifest_files = [generated_name]
if allow_isolation:
flags.append('/ALLOWISOLATION')
manifest_files += self._GetAdditionalManifestFiles(config,
gyp_to_build_path)
return flags, output_name, manifest_files
def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
"""Gets additional manifest files that are added to the default one
generated by the linker."""
files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
default=[])
if isinstance(files, str):
files = files.split(';')
return [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
for f in files]
def IsUseLibraryDependencyInputs(self, config):
"""Returns whether the target should be linked via Use Library Dependency
Inputs (using component .objs of a given .lib)."""
config = self._TargetConfig(config)
uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
return uldi == 'true'
def IsEmbedManifest(self, config):
"""Returns whether manifest should be linked into binary."""
config = self._TargetConfig(config)
embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
default='true')
return embed == 'true'
def IsLinkIncremental(self, config):
"""Returns whether the target should be linked incrementally."""
config = self._TargetConfig(config)
link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
return link_inc != '1'
def GetRcflags(self, config, gyp_to_ninja_path):
"""Returns the flags that need to be added to invocations of the resource
compiler."""
config = self._TargetConfig(config)
rcflags = []
rc = self._GetWrapper(self, self.msvs_settings[config],
'VCResourceCompilerTool', append=rcflags)
rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
rcflags.append('/I' + gyp_to_ninja_path('.'))
rc('PreprocessorDefinitions', prefix='/d')
# /l arg must be in hex without leading '0x'
rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
return rcflags
def BuildCygwinBashCommandLine(self, args, path_to_base):
"""Build a command line that runs args via cygwin bash. We assume that all
incoming paths are in Windows normpath'd form, so they need to be
converted to posix style for the part of the command line that's passed to
bash. We also have to do some Visual Studio macro emulation here because
various rules use magic VS names for things. Also note that rules that
contain ninja variables cannot be fixed here (for example ${source}), so
the outer generator needs to make sure that the paths that are written out
are in posix style, if the command line will be used here."""
cygwin_dir = os.path.normpath(
os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
cd = ('cd %s' % path_to_base).replace('\\', '/')
args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
args = ["'%s'" % a.replace("'", "'\\''") for a in args]
bash_cmd = ' '.join(args)
cmd = (
'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
'bash -c "%s ; %s"' % (cd, bash_cmd))
return cmd
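  # Worked example (illustrative, assuming the default msvs_cygwin_dirs of
  # ['.']): BuildCygwinBashCommandLine(['echo', 'hello'], r'..\..') yields
  #   call "..\..\setup_env.bat" && set CYGWIN=nontsec && bash -c "cd ../.. ; 'echo' 'hello'"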
def IsRuleRunUnderCygwin(self, rule):
"""Determine if an action should be run under cygwin. If the variable is
unset, or set to 1 we use cygwin."""
return int(rule.get('msvs_cygwin_shell',
self.spec.get('msvs_cygwin_shell', 1))) != 0
def _HasExplicitRuleForExtension(self, spec, extension):
"""Determine if there's an explicit rule for a particular extension."""
for rule in spec.get('rules', []):
if rule['extension'] == extension:
return True
return False
def _HasExplicitIdlActions(self, spec):
"""Determine if an action should not run midl for .idl files."""
return any([action.get('explicit_idl_action', 0)
for action in spec.get('actions', [])])
def HasExplicitIdlRulesOrActions(self, spec):
"""Determine if there's an explicit rule or action for idl files. When
there isn't we need to generate implicit rules to build MIDL .idl files."""
return (self._HasExplicitRuleForExtension(spec, 'idl') or
self._HasExplicitIdlActions(spec))
def HasExplicitAsmRules(self, spec):
"""Determine if there's an explicit rule for asm files. When there isn't we
need to generate implicit rules to assemble .asm files."""
return self._HasExplicitRuleForExtension(spec, 'asm')
def GetIdlBuildData(self, source, config):
"""Determine the implicit outputs for an idl file. Returns output
directory, outputs, and variables and flags that are required."""
config = self._TargetConfig(config)
midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
def midl(name, default=None):
return self.ConvertVSMacros(midl_get(name, default=default),
config=config)
tlb = midl('TypeLibraryName', default='${root}.tlb')
header = midl('HeaderFileName', default='${root}.h')
dlldata = midl('DLLDataFileName', default='dlldata.c')
iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
proxy = midl('ProxyFileName', default='${root}_p.c')
# Note that .tlb is not included in the outputs as it is not always
# generated depending on the content of the input idl file.
outdir = midl('OutputDirectory', default='')
output = [header, dlldata, iid, proxy]
variables = [('tlb', tlb),
('h', header),
('dlldata', dlldata),
('iid', iid),
('proxy', proxy)]
# TODO(scottmg): Are there configuration settings to set these flags?
target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
return outdir, output, variables, flags
def _LanguageMatchesForPch(source_ext, pch_source_ext):
c_exts = ('.c',)
cc_exts = ('.cc', '.cxx', '.cpp')
return ((source_ext in c_exts and pch_source_ext in c_exts) or
(source_ext in cc_exts and pch_source_ext in cc_exts))
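# Worked examples (illustrative): C and C++ sources must not share a PCH:
#   >>> _LanguageMatchesForPch('.c', '.c')
#   True
#   >>> _LanguageMatchesForPch('.cpp', '.cc')
#   True
#   >>> _LanguageMatchesForPch('.c', '.cc')
#   False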
class PrecompiledHeader(object):
"""Helper to generate dependencies and build rules to handle generation of
precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
"""
def __init__(
self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext):
self.settings = settings
self.config = config
pch_source = self.settings.msvs_precompiled_source[self.config]
self.pch_source = gyp_to_build_path(pch_source)
filename, _ = os.path.splitext(pch_source)
self.output_obj = gyp_to_unique_output(filename + obj_ext).lower()
def _PchHeader(self):
"""Get the header that will appear in an #include line for all source
files."""
return os.path.split(self.settings.msvs_precompiled_header[self.config])[1]
def GetObjDependencies(self, sources, objs, arch):
"""Given a list of sources files and the corresponding object files,
returns a list of the pch files that should be depended upon. The
additional wrapping in the return value is for interface compatibility
with make.py on Mac, and xcode_emulation.py."""
assert arch is None
if not self._PchHeader():
return []
pch_ext = os.path.splitext(self.pch_source)[1]
for source in sources:
if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
return [(None, None, self.output_obj)]
return []
def GetPchBuildCommands(self, arch):
"""Not used on Windows as there are no additional build steps required
(instead, existing steps are modified in GetFlagsModifications below)."""
return []
def GetFlagsModifications(self, input, output, implicit, command,
cflags_c, cflags_cc, expand_special):
"""Get the modified cflags and implicit dependencies that should be used
for the pch compilation step."""
if input == self.pch_source:
pch_output = ['/Yc' + self._PchHeader()]
if command == 'cxx':
return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))],
self.output_obj, [])
elif command == 'cc':
return ([('cflags_c', map(expand_special, cflags_c + pch_output))],
self.output_obj, [])
return [], output, implicit
vs_version = None
def GetVSVersion(generator_flags):
global vs_version
if not vs_version:
vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'),
allow_fallback=False)
return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
vs = GetVSVersion(generator_flags)
return vs.SetupScript()
def ExpandMacros(string, expansions):
"""Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
for the canonical way to retrieve a suitable dict."""
if '$' in string:
for old, new in expansions.iteritems():
assert '$(' not in new, new
string = string.replace(old, new)
return string
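# Illustrative usage of ExpandMacros (the expansions dict is a made-up sample;
# real values come from MsvsSettings.GetVSMacroEnv):
#   ExpandMacros('$(OutDir)\\app.exe', {'$(OutDir)': 'out\\Release'})
#     -> 'out\\Release\\app.exe'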
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting
break
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
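# Illustrative only (made-up value): _FormatAsEnvironmentBlock({'TMP': 'C:\\t'})
# returns 'TMP=C:\\t\x00\x00' - each key=value entry is NUL-terminated and the
# whole block ends with one extra NUL.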
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip()
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags,
system_includes, open_out):
"""It's not sufficient to have the absolute path to the compiler, linker,
etc. on Windows, as those tools rely on .dlls being in the PATH. We also
need to support both x86 and x64 compilers within the same build (to support
msvs_target_platform hackery). Different architectures require a different
compiler binary, and different supporting environment variables (INCLUDE,
LIB, LIBPATH). So, we extract the environment here, wrap all invocations
of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
sets up the environment, and then we do not prefix the compiler with
an absolute path, instead preferring something like "cl.exe" in the rule
which will then run whichever the environment setup has put in the path.
When the following procedure to generate environment files does not
meet your requirements (e.g. for custom toolchains), you can pass
"-G ninja_use_custom_environment_files" to gyp to suppress file
generation and use custom environment files prepared by yourself."""
archs = ('x86', 'x64')
if generator_flags.get('ninja_use_custom_environment_files', 0):
cl_paths = {}
for arch in archs:
cl_paths[arch] = 'cl.exe'
return cl_paths
vs = GetVSVersion(generator_flags)
cl_paths = {}
for arch in archs:
# Extract environment variables for subprocesses.
args = vs.SetupScript(arch)
args.extend(('&&', 'set'))
popen = subprocess.Popen(
args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
variables, _ = popen.communicate()
env = _ExtractImportantEnvironment(variables)
# Inject system includes from gyp files into INCLUDE.
if system_includes:
system_includes = system_includes | OrderedSet(
env.get('INCLUDE', '').split(';'))
env['INCLUDE'] = ';'.join(system_includes)
env_block = _FormatAsEnvironmentBlock(env)
f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
f.write(env_block)
f.close()
# Find cl.exe location for this architecture.
args = vs.SetupScript(arch)
args.extend(('&&',
'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i'))
popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
output, _ = popen.communicate()
cl_paths[arch] = _ExtractCLPath(output)
return cl_paths
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
"""Emulate behavior of msvs_error_on_missing_sources present in the msvs
generator: Check that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation when building via
VS, and we want this check to match for people/bots that build using ninja,
so they're not surprised when the VS build fails."""
if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
no_specials = filter(lambda x: '$' not in x, sources)
relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
missing = filter(lambda x: not os.path.exists(x), relative)
if missing:
# They'll look like out\Release\..\..\stuff\things.cc, so normalize the
# path for a slightly less crazy looking output.
cleaned_up = [os.path.normpath(x) for x in missing]
raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
# Sets some values in default_variables, which are required for many
# generators, run on Windows.
def CalculateCommonVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
# Set a variable so conditions can be based on msvs_version.
msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running through WOW64).
if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
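# Standalone sketch of the WOW64 check above (illustrative, not gyp code): a
# 32-bit python on 64-bit Windows reports PROCESSOR_ARCHITECTURE=x86 but
# PROCESSOR_ARCHITEW6432=AMD64, so both variables must be consulted.
def _OsBitsExample(environ):
  if ('64' in environ.get('PROCESSOR_ARCHITECTURE', '') or
      '64' in environ.get('PROCESSOR_ARCHITEW6432', '')):
    return 64
  return 32
# _OsBitsExample({'PROCESSOR_ARCHITECTURE': 'x86',
#                 'PROCESSOR_ARCHITEW6432': 'AMD64'}) == 64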
| mit |
centwave/jg82ksgvqkuan | django/core/mail/backends/base.py | 660 | 1164 | """Base email backend class."""
class BaseEmailBackend(object):
"""
Base class for email backend implementations.
Subclasses must at least overwrite send_messages().
"""
def __init__(self, fail_silently=False, **kwargs):
self.fail_silently = fail_silently
def open(self):
"""Open a network connection.
This method can be overwritten by backend implementations to
open a network connection.
It's up to the backend implementation to track the status of
a network connection if it's needed by the backend.
This method can be called by applications to force a single
network connection to be used when sending mails. See the
send_messages() method of the SMTP backend for a reference
implementation.
The default implementation does nothing.
"""
pass
def close(self):
"""Close a network connection."""
pass
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
raise NotImplementedError
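# Minimal illustrative subclass (a sketch, not part of Django; the class name
# and behavior are assumptions): send_messages() is the only required override.
class _InMemoryBackendSketch(BaseEmailBackend):
    """Collects messages in a list instead of sending them."""
    def __init__(self, fail_silently=False, **kwargs):
        super(_InMemoryBackendSketch, self).__init__(fail_silently=fail_silently)
        self.sent = []

    def send_messages(self, email_messages):
        # Record the messages and report how many were "sent".
        self.sent.extend(email_messages)
        return len(email_messages)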
| bsd-3-clause |
tima/ansible | lib/ansible/modules/clustering/consul_session.py | 41 | 8927 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Steve Gargan <steve.gargan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: consul_session
short_description: Manipulate consul sessions
description:
- Allows the addition, modification and deletion of sessions in a consul
cluster. These sessions can then be used in conjunction with key value pairs
to implement distributed locks. In-depth documentation for working with
sessions can be found at http://www.consul.io/docs/internals/sessions.html
requirements:
- python >= 2.6
- python-consul
- requests
version_added: "2.0"
author:
- Steve Gargan (@sgargan)
options:
state:
description:
- Whether the session should be present, i.e. created if it doesn't
exist, or absent, i.e. removed if present. If created, the ID for the
session is returned in the output. If absent, the name or ID is
required to remove the session. Info for a single session, all the
sessions for a node or all available sessions can be retrieved by
specifying info, node or list for the state; for node or info, the
node name or session id is required as parameter.
choices: [ absent, info, list, node, present ]
default: present
name:
description:
- The name that should be associated with the session. This is opaque
to Consul and not required.
delay:
description:
- The optional lock delay that can be attached to the session when it
is created. Locks for invalidated sessions are blocked from being
acquired until this delay has expired. Durations are in seconds.
default: 15
node:
description:
- The name of the node with which the session will be associated.
By default this is the name of the agent.
datacenter:
description:
- The name of the datacenter in which the session exists or should be
created.
checks:
description:
- A list of checks that will be used to verify the session health. If
all the checks fail, the session will be invalidated and any locks
associated with the session will be released and can be acquired once
the associated lock delay has expired.
host:
description:
- The host of the consul agent; defaults to localhost.
default: localhost
port:
description:
- The port on which the consul agent is running.
default: 8500
scheme:
description:
- The protocol scheme on which the consul agent is running.
default: http
version_added: "2.1"
validate_certs:
description:
- Whether to verify the tls certificate of the consul agent.
type: bool
default: True
version_added: "2.1"
behavior:
description:
- The optional behavior that can be attached to the session when it
is created. This controls the behavior when a session is invalidated.
choices: [ delete, release ]
default: release
version_added: "2.2"
"""
EXAMPLES = '''
- name: register basic session with consul
consul_session:
name: session1
- name: register a session with an existing check
consul_session:
name: session_with_check
checks:
- existing_check_name
- name: register a session with lock_delay
consul_session:
name: session_with_delay
delay: 20s
- name: retrieve info about session by id
consul_session:
id: session_id
state: info
- name: retrieve active sessions
consul_session:
state: list
'''
try:
import consul
from requests.exceptions import ConnectionError
python_consul_installed = True
except ImportError:
python_consul_installed = False
from ansible.module_utils.basic import AnsibleModule
def execute(module):
state = module.params.get('state')
if state in ['info', 'list', 'node']:
lookup_sessions(module)
elif state == 'present':
update_session(module)
else:
remove_session(module)
def lookup_sessions(module):
datacenter = module.params.get('datacenter')
state = module.params.get('state')
consul_client = get_consul_api(module)
try:
if state == 'list':
sessions_list = consul_client.session.list(dc=datacenter)
# Ditch the index, this can be grabbed from the results
if sessions_list and sessions_list[1]:
sessions_list = sessions_list[1]
module.exit_json(changed=True,
sessions=sessions_list)
elif state == 'node':
node = module.params.get('node')
if not node:
module.fail_json(
msg="node name is required to retrieve sessions for node")
sessions = consul_client.session.node(node, dc=datacenter)
module.exit_json(changed=True,
node=node,
sessions=sessions)
elif state == 'info':
session_id = module.params.get('id')
if not session_id:
module.fail_json(
msg="session_id is required to retrieve indvidual session info")
session_by_id = consul_client.session.info(session_id, dc=datacenter)
module.exit_json(changed=True,
session_id=session_id,
sessions=session_by_id)
except Exception as e:
module.fail_json(msg="Could not retrieve session info %s" % e)
def update_session(module):
name = module.params.get('name')
delay = module.params.get('delay')
checks = module.params.get('checks')
datacenter = module.params.get('datacenter')
node = module.params.get('node')
behavior = module.params.get('behavior')
consul_client = get_consul_api(module)
try:
session = consul_client.session.create(
name=name,
behavior=behavior,
node=node,
lock_delay=delay,
dc=datacenter,
checks=checks
)
module.exit_json(changed=True,
session_id=session,
name=name,
behavior=behavior,
delay=delay,
checks=checks,
node=node)
except Exception as e:
module.fail_json(msg="Could not create/update session %s" % e)
def remove_session(module):
session_id = module.params.get('id')
if not session_id:
module.fail_json(msg="""A session id must be supplied in order to
remove a session.""")
consul_client = get_consul_api(module)
try:
consul_client.session.destroy(session_id)
module.exit_json(changed=True,
session_id=session_id)
except Exception as e:
module.fail_json(msg="Could not remove session with id '%s' %s" % (
session_id, e))
def get_consul_api(module):
return consul.Consul(host=module.params.get('host'),
port=module.params.get('port'))
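# Sketch (an assumption, not the module's behavior): get_consul_api() above
# does not pass the documented 'scheme' and 'validate_certs' options through.
# python-consul's constructor accepts 'scheme' and 'verify' keywords, so a
# fuller call might look like:
#
#     consul.Consul(host=module.params.get('host'),
#                   port=module.params.get('port'),
#                   scheme=module.params.get('scheme'),
#                   verify=module.params.get('validate_certs'))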
def test_dependencies(module):
if not python_consul_installed:
module.fail_json(msg="python-consul required for this module. "
"see http://python-consul.readthedocs.org/en/latest/#installation")
def main():
argument_spec = dict(
checks=dict(type='list'),
delay=dict(type='int', default='15'),
behavior=dict(type='str', default='release', choices=['release', 'delete']),
host=dict(type='str', default='localhost'),
port=dict(type='int', default=8500),
scheme=dict(type='str', default='http'),
validate_certs=dict(type='bool', default=True),
id=dict(type='str'),
name=dict(type='str'),
node=dict(type='str'),
state=dict(type='str', default='present', choices=['absent', 'info', 'list', 'node', 'present']),
datacenter=dict(type='str'),
)
module = AnsibleModule(argument_spec, supports_check_mode=False)
test_dependencies(module)
try:
execute(module)
except ConnectionError as e:
module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
module.params.get('host'), module.params.get('port'), e))
except Exception as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
HousekeepLtd/django | tests/forms_tests/widget_tests/test_splitdatetimewidget.py | 202 | 1943 | from datetime import date, datetime, time
from django.forms import SplitDateTimeWidget
from .base import WidgetTest
class SplitDateTimeWidgetTest(WidgetTest):
widget = SplitDateTimeWidget()
def test_render_empty(self):
self.check_html(self.widget, 'date', '', html=(
'<input type="text" name="date_0" /><input type="text" name="date_1" />'
))
def test_render_none(self):
self.check_html(self.widget, 'date', None, html=(
'<input type="text" name="date_0" /><input type="text" name="date_1" />'
))
def test_render_datetime(self):
self.check_html(self.widget, 'date', datetime(2006, 1, 10, 7, 30), html=(
'<input type="text" name="date_0" value="2006-01-10" />'
'<input type="text" name="date_1" value="07:30:00" />'
))
def test_render_date_and_time(self):
self.check_html(self.widget, 'date', [date(2006, 1, 10), time(7, 30)], html=(
'<input type="text" name="date_0" value="2006-01-10" />'
'<input type="text" name="date_1" value="07:30:00" />'
))
def test_constructor_attrs(self):
widget = SplitDateTimeWidget(attrs={'class': 'pretty'})
self.check_html(widget, 'date', datetime(2006, 1, 10, 7, 30), html=(
'<input type="text" class="pretty" value="2006-01-10" name="date_0" />'
'<input type="text" class="pretty" value="07:30:00" name="date_1" />'
))
def test_formatting(self):
"""
Use 'date_format' and 'time_format' to change the way a value is
displayed.
"""
widget = SplitDateTimeWidget(
date_format='%d/%m/%Y', time_format='%H:%M',
)
self.check_html(widget, 'date', datetime(2006, 1, 10, 7, 30), html=(
'<input type="text" name="date_0" value="10/01/2006" />'
'<input type="text" name="date_1" value="07:30" />'
))
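def test_decompress_sketch(self):
    """Illustrative sketch (not part of the original suite): decompress()
    is what splits a single datetime across the two sub-widgets."""
    self.assertEqual(
        self.widget.decompress(datetime(2006, 1, 10, 7, 30)),
        [date(2006, 1, 10), time(7, 30)],
    )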
| bsd-3-clause |
partofthething/home-assistant | tests/components/onewire/test_init.py | 4 | 3085 | """Tests for 1-Wire config flow."""
from unittest.mock import patch
from pyownet.protocol import ConnError, OwnetError
from homeassistant.components.onewire.const import CONF_TYPE_OWSERVER, DOMAIN
from homeassistant.config_entries import (
CONN_CLASS_LOCAL_POLL,
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_RETRY,
)
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_TYPE
from . import setup_onewire_owserver_integration, setup_onewire_sysbus_integration
from tests.common import MockConfigEntry
async def test_owserver_connect_failure(hass):
"""Test connection failure raises ConfigEntryNotReady."""
config_entry_owserver = MockConfigEntry(
domain=DOMAIN,
source="user",
data={
CONF_TYPE: CONF_TYPE_OWSERVER,
CONF_HOST: "1.2.3.4",
CONF_PORT: "1234",
},
unique_id=f"{CONF_TYPE_OWSERVER}:1.2.3.4:1234",
connection_class=CONN_CLASS_LOCAL_POLL,
options={},
entry_id="2",
)
config_entry_owserver.add_to_hass(hass)
with patch(
"homeassistant.components.onewire.onewirehub.protocol.proxy",
side_effect=ConnError,
):
await hass.config_entries.async_setup(config_entry_owserver.entry_id)
await hass.async_block_till_done()
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert config_entry_owserver.state == ENTRY_STATE_SETUP_RETRY
assert not hass.data.get(DOMAIN)
async def test_failed_owserver_listing(hass):
"""Create the 1-Wire integration."""
config_entry_owserver = MockConfigEntry(
domain=DOMAIN,
source="user",
data={
CONF_TYPE: CONF_TYPE_OWSERVER,
CONF_HOST: "1.2.3.4",
CONF_PORT: "1234",
},
unique_id=f"{CONF_TYPE_OWSERVER}:1.2.3.4:1234",
connection_class=CONN_CLASS_LOCAL_POLL,
options={},
entry_id="2",
)
config_entry_owserver.add_to_hass(hass)
with patch("homeassistant.components.onewire.onewirehub.protocol.proxy") as owproxy:
owproxy.return_value.dir.side_effect = OwnetError
await hass.config_entries.async_setup(config_entry_owserver.entry_id)
await hass.async_block_till_done()
return config_entry_owserver
async def test_unload_entry(hass):
"""Test being able to unload an entry."""
config_entry_owserver = await setup_onewire_owserver_integration(hass)
config_entry_sysbus = await setup_onewire_sysbus_integration(hass)
assert len(hass.config_entries.async_entries(DOMAIN)) == 2
assert config_entry_owserver.state == ENTRY_STATE_LOADED
assert config_entry_sysbus.state == ENTRY_STATE_LOADED
assert await hass.config_entries.async_unload(config_entry_owserver.entry_id)
assert await hass.config_entries.async_unload(config_entry_sysbus.entry_id)
await hass.async_block_till_done()
assert config_entry_owserver.state == ENTRY_STATE_NOT_LOADED
assert config_entry_sysbus.state == ENTRY_STATE_NOT_LOADED
assert not hass.data.get(DOMAIN)
| mit |
wskplho/sl4a | python/src/Lib/encodings/mac_cyrillic.py | 593 | 13710 | """ Python Character Mapping Codec mac_cyrillic generated from 'MAPPINGS/VENDORS/APPLE/CYRILLIC.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-cyrillic',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\u0410' # 0x80 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0x81 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0x82 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0x83 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0x84 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0x85 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0x86 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0x87 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0x88 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0x89 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0x8A -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0x8B -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0x8C -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0x8D -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0x8E -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0x8F -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0x90 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0x91 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0x92 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0x93 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0x94 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0x95 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0x96 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0x97 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0x98 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0x99 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0x9A -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0x9B -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0x9C -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0x9D -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0x9E -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0x9F -> CYRILLIC CAPITAL LETTER YA
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\u0490' # 0xA2 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\u0406' # 0xA7 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\u0402' # 0xAB -> CYRILLIC CAPITAL LETTER DJE
u'\u0452' # 0xAC -> CYRILLIC SMALL LETTER DJE
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u0403' # 0xAE -> CYRILLIC CAPITAL LETTER GJE
u'\u0453' # 0xAF -> CYRILLIC SMALL LETTER GJE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u0456' # 0xB4 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u0491' # 0xB6 -> CYRILLIC SMALL LETTER GHE WITH UPTURN
u'\u0408' # 0xB7 -> CYRILLIC CAPITAL LETTER JE
u'\u0404' # 0xB8 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0454' # 0xB9 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0407' # 0xBA -> CYRILLIC CAPITAL LETTER YI
u'\u0457' # 0xBB -> CYRILLIC SMALL LETTER YI
u'\u0409' # 0xBC -> CYRILLIC CAPITAL LETTER LJE
u'\u0459' # 0xBD -> CYRILLIC SMALL LETTER LJE
u'\u040a' # 0xBE -> CYRILLIC CAPITAL LETTER NJE
u'\u045a' # 0xBF -> CYRILLIC SMALL LETTER NJE
u'\u0458' # 0xC0 -> CYRILLIC SMALL LETTER JE
u'\u0405' # 0xC1 -> CYRILLIC CAPITAL LETTER DZE
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\u040b' # 0xCB -> CYRILLIC CAPITAL LETTER TSHE
u'\u045b' # 0xCC -> CYRILLIC SMALL LETTER TSHE
u'\u040c' # 0xCD -> CYRILLIC CAPITAL LETTER KJE
u'\u045c' # 0xCE -> CYRILLIC SMALL LETTER KJE
u'\u0455' # 0xCF -> CYRILLIC SMALL LETTER DZE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u201e' # 0xD7 -> DOUBLE LOW-9 QUOTATION MARK
u'\u040e' # 0xD8 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045e' # 0xD9 -> CYRILLIC SMALL LETTER SHORT U
u'\u040f' # 0xDA -> CYRILLIC CAPITAL LETTER DZHE
u'\u045f' # 0xDB -> CYRILLIC SMALL LETTER DZHE
u'\u2116' # 0xDC -> NUMERO SIGN
u'\u0401' # 0xDD -> CYRILLIC CAPITAL LETTER IO
u'\u0451' # 0xDE -> CYRILLIC SMALL LETTER IO
u'\u044f' # 0xDF -> CYRILLIC SMALL LETTER YA
u'\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xED -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xEE -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE
u'\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0xFD -> CYRILLIC SMALL LETTER E
u'\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU
u'\u20ac' # 0xFF -> EURO SIGN
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
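### Illustrative round-trip (a sketch, not part of the generated file)
# charmap_encode/charmap_decode are exact inverses over the tables above, e.g.
#   codecs.charmap_decode('\x86', 'strict', decoding_table)[0] == u'\u0416'
#   codecs.charmap_encode(u'\u0416', 'strict', encoding_table)[0] == '\x86'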
| apache-2.0 |
jkyeung/XlsxWriter | xlsxwriter/test/comparison/test_comment09.py | 1 | 1227 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'comment09.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with comments."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write_comment('A1', 'Some text', {'author': 'John'})
worksheet.write_comment('A2', 'Some text', {'author': 'Perl'})
worksheet.write_comment('A3', 'Some text')
worksheet.set_comments_author('John')
workbook.close()
self.assertExcelEqual()
| bsd-2-clause |
goodwinnk/intellij-community | python/lib/Lib/encodings/koi8_r.py | 593 | 14035 | """ Python Character Mapping Codec koi8_r generated from 'MAPPINGS/VENDORS/MISC/KOI8-R.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='koi8-r',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u2500' # 0x80 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u2502' # 0x81 -> BOX DRAWINGS LIGHT VERTICAL
u'\u250c' # 0x82 -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2510' # 0x83 -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x84 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2518' # 0x85 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u251c' # 0x86 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2524' # 0x87 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u252c' # 0x88 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u2534' # 0x89 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u253c' # 0x8A -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u2580' # 0x8B -> UPPER HALF BLOCK
u'\u2584' # 0x8C -> LOWER HALF BLOCK
u'\u2588' # 0x8D -> FULL BLOCK
u'\u258c' # 0x8E -> LEFT HALF BLOCK
u'\u2590' # 0x8F -> RIGHT HALF BLOCK
u'\u2591' # 0x90 -> LIGHT SHADE
u'\u2592' # 0x91 -> MEDIUM SHADE
u'\u2593' # 0x92 -> DARK SHADE
u'\u2320' # 0x93 -> TOP HALF INTEGRAL
u'\u25a0' # 0x94 -> BLACK SQUARE
u'\u2219' # 0x95 -> BULLET OPERATOR
u'\u221a' # 0x96 -> SQUARE ROOT
u'\u2248' # 0x97 -> ALMOST EQUAL TO
u'\u2264' # 0x98 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0x99 -> GREATER-THAN OR EQUAL TO
u'\xa0' # 0x9A -> NO-BREAK SPACE
u'\u2321' # 0x9B -> BOTTOM HALF INTEGRAL
u'\xb0' # 0x9C -> DEGREE SIGN
u'\xb2' # 0x9D -> SUPERSCRIPT TWO
u'\xb7' # 0x9E -> MIDDLE DOT
u'\xf7' # 0x9F -> DIVISION SIGN
u'\u2550' # 0xA0 -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u2551' # 0xA1 -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2552' # 0xA2 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u0451' # 0xA3 -> CYRILLIC SMALL LETTER IO
u'\u2553' # 0xA4 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u2554' # 0xA5 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2555' # 0xA6 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2556' # 0xA7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2557' # 0xA8 -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u2558' # 0xA9 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2559' # 0xAA -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u255a' # 0xAB -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u255b' # 0xAC -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u255c' # 0xAD -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255d' # 0xAE -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255e' # 0xAF -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0xB0 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u2560' # 0xB1 -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2561' # 0xB2 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u0401' # 0xB3 -> CYRILLIC CAPITAL LETTER IO
u'\u2562' # 0xB4 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2563' # 0xB5 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2564' # 0xB6 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0xB7 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2566' # 0xB8 -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2567' # 0xB9 -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0xBA -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2569' # 0xBB -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u256a' # 0xBC -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u256b' # 0xBD -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256c' # 0xBE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa9' # 0xBF -> COPYRIGHT SIGN
u'\u044e' # 0xC0 -> CYRILLIC SMALL LETTER YU
u'\u0430' # 0xC1 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xC2 -> CYRILLIC SMALL LETTER BE
u'\u0446' # 0xC3 -> CYRILLIC SMALL LETTER TSE
u'\u0434' # 0xC4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xC5 -> CYRILLIC SMALL LETTER IE
u'\u0444' # 0xC6 -> CYRILLIC SMALL LETTER EF
u'\u0433' # 0xC7 -> CYRILLIC SMALL LETTER GHE
u'\u0445' # 0xC8 -> CYRILLIC SMALL LETTER HA
u'\u0438' # 0xC9 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xCA -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xCB -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xCC -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xCD -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xCE -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xCF -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xD0 -> CYRILLIC SMALL LETTER PE
u'\u044f' # 0xD1 -> CYRILLIC SMALL LETTER YA
u'\u0440' # 0xD2 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xD3 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xD4 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xD5 -> CYRILLIC SMALL LETTER U
u'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
u'\u0432' # 0xD7 -> CYRILLIC SMALL LETTER VE
u'\u044c' # 0xD8 -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044b' # 0xD9 -> CYRILLIC SMALL LETTER YERU
u'\u0437' # 0xDA -> CYRILLIC SMALL LETTER ZE
u'\u0448' # 0xDB -> CYRILLIC SMALL LETTER SHA
u'\u044d' # 0xDC -> CYRILLIC SMALL LETTER E
u'\u0449' # 0xDD -> CYRILLIC SMALL LETTER SHCHA
u'\u0447' # 0xDE -> CYRILLIC SMALL LETTER CHE
u'\u044a' # 0xDF -> CYRILLIC SMALL LETTER HARD SIGN
u'\u042e' # 0xE0 -> CYRILLIC CAPITAL LETTER YU
u'\u0410' # 0xE1 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0xE2 -> CYRILLIC CAPITAL LETTER BE
u'\u0426' # 0xE3 -> CYRILLIC CAPITAL LETTER TSE
u'\u0414' # 0xE4 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0xE5 -> CYRILLIC CAPITAL LETTER IE
u'\u0424' # 0xE6 -> CYRILLIC CAPITAL LETTER EF
u'\u0413' # 0xE7 -> CYRILLIC CAPITAL LETTER GHE
u'\u0425' # 0xE8 -> CYRILLIC CAPITAL LETTER HA
u'\u0418' # 0xE9 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0xEA -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0xEB -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0xEC -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0xED -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0xEE -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0xEF -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0xF0 -> CYRILLIC CAPITAL LETTER PE
u'\u042f' # 0xF1 -> CYRILLIC CAPITAL LETTER YA
u'\u0420' # 0xF2 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0xF3 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0xF4 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0xF5 -> CYRILLIC CAPITAL LETTER U
u'\u0416' # 0xF6 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0412' # 0xF7 -> CYRILLIC CAPITAL LETTER VE
u'\u042c' # 0xF8 -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042b' # 0xF9 -> CYRILLIC CAPITAL LETTER YERU
u'\u0417' # 0xFA -> CYRILLIC CAPITAL LETTER ZE
u'\u0428' # 0xFB -> CYRILLIC CAPITAL LETTER SHA
u'\u042d' # 0xFC -> CYRILLIC CAPITAL LETTER E
u'\u0429' # 0xFD -> CYRILLIC CAPITAL LETTER SHCHA
u'\u0427' # 0xFE -> CYRILLIC CAPITAL LETTER CHE
u'\u042a' # 0xFF -> CYRILLIC CAPITAL LETTER HARD SIGN
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
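### Illustrative registration (a sketch, not part of the generated file)
# The encodings package wires getregentry() up lazily; doing it by hand would
# look like:
#   codecs.register(
#       lambda name: getregentry() if name in ('koi8-r', 'koi8_r') else None)
#   u'\u0410'.encode('koi8-r') == '\xe1'   # CYRILLIC CAPITAL LETTER A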
| apache-2.0 |
lidavidm/sympy | sympy/physics/mechanics/tests/test_particle.py | 46 | 1391 | from sympy import symbols
from sympy.physics.mechanics import Point, Particle, ReferenceFrame
def test_particle():
m, m2, v1, v2, v3, r, g, h = symbols('m m2 v1 v2 v3 r g h')
P = Point('P')
P2 = Point('P2')
p = Particle('pa', P, m)
assert p.mass == m
assert p.point == P
# Test the mass setter
p.mass = m2
assert p.mass == m2
# Test the point setter
p.point = P2
assert p.point == P2
# Test the linear momentum function
N = ReferenceFrame('N')
O = Point('O')
P2.set_pos(O, r * N.y)
P2.set_vel(N, v1 * N.x)
assert p.linear_momentum(N) == m2 * v1 * N.x
assert p.angular_momentum(O, N) == -m2 * r * v1 * N.z
P2.set_vel(N, v2 * N.y)
assert p.linear_momentum(N) == m2 * v2 * N.y
assert p.angular_momentum(O, N) == 0
P2.set_vel(N, v3 * N.z)
assert p.linear_momentum(N) == m2 * v3 * N.z
assert p.angular_momentum(O, N) == m2 * r * v3 * N.x
P2.set_vel(N, v1 * N.x + v2 * N.y + v3 * N.z)
assert p.linear_momentum(N) == m2 * (v1 * N.x + v2 * N.y + v3 * N.z)
assert p.angular_momentum(O, N) == m2 * r * (v3 * N.x - v1 * N.z)
p.set_potential_energy(m * g * h)
assert p.potential_energy == m * g * h
# TODO make the result not be system-dependent
assert p.kinetic_energy(
N) in [m2*(v1**2 + v2**2 + v3**2)/2,
m2 * v1**2 / 2 + m2 * v2**2 / 2 + m2 * v3**2 / 2]
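def test_angular_momentum_cross_product_sketch():
    # Illustrative sketch (not part of the original suite): the angular
    # momenta asserted above are just L = r x (m*v), evaluated here with the
    # mechanics cross product operator '^'.
    m2, v1, r = symbols('m2 v1 r')
    N = ReferenceFrame('N')
    assert (r * N.y) ^ (m2 * v1 * N.x) == -m2 * r * v1 * N.z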
| bsd-3-clause |
crepererum/invenio | invenio/modules/records/testsuite/functions/sync_meeting_names.py | 33 | 1888 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from invenio.modules.jsonalchemy.jsonext.functions.util_merge_fields_info_list \
import util_merge_fields_info_list
def sync_meeting_names(self, field_name, connected_field, action): # pylint: disable=W0613
"""
Sync corporate names content only when `__setitem__` or similar is used
"""
if action == 'set':
if field_name == 'corporate_names' and self.get('corporate_names'):
self.__setitem__('_first_corporate_name',
self['corporate_names'][0],
exclude=['connect'])
if self['corporate_names'][1:]:
self.__setitem__('_additional_corporate_names',
self['corporate_names'][1:],
exclude=['connect'])
elif field_name in ('_first_author', '_additional_authors'):
self.__setitem__(
'corporate_names',
util_merge_fields_info_list(self, ['_first_corporate_name',
'_additional_corporate_names']),
exclude=['connect'])
| gpl-2.0 |
manuq/sugar-toolkit-gtk3 | examples/ticket2855.py | 9 | 1630 | # Copyright (C) 2007, Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
Test the style of toggle and radio buttons inside a palette. The buttons
contain only an icon and should be rendered similarly to the toolbar
controls. Ticket #2855.
"""
from gi.repository import Gtk
from sugar3.graphics.palette import Palette
from sugar3.graphics.icon import Icon
import common
test = common.TestPalette()
palette = Palette('Test radio and toggle')
test.set_palette(palette)
box = Gtk.HBox()
toggle = Gtk.ToggleButton()
icon = Icon(icon_name='go-previous', icon_size=Gtk.IconSize.LARGE_TOOLBAR)
toggle.set_image(icon)
box.pack_start(toggle, False, False, 0)
toggle.show()
radio = Gtk.RadioButton()
icon = Icon(icon_name='go-next', icon_size=Gtk.IconSize.LARGE_TOOLBAR)
radio.set_image(icon)
radio.set_mode(False)
box.pack_start(radio, False, False, 0)
radio.show()
palette.set_content(box)
box.show()
if __name__ == '__main__':
common.main(test)
| lgpl-2.1 |
ryfeus/lambda-packs | Tensorflow/source/concurrent/futures/_base.py | 24 | 22424 | # Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
import collections
import logging
import threading
import itertools
import time
import types
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
_FUTURE_STATES = [
PENDING,
RUNNING,
CANCELLED,
CANCELLED_AND_NOTIFIED,
FINISHED
]
_STATE_TO_DESCRIPTION_MAP = {
PENDING: "pending",
RUNNING: "running",
CANCELLED: "cancelled",
CANCELLED_AND_NOTIFIED: "cancelled",
FINISHED: "finished"
}
# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")
class Error(Exception):
"""Base class for all future-related exceptions."""
pass
class CancelledError(Error):
"""The Future was cancelled."""
pass
class TimeoutError(Error):
"""The operation exceeded the given deadline."""
pass
class _Waiter(object):
"""Provides the event that wait() and as_completed() block on."""
def __init__(self):
self.event = threading.Event()
self.finished_futures = []
def add_result(self, future):
self.finished_futures.append(future)
def add_exception(self, future):
self.finished_futures.append(future)
def add_cancelled(self, future):
self.finished_futures.append(future)
class _AsCompletedWaiter(_Waiter):
"""Used by as_completed()."""
def __init__(self):
super(_AsCompletedWaiter, self).__init__()
self.lock = threading.Lock()
def add_result(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _FirstCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_COMPLETED)."""
def add_result(self, future):
super(_FirstCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
super(_FirstCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
super(_FirstCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _AllCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""
def __init__(self, num_pending_calls, stop_on_exception):
self.num_pending_calls = num_pending_calls
self.stop_on_exception = stop_on_exception
self.lock = threading.Lock()
super(_AllCompletedWaiter, self).__init__()
def _decrement_pending_calls(self):
with self.lock:
self.num_pending_calls -= 1
if not self.num_pending_calls:
self.event.set()
def add_result(self, future):
super(_AllCompletedWaiter, self).add_result(future)
self._decrement_pending_calls()
def add_exception(self, future):
super(_AllCompletedWaiter, self).add_exception(future)
if self.stop_on_exception:
self.event.set()
else:
self._decrement_pending_calls()
def add_cancelled(self, future):
super(_AllCompletedWaiter, self).add_cancelled(future)
self._decrement_pending_calls()
class _AcquireFutures(object):
"""A context manager that does an ordered acquire of Future conditions."""
def __init__(self, futures):
self.futures = sorted(futures, key=id)
def __enter__(self):
for future in self.futures:
future._condition.acquire()
def __exit__(self, *args):
for future in self.futures:
future._condition.release()
def _create_and_install_waiters(fs, return_when):
if return_when == _AS_COMPLETED:
waiter = _AsCompletedWaiter()
elif return_when == FIRST_COMPLETED:
waiter = _FirstCompletedWaiter()
else:
pending_count = sum(
f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
if return_when == FIRST_EXCEPTION:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
elif return_when == ALL_COMPLETED:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
else:
raise ValueError("Invalid return condition: %r" % return_when)
for f in fs:
f._waiters.append(waiter)
return waiter
def as_completed(fs, timeout=None):
"""An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete (finished or
cancelled). If any given Futures are duplicated, they will be returned
once.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
"""
if timeout is not None:
end_time = timeout + time.time()
fs = set(fs)
with _AcquireFutures(fs):
finished = set(
f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
pending = fs - finished
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
try:
for future in finished:
yield future
while pending:
if timeout is None:
wait_timeout = None
else:
wait_timeout = end_time - time.time()
if wait_timeout < 0:
raise TimeoutError(
'%d (of %d) futures unfinished' % (
len(pending), len(fs)))
waiter.event.wait(wait_timeout)
with waiter.lock:
finished = waiter.finished_futures
waiter.finished_futures = []
waiter.event.clear()
for future in finished:
yield future
pending.remove(future)
finally:
for f in fs:
with f._condition:
f._waiters.remove(waiter)
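# Illustrative usage of as_completed() (a sketch; ThreadPoolExecutor lives in
# this package's 'thread' module, and fetch()/urls are made-up names):
#
#   with ThreadPoolExecutor(max_workers=4) as executor:
#       fs = [executor.submit(fetch, url) for url in urls]
#       for future in as_completed(fs, timeout=30):
#           print(future.result())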
DoneAndNotDoneFutures = collections.namedtuple(
'DoneAndNotDoneFutures', 'done not_done')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the futures in the given sequence to complete.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
wait upon.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
return_when: Indicates when this function should return. The options
are:
FIRST_COMPLETED - Return when any future finishes or is
cancelled.
FIRST_EXCEPTION - Return when any future finishes by raising an
exception. If no future raises an exception
then it is equivalent to ALL_COMPLETED.
ALL_COMPLETED - Return when all futures finish or are cancelled.
Returns:
A named 2-tuple of sets. The first set, named 'done', contains the
futures that completed (is finished or cancelled) before the wait
completed. The second set, named 'not_done', contains uncompleted
futures.
"""
with _AcquireFutures(fs):
done = set(f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
not_done = set(fs) - done
if (return_when == FIRST_COMPLETED) and done:
return DoneAndNotDoneFutures(done, not_done)
elif (return_when == FIRST_EXCEPTION) and done:
if any(f for f in done
if not f.cancelled() and f.exception() is not None):
return DoneAndNotDoneFutures(done, not_done)
if len(done) == len(fs):
return DoneAndNotDoneFutures(done, not_done)
waiter = _create_and_install_waiters(fs, return_when)
waiter.event.wait(timeout)
for f in fs:
with f._condition:
f._waiters.remove(waiter)
done.update(waiter.finished_futures)
return DoneAndNotDoneFutures(done, set(fs) - done)
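# Illustrative usage of wait() (a sketch over the same made-up futures):
#
#   done, not_done = wait(fs, timeout=5.0, return_when=FIRST_COMPLETED)
#   for future in done:
#       print(future.result())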
class Future(object):
"""Represents the result of an asynchronous computation."""
def __init__(self):
"""Initializes the future. Should not be called by clients."""
self._condition = threading.Condition()
self._state = PENDING
self._result = None
self._exception = None
self._traceback = None
self._waiters = []
self._done_callbacks = []
def _invoke_callbacks(self):
for callback in self._done_callbacks:
try:
callback(self)
except Exception:
LOGGER.exception('exception calling callback for %r', self)
except BaseException:
# Explicitly let all other new-style exceptions through so
# that we can catch all old-style exceptions with a simple
# "except:" clause below.
#
# All old-style exception objects are instances of
# types.InstanceType, but "except types.InstanceType:" does
# not catch old-style exceptions for some reason. Thus, the
# only way to catch all old-style exceptions without catching
# any new-style exceptions is to filter out the new-style
# exceptions, which all derive from BaseException.
raise
except:
# Because of the BaseException clause above, this handler only
# executes for old-style exception objects.
LOGGER.exception('exception calling callback for %r', self)
def __repr__(self):
with self._condition:
if self._state == FINISHED:
if self._exception:
return '<Future at %s state=%s raised %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._exception.__class__.__name__)
else:
return '<Future at %s state=%s returned %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._result.__class__.__name__)
return '<Future at %s state=%s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state])
def cancel(self):
"""Cancel the future if possible.
Returns True if the future was cancelled, False otherwise. A future
cannot be cancelled if it is running or has already completed.
"""
with self._condition:
if self._state in [RUNNING, FINISHED]:
return False
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
return True
self._state = CANCELLED
self._condition.notify_all()
self._invoke_callbacks()
return True
def cancelled(self):
"""Return True if the future has cancelled."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
def running(self):
"""Return True if the future is currently executing."""
with self._condition:
return self._state == RUNNING
def done(self):
"""Return True of the future was cancelled or finished executing."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
def __get_result(self):
if self._exception:
if isinstance(self._exception, types.InstanceType):
# The exception is an instance of an old-style class, which
# means type(self._exception) returns types.ClassType instead
# of the exception's actual class type.
exception_type = self._exception.__class__
else:
exception_type = type(self._exception)
raise exception_type, self._exception, self._traceback
else:
return self._result
def add_done_callback(self, fn):
"""Attaches a callable that will be called when the future finishes.
Args:
fn: A callable that will be called with this future as its only
argument when the future completes or is cancelled. The callable
will always be called by a thread in the same process in which
it was added. If the future has already completed or been
cancelled then the callable will be called immediately. These
callables are called in the order that they were added.
"""
with self._condition:
if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
self._done_callbacks.append(fn)
return
fn(self)
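# Editorial example (the `handle` function is hypothetical): a callback added
# after completion runs immediately in the caller's thread, e.g.
# f.add_done_callback(lambda fut: handle(fut.result())).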
def result(self, timeout=None):
"""Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the future
isn't done. If None, then there is no limit on the wait time.
Returns:
The result of the call that the future represents.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
Exception: If the call raised then that exception will be raised.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
else:
raise TimeoutError()
def exception_info(self, timeout=None):
"""Return a tuple of (exception, traceback) raised by the call that the
future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns:
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception, self._traceback
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception, self._traceback
else:
raise TimeoutError()
def exception(self, timeout=None):
"""Return the exception raised by the call that the future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns:
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
return self.exception_info(timeout)[0]
# The following methods should only be used by Executors and in tests.
def set_running_or_notify_cancel(self):
"""Mark the future as running or process any cancel notifications.
Should only be used by Executor implementations and unit tests.
If the future has been cancelled (cancel() was called and returned
True) then any threads waiting on the future completing (through calls
to as_completed() or wait()) are notified and False is returned.
If the future was not cancelled then it is put in the running state
(future calls to running() will return True) and True is returned.
This method should be called by Executor implementations before
executing the work associated with this future. If this method returns
False then the work should not be executed.
Returns:
False if the Future was cancelled, True otherwise.
Raises:
RuntimeError: if this method was already called or if set_result()
or set_exception() was called.
"""
with self._condition:
if self._state == CANCELLED:
self._state = CANCELLED_AND_NOTIFIED
for waiter in self._waiters:
waiter.add_cancelled(self)
# self._condition.notify_all() is not necessary because
# self.cancel() triggers a notification.
return False
elif self._state == PENDING:
self._state = RUNNING
return True
else:
LOGGER.critical('Future %s in unexpected state: %s',
id(self),
self._state)
raise RuntimeError('Future in unexpected state')
def set_result(self, result):
"""Sets the return value of work associated with the future.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._result = result
self._state = FINISHED
for waiter in self._waiters:
waiter.add_result(self)
self._condition.notify_all()
self._invoke_callbacks()
def set_exception_info(self, exception, traceback):
"""Sets the result of the future as being the given exception
and traceback.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._exception = exception
self._traceback = traceback
self._state = FINISHED
for waiter in self._waiters:
waiter.add_exception(self)
self._condition.notify_all()
self._invoke_callbacks()
def set_exception(self, exception):
"""Sets the result of the future as being the given exception.
Should only be used by Executor implementations and unit tests.
"""
self.set_exception_info(exception, None)
class Executor(object):
"""This is an abstract base class for concrete asynchronous executors."""
def submit(self, fn, *args, **kwargs):
"""Submits a callable to be executed with the given arguments.
Schedules the callable to be executed as fn(*args, **kwargs) and returns
a Future instance representing the execution of the callable.
Returns:
A Future representing the given call.
"""
raise NotImplementedError()
def map(self, fn, *iterables, **kwargs):
"""Returns a iterator equivalent to map(fn, iter).
Args:
fn: A callable that will take as many arguments as there are
passed iterables.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator equivalent to: map(fn, *iterables) but the calls may
be evaluated out-of-order.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
Exception: If fn(*args) raises for any values.
"""
timeout = kwargs.get('timeout')
if timeout is not None:
end_time = timeout + time.time()
fs = [self.submit(fn, *args) for args in itertools.izip(*iterables)]
# Yield must be hidden in closure so that the futures are submitted
# before the first iterator value is required.
def result_iterator():
try:
for future in fs:
if timeout is None:
yield future.result()
else:
yield future.result(end_time - time.time())
finally:
for future in fs:
future.cancel()
return result_iterator()
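# Editorial note: with a concrete executor, all calls are submitted before the
# first result is requested, e.g.
# list(executor.map(pow, [2, 3], [4, 5])) == [16, 243].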
def shutdown(self, wait=True):
"""Clean-up the resources associated with the Executor.
It is safe to call this method several times. However, no other
methods may be called after this one has been called.
Args:
wait: If True then shutdown will not return until all running
futures have finished executing and the resources used by the
executor have been reclaimed.
"""
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown(wait=True)
return False
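# Editorial sketch (not part of the original module): the smallest useful
# concrete Executor runs the callable inline at submit() time. It illustrates
# the Future/Executor contract documented above; error handling mirrors
# set_exception_info().
class _SynchronousExecutor(Executor):
    """Toy executor that evaluates fn(*args, **kwargs) synchronously."""
    def submit(self, fn, *args, **kwargs):
        future = Future()
        if future.set_running_or_notify_cancel():
            try:
                future.set_result(fn(*args, **kwargs))
            except BaseException:
                import sys
                _, exc, tb = sys.exc_info()
                future.set_exception_info(exc, tb)
        return future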
| mit |
dribnet/keras | tests/auto/test_regularizers.py | 75 | 2372 | import unittest
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.models import Sequential
from keras.layers.core import Merge, Dense, Activation, Flatten, ActivityRegularization
from keras.layers.embeddings import Embedding
from keras.datasets import mnist
from keras.utils import np_utils
from keras import regularizers
nb_classes = 10
batch_size = 128
nb_epoch = 5
weighted_class = 9
standard_weight = 1
high_weight = 5
max_train_samples = 5000
max_test_samples = 1000
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)[:max_train_samples]
X_test = X_test.reshape(10000, 784)[:max_test_samples]
X_train = X_train.astype("float32") / 255
X_test = X_test.astype("float32") / 255
# convert class vectors to binary class matrices
y_train = y_train[:max_train_samples]
y_test = y_test[:max_test_samples]
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
test_ids = np.where(y_test == np.array(weighted_class))[0]
def create_model(weight_reg=None, activity_reg=None):
model = Sequential()
model.add(Dense(784, 50))
model.add(Activation('relu'))
model.add(Dense(50, 10, W_regularizer=weight_reg, activity_regularizer=activity_reg))
model.add(Activation('softmax'))
return model
class TestRegularizers(unittest.TestCase):
def test_W_reg(self):
for reg in [regularizers.identity(), regularizers.l1(), regularizers.l2(), regularizers.l1l2()]:
model = create_model(weight_reg=reg)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0)
def test_A_reg(self):
for reg in [regularizers.activity_l1(), regularizers.activity_l2()]:
model = create_model(activity_reg=reg)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0)
if __name__ == '__main__':
print('Test weight and activity regularizers')
unittest.main()
| mit |
barachka/odoo | addons/event/wizard/event_confirm.py | 339 | 1387 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api
class event_confirm(models.TransientModel):
"""Event Confirmation"""
_name = "event.confirm"
@api.multi
def confirm(self):
events = self.env['event.event'].browse(self._context.get('event_ids', []))
events.do_confirm()
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rolapp/plugin.video.zattooboxExt.beta | resources/zapisession.py | 1 | 4818 | # coding=utf-8
#
#
# ZapiSession
# (c) 2014 Pascal Nançoz
# modified by Daniel Griner
#
import os, re, base64
import urllib, urllib2
import json
class ZapiSession:
ZAPI_AUTH_URL = 'https://zattoo.com'
ZAPI_URL = 'https://zattoo.com'
DATA_FOLDER = None
COOKIE_FILE = None
SESSION_FILE = None
ACCOUNT_FILE = None
HttpHandler = None
Username = None
Password = None
SessionData = None
AccountData = None
def __init__(self, dataFolder):
self.DATA_FOLDER = dataFolder
self.COOKIE_FILE = os.path.join(dataFolder, 'cookie.cache')
self.SESSION_FILE = os.path.join(dataFolder, 'session.cache')
self.ACCOUNT_FILE = os.path.join(dataFolder, 'account.cache')
self.APICALL_FILE = os.path.join(dataFolder, 'apicall.cache')
self.HttpHandler = urllib2.build_opener()
self.HttpHandler.addheaders = [('Content-type', 'application/x-www-form-urlencoded'), ('Accept', 'application/json')]
def init_session(self, username, password):
self.Username = username
self.Password = password
return self.restore_session() or self.renew_session()
def restore_session(self):
if os.path.isfile(self.COOKIE_FILE) and os.path.isfile(self.ACCOUNT_FILE) and os.path.isfile(self.SESSION_FILE):
with open(self.ACCOUNT_FILE, 'r') as f:
accountData = json.loads(base64.b64decode(f.readline()))
if accountData['success'] == True:
self.AccountData = accountData
with open(self.COOKIE_FILE, 'r') as f:
self.set_cookie(base64.b64decode(f.readline()))
with open(self.SESSION_FILE, 'r') as f:
self.SessionData = json.loads(base64.b64decode(f.readline()))
return True
return False
def extract_sessionId(self, cookieContent):
if cookieContent is not None:
return re.search("beaker\.session\.id\s*=\s*([^\s;]*)", cookieContent).group(1)
return None
def get_accountData(self):
accountData={}
if os.path.isfile(self.ACCOUNT_FILE):
with open(self.ACCOUNT_FILE, 'r') as f:
accountData = json.loads(base64.b64decode(f.readline()))
return accountData
def persist_accountData(self, accountData):
with open(self.ACCOUNT_FILE, 'w') as f:
f.write(base64.b64encode(json.dumps(accountData)))
def persist_sessionId(self, sessionId):
with open(self.COOKIE_FILE, 'w') as f:
f.write(base64.b64encode(sessionId))
def persist_sessionData(self, sessionData):
with open(self.SESSION_FILE, 'w') as f:
f.write(base64.b64encode(json.dumps(sessionData)))
def set_cookie(self, sessionId):
self.HttpHandler.addheaders.append(('Cookie', 'beaker.session.id=' + sessionId))
def request_url(self, url, params):
try:
response = self.HttpHandler.open(url, urllib.urlencode(params) if params is not None else None)
if response is not None:
sessionId = self.extract_sessionId(response.info().getheader('Set-Cookie'))
if sessionId is not None:
self.set_cookie(sessionId)
self.persist_sessionId(sessionId)
return response.read()
except Exception:
pass
return None
# exec_zapiCall with params=None issues a GET request, otherwise a POST
def exec_zapiCall(self, api, params, context='default'):
url = self.ZAPI_AUTH_URL + api if context == 'session' else self.ZAPI_URL + api
content = self.request_url(url, params)
if content is None and context != 'session' and self.renew_session():
content = self.request_url(url, params)
if content is None:
return None
try:
resultData = json.loads(content)
return resultData
#if resultData['success']:
# return resultData
#except Exception:
#pass
#try:
#if resultData['title'] == "On Demand":
#return resultData
except Exception:
pass
return None
def fetch_appToken(self):
handle = urllib2.urlopen(self.ZAPI_URL + '/')
html = handle.read()
print "App-Token:" + str(re.search("window\.appToken\s*=\s*'(.*)'", html).group(1))
return re.search("window\.appToken\s*=\s*'(.*)'", html).group(1)
def session(self):
api = '/zapi/session/hello'
params = {"client_app_token" : self.fetch_appToken(),
"uuid" : "d7512e98-38a0-4f01-b820-5a5cf98141fe",
"lang" : "en",
"format" : "json"}
sessionData = self.exec_zapiCall(api, params, 'session')
if sessionData is not None:
self.SessionData = sessionData
self.persist_sessionData(sessionData)
return True
return False
def login(self):
api = '/zapi/account/login'
params = {"login": self.Username, "password" : self.Password}
accountData = self.exec_zapiCall(api, params, 'session')
if accountData is not None:
self.AccountData = accountData
self.persist_accountData(accountData)
return True
return False
def renew_session(self):
return self.session() and self.login()
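# Editorial usage sketch (not part of the original plugin; the data folder,
# credentials and API path below are placeholders):
def _example_usage():
    session = ZapiSession('/tmp/zattoo-cache')
    if session.init_session('user@example.com', 'secret'):
        # params=None makes exec_zapiCall issue a GET request (see above)
        return session.exec_zapiCall('/zapi/some/endpoint', None)
    return None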
| bsd-2-clause |
nextgis/buildbot | worker/deb_util.py | 1 | 14217 | # -*- coding: utf-8 -*-
################################################################################
# Project: ppa deb buildbot utility
# Purpose: prepare debian changelog and package
# Author: Dmitry Baryshnikov, dmitry.baryshnikov@nextgis.ru
################################################################################
# Copyright (C) 2016-2020, NextGIS <info@nextgis.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
import sys
import os
import argparse
import subprocess
from datetime import datetime
import json
import base64
import glob
import shutil
from urllib.request import urlopen, Request, HTTPError
format_simple = '--pretty=format:%h - %an : %s'
format_debian = '--pretty=format: * %h - %an : %s'
repka_site = 'rm.nextgis.com'
repka_endpoint = 'https://{}'.format(repka_site)
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
OKGRAY = '\033[0;37m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
DGRAY='\033[1;30m'
LRED='\033[1;31m'
LGREEN='\033[1;32m'
LYELLOW='\033[1;33m'
LBLUE='\033[1;34m'
LMAGENTA='\033[1;35m'
LCYAN='\033[1;36m'
WHITE='\033[1;37m'
def color_print(text, bold, color):
if sys.platform == 'win32':
print(text)
else:
out_text = ''
if bold:
out_text += bcolors.BOLD
if color == 'GREEN':
out_text += bcolors.OKGREEN
elif color == 'LGREEN':
out_text += bcolors.LGREEN
elif color == 'LYELLOW':
out_text += bcolors.LYELLOW
elif color == 'LMAGENTA':
out_text += bcolors.LMAGENTA
elif color == 'LCYAN':
out_text += bcolors.LCYAN
elif color == 'LRED':
out_text += bcolors.LRED
elif color == 'LBLUE':
out_text += bcolors.LBLUE
elif color == 'DGRAY':
out_text += bcolors.DGRAY
elif color == 'OKGRAY':
out_text += bcolors.OKGRAY
else:
out_text += bcolors.OKGRAY
out_text += text + bcolors.ENDC
print(out_text)
def upload_file(file_path, username, password):
post_url = repka_endpoint + '/api/upload'
args = []
if username is not None and password is not None:
args = ['curl', '-u', username + ':' + password, '-F', 'file=@' + file_path,
post_url
]
else:
args = ['curl', '-F', 'file=@' + file_path, post_url]
load_response = subprocess.check_output(args)
response = json.loads(load_response.decode())
file_uid = response['file']
file_name = response['name']
color_print('Uploaded: {} / {}'.format(file_uid, file_name), True, 'LGREEN')
return file_uid, file_name
def add_auth(request, login, password):
if login is not None and password is not None:
base64string = base64.b64encode('{}:{}'.format(login, password).encode())
request.add_header("Authorization", "Basic {}".format(base64string.decode()))
return request
def get_packet_id(repo_id, packet_name, username, password):
url = repka_endpoint + '/api/packet?repository={}&filter={}'.format(repo_id, packet_name)
color_print('Check packet url: ' + url, False, 'OKGRAY')
request = Request(url)
request = add_auth(request, username, password)
response = urlopen(request)
packets = json.loads(response.read().decode('utf-8'))
for packet in packets:
if packet['name'] == packet_name:
return packet['id']
return -1
def get_release_counter(packet_id, tag, distro_codename, username, password):
url = repka_endpoint + '/api/release?packet={}'.format(packet_id)
color_print('Check release url: ' + url, False, 'OKGRAY')
request = Request(url)
request = add_auth(request, username, password)
response = urlopen(request)
releases = json.loads(response.read().decode('utf-8'))
counter = 0
if releases is None:
color_print('Release ID not found', False, 'LCYAN')
return counter
for release in releases:
for tag_item in release['tags']:
if tag_item.startswith(tag):
for option_item in release['options']:
if option_item['value'] == distro_codename:
tag_parts = tag_item.split('+')
if len(tag_parts) > 1:
current_counter = int(tag_parts[1])
if current_counter >= counter:
counter = current_counter + 1
color_print('Release counter: {}'.format(counter), False, 'LCYAN')
return counter
def create_release(packet_id, name, description, tag, distrib, distrib_version,
component, files, username, password):
url = repka_endpoint + '/api/release'
data = json.dumps({
"name": name,
"description": description,
"tags": [tag,],
"packet": packet_id,
"files": files,
"options": [
{"key": "distribution", "value": distrib},
{"key": "component", "value": component},
{"key": "version", "value": distrib_version},
]
})
request = Request(url, data=data.encode(), headers={'Content-Type': 'application/json'}) # , 'Content-Length': len(data)
request = add_auth(request, username, password)
try:
response = urlopen(request)
except HTTPError as e:
print(e.read())
exit(1)
release = json.loads(response.read().decode('utf-8'))
color_print('Release with ID {} created'.format(release['id']), False, 'LCYAN')
return release['id']
def write_changelog(package, version, counter, distro, repo_path, urgency='medium'):
full_message = '{} ({}+{}) {}; urgency={}\n\n'.format(package, version, counter, distro, urgency)
log_messages = subprocess.check_output(['git', 'log', format_debian, '-3', '--no-merges'], cwd=repo_path)
full_message += log_messages.decode(sys.stdout.encoding)
full_message += '\n\n -- '
full_message += os.environ.get('DEBFULLNAME', 'NextGIS')
full_message += ' <'+os.environ.get('DEBEMAIL', 'info@nextgis.com')+'> '
dt = datetime.utcnow()
full_message += dt.strftime("%a, %d %b %Y %H:%M:%S +0000\n\n")
changelog_path = os.path.join(repo_path, 'debian', 'changelog')
with open(changelog_path, 'w') as cl:
cl.write(full_message)
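# A generated changelog stanza looks roughly like this (illustrative values):
#
#   mypackage (1.2.3+0) focal; urgency=medium
#
#    * abc1234 - Jane Doe : Fix crash on start
#
#    -- NextGIS <info@nextgis.com>  Mon, 01 Jan 2024 00:00:00 +0000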
def get_distro():
import configparser, itertools
config = configparser.ConfigParser()
distro_version = ''
distro_codename = ''
with open('/etc/os-release') as fp:
config.read_file(itertools.chain(['[global]'], fp))
distro_version = config.get('global', 'VERSION_ID').replace('"', '')
distro_codename = config.get('global', 'VERSION_CODENAME')
return distro_version, distro_codename
def get_package_version(file):
version = ''
with open(file) as f:
version = f.readline().rstrip()
if version is None or version == '':
sys.exit('Cannot find version')
return version
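# Editorial example invocations (ids and credentials are hypothetical):
#
#   python deb_util.py -op info -vf version.str -pn mypackage
#   python deb_util.py -op make_release -vf version.str -dp ./debs \
#       -pn mypackage --repo_id 42 --login user --password secret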
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Prepare debian package')
parser.add_argument('-vf', '--version_file', help='version.str path')
parser.add_argument('-op', '--operation', help='operation to process', choices=['info', 'changelog', 'tar', 'make_release', 'create_debian', 'add_repka_repo', 'add_deb_repo',], required=True)
parser.add_argument('-rp', '--repo_path', help='repository path')
parser.add_argument('-dp', '--deb_files_path', help='deb files path')
parser.add_argument('-pn', '--package_name', help='package name')
parser.add_argument('--repo_id', dest='repo_id', help='{} repository identifier'.format(repka_endpoint))
parser.add_argument('--repo_component', dest='repo_component', default='main', help='Repository component. Usually main, contrib and non-free')
parser.add_argument('--deb', dest='deb', help='deb line to add to sources.list')
parser.add_argument('--deb_key', dest='deb_key', help='deb key to check packages sign')
parser.add_argument('--deb_keyserver', dest='deb_keyserver', help='deb keyserver to verify sign')
parser.add_argument('--login', dest='login', help='login for {}'.format(repka_endpoint))
parser.add_argument('--password', dest='password', help='password for {}'.format(repka_endpoint))
args = parser.parse_args()
if args.operation == 'info':
distro_version, distro_codename = get_distro()
version = get_package_version(args.version_file)
print('Package: {}\nVersion: {}\nDistribution:\n * version - {}\n * codename - {}\n'.format(args.package_name, version, distro_version, distro_codename))
elif args.operation == 'changelog':
distro_version, distro_codename = get_distro()
version = get_package_version(args.version_file)
packet_id = get_packet_id(args.repo_id, args.package_name, args.login, args.password)
counter = get_release_counter(packet_id, version, distro_codename, args.login, args.password)
write_changelog(args.package_name, version, counter, distro_codename, args.repo_path)
elif args.operation == 'tar':
distro_version, distro_codename = get_distro()
version = get_package_version(args.version_file)
packet_id = get_packet_id(args.repo_id, args.package_name, args.login, args.password)
counter = get_release_counter(packet_id, version, distro_codename, args.login, args.password)
subprocess.call(["tar", '-caf', '{}_{}+{}.orig.tar.gz'.format(args.package, version, counter), args.repo_path, '--exclude-vcs'])
elif args.operation == 'make_release':
packet_id = get_packet_id(args.repo_id, args.package_name, args.login, args.password)
if packet_id == -1:
color_print('Packet {} not found in repository'.format(args.package_name), True, 'LRED')
exit(1)
deb_files = glob.glob(os.path.join(args.deb_files_path, '*.deb'))
uploaded_files = []
for deb_file in deb_files:
file_uid, file_name = upload_file(deb_file, args.login, args.password)
uploaded_files.append({"upload_name": file_uid, "name": file_name})
version = get_package_version(args.version_file)
distro_version, distro_codename = get_distro()
counter = get_release_counter(packet_id, version, distro_codename, args.login, args.password)
tag_name = '{}+{}'.format(version, counter)
create_release(packet_id, 'v{} [{}]'.format(tag_name, distro_codename), 'Version ' + tag_name + ' on ' + distro_codename, tag_name,
distro_codename, distro_version, args.repo_component,
uploaded_files, args.login, args.password)
elif args.operation == 'create_debian':
# 1 clone ppa
if not os.path.exists('ppa'):
subprocess.call(["git", 'clone', '--depth', '1', 'https://github.com/nextgis/ppa.git'])
# 2 copy debian into repo
ppa_path = os.path.join('ppa', args.package_name)
if not os.path.exists(ppa_path):
sys.exit('No debian directory in path {}'.format(ppa_path))
distro_version, distro_codename = get_distro()
ppa_dist_path = os.path.join(ppa_path, distro_codename)
out_path = os.path.join(args.repo_path, 'debian')
shutil.rmtree(out_path, True)
if not os.path.exists(ppa_dist_path):
ppa_dist_path = os.path.join(ppa_path, 'debian')
shutil.copytree(ppa_dist_path, out_path)
elif args.operation == 'add_repka_repo':
distro_version, distro_codename = get_distro()
# https://rm.nextgis.com/api/repo/11/deb stretch Release
url = repka_endpoint + '/api/repo/{}/deb/dists/{}/Release'.format(args.repo_id, distro_codename)
print('Check {}'.format(url))
try:
# 1. Check exists
request = Request(url)
request = add_auth(request, args.login, args.password)
response = urlopen(request)
# 2. Add repo
curl_user_key = ''
if args.login is not None and args.password is not None:
curl_user_key = '--user "{}:{}" '.format(args.login, args.password)
with open('/etc/apt/auth.conf.d/rm.conf', 'w') as repka_file:
repka_file.write("""machine {}
login {}
password {}""".format(repka_site, args.login, args.password))
subprocess.call(["/bin/sh", "-c", "echo deb {}/api/repo/{}/deb {} {} | tee -a /etc/apt/sources.list".format(repka_endpoint, args.repo_id, distro_codename, args.repo_component)])
subprocess.call(["/bin/sh", "-c", "curl {}-s -L {}/api/repo/{}/deb/key.gpg | apt-key add -".format(curl_user_key, repka_endpoint, args.repo_id)])
except:
print('Skip add repo: {}/api/repo/{}/deb {} {}'.format(repka_endpoint, args.repo_id, distro_codename, args.repo_component))
pass
subprocess.call(["apt", 'update'])
elif args.operation == 'add_deb_repo':
subprocess.call(["/bin/sh", "-c", "echo {} | tee -a /etc/apt/sources.list".format(args.deb)])
subprocess.call(["/bin/sh", "-c", "apt-key adv --keyserver {} --recv-keys {}".format(args.deb_keyserver, args.deb_key)])
subprocess.call(["apt", 'update'])
else:
sys.exit('Unsupported operation {}'.format(args.operation))
| gpl-2.0 |
iuliux/RegExTractor | utils.py | 1 | 2923 | import numpy as np
def long_substr(strgs):
"""
Returns a list with the longest common substring sequences from @strgs
Based on: http://stackoverflow.com/questions/2892931/longest-common-substring-from-more-than-two-strings-python
"""
# Copy the list
strgs = strgs[:]
if len(strgs) > 1 and len(strgs[0]) > 0:
substrs = []
substr = None
maxlen = 1
while True:
if substr is not None and len(substr) >= maxlen:
# A max-length seq
substrs.append(substr)
maxlen = len(substr)
for i, s in enumerate(strgs):
strgs[i] = s.replace(substr, '', 1)
elif substr is not None and len(substr) < maxlen:
# Not the first run, and no longer a longest seq
break
substr = ''
for i in range(len(strgs[0])):
for j in range(len(strgs[0]) - i + 1):
if j > len(substr) and all(strgs[0][i:i+j] in x for x in strgs):
substr = strgs[0][i:i+j]
return substrs
elif len(strgs) == 1:
return [strgs[0]] if len(strgs[0]) > 0 else []
else:
return []
def levenshtein(source, target):
""" Computes the Levenshtein distance between 2 strings """
if len(source) < len(target):
return levenshtein(target, source)
# So now we have len(source) >= len(target).
if len(target) == 0:
return len(source)
# We call tuple() to force strings to be used as sequences
# ('c', 'a', 't', 's') - numpy uses them as values by default.
source = np.array(tuple(source))
target = np.array(tuple(target))
# We use a dynamic programming algorithm, but with the
# added optimization that we only need the last two rows
# of the matrix.
previous_row = np.arange(target.size + 1)
for s in source:
# Insertion (target grows longer than source):
current_row = previous_row + 1
# Substitution or matching:
# Target and source items are aligned, and either
# are different (cost of 1), or are the same (cost of 0).
current_row[1:] = np.minimum(current_row[1:],
np.add(previous_row[:-1], target != s))
# Deletion (target grows shorter than source):
current_row[1:] = np.minimum(current_row[1:],
current_row[0:-1] + 1)
previous_row = current_row
return previous_row[-1]
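# Worked examples (editorial): long_substr(['abcde', 'xbcdy']) returns ['bcd'];
# levenshtein('kitten', 'sitting') returns 3 (two substitutions, one insertion).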
if __name__ == '__main__':
s1 = 'Oh, hello, my friend...'
s2 = 'I prefer Jelly Belly beans...'
s3 = 'When hell freezes... over!'
print long_substr([s1, s2, s3])
print long_substr(['0', 'a'])
print long_substr(['abba'])
print long_substr([''])
print long_substr([])
print levenshtein(s1, s2)
print levenshtein(s1, s3)
print levenshtein(s2, s3)
| mit |
keerts/home-assistant | homeassistant/components/sensor/scrape.py | 13 | 3266 | """
Support for getting data from websites with scraping.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.scrape/
"""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.components.sensor.rest import RestData
from homeassistant.const import (
CONF_NAME, CONF_RESOURCE, CONF_UNIT_OF_MEASUREMENT, STATE_UNKNOWN,
CONF_VALUE_TEMPLATE, CONF_VERIFY_SSL)
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['beautifulsoup4==4.5.3']
_LOGGER = logging.getLogger(__name__)
CONF_SELECT = 'select'
DEFAULT_NAME = 'Web scrape'
DEFAULT_VERIFY_SSL = True
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_RESOURCE): cv.string,
vol.Required(CONF_SELECT): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
})
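# Editorial example configuration for this platform (values are illustrative):
#
# sensor:
#   - platform: scrape
#     resource: https://www.example.com
#     select: ".current-version h1"
#     name: Example version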
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Web scrape sensor."""
name = config.get(CONF_NAME)
resource = config.get(CONF_RESOURCE)
method = 'GET'
payload = auth = headers = None
verify_ssl = config.get(CONF_VERIFY_SSL)
select = config.get(CONF_SELECT)
unit = config.get(CONF_UNIT_OF_MEASUREMENT)
value_template = config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = hass
rest = RestData(method, resource, auth, headers, payload, verify_ssl)
rest.update()
if rest.data is None:
_LOGGER.error("Unable to fetch data from %s", resource)
return False
add_devices([
ScrapeSensor(hass, rest, name, select, value_template, unit)
])
class ScrapeSensor(Entity):
"""Representation of a web scrape sensor."""
def __init__(self, hass, rest, name, select, value_template, unit):
"""Initialize a web scrape sensor."""
self.rest = rest
self._name = name
self._state = STATE_UNKNOWN
self._select = select
self._value_template = value_template
self._unit_of_measurement = unit
self.update()
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def state(self):
"""Return the state of the device."""
return self._state
def update(self):
"""Get the latest data from the source and updates the state."""
self.rest.update()
from bs4 import BeautifulSoup
raw_data = BeautifulSoup(self.rest.data, 'html.parser')
_LOGGER.debug(raw_data)
value = raw_data.select(self._select)[0].text
_LOGGER.debug(value)
if self._value_template is not None:
self._state = self._value_template.render_with_possible_json_value(
value, STATE_UNKNOWN)
else:
self._state = value
| apache-2.0 |
pepeportela/edx-platform | common/lib/xmodule/xmodule/modulestore/split_mongo/caching_descriptor_system.py | 32 | 16966 | import sys
import logging
from contracts import contract, new_contract
from fs.osfs import OSFS
from lazy import lazy
from xblock.runtime import KvsFieldData, KeyValueStore
from xblock.fields import ScopeIds
from xblock.core import XBlock
from opaque_keys.edx.locator import BlockUsageLocator, LocalId, CourseLocator, LibraryLocator, DefinitionLocator
from xmodule.library_tools import LibraryToolsService
from xmodule.mako_module import MakoDescriptorSystem
from xmodule.error_module import ErrorDescriptor
from xmodule.errortracker import exc_info_to_str
from xmodule.modulestore import BlockData
from xmodule.modulestore.edit_info import EditInfoRuntimeMixin
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.inheritance import inheriting_field_data, InheritanceMixin
from xmodule.modulestore.split_mongo import BlockKey, CourseEnvelope
from xmodule.modulestore.split_mongo.id_manager import SplitMongoIdManager
from xmodule.modulestore.split_mongo.definition_lazy_loader import DefinitionLazyLoader
from xmodule.modulestore.split_mongo.split_mongo_kvs import SplitMongoKVS
from xmodule.x_module import XModuleMixin
log = logging.getLogger(__name__)
new_contract('BlockUsageLocator', BlockUsageLocator)
new_contract('CourseLocator', CourseLocator)
new_contract('LibraryLocator', LibraryLocator)
new_contract('BlockKey', BlockKey)
new_contract('BlockData', BlockData)
new_contract('CourseEnvelope', CourseEnvelope)
new_contract('XBlock', XBlock)
class CachingDescriptorSystem(MakoDescriptorSystem, EditInfoRuntimeMixin):
"""
A system that caches a course version's JSON and uses it to load modules,
falling back to the underlying modulestore for additional data.
Computes the settings (nee 'metadata') inheritance upon creation.
"""
@contract(course_entry=CourseEnvelope)
def __init__(self, modulestore, course_entry, default_class, module_data, lazy, **kwargs):
"""
Computes the settings inheritance and sets up the cache.
modulestore: the module store that can be used to retrieve additional
modules
course_entry: the originally fetched enveloped course_structure w/ branch and course id info.
Callers to _load_item provide an override but that function ignores the provided structure and
only looks at the branch and course id
module_data: a dict mapping Location -> json that was cached from the
underlying modulestore
"""
# needed by capa_problem (as runtime.filestore via this.resources_fs)
if course_entry.course_key.course:
root = modulestore.fs_root / course_entry.course_key.org / course_entry.course_key.course / course_entry.course_key.run
else:
root = modulestore.fs_root / str(course_entry.structure['_id'])
root.makedirs_p() # create directory if it doesn't exist
id_manager = SplitMongoIdManager(self)
kwargs.setdefault('id_reader', id_manager)
kwargs.setdefault('id_generator', id_manager)
super(CachingDescriptorSystem, self).__init__(
field_data=None,
load_item=self._load_item,
resources_fs=OSFS(root),
**kwargs
)
self.modulestore = modulestore
self.course_entry = course_entry
# set course_id attribute to avoid problems with subsystems that expect
# it here. (grading, for example)
self.course_id = course_entry.course_key
self.lazy = lazy
self.module_data = module_data
self.default_class = default_class
self.local_modules = {}
self._services['library_tools'] = LibraryToolsService(modulestore)
@lazy
@contract(returns="dict(BlockKey: BlockKey)")
def _parent_map(self):
parent_map = {}
for block_key, block in self.course_entry.structure['blocks'].iteritems():
for child in block.fields.get('children', []):
parent_map[child] = block_key
return parent_map
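# Editorial note: the resulting map is child -> parent, e.g.
# {BlockKey('html', 'intro'): BlockKey('vertical', 'unit_1')} (keys here are
# illustrative).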
@contract(usage_key="BlockUsageLocator | BlockKey", course_entry_override="CourseEnvelope | None")
def _load_item(self, usage_key, course_entry_override=None, **kwargs):
"""
Instantiate the xblock fetching it either from the cache or from the structure
:param course_entry_override: the course_info with the course_key to use (defaults to cached)
"""
# usage_key is either a UsageKey or just the block_key. if a usage_key,
if isinstance(usage_key, BlockUsageLocator):
# trust the passed in key to know the caller's expectations of which fields are filled in.
# particularly useful for strip_keys so may go away when we're version aware
course_key = usage_key.course_key
if isinstance(usage_key.block_id, LocalId):
try:
return self.local_modules[usage_key]
except KeyError:
raise ItemNotFoundError
else:
block_key = BlockKey.from_usage_key(usage_key)
version_guid = self.course_entry.course_key.version_guid
else:
block_key = usage_key
course_info = course_entry_override or self.course_entry
course_key = course_info.course_key
version_guid = course_key.version_guid
# look in cache
cached_module = self.modulestore.get_cached_block(course_key, version_guid, block_key)
if cached_module:
return cached_module
block_data = self.get_module_data(block_key, course_key)
class_ = self.load_block_type(block_data.block_type)
block = self.xblock_from_json(class_, course_key, block_key, block_data, course_entry_override, **kwargs)
# TODO Once TNL-5092 is implemented, we can expose the course version
# information within the key identifier of the block. Until then, set
# the course_version as a field on the returned block so higher layers
# can use it when needed.
block.course_version = version_guid
self.modulestore.cache_block(course_key, version_guid, block_key, block)
return block
@contract(block_key=BlockKey, course_key="CourseLocator | LibraryLocator")
def get_module_data(self, block_key, course_key):
"""
Get block from module_data adding it to module_data if it's not already there but is in the structure
Raises:
ItemNotFoundError if block is not in the structure
"""
json_data = self.module_data.get(block_key)
if json_data is None:
# deeper than initial descendant fetch or doesn't exist
self.modulestore.cache_items(self, [block_key], course_key, lazy=self.lazy)
json_data = self.module_data.get(block_key)
if json_data is None:
raise ItemNotFoundError(block_key)
return json_data
# xblock's runtime does not always pass enough contextual information to figure out
# which named container (course x branch) or which parent is requesting an item. Because split allows
# a many:1 mapping from named containers to structures and because item's identities encode
# context as well as unique identity, this function must sometimes infer whether the access is
# within an unspecified named container. In most cases, course_entry_override will give the
# explicit context; however, runtime.get_block(), e.g., does not. HOWEVER, there are simple heuristics
# which will work 99.999% of the time: a runtime is thread & even context specific. The likelihood that
# the thread is working with more than one named container pointing to the same specific structure is
# low; thus, the course_entry is most likely correct. If the thread is looking at > 1 named container
# pointing to the same structure, the access is likely to be chunky enough that the last known container
# is the intended one when not given a course_entry_override; thus, the caching of the last branch/course id.
@contract(block_key="BlockKey | None")
def xblock_from_json(self, class_, course_key, block_key, block_data, course_entry_override=None, **kwargs):
"""
Load and return block info.
"""
if course_entry_override is None:
course_entry_override = self.course_entry
else:
# most recent retrieval is most likely the right one for next caller (see comment above fn)
self.course_entry = CourseEnvelope(course_entry_override.course_key, self.course_entry.structure)
definition_id = block_data.definition
# If no usage id is provided, generate an in-memory id
if block_key is None:
block_key = BlockKey(block_data.block_type, LocalId())
convert_fields = lambda field: self.modulestore.convert_references_to_keys(
course_key, class_, field, self.course_entry.structure['blocks'],
)
if definition_id is not None and not block_data.definition_loaded:
definition_loader = DefinitionLazyLoader(
self.modulestore,
course_key,
block_key.type,
definition_id,
convert_fields,
)
else:
definition_loader = None
# If no definition id is provided, generate an in-memory id
if definition_id is None:
definition_id = LocalId()
# Construct the Block Usage Locator:
block_locator = course_key.make_usage_key(
block_type=block_key.type,
block_id=block_key.id,
)
converted_fields = convert_fields(block_data.fields)
converted_defaults = convert_fields(block_data.defaults)
if block_key in self._parent_map:
parent_key = self._parent_map[block_key]
parent = course_key.make_usage_key(parent_key.type, parent_key.id)
else:
parent = None
aside_fields = None
# for the situation if block_data has no asides attribute
# (in case it was taken from memcache)
try:
if block_data.asides:
aside_fields = {block_key.type: {}}
for aside in block_data.asides:
aside_fields[block_key.type].update(aside['fields'])
except AttributeError:
pass
try:
kvs = SplitMongoKVS(
definition_loader,
converted_fields,
converted_defaults,
parent=parent,
aside_fields=aside_fields,
field_decorator=kwargs.get('field_decorator')
)
if InheritanceMixin in self.modulestore.xblock_mixins:
field_data = inheriting_field_data(kvs)
else:
field_data = KvsFieldData(kvs)
module = self.construct_xblock_from_class(
class_,
ScopeIds(None, block_key.type, definition_id, block_locator),
field_data,
for_parent=kwargs.get('for_parent')
)
except Exception: # pylint: disable=broad-except
log.warning("Failed to load descriptor", exc_info=True)
return ErrorDescriptor.from_json(
block_data,
self,
course_entry_override.course_key.make_usage_key(
block_type='error',
block_id=block_key.id
),
error_msg=exc_info_to_str(sys.exc_info())
)
edit_info = block_data.edit_info
module._edited_by = edit_info.edited_by # pylint: disable=protected-access
module._edited_on = edit_info.edited_on # pylint: disable=protected-access
module.previous_version = edit_info.previous_version
module.update_version = edit_info.update_version
module.source_version = edit_info.source_version
module.definition_locator = DefinitionLocator(block_key.type, definition_id)
for wrapper in self.modulestore.xblock_field_data_wrappers:
module._field_data = wrapper(module, module._field_data) # pylint: disable=protected-access
# decache any pending field settings
module.save()
# If this is an in-memory block, store it in this system
if isinstance(block_locator.block_id, LocalId):
self.local_modules[block_locator] = module
return module
def get_edited_by(self, xblock):
"""
See :meth: cms.lib.xblock.runtime.EditInfoRuntimeMixin.get_edited_by
"""
return xblock._edited_by
def get_edited_on(self, xblock):
"""
See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
"""
return xblock._edited_on
@contract(xblock='XBlock')
def get_subtree_edited_by(self, xblock):
"""
See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
"""
# pylint: disable=protected-access
if not hasattr(xblock, '_subtree_edited_by'):
block_data = self.module_data[BlockKey.from_usage_key(xblock.location)]
if block_data.edit_info._subtree_edited_by is None:
self._compute_subtree_edited_internal(
block_data, xblock.location.course_key
)
xblock._subtree_edited_by = block_data.edit_info._subtree_edited_by
return xblock._subtree_edited_by
@contract(xblock='XBlock')
def get_subtree_edited_on(self, xblock):
"""
See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
"""
# pylint: disable=protected-access
if not hasattr(xblock, '_subtree_edited_on'):
block_data = self.module_data[BlockKey.from_usage_key(xblock.location)]
if block_data.edit_info._subtree_edited_on is None:
self._compute_subtree_edited_internal(
block_data, xblock.location.course_key
)
xblock._subtree_edited_on = block_data.edit_info._subtree_edited_on
return xblock._subtree_edited_on
def get_published_by(self, xblock):
"""
See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
"""
if not hasattr(xblock, '_published_by'):
self.modulestore.compute_published_info_internal(xblock)
return getattr(xblock, '_published_by', None)
def get_published_on(self, xblock):
"""
See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
"""
if not hasattr(xblock, '_published_on'):
self.modulestore.compute_published_info_internal(xblock)
return getattr(xblock, '_published_on', None)
@contract(block_data='BlockData')
def _compute_subtree_edited_internal(self, block_data, course_key):
"""
Recurse the subtree finding the max edited_on date and its corresponding edited_by. Cache it.
"""
# pylint: disable=protected-access
max_date = block_data.edit_info.edited_on
max_date_by = block_data.edit_info.edited_by
for child in block_data.fields.get('children', []):
child_data = self.get_module_data(BlockKey(*child), course_key)
if block_data.edit_info._subtree_edited_on is None:
self._compute_subtree_edited_internal(child_data, course_key)
if child_data.edit_info._subtree_edited_on > max_date:
max_date = child_data.edit_info._subtree_edited_on
max_date_by = child_data.edit_info._subtree_edited_by
block_data.edit_info._subtree_edited_on = max_date
block_data.edit_info._subtree_edited_by = max_date_by
def get_aside_of_type(self, block, aside_type):
"""
See `runtime.Runtime.get_aside_of_type`
This override adds the field data from the block to the aside
"""
asides_cached = block.get_asides() if isinstance(block, XModuleMixin) else None
if asides_cached:
for aside in asides_cached:
if aside.scope_ids.block_type == aside_type:
return aside
new_aside = super(CachingDescriptorSystem, self).get_aside_of_type(block, aside_type)
new_aside._field_data = block._field_data # pylint: disable=protected-access
for key, _ in new_aside.fields.iteritems():
if isinstance(key, KeyValueStore.Key) and block._field_data.has(new_aside, key): # pylint: disable=protected-access
try:
value = block._field_data.get(new_aside, key) # pylint: disable=protected-access
except KeyError:
pass
else:
setattr(new_aside, key, value)
block.add_aside(new_aside)
return new_aside
| agpl-3.0 |
rex-xxx/mt6572_x201 | external/chromium/testing/gtest/test/gtest_xml_output_unittest.py | 397 | 11279 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import errno
import os
import sys
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_FLAG = "--gtest_output"
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_"
SUPPORTS_STACK_TRACES = False
if SUPPORTS_STACK_TRACES:
STACK_TRACE_TEMPLATE = "\nStack trace:\n*"
else:
STACK_TRACE_TEMPLATE = ""
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="23" failures="4" disabled="2" errors="0" time="*" name="AllTests">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="Fails" status="run" time="*" classname="FailedTest">
<failure message="Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" time="*" classname="MixedResultTest">
<failure message="Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
<failure message="Value of: 3
Expected: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest">
<failure message="Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest">
<failure message="Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
<testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/>
<testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/>
<testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
<testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="HasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
</testsuite>
<testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/0" />
</testsuite>
<testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/1" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/0" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/1" />
</testsuite>
</testsuites>""" % {'stack': STACK_TRACE_TEMPLATE}
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*" name="AllTests">
</testsuites>"""
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
"""
Unit test for Google Test's XML output functionality.
"""
def testNonEmptyXmlOutput(self):
"""
Runs a test program that generates a non-empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)
def testEmptyXmlOutput(self):
"""
Runs a test program that generates an empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput("gtest_no_test_unittest",
EXPECTED_EMPTY_XML, 0)
def testDefaultOutputFile(self):
"""
Confirms that Google Test produces an XML output file with the expected
default name if no name is explicitly specified.
"""
output_file = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_DEFAULT_OUTPUT_FILE)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
"gtest_no_test_unittest")
try:
os.remove(output_file)
except OSError, e:
if e.errno != errno.ENOENT:
raise
p = gtest_test_utils.Subprocess(
[gtest_prog_path, "%s=xml" % GTEST_OUTPUT_FLAG],
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
self.assert_(os.path.isfile(output_file))
def testSuppressedXmlOutput(self):
"""
Tests that no XML file is generated if the default XML listener is
shut down before RUN_ALL_TESTS is invoked.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_PROGRAM_NAME + "out.xml")
if os.path.isfile(xml_path):
os.remove(xml_path)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)
command = [gtest_prog_path,
"%s=xml:%s" % (GTEST_OUTPUT_FLAG, xml_path),
"--shut_down_xml"]
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
self.assert_(False,
"%s was killed by signal %d" % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(1, p.exit_code,
"'%s' exited with code %s, which doesn't match "
"the expected exit code %s."
% (command, p.exit_code, 1))
self.assert_(not os.path.isfile(xml_path))
def _TestXmlOutput(self, gtest_prog_name, expected_xml, expected_exit_code):
"""
Asserts that the XML document generated by running the program
gtest_prog_name matches expected_xml, a string containing another
XML document. Furthermore, the program's exit code must be
expected_exit_code.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
gtest_prog_name + "out.xml")
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
command = [gtest_prog_path, "%s=xml:%s" % (GTEST_OUTPUT_FLAG, xml_path)]
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
self.assert_(False,
"%s was killed by signal %d" % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(expected_exit_code, p.exit_code,
"'%s' exited with code %s, which doesn't match "
"the expected exit code %s."
% (command, p.exit_code, expected_exit_code))
expected = minidom.parseString(expected_xml)
actual = minidom.parse(xml_path)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == '__main__':
os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
gtest_test_utils.Main()
| gpl-2.0 |
leiferikb/bitpop | src/tools/symsrc/pdb_fingerprint_from_img.py | 179 | 2017 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This will retrieve a PDBs "fingerprint" from it's corresponding executable
image (.dll or .exe). This is used when retrieving the PDB from the symbol
server. The .pdb (or cab compressed .pd_) is expected at a path like:
foo.pdb/FINGERPRINT/foo.pdb
We can retrieve the same information from the .PDB file itself, but this file
format is much more difficult and undocumented. Instead, we can look at the
DLL's reference to the PDB, and use that to retrieve the information."""
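# Illustrative sketch, not part of the original tool (the server URL below is
# a made-up example): the values returned by GetPDBInfoFromImg() slot into
# the symbol-server path described above like so:
#
#   fingerprint, pdb_name = GetPDBInfoFromImg('foo.dll')
#   url = 'http://symbols.example.com/%s/%s/%s' % (pdb_name, fingerprint, pdb_name)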
import sys
import pefile
__CV_INFO_PDB70_format__ = ('CV_INFO_PDB70',
('4s,CvSignature', '16s,Signature', 'L,Age'))
__GUID_format__ = ('GUID',
('L,Data1', 'H,Data2', 'H,Data3', '8s,Data4'))
def GetPDBInfoFromImg(filename):
"""Returns the PDB fingerprint and the pdb filename given an image file"""
pe = pefile.PE(filename)
for dbg in pe.DIRECTORY_ENTRY_DEBUG:
if dbg.struct.Type == 2: # IMAGE_DEBUG_TYPE_CODEVIEW
off = dbg.struct.AddressOfRawData
size = dbg.struct.SizeOfData
data = pe.get_memory_mapped_image()[off:off+size]
cv = pefile.Structure(__CV_INFO_PDB70_format__)
cv.__unpack__(data)
cv.PdbFileName = data[cv.sizeof():]
guid = pefile.Structure(__GUID_format__)
guid.__unpack__(cv.Signature)
guid.Data4_0 = ''.join("%02X" % ord(x) for x in guid.Data4[0:2])
guid.Data4_1 = ''.join("%02X" % ord(x) for x in guid.Data4[2:])
return ("%08X%04X%04X%s%s%d" % (
guid.Data1, guid.Data2, guid.Data3,
guid.Data4_0, guid.Data4_1, cv.Age),
cv.PdbFileName.split('\x00', 1)[0])
break
def main():
if len(sys.argv) != 2:
print "usage: file.dll"
return 1
(fingerprint, filename) = GetPDBInfoFromImg(sys.argv[1])
print "%s %s" % (fingerprint, filename)
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
harukaeru/django-nose | django_nose/plugin.py | 8 | 10545 | # coding: utf-8
"""Included django-nose plugins."""
from __future__ import unicode_literals
import sys
from nose.plugins.base import Plugin
from nose.suite import ContextSuite
from django.test.testcases import TransactionTestCase, TestCase
from django_nose.testcases import FastFixtureTestCase
from django_nose.utils import process_tests, is_subclass_at_all
class AlwaysOnPlugin(Plugin):
"""A base plugin that takes no options and is always enabled."""
def options(self, parser, env):
"""Avoid adding a ``--with`` option for this plugin.
We don't have any options, and this plugin is always enabled, so we
don't want to use superclass's ``options()`` method which would add a
``--with-*`` option.
"""
def configure(self, *args, **kw_args):
"""Configure and enable this plugin."""
super(AlwaysOnPlugin, self).configure(*args, **kw_args)
self.enabled = True
class ResultPlugin(AlwaysOnPlugin):
"""Captures the TestResult object for later inspection.
nose doesn't return the full test result object from any of its runner
methods. Pass an instance of this plugin to the TestProgram and use
``result`` after running the tests to get the TestResult object.
"""
name = 'result'
def finalize(self, result):
"""Finalize test run by capturing the result."""
self.result = result
class DjangoSetUpPlugin(AlwaysOnPlugin):
"""Configures Django to set up and tear down the environment.
This allows coverage to report on all code imported and used during the
initialization of the test runner.
"""
name = 'django setup'
score = 150
def __init__(self, runner):
"""Initialize the plugin with the test runner."""
super(DjangoSetUpPlugin, self).__init__()
self.runner = runner
self.sys_stdout = sys.stdout
def prepareTest(self, test):
"""Create the Django DB and model tables, and do other setup.
This isn't done in begin() because that's too early--the DB has to be
set up *after* the tests are imported so the model registry contains
models defined in tests.py modules. Models are registered at
declaration time by their metaclass.
prepareTestRunner() might also have been a sane choice, except that, if
some plugin returns something from it, none of the other ones get
called. I'd rather not dink with scores if I don't have to.
"""
# Swap in the real stdout saved at plugin init (before nose's capture
# plugin replaced it) so database-setup progress messages reach the
# console, then restore the captured stream afterwards.
sys_stdout = sys.stdout
sys.stdout = self.sys_stdout
self.runner.setup_test_environment()
self.old_names = self.runner.setup_databases()
sys.stdout = sys_stdout
def finalize(self, result):
"""Finalize test run by cleaning up databases and environment."""
self.runner.teardown_databases(self.old_names)
self.runner.teardown_test_environment()
class Bucketer(object):
"""Collect tests into buckets with similar setup requirements."""
def __init__(self):
"""Initialize the test buckets."""
# { (frozenset(['users.json']), True):
# [ContextSuite(...), ContextSuite(...)] }
self.buckets = {}
# All the non-FastFixtureTestCase tests we saw, in the order they came
# in:
self.remainder = []
def add(self, test):
"""Add test into an initialization bucket.
Tests are bucketed according to its set of fixtures and the
value of its exempt_from_fixture_bundling attr.
"""
if is_subclass_at_all(test.context, FastFixtureTestCase):
# We bucket even FFTCs that don't have any fixtures, but it
# shouldn't matter.
key = (frozenset(getattr(test.context, 'fixtures', [])),
getattr(test.context,
'exempt_from_fixture_bundling',
False))
self.buckets.setdefault(key, []).append(test)
else:
self.remainder.append(test)
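# Hypothetical illustration of the resulting structure: after add()-ing three
# FastFixtureTestCase suites, self.buckets might hold
#   {(frozenset(['users.json']), False): [suite_a, suite_b],
#    (frozenset([]), False): [suite_c]}
# so suites sharing a fixture set (and not exempt from bundling) can run
# back to back and load those fixtures only once; everything else stays in
# self.remainder in arrival order.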
class TestReorderer(AlwaysOnPlugin):
"""Reorder tests for various reasons."""
name = 'django-nose-test-reorderer'
def options(self, parser, env):
"""Add --with-fixture-bundling to options."""
super(TestReorderer, self).options(parser, env) # pointless
parser.add_option('--with-fixture-bundling',
action='store_true',
dest='with_fixture_bundling',
default=env.get('NOSE_WITH_FIXTURE_BUNDLING', False),
help='Load a unique set of fixtures only once, even '
'across test classes. '
'[NOSE_WITH_FIXTURE_BUNDLING]')
def configure(self, options, conf):
"""Configure plugin, reading the with_fixture_bundling option."""
super(TestReorderer, self).configure(options, conf)
self.should_bundle = options.with_fixture_bundling
def _put_transaction_test_cases_last(self, test):
"""Reorder test suite so TransactionTestCase-based tests come last.
Django has a weird design decision wherein TransactionTestCase doesn't
clean up after itself. Instead, it resets the DB to a clean state only
at the *beginning* of each test:
https://docs.djangoproject.com/en/dev/topics/testing/?from=olddocs#django.test.TransactionTestCase.
Thus, Django reorders tests so TransactionTestCases all come last. Here
we do the same.
"I think it's historical. We used to have doctests also, adding cleanup
after each unit test wouldn't necessarily clean up after doctests, so
you'd have to clean on entry to a test anyway." was once uttered on
#django-dev.
"""
def filthiness(test):
"""Return a score of how messy a test leaves the environment.
Django's TransactionTestCase doesn't clean up the DB on teardown,
but it's hard to guess whether subclasses (other than TestCase) do.
We will assume they don't, unless they have a
``cleans_up_after_itself`` attr set to True. This is reasonable
because the odd behavior of TransactionTestCase is documented, so
subclasses should by default be assumed to preserve it.
Thus, things will get these comparands (and run in this order):
* 1: TestCase subclasses. These clean up after themselves.
* 1: TransactionTestCase subclasses with
cleans_up_after_itself=True. These include
FastFixtureTestCases. If you're using the
FixtureBundlingPlugin, it will pull the FFTCs out, reorder
them, and run them first of all.
* 2: TransactionTestCase subclasses. These leave a mess.
* 2: Anything else (including doctests, I hope). These don't care
about the mess you left, because they don't hit the DB or, if
they do, are responsible for ensuring that it's clean (as per
https://docs.djangoproject.com/en/dev/topics/testing/?from=
olddocs#writing-doctests)
"""
test_class = test.context
if (is_subclass_at_all(test_class, TestCase) or
(is_subclass_at_all(test_class, TransactionTestCase) and
getattr(test_class, 'cleans_up_after_itself', False))):
return 1
return 2
flattened = []
process_tests(test, flattened.append)
flattened.sort(key=filthiness)
return ContextSuite(flattened)
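# Net effect (illustrative): [TransactionTC_a, TestCase_b, FFTC_c] becomes
# [TestCase_b, FFTC_c, TransactionTC_a] -- the sort is stable, so relative
# order is preserved within each cleanliness class.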
def _bundle_fixtures(self, test):
"""Reorder tests to minimize fixture loading.
I reorder FastFixtureTestCases so ones using identical sets
of fixtures run adjacently. I then put attributes on them
to advise them to not reload the fixtures for each class.
This takes support.mozilla.com's suite from 123s down to 94s.
FastFixtureTestCases are the only ones we care about, because
nobody else, in practice, pays attention to the ``_fb`` advisory
bits. We return those first, then any remaining tests in the
order they were received.
"""
def suite_sorted_by_fixtures(suite):
"""Flatten and sort a tree of Suites by fixture.
Add ``_fb_should_setup_fixtures`` and
``_fb_should_teardown_fixtures`` attrs to each test class to advise
it whether to set up or tear down (respectively) the fixtures.
Return a Suite.
"""
bucketer = Bucketer()
process_tests(suite, bucketer.add)
# Lay the bundles of common-fixture-having test classes end to end
# in a single list so we can make a test suite out of them:
flattened = []
for (key, fixture_bundle) in bucketer.buckets.items():
fixtures, is_exempt = key
# Advise first and last test classes in each bundle to set up
# and tear down fixtures and the rest not to:
if fixtures and not is_exempt:
# Ones with fixtures are sure to be classes, which means
# they're sure to be ContextSuites with contexts.
# First class with this set of fixtures sets up:
first = fixture_bundle[0].context
first._fb_should_setup_fixtures = True
# Set all classes' 1..n should_setup to False:
for cls in fixture_bundle[1:]:
cls.context._fb_should_setup_fixtures = False
# Last class tears down:
last = fixture_bundle[-1].context
last._fb_should_teardown_fixtures = True
# Set all classes' 0..(n-1) should_teardown to False:
for cls in fixture_bundle[:-1]:
cls.context._fb_should_teardown_fixtures = False
flattened.extend(fixture_bundle)
flattened.extend(bucketer.remainder)
return ContextSuite(flattened)
return suite_sorted_by_fixtures(test)
def prepareTest(self, test):
"""Reorder the tests."""
test = self._put_transaction_test_cases_last(test)
if self.should_bundle:
test = self._bundle_fixtures(test)
return test
| bsd-3-clause |
emersonsoftware/ansiblefork | test/units/executor/test_task_result.py | 104 | 5583 | # (c) 2016, James Cammarata <jimi@sngx.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.executor.task_result import TaskResult
class TestTaskResult(unittest.TestCase):
def test_task_result_basic(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test loading a result with a dict
tr = TaskResult(mock_host, mock_task, dict())
# test loading a result with a JSON string
with patch('ansible.parsing.dataloader.DataLoader.load') as p:
tr = TaskResult(mock_host, mock_task, '{}')
def test_task_result_is_changed(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test with no changed in result
tr = TaskResult(mock_host, mock_task, dict())
self.assertFalse(tr.is_changed())
# test with changed in the result
tr = TaskResult(mock_host, mock_task, dict(changed=True))
self.assertTrue(tr.is_changed())
# test with multiple results but none changed
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]))
self.assertFalse(tr.is_changed())
# test with multiple results and one changed
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(changed=False), dict(changed=True), dict(some_key=False)]))
self.assertTrue(tr.is_changed())
def test_task_result_is_skipped(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test with no skipped in result
tr = TaskResult(mock_host, mock_task, dict())
self.assertFalse(tr.is_skipped())
# test with skipped in the result
tr = TaskResult(mock_host, mock_task, dict(skipped=True))
self.assertTrue(tr.is_skipped())
# test with multiple results but none skipped
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]))
self.assertFalse(tr.is_skipped())
# test with multiple results and one skipped
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(skipped=False), dict(skipped=True), dict(some_key=False)]))
self.assertFalse(tr.is_skipped())
# test with multiple results and all skipped
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(skipped=True), dict(skipped=True), dict(skipped=True)]))
self.assertTrue(tr.is_skipped())
# test with multiple squashed results (list of strings)
# first with the main result having skipped=False
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=["a", "b", "c"], skipped=False))
self.assertFalse(tr.is_skipped())
# then with the main result having skipped=True
tr = TaskResult(mock_host, mock_task, dict(results=["a", "b", "c"], skipped=True))
self.assertTrue(tr.is_skipped())
def test_task_result_is_unreachable(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test with no unreachable in result
tr = TaskResult(mock_host, mock_task, dict())
self.assertFalse(tr.is_unreachable())
# test with unreachable in the result
tr = TaskResult(mock_host, mock_task, dict(unreachable=True))
self.assertTrue(tr.is_unreachable())
# test with multiple results but none unreachable
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]))
self.assertFalse(tr.is_unreachable())
# test with multiple results and one unreachable
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(unreachable=False), dict(unreachable=True), dict(some_key=False)]))
self.assertTrue(tr.is_unreachable())
def test_task_result_is_failed(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test with no failed in result
tr = TaskResult(mock_host, mock_task, dict())
self.assertFalse(tr.is_failed())
# test failed result with rc values
tr = TaskResult(mock_host, mock_task, dict(rc=0))
self.assertFalse(tr.is_failed())
tr = TaskResult(mock_host, mock_task, dict(rc=1))
self.assertTrue(tr.is_failed())
# test with failed in result
tr = TaskResult(mock_host, mock_task, dict(failed=True))
self.assertTrue(tr.is_failed())
# test with failed_when in result
tr = TaskResult(mock_host, mock_task, dict(failed_when_result=True))
self.assertTrue(tr.is_failed())
| gpl-3.0 |
jdemon519/cfme_tests | cfme/tests/cloud/test_cloud_init_provisioning.py | 1 | 3191 | # -*- coding: utf-8 -*-
# These tests don't work at the moment, due to the security_groups multi select not working
# in selenium (the group is selected then immediately reset)
import fauxfactory
import pytest
from cfme.cloud.instance import Instance
from cfme.cloud.provider import CloudProvider
from cfme.infrastructure.pxe import get_template_from_config
from utils import testgen, ssh
from utils.log import logger
from utils.wait import wait_for
pytestmark = [pytest.mark.meta(server_roles="+automate")]
pytest_generate_tests = testgen.generate(
[CloudProvider], required_fields=[
['provisioning', 'ci-template'],
['provisioning', 'ci-username'],
['provisioning', 'ci-pass'],
['provisioning', 'image']],
scope="module")
@pytest.fixture(scope="module")
def setup_ci_template(provider):
cloud_init_template_name = provider.data['provisioning']['ci-template']
cloud_init_template = get_template_from_config(cloud_init_template_name)
if not cloud_init_template.exists():
cloud_init_template.create()
@pytest.fixture(scope="function")
def vm_name(request):
vm_name = 'test_image_prov_{}'.format(fauxfactory.gen_alphanumeric())
return vm_name
@pytest.mark.tier(3)
def test_provision_cloud_init(request, setup_provider, provider, provisioning,
setup_ci_template, vm_name):
""" Tests provisioning from a template with cloud_init
Metadata:
test_flag: cloud_init, provision
"""
image = provisioning.get('ci-image', None) or provisioning['image']['name']
note = ('Testing provisioning from image {} to vm {} on provider {}'.format(
image, vm_name, provider.key))
logger.info(note)
mgmt_system = provider.mgmt
instance = Instance.factory(vm_name, provider, image)
request.addfinalizer(instance.delete_from_provider)
inst_args = {
'email': 'image_provisioner@example.com',
'first_name': 'Image',
'last_name': 'Provisioner',
'notes': note,
'instance_type': provisioning['instance_type'],
'availability_zone': provisioning['availability_zone'],
'security_groups': [provisioning['security_group']],
'guest_keypair': provisioning['guest_keypair'],
'custom_template': {'name': [provisioning['ci-template']]},
}
if provider.type == "openstack":
floating_ip = mgmt_system.get_first_floating_ip()
inst_args['cloud_network'] = provisioning['cloud_network']
inst_args['public_ip_address'] = floating_ip
logger.info('Instance args: {}'.format(inst_args))
instance.create(**inst_args)
connect_ip, tc = wait_for(mgmt_system.get_ip_address, [vm_name], num_sec=300,
handle_exception=True)
# Check that we can at least get the uptime via ssh this should only be possible
# if the username and password have been set via the cloud-init script so
# is a valid check
sshclient = ssh.SSHClient(hostname=connect_ip, username=provisioning['ci-username'],
password=provisioning['ci-pass'])
wait_for(sshclient.uptime, num_sec=200, handle_exception=True)
| gpl-2.0 |
edf-hpc/jube | jube2/result.py | 1 | 6706 | # JUBE Benchmarking Environment
# Copyright (C) 2008-2015
# Forschungszentrum Juelich GmbH, Juelich Supercomputing Centre
# http://www.fz-juelich.de/jsc/jube
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Resulttype definition"""
from __future__ import (print_function,
unicode_literals,
division)
import jube2.util
import xml.etree.ElementTree as ET
import re
import jube2.log
LOGGER = jube2.log.get_logger(__name__)
class Result(object):
"""A generic result type"""
class ResultData(object):
"""A gerneric result data type"""
def __init__(self, name):
self._name = name
@property
def name(self):
"""Return the result name"""
return self._name
def create_result(self, show=True, filename=None, **kwargs):
"""Create result output"""
raise NotImplementedError("")
def add_result_data(self, result_data):
"""Add additional result data"""
raise NotImplementedError("")
def __eq__(self, other):
return self.name == other.name
def __init__(self, name):
self._use = set()
self._name = name
self._result_dir = None
self._benchmark = None
@property
def name(self):
"""Return the result name"""
return self._name
@property
def benchmark(self):
"""Return the benchmark"""
return self._benchmark
@property
def result_dir(self):
"""Return the result_dir"""
return self._result_dir
@result_dir.setter
def result_dir(self, result_dir):
"""Set the result_dir"""
self._result_dir = result_dir
@benchmark.setter
def benchmark(self, benchmark):
"""Set the benchmark"""
self._benchmark = benchmark
def add_uses(self, use_names):
"""Add an addtional analyser name"""
for use_name in use_names:
if use_name in self._use:
raise ValueError(("Can't use element \"{0}\" two times")
.format(use_name))
self._use.add(use_name)
def create_result_data(self):
"""Create result representation"""
raise NotImplementedError("")
def _analyse_data(self):
"""Load analyse data out of given analysers"""
for analyser_name in self._use:
analyser = self._benchmark.analyser[analyser_name]
analyse = analyser.analyse_result
# Ignore empty analyse results
if analyse is None:
LOGGER.warning(("No data found for analyser \"{0}\" "
"in benchmark run {1}. "
"Run analyse step first please.")
.format(analyser_name, self._benchmark.id))
continue
for stepname in analyse:
for wp_id in analyse[stepname]:
workpackage = None
for wp_tmp in self._benchmark.workpackages[stepname]:
if wp_tmp.id == wp_id:
workpackage = wp_tmp
break
# Read workpackage history parameterset
parameterset = workpackage.add_jube_parameter(
workpackage.history.copy())
parameter_dict = dict()
for par in parameterset:
parameter_dict[par.name] = \
jube2.util.convert_type(par.parameter_type,
par.value, stop=False)
analyse_dict = analyse[stepname][wp_id]
analyse_dict.update(parameter_dict)
# Add jube additional information
analyse_dict.update({
"jube_res_analyser": analyser_name,
})
yield analyse_dict
def _load_units(self, pattern_names):
"""Load units"""
units = dict()
alt_pattern_names = list(pattern_names)
for i, pattern_name in enumerate(alt_pattern_names):
for option in ["last", "min", "max", "avg", "sum", "std"]:
matcher = re.match("^(.+)_{0}$".format(option), pattern_name)
if matcher:
alt_pattern_names[i] = matcher.group(1)
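# e.g. a statistic-suffixed name like "runtime_avg" is reduced to "runtime",
# so the unit declared on the base pattern is reused for all of its
# last/min/max/avg/sum/std variants.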
for analyser_name in self._use:
if analyser_name not in self._benchmark.analyser:
raise RuntimeError(
"<analyser name=\"{0}\"> not found".format(analyser_name))
patternset_names = \
self._benchmark.analyser[analyser_name].use.copy()
for analyse_files in \
self._benchmark.analyser[analyser_name].analyser.values():
for analyse_file in analyse_files:
for use in analyse_file.use:
patternset_names.add(use)
for patternset_name in patternset_names:
patternset = self._benchmark.patternsets[patternset_name]
for i, pattern_name in enumerate(pattern_names):
alt_pattern_name = alt_pattern_names[i]
if (pattern_name in patternset) or \
(alt_pattern_name in patternset):
pattern = patternset[pattern_name]
if pattern is None:
pattern = patternset[alt_pattern_name]
if (pattern.unit is not None) and (pattern.unit != ""):
units[pattern_name] = pattern.unit
return units
def etree_repr(self):
"""Return etree object representation"""
result_etree = ET.Element("result")
if self._result_dir is not None:
result_etree.attrib["result_dir"] = self._result_dir
for use in self._use:
use_etree = ET.SubElement(result_etree, "use")
use_etree.text = use
return result_etree
| gpl-3.0 |
leonardocsantoss/ehventos | lib/reportlab/lib/PyFontify.py | 10 | 5012 | #Copyright ReportLab Europe Ltd. 2000-2008
#see license.txt for license details
__version__=''' $Id: PyFontify.py 3660 2010-02-08 18:17:33Z damian $ '''
__doc__="""
Module to analyze Python source code; for syntax coloring tools.
Interface::
tags = fontify(pytext, searchfrom, searchto)
- The 'pytext' argument is a string containing Python source code.
- The (optional) arguments 'searchfrom' and 'searchto' may contain a slice in pytext.
- The returned value is a list of tuples, formatted like this::
[('keyword', 0, 6, None), ('keyword', 11, 17, None), ('comment', 23, 53, None), etc. ]
- The tuple contents are always like this::
(tag, startindex, endindex, sublist)
- tag is one of 'keyword', 'string', 'comment' or 'identifier'
- sublist is not used, hence always None.
"""
# Based on FontText.py by Mitchell S. Chapman,
# which was modified by Zachary Roadhouse,
# then un-Tk'd by Just van Rossum.
# Many thanks for regular expression debugging & authoring are due to:
# Tim (the-incredib-ly y'rs) Peters and Cristian Tismer
# So, who owns the copyright? ;-) How about this:
# Copyright 1996-2001:
# Mitchell S. Chapman,
# Zachary Roadhouse,
# Tim Peters,
# Just van Rossum
__version__ = "0.4"
import re
# First a little helper, since I don't like to repeat things. (Tismer speaking)
def replace(src, sep, rep):
return rep.join(src.split(sep))
# This list of keywords is taken from ref/node13.html of the
# Python 1.3 HTML documentation. ("access" is intentionally omitted.)
keywordsList = [
"as", "assert", "exec",
"del", "from", "lambda", "return",
"and", "elif", "global", "not", "try",
"break", "else", "if", "or", "while",
"class", "except", "import", "pass",
"continue", "finally", "in", "print",
"def", "for", "is", "raise", "yield"]
# Build up a regular expression which will match anything
# interesting, including multi-line triple-quoted strings.
commentPat = r"#[^\n]*"
pat = r"q[^\\q\n]*(\\[\000-\377][^\\q\n]*)*q"
quotePat = replace(pat, "q", "'") + "|" + replace(pat, 'q', '"')
# Way to go, Tim!
pat = r"""
qqq
[^\\q]*
(
( \\[\000-\377]
| q
( \\[\000-\377]
| [^\\q]
| q
( \\[\000-\377]
| [^\\q]
)
)
)
[^\\q]*
)*
qqq
"""
pat = ''.join(pat.split()) # get rid of whitespace
tripleQuotePat = replace(pat, "q", "'") + "|" + replace(pat, 'q', '"')
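# After the whitespace stripping above, tripleQuotePat matches a complete
# triple-quoted string of either quote style, allowing backslash escapes and
# embedded lone quotes inside the body.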
# Build up a regular expression which matches all and only
# Python keywords. This will let us skip the uninteresting
# identifier references.
# nonKeyPat identifies characters which may legally precede
# a keyword pattern.
nonKeyPat = r"(^|[^a-zA-Z0-9_.\"'])"
keyPat = nonKeyPat + "(" + "|".join(keywordsList) + ")" + nonKeyPat
matchPat = commentPat + "|" + keyPat + "|" + tripleQuotePat + "|" + quotePat
matchRE = re.compile(matchPat)
idKeyPat = "[ \t]*[A-Za-z_][A-Za-z_0-9.]*" # Ident w. leading whitespace.
idRE = re.compile(idKeyPat)
def fontify(pytext, searchfrom = 0, searchto = None):
if searchto is None:
searchto = len(pytext)
# Cache a few attributes for quicker reference.
search = matchRE.search
idSearch = idRE.search
tags = []
tags_append = tags.append
commentTag = 'comment'
stringTag = 'string'
keywordTag = 'keyword'
identifierTag = 'identifier'
start = 0
end = searchfrom
while 1:
m = search(pytext, end)
if m is None:
break # EXIT LOOP
start = m.start()
if start >= searchto:
break # EXIT LOOP
match = m.group(0)
end = start + len(match)
c = match[0]
if c not in "#'\"":
# Must have matched a keyword.
if start != searchfrom:
# there's still a redundant char before and after it, strip!
match = match[1:-1]
start = start + 1
else:
# this is the first keyword in the text.
# Only a space at the end.
match = match[:-1]
end = end - 1
tags_append((keywordTag, start, end, None))
# If this was a defining keyword, look ahead to the
# following identifier.
if match in ["def", "class"]:
m = idSearch(pytext, end)
if m is not None:
start = m.start()
if start == end:
match = m.group(0)
end = start + len(match)
tags_append((identifierTag, start, end, None))
elif c == "#":
tags_append((commentTag, start, end, None))
else:
tags_append((stringTag, start, end, None))
return tags
def test(path):
f = open(path)
text = f.read()
f.close()
tags = fontify(text)
for tag, start, end, sublist in tags:
print tag, repr(text[start:end])
| gpl-2.0 |
MonicaHsu/truvaluation | venv/lib/python2.7/encodings/cp862.py | 593 | 33626 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP862.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp862',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x05d0, # HEBREW LETTER ALEF
0x0081: 0x05d1, # HEBREW LETTER BET
0x0082: 0x05d2, # HEBREW LETTER GIMEL
0x0083: 0x05d3, # HEBREW LETTER DALET
0x0084: 0x05d4, # HEBREW LETTER HE
0x0085: 0x05d5, # HEBREW LETTER VAV
0x0086: 0x05d6, # HEBREW LETTER ZAYIN
0x0087: 0x05d7, # HEBREW LETTER HET
0x0088: 0x05d8, # HEBREW LETTER TET
0x0089: 0x05d9, # HEBREW LETTER YOD
0x008a: 0x05da, # HEBREW LETTER FINAL KAF
0x008b: 0x05db, # HEBREW LETTER KAF
0x008c: 0x05dc, # HEBREW LETTER LAMED
0x008d: 0x05dd, # HEBREW LETTER FINAL MEM
0x008e: 0x05de, # HEBREW LETTER MEM
0x008f: 0x05df, # HEBREW LETTER FINAL NUN
0x0090: 0x05e0, # HEBREW LETTER NUN
0x0091: 0x05e1, # HEBREW LETTER SAMEKH
0x0092: 0x05e2, # HEBREW LETTER AYIN
0x0093: 0x05e3, # HEBREW LETTER FINAL PE
0x0094: 0x05e4, # HEBREW LETTER PE
0x0095: 0x05e5, # HEBREW LETTER FINAL TSADI
0x0096: 0x05e6, # HEBREW LETTER TSADI
0x0097: 0x05e7, # HEBREW LETTER QOF
0x0098: 0x05e8, # HEBREW LETTER RESH
0x0099: 0x05e9, # HEBREW LETTER SHIN
0x009a: 0x05ea, # HEBREW LETTER TAV
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00a5, # YEN SIGN
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\u05d0' # 0x0080 -> HEBREW LETTER ALEF
u'\u05d1' # 0x0081 -> HEBREW LETTER BET
u'\u05d2' # 0x0082 -> HEBREW LETTER GIMEL
u'\u05d3' # 0x0083 -> HEBREW LETTER DALET
u'\u05d4' # 0x0084 -> HEBREW LETTER HE
u'\u05d5' # 0x0085 -> HEBREW LETTER VAV
u'\u05d6' # 0x0086 -> HEBREW LETTER ZAYIN
u'\u05d7' # 0x0087 -> HEBREW LETTER HET
u'\u05d8' # 0x0088 -> HEBREW LETTER TET
u'\u05d9' # 0x0089 -> HEBREW LETTER YOD
u'\u05da' # 0x008a -> HEBREW LETTER FINAL KAF
u'\u05db' # 0x008b -> HEBREW LETTER KAF
u'\u05dc' # 0x008c -> HEBREW LETTER LAMED
u'\u05dd' # 0x008d -> HEBREW LETTER FINAL MEM
u'\u05de' # 0x008e -> HEBREW LETTER MEM
u'\u05df' # 0x008f -> HEBREW LETTER FINAL NUN
u'\u05e0' # 0x0090 -> HEBREW LETTER NUN
u'\u05e1' # 0x0091 -> HEBREW LETTER SAMEKH
u'\u05e2' # 0x0092 -> HEBREW LETTER AYIN
u'\u05e3' # 0x0093 -> HEBREW LETTER FINAL PE
u'\u05e4' # 0x0094 -> HEBREW LETTER PE
u'\u05e5' # 0x0095 -> HEBREW LETTER FINAL TSADI
u'\u05e6' # 0x0096 -> HEBREW LETTER TSADI
u'\u05e7' # 0x0097 -> HEBREW LETTER QOF
u'\u05e8' # 0x0098 -> HEBREW LETTER RESH
u'\u05e9' # 0x0099 -> HEBREW LETTER SHIN
u'\u05ea' # 0x009a -> HEBREW LETTER TAV
u'\xa2' # 0x009b -> CENT SIGN
u'\xa3' # 0x009c -> POUND SIGN
u'\xa5' # 0x009d -> YEN SIGN
u'\u20a7' # 0x009e -> PESETA SIGN
u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\u2310' # 0x00a9 -> REVERSED NOT SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN)
u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
u'\u221e' # 0x00ec -> INFINITY
u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
u'\u2229' # 0x00ef -> INTERSECTION
u'\u2261' # 0x00f0 -> IDENTICAL TO
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x009b, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a5: 0x009d, # YEN SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f7: 0x00f6, # DIVISION SIGN
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x05d0: 0x0080, # HEBREW LETTER ALEF
0x05d1: 0x0081, # HEBREW LETTER BET
0x05d2: 0x0082, # HEBREW LETTER GIMEL
0x05d3: 0x0083, # HEBREW LETTER DALET
0x05d4: 0x0084, # HEBREW LETTER HE
0x05d5: 0x0085, # HEBREW LETTER VAV
0x05d6: 0x0086, # HEBREW LETTER ZAYIN
0x05d7: 0x0087, # HEBREW LETTER HET
0x05d8: 0x0088, # HEBREW LETTER TET
0x05d9: 0x0089, # HEBREW LETTER YOD
0x05da: 0x008a, # HEBREW LETTER FINAL KAF
0x05db: 0x008b, # HEBREW LETTER KAF
0x05dc: 0x008c, # HEBREW LETTER LAMED
0x05dd: 0x008d, # HEBREW LETTER FINAL MEM
0x05de: 0x008e, # HEBREW LETTER MEM
0x05df: 0x008f, # HEBREW LETTER FINAL NUN
0x05e0: 0x0090, # HEBREW LETTER NUN
0x05e1: 0x0091, # HEBREW LETTER SAMEKH
0x05e2: 0x0092, # HEBREW LETTER AYIN
0x05e3: 0x0093, # HEBREW LETTER FINAL PE
0x05e4: 0x0094, # HEBREW LETTER PE
0x05e5: 0x0095, # HEBREW LETTER FINAL TSADI
0x05e6: 0x0096, # HEBREW LETTER TSADI
0x05e7: 0x0097, # HEBREW LETTER QOF
0x05e8: 0x0098, # HEBREW LETTER RESH
0x05e9: 0x0099, # HEBREW LETTER SHIN
0x05ea: 0x009a, # HEBREW LETTER TAV
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| mit |
jweinst1/Rooster | dist/Rooster-1.0.0/rooster/__init__.py | 2 | 3185 | #module for creating and managing the hash key-value buckets
import os
import sys
from rooster.hashfunc import *
from rooster.parsedata import *
#creates a rooster storage directory
def createrooster(directory):
if not os.path.exists(directory):
os.makedirs(directory)
else:
raise FileExistsError("directory name already exists")
#sets a rooster key-value pair
def set_rooster(key, value, directory):
try:
hash_key = do_hash(key)
hash_key += ".rooster"
target = open(os.path.join(directory, hash_key), 'w')
target.write(str(value))
target.close()
except FileNotFoundError:
return "directory does not exist"
except ValueError:
return "key contains invalid characters, please use only alpha numeric or spaces"
#gets a rooster key-value pair
def get_rooster(key, directory):
try:
hash_key = do_hash(key)
hash_key += ".rooster"
target = open(os.path.join(directory, hash_key), 'r')
value = target.read()
target.close()
return parse_data(value) #parses the value back into appropriate python data structure.
except FileNotFoundError:
return "key or directory does not exist"
except ValueError:
return "key contains invalid characters, please use only alpha numeric or spaces"
#strictly returns the string within the rooster storage file, without parsing it back into a data structure
def get_str_rooster(key, directory):
try:
hash_key = do_hash(key)
hash_key += ".rooster"
target = open(os.path.join(directory, hash_key), 'r')
value = target.read()
target.close()
return value
except FileNotFoundError:
return "key does not exist"
except ValueError:
return "key contains invalid characters, please use only alpha numeric or spaces"
#checks if a key exists
def check_key(key, directory):
hash_key = do_hash(key)
hash_key += ".rooster"
target = os.path.join(directory, hash_key)
return os.path.exists(target)
#only sets a key if it would not overwrite an existing key.
def safe_set(key, value, directory):
if check_key(key, directory):
return "key already exists"
else:
set_rooster(key, value, directory)
#deletes a key-value entry from the hash bucket; returns True if successful.
def del_key(key, directory):
hash_key = do_hash(key)
hash_key += ".rooster"
target = os.path.join(directory, hash_key)
os.remove(target)
return True
#takes a Python dictionary and saves the entire dictionary into separate key-value buckets.
def set_dict(dict, directory):
keys, values = list(dict.keys()), list(dict.values())
for i in range(len(keys)):
set_rooster(keys[i], values[i], directory)
return True
#safe-sets a Python dictionary as separate key-value buckets
def safeset_dict(dict, directory):
keys, values = list(dict.keys()), list(dict.values())
for i in range(len(keys)):
safe_set(keys[i], values[i], directory)
return True
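#Illustrative usage sketch (hypothetical paths; assumes do_hash from
#rooster.hashfunc accepts these keys and parse_data recovers the int):
#
# createrooster("/tmp/mystore")
# set_rooster("answer", 42, "/tmp/mystore") #stored in <hash>.rooster as str(42)
# get_rooster("answer", "/tmp/mystore") #-> 42, rebuilt by parse_data
# check_key("answer", "/tmp/mystore") #-> True
# del_key("answer", "/tmp/mystore") #-> True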
#resolves the running script's directory for file access in production; for debugging, use the default open method.
def getScriptPath():
return os.path.dirname(os.path.realpath(sys.argv[0])) | mit |
vFense/vFenseAgent-nix | agent/deps/rpm/Python-2.7.5/lib/python2.7/encodings/cp864.py | 593 | 33919 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP864.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp864',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
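### Usage sketch
# Normally this codec is reached through the encodings package search function;
# calling the charmap helpers directly is shown here only for illustration:
#
# import codecs
# info = getregentry()
# assert info.name == 'cp864'
# text, consumed = codecs.charmap_decode('\x91', 'strict', decoding_table)
# # text == u'\u221e' (INFINITY), consumed == 1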
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0025: 0x066a, # ARABIC PERCENT SIGN
0x0080: 0x00b0, # DEGREE SIGN
0x0081: 0x00b7, # MIDDLE DOT
0x0082: 0x2219, # BULLET OPERATOR
0x0083: 0x221a, # SQUARE ROOT
0x0084: 0x2592, # MEDIUM SHADE
0x0085: 0x2500, # FORMS LIGHT HORIZONTAL
0x0086: 0x2502, # FORMS LIGHT VERTICAL
0x0087: 0x253c, # FORMS LIGHT VERTICAL AND HORIZONTAL
0x0088: 0x2524, # FORMS LIGHT VERTICAL AND LEFT
0x0089: 0x252c, # FORMS LIGHT DOWN AND HORIZONTAL
0x008a: 0x251c, # FORMS LIGHT VERTICAL AND RIGHT
0x008b: 0x2534, # FORMS LIGHT UP AND HORIZONTAL
0x008c: 0x2510, # FORMS LIGHT DOWN AND LEFT
0x008d: 0x250c, # FORMS LIGHT DOWN AND RIGHT
0x008e: 0x2514, # FORMS LIGHT UP AND RIGHT
0x008f: 0x2518, # FORMS LIGHT UP AND LEFT
0x0090: 0x03b2, # GREEK SMALL BETA
0x0091: 0x221e, # INFINITY
0x0092: 0x03c6, # GREEK SMALL PHI
0x0093: 0x00b1, # PLUS-OR-MINUS SIGN
0x0094: 0x00bd, # FRACTION 1/2
0x0095: 0x00bc, # FRACTION 1/4
0x0096: 0x2248, # ALMOST EQUAL TO
0x0097: 0x00ab, # LEFT POINTING GUILLEMET
0x0098: 0x00bb, # RIGHT POINTING GUILLEMET
0x0099: 0xfef7, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM
0x009a: 0xfef8, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM
0x009b: None, # UNDEFINED
0x009c: None, # UNDEFINED
0x009d: 0xfefb, # ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM
0x009e: 0xfefc, # ARABIC LIGATURE LAM WITH ALEF FINAL FORM
0x009f: None, # UNDEFINED
0x00a1: 0x00ad, # SOFT HYPHEN
0x00a2: 0xfe82, # ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM
0x00a5: 0xfe84, # ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM
0x00a6: None, # UNDEFINED
0x00a7: None, # UNDEFINED
0x00a8: 0xfe8e, # ARABIC LETTER ALEF FINAL FORM
0x00a9: 0xfe8f, # ARABIC LETTER BEH ISOLATED FORM
0x00aa: 0xfe95, # ARABIC LETTER TEH ISOLATED FORM
0x00ab: 0xfe99, # ARABIC LETTER THEH ISOLATED FORM
0x00ac: 0x060c, # ARABIC COMMA
0x00ad: 0xfe9d, # ARABIC LETTER JEEM ISOLATED FORM
0x00ae: 0xfea1, # ARABIC LETTER HAH ISOLATED FORM
0x00af: 0xfea5, # ARABIC LETTER KHAH ISOLATED FORM
0x00b0: 0x0660, # ARABIC-INDIC DIGIT ZERO
0x00b1: 0x0661, # ARABIC-INDIC DIGIT ONE
0x00b2: 0x0662, # ARABIC-INDIC DIGIT TWO
0x00b3: 0x0663, # ARABIC-INDIC DIGIT THREE
0x00b4: 0x0664, # ARABIC-INDIC DIGIT FOUR
0x00b5: 0x0665, # ARABIC-INDIC DIGIT FIVE
0x00b6: 0x0666, # ARABIC-INDIC DIGIT SIX
0x00b7: 0x0667, # ARABIC-INDIC DIGIT SEVEN
0x00b8: 0x0668, # ARABIC-INDIC DIGIT EIGHT
0x00b9: 0x0669, # ARABIC-INDIC DIGIT NINE
0x00ba: 0xfed1, # ARABIC LETTER FEH ISOLATED FORM
0x00bb: 0x061b, # ARABIC SEMICOLON
0x00bc: 0xfeb1, # ARABIC LETTER SEEN ISOLATED FORM
0x00bd: 0xfeb5, # ARABIC LETTER SHEEN ISOLATED FORM
0x00be: 0xfeb9, # ARABIC LETTER SAD ISOLATED FORM
0x00bf: 0x061f, # ARABIC QUESTION MARK
0x00c0: 0x00a2, # CENT SIGN
0x00c1: 0xfe80, # ARABIC LETTER HAMZA ISOLATED FORM
0x00c2: 0xfe81, # ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
0x00c3: 0xfe83, # ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM
0x00c4: 0xfe85, # ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
0x00c5: 0xfeca, # ARABIC LETTER AIN FINAL FORM
0x00c6: 0xfe8b, # ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
0x00c7: 0xfe8d, # ARABIC LETTER ALEF ISOLATED FORM
0x00c8: 0xfe91, # ARABIC LETTER BEH INITIAL FORM
0x00c9: 0xfe93, # ARABIC LETTER TEH MARBUTA ISOLATED FORM
0x00ca: 0xfe97, # ARABIC LETTER TEH INITIAL FORM
0x00cb: 0xfe9b, # ARABIC LETTER THEH INITIAL FORM
0x00cc: 0xfe9f, # ARABIC LETTER JEEM INITIAL FORM
0x00cd: 0xfea3, # ARABIC LETTER HAH INITIAL FORM
0x00ce: 0xfea7, # ARABIC LETTER KHAH INITIAL FORM
0x00cf: 0xfea9, # ARABIC LETTER DAL ISOLATED FORM
0x00d0: 0xfeab, # ARABIC LETTER THAL ISOLATED FORM
0x00d1: 0xfead, # ARABIC LETTER REH ISOLATED FORM
0x00d2: 0xfeaf, # ARABIC LETTER ZAIN ISOLATED FORM
0x00d3: 0xfeb3, # ARABIC LETTER SEEN INITIAL FORM
0x00d4: 0xfeb7, # ARABIC LETTER SHEEN INITIAL FORM
0x00d5: 0xfebb, # ARABIC LETTER SAD INITIAL FORM
0x00d6: 0xfebf, # ARABIC LETTER DAD INITIAL FORM
0x00d7: 0xfec1, # ARABIC LETTER TAH ISOLATED FORM
0x00d8: 0xfec5, # ARABIC LETTER ZAH ISOLATED FORM
0x00d9: 0xfecb, # ARABIC LETTER AIN INITIAL FORM
0x00da: 0xfecf, # ARABIC LETTER GHAIN INITIAL FORM
0x00db: 0x00a6, # BROKEN VERTICAL BAR
0x00dc: 0x00ac, # NOT SIGN
0x00dd: 0x00f7, # DIVISION SIGN
0x00de: 0x00d7, # MULTIPLICATION SIGN
0x00df: 0xfec9, # ARABIC LETTER AIN ISOLATED FORM
0x00e0: 0x0640, # ARABIC TATWEEL
0x00e1: 0xfed3, # ARABIC LETTER FEH INITIAL FORM
0x00e2: 0xfed7, # ARABIC LETTER QAF INITIAL FORM
0x00e3: 0xfedb, # ARABIC LETTER KAF INITIAL FORM
0x00e4: 0xfedf, # ARABIC LETTER LAM INITIAL FORM
0x00e5: 0xfee3, # ARABIC LETTER MEEM INITIAL FORM
0x00e6: 0xfee7, # ARABIC LETTER NOON INITIAL FORM
0x00e7: 0xfeeb, # ARABIC LETTER HEH INITIAL FORM
0x00e8: 0xfeed, # ARABIC LETTER WAW ISOLATED FORM
0x00e9: 0xfeef, # ARABIC LETTER ALEF MAKSURA ISOLATED FORM
0x00ea: 0xfef3, # ARABIC LETTER YEH INITIAL FORM
0x00eb: 0xfebd, # ARABIC LETTER DAD ISOLATED FORM
0x00ec: 0xfecc, # ARABIC LETTER AIN MEDIAL FORM
0x00ed: 0xfece, # ARABIC LETTER GHAIN FINAL FORM
0x00ee: 0xfecd, # ARABIC LETTER GHAIN ISOLATED FORM
0x00ef: 0xfee1, # ARABIC LETTER MEEM ISOLATED FORM
0x00f0: 0xfe7d, # ARABIC SHADDA MEDIAL FORM
0x00f1: 0x0651, # ARABIC SHADDAH
0x00f2: 0xfee5, # ARABIC LETTER NOON ISOLATED FORM
0x00f3: 0xfee9, # ARABIC LETTER HEH ISOLATED FORM
0x00f4: 0xfeec, # ARABIC LETTER HEH MEDIAL FORM
0x00f5: 0xfef0, # ARABIC LETTER ALEF MAKSURA FINAL FORM
0x00f6: 0xfef2, # ARABIC LETTER YEH FINAL FORM
0x00f7: 0xfed0, # ARABIC LETTER GHAIN MEDIAL FORM
0x00f8: 0xfed5, # ARABIC LETTER QAF ISOLATED FORM
0x00f9: 0xfef5, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM
0x00fa: 0xfef6, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM
0x00fb: 0xfedd, # ARABIC LETTER LAM ISOLATED FORM
0x00fc: 0xfed9, # ARABIC LETTER KAF ISOLATED FORM
0x00fd: 0xfef1, # ARABIC LETTER YEH ISOLATED FORM
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: None, # UNDEFINED
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'\u066a' # 0x0025 -> ARABIC PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xb0' # 0x0080 -> DEGREE SIGN
u'\xb7' # 0x0081 -> MIDDLE DOT
u'\u2219' # 0x0082 -> BULLET OPERATOR
u'\u221a' # 0x0083 -> SQUARE ROOT
u'\u2592' # 0x0084 -> MEDIUM SHADE
u'\u2500' # 0x0085 -> FORMS LIGHT HORIZONTAL
u'\u2502' # 0x0086 -> FORMS LIGHT VERTICAL
u'\u253c' # 0x0087 -> FORMS LIGHT VERTICAL AND HORIZONTAL
u'\u2524' # 0x0088 -> FORMS LIGHT VERTICAL AND LEFT
u'\u252c' # 0x0089 -> FORMS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x008a -> FORMS LIGHT VERTICAL AND RIGHT
u'\u2534' # 0x008b -> FORMS LIGHT UP AND HORIZONTAL
u'\u2510' # 0x008c -> FORMS LIGHT DOWN AND LEFT
u'\u250c' # 0x008d -> FORMS LIGHT DOWN AND RIGHT
u'\u2514' # 0x008e -> FORMS LIGHT UP AND RIGHT
u'\u2518' # 0x008f -> FORMS LIGHT UP AND LEFT
u'\u03b2' # 0x0090 -> GREEK SMALL BETA
u'\u221e' # 0x0091 -> INFINITY
u'\u03c6' # 0x0092 -> GREEK SMALL PHI
u'\xb1' # 0x0093 -> PLUS-OR-MINUS SIGN
u'\xbd' # 0x0094 -> FRACTION 1/2
u'\xbc' # 0x0095 -> FRACTION 1/4
u'\u2248' # 0x0096 -> ALMOST EQUAL TO
u'\xab' # 0x0097 -> LEFT POINTING GUILLEMET
u'\xbb' # 0x0098 -> RIGHT POINTING GUILLEMET
u'\ufef7' # 0x0099 -> ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM
u'\ufef8' # 0x009a -> ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM
u'\ufffe' # 0x009b -> UNDEFINED
u'\ufffe' # 0x009c -> UNDEFINED
u'\ufefb' # 0x009d -> ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM
u'\ufefc' # 0x009e -> ARABIC LIGATURE LAM WITH ALEF FINAL FORM
u'\ufffe' # 0x009f -> UNDEFINED
u'\xa0' # 0x00a0 -> NON-BREAKING SPACE
u'\xad' # 0x00a1 -> SOFT HYPHEN
u'\ufe82' # 0x00a2 -> ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM
u'\xa3' # 0x00a3 -> POUND SIGN
u'\xa4' # 0x00a4 -> CURRENCY SIGN
u'\ufe84' # 0x00a5 -> ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM
u'\ufffe' # 0x00a6 -> UNDEFINED
u'\ufffe' # 0x00a7 -> UNDEFINED
u'\ufe8e' # 0x00a8 -> ARABIC LETTER ALEF FINAL FORM
u'\ufe8f' # 0x00a9 -> ARABIC LETTER BEH ISOLATED FORM
u'\ufe95' # 0x00aa -> ARABIC LETTER TEH ISOLATED FORM
u'\ufe99' # 0x00ab -> ARABIC LETTER THEH ISOLATED FORM
u'\u060c' # 0x00ac -> ARABIC COMMA
u'\ufe9d' # 0x00ad -> ARABIC LETTER JEEM ISOLATED FORM
u'\ufea1' # 0x00ae -> ARABIC LETTER HAH ISOLATED FORM
u'\ufea5' # 0x00af -> ARABIC LETTER KHAH ISOLATED FORM
u'\u0660' # 0x00b0 -> ARABIC-INDIC DIGIT ZERO
u'\u0661' # 0x00b1 -> ARABIC-INDIC DIGIT ONE
u'\u0662' # 0x00b2 -> ARABIC-INDIC DIGIT TWO
u'\u0663' # 0x00b3 -> ARABIC-INDIC DIGIT THREE
u'\u0664' # 0x00b4 -> ARABIC-INDIC DIGIT FOUR
u'\u0665' # 0x00b5 -> ARABIC-INDIC DIGIT FIVE
u'\u0666' # 0x00b6 -> ARABIC-INDIC DIGIT SIX
u'\u0667' # 0x00b7 -> ARABIC-INDIC DIGIT SEVEN
u'\u0668' # 0x00b8 -> ARABIC-INDIC DIGIT EIGHT
u'\u0669' # 0x00b9 -> ARABIC-INDIC DIGIT NINE
u'\ufed1' # 0x00ba -> ARABIC LETTER FEH ISOLATED FORM
u'\u061b' # 0x00bb -> ARABIC SEMICOLON
u'\ufeb1' # 0x00bc -> ARABIC LETTER SEEN ISOLATED FORM
u'\ufeb5' # 0x00bd -> ARABIC LETTER SHEEN ISOLATED FORM
u'\ufeb9' # 0x00be -> ARABIC LETTER SAD ISOLATED FORM
u'\u061f' # 0x00bf -> ARABIC QUESTION MARK
u'\xa2' # 0x00c0 -> CENT SIGN
u'\ufe80' # 0x00c1 -> ARABIC LETTER HAMZA ISOLATED FORM
u'\ufe81' # 0x00c2 -> ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
u'\ufe83' # 0x00c3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM
u'\ufe85' # 0x00c4 -> ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
u'\ufeca' # 0x00c5 -> ARABIC LETTER AIN FINAL FORM
u'\ufe8b' # 0x00c6 -> ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
u'\ufe8d' # 0x00c7 -> ARABIC LETTER ALEF ISOLATED FORM
u'\ufe91' # 0x00c8 -> ARABIC LETTER BEH INITIAL FORM
u'\ufe93' # 0x00c9 -> ARABIC LETTER TEH MARBUTA ISOLATED FORM
u'\ufe97' # 0x00ca -> ARABIC LETTER TEH INITIAL FORM
u'\ufe9b' # 0x00cb -> ARABIC LETTER THEH INITIAL FORM
u'\ufe9f' # 0x00cc -> ARABIC LETTER JEEM INITIAL FORM
u'\ufea3' # 0x00cd -> ARABIC LETTER HAH INITIAL FORM
u'\ufea7' # 0x00ce -> ARABIC LETTER KHAH INITIAL FORM
u'\ufea9' # 0x00cf -> ARABIC LETTER DAL ISOLATED FORM
u'\ufeab' # 0x00d0 -> ARABIC LETTER THAL ISOLATED FORM
u'\ufead' # 0x00d1 -> ARABIC LETTER REH ISOLATED FORM
u'\ufeaf' # 0x00d2 -> ARABIC LETTER ZAIN ISOLATED FORM
u'\ufeb3' # 0x00d3 -> ARABIC LETTER SEEN INITIAL FORM
u'\ufeb7' # 0x00d4 -> ARABIC LETTER SHEEN INITIAL FORM
u'\ufebb' # 0x00d5 -> ARABIC LETTER SAD INITIAL FORM
u'\ufebf' # 0x00d6 -> ARABIC LETTER DAD INITIAL FORM
u'\ufec1' # 0x00d7 -> ARABIC LETTER TAH ISOLATED FORM
u'\ufec5' # 0x00d8 -> ARABIC LETTER ZAH ISOLATED FORM
u'\ufecb' # 0x00d9 -> ARABIC LETTER AIN INITIAL FORM
u'\ufecf' # 0x00da -> ARABIC LETTER GHAIN INITIAL FORM
u'\xa6' # 0x00db -> BROKEN VERTICAL BAR
u'\xac' # 0x00dc -> NOT SIGN
u'\xf7' # 0x00dd -> DIVISION SIGN
u'\xd7' # 0x00de -> MULTIPLICATION SIGN
u'\ufec9' # 0x00df -> ARABIC LETTER AIN ISOLATED FORM
u'\u0640' # 0x00e0 -> ARABIC TATWEEL
u'\ufed3' # 0x00e1 -> ARABIC LETTER FEH INITIAL FORM
u'\ufed7' # 0x00e2 -> ARABIC LETTER QAF INITIAL FORM
u'\ufedb' # 0x00e3 -> ARABIC LETTER KAF INITIAL FORM
u'\ufedf' # 0x00e4 -> ARABIC LETTER LAM INITIAL FORM
u'\ufee3' # 0x00e5 -> ARABIC LETTER MEEM INITIAL FORM
u'\ufee7' # 0x00e6 -> ARABIC LETTER NOON INITIAL FORM
u'\ufeeb' # 0x00e7 -> ARABIC LETTER HEH INITIAL FORM
u'\ufeed' # 0x00e8 -> ARABIC LETTER WAW ISOLATED FORM
u'\ufeef' # 0x00e9 -> ARABIC LETTER ALEF MAKSURA ISOLATED FORM
u'\ufef3' # 0x00ea -> ARABIC LETTER YEH INITIAL FORM
u'\ufebd' # 0x00eb -> ARABIC LETTER DAD ISOLATED FORM
u'\ufecc' # 0x00ec -> ARABIC LETTER AIN MEDIAL FORM
u'\ufece' # 0x00ed -> ARABIC LETTER GHAIN FINAL FORM
u'\ufecd' # 0x00ee -> ARABIC LETTER GHAIN ISOLATED FORM
u'\ufee1' # 0x00ef -> ARABIC LETTER MEEM ISOLATED FORM
u'\ufe7d' # 0x00f0 -> ARABIC SHADDA MEDIAL FORM
u'\u0651' # 0x00f1 -> ARABIC SHADDAH
u'\ufee5' # 0x00f2 -> ARABIC LETTER NOON ISOLATED FORM
u'\ufee9' # 0x00f3 -> ARABIC LETTER HEH ISOLATED FORM
u'\ufeec' # 0x00f4 -> ARABIC LETTER HEH MEDIAL FORM
u'\ufef0' # 0x00f5 -> ARABIC LETTER ALEF MAKSURA FINAL FORM
u'\ufef2' # 0x00f6 -> ARABIC LETTER YEH FINAL FORM
u'\ufed0' # 0x00f7 -> ARABIC LETTER GHAIN MEDIAL FORM
u'\ufed5' # 0x00f8 -> ARABIC LETTER QAF ISOLATED FORM
u'\ufef5' # 0x00f9 -> ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM
u'\ufef6' # 0x00fa -> ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM
u'\ufedd' # 0x00fb -> ARABIC LETTER LAM ISOLATED FORM
u'\ufed9' # 0x00fc -> ARABIC LETTER KAF ISOLATED FORM
u'\ufef1' # 0x00fd -> ARABIC LETTER YEH ISOLATED FORM
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\ufffe' # 0x00ff -> UNDEFINED
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00a0, # NON-BREAKING SPACE
0x00a2: 0x00c0, # CENT SIGN
0x00a3: 0x00a3, # POUND SIGN
0x00a4: 0x00a4, # CURRENCY SIGN
0x00a6: 0x00db, # BROKEN VERTICAL BAR
0x00ab: 0x0097, # LEFT POINTING GUILLEMET
0x00ac: 0x00dc, # NOT SIGN
0x00ad: 0x00a1, # SOFT HYPHEN
0x00b0: 0x0080, # DEGREE SIGN
0x00b1: 0x0093, # PLUS-OR-MINUS SIGN
0x00b7: 0x0081, # MIDDLE DOT
0x00bb: 0x0098, # RIGHT POINTING GUILLEMET
0x00bc: 0x0095, # FRACTION 1/4
0x00bd: 0x0094, # FRACTION 1/2
0x00d7: 0x00de, # MULTIPLICATION SIGN
0x00f7: 0x00dd, # DIVISION SIGN
0x03b2: 0x0090, # GREEK SMALL BETA
0x03c6: 0x0092, # GREEK SMALL PHI
0x060c: 0x00ac, # ARABIC COMMA
0x061b: 0x00bb, # ARABIC SEMICOLON
0x061f: 0x00bf, # ARABIC QUESTION MARK
0x0640: 0x00e0, # ARABIC TATWEEL
0x0651: 0x00f1, # ARABIC SHADDAH
0x0660: 0x00b0, # ARABIC-INDIC DIGIT ZERO
0x0661: 0x00b1, # ARABIC-INDIC DIGIT ONE
0x0662: 0x00b2, # ARABIC-INDIC DIGIT TWO
0x0663: 0x00b3, # ARABIC-INDIC DIGIT THREE
0x0664: 0x00b4, # ARABIC-INDIC DIGIT FOUR
0x0665: 0x00b5, # ARABIC-INDIC DIGIT FIVE
0x0666: 0x00b6, # ARABIC-INDIC DIGIT SIX
0x0667: 0x00b7, # ARABIC-INDIC DIGIT SEVEN
0x0668: 0x00b8, # ARABIC-INDIC DIGIT EIGHT
0x0669: 0x00b9, # ARABIC-INDIC DIGIT NINE
0x066a: 0x0025, # ARABIC PERCENT SIGN
0x2219: 0x0082, # BULLET OPERATOR
0x221a: 0x0083, # SQUARE ROOT
0x221e: 0x0091, # INFINITY
0x2248: 0x0096, # ALMOST EQUAL TO
0x2500: 0x0085, # FORMS LIGHT HORIZONTAL
0x2502: 0x0086, # FORMS LIGHT VERTICAL
0x250c: 0x008d, # FORMS LIGHT DOWN AND RIGHT
0x2510: 0x008c, # FORMS LIGHT DOWN AND LEFT
0x2514: 0x008e, # FORMS LIGHT UP AND RIGHT
0x2518: 0x008f, # FORMS LIGHT UP AND LEFT
0x251c: 0x008a, # FORMS LIGHT VERTICAL AND RIGHT
0x2524: 0x0088, # FORMS LIGHT VERTICAL AND LEFT
0x252c: 0x0089, # FORMS LIGHT DOWN AND HORIZONTAL
0x2534: 0x008b, # FORMS LIGHT UP AND HORIZONTAL
0x253c: 0x0087, # FORMS LIGHT VERTICAL AND HORIZONTAL
0x2592: 0x0084, # MEDIUM SHADE
0x25a0: 0x00fe, # BLACK SQUARE
0xfe7d: 0x00f0, # ARABIC SHADDA MEDIAL FORM
0xfe80: 0x00c1, # ARABIC LETTER HAMZA ISOLATED FORM
0xfe81: 0x00c2, # ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
0xfe82: 0x00a2, # ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM
0xfe83: 0x00c3, # ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM
0xfe84: 0x00a5, # ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM
0xfe85: 0x00c4, # ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
0xfe8b: 0x00c6, # ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
0xfe8d: 0x00c7, # ARABIC LETTER ALEF ISOLATED FORM
0xfe8e: 0x00a8, # ARABIC LETTER ALEF FINAL FORM
0xfe8f: 0x00a9, # ARABIC LETTER BEH ISOLATED FORM
0xfe91: 0x00c8, # ARABIC LETTER BEH INITIAL FORM
0xfe93: 0x00c9, # ARABIC LETTER TEH MARBUTA ISOLATED FORM
0xfe95: 0x00aa, # ARABIC LETTER TEH ISOLATED FORM
0xfe97: 0x00ca, # ARABIC LETTER TEH INITIAL FORM
0xfe99: 0x00ab, # ARABIC LETTER THEH ISOLATED FORM
0xfe9b: 0x00cb, # ARABIC LETTER THEH INITIAL FORM
0xfe9d: 0x00ad, # ARABIC LETTER JEEM ISOLATED FORM
0xfe9f: 0x00cc, # ARABIC LETTER JEEM INITIAL FORM
0xfea1: 0x00ae, # ARABIC LETTER HAH ISOLATED FORM
0xfea3: 0x00cd, # ARABIC LETTER HAH INITIAL FORM
0xfea5: 0x00af, # ARABIC LETTER KHAH ISOLATED FORM
0xfea7: 0x00ce, # ARABIC LETTER KHAH INITIAL FORM
0xfea9: 0x00cf, # ARABIC LETTER DAL ISOLATED FORM
0xfeab: 0x00d0, # ARABIC LETTER THAL ISOLATED FORM
0xfead: 0x00d1, # ARABIC LETTER REH ISOLATED FORM
0xfeaf: 0x00d2, # ARABIC LETTER ZAIN ISOLATED FORM
0xfeb1: 0x00bc, # ARABIC LETTER SEEN ISOLATED FORM
0xfeb3: 0x00d3, # ARABIC LETTER SEEN INITIAL FORM
0xfeb5: 0x00bd, # ARABIC LETTER SHEEN ISOLATED FORM
0xfeb7: 0x00d4, # ARABIC LETTER SHEEN INITIAL FORM
0xfeb9: 0x00be, # ARABIC LETTER SAD ISOLATED FORM
0xfebb: 0x00d5, # ARABIC LETTER SAD INITIAL FORM
0xfebd: 0x00eb, # ARABIC LETTER DAD ISOLATED FORM
0xfebf: 0x00d6, # ARABIC LETTER DAD INITIAL FORM
0xfec1: 0x00d7, # ARABIC LETTER TAH ISOLATED FORM
0xfec5: 0x00d8, # ARABIC LETTER ZAH ISOLATED FORM
0xfec9: 0x00df, # ARABIC LETTER AIN ISOLATED FORM
0xfeca: 0x00c5, # ARABIC LETTER AIN FINAL FORM
0xfecb: 0x00d9, # ARABIC LETTER AIN INITIAL FORM
0xfecc: 0x00ec, # ARABIC LETTER AIN MEDIAL FORM
0xfecd: 0x00ee, # ARABIC LETTER GHAIN ISOLATED FORM
0xfece: 0x00ed, # ARABIC LETTER GHAIN FINAL FORM
0xfecf: 0x00da, # ARABIC LETTER GHAIN INITIAL FORM
0xfed0: 0x00f7, # ARABIC LETTER GHAIN MEDIAL FORM
0xfed1: 0x00ba, # ARABIC LETTER FEH ISOLATED FORM
0xfed3: 0x00e1, # ARABIC LETTER FEH INITIAL FORM
0xfed5: 0x00f8, # ARABIC LETTER QAF ISOLATED FORM
0xfed7: 0x00e2, # ARABIC LETTER QAF INITIAL FORM
0xfed9: 0x00fc, # ARABIC LETTER KAF ISOLATED FORM
0xfedb: 0x00e3, # ARABIC LETTER KAF INITIAL FORM
0xfedd: 0x00fb, # ARABIC LETTER LAM ISOLATED FORM
0xfedf: 0x00e4, # ARABIC LETTER LAM INITIAL FORM
0xfee1: 0x00ef, # ARABIC LETTER MEEM ISOLATED FORM
0xfee3: 0x00e5, # ARABIC LETTER MEEM INITIAL FORM
0xfee5: 0x00f2, # ARABIC LETTER NOON ISOLATED FORM
0xfee7: 0x00e6, # ARABIC LETTER NOON INITIAL FORM
0xfee9: 0x00f3, # ARABIC LETTER HEH ISOLATED FORM
0xfeeb: 0x00e7, # ARABIC LETTER HEH INITIAL FORM
0xfeec: 0x00f4, # ARABIC LETTER HEH MEDIAL FORM
0xfeed: 0x00e8, # ARABIC LETTER WAW ISOLATED FORM
0xfeef: 0x00e9, # ARABIC LETTER ALEF MAKSURA ISOLATED FORM
0xfef0: 0x00f5, # ARABIC LETTER ALEF MAKSURA FINAL FORM
0xfef1: 0x00fd, # ARABIC LETTER YEH ISOLATED FORM
0xfef2: 0x00f6, # ARABIC LETTER YEH FINAL FORM
0xfef3: 0x00ea, # ARABIC LETTER YEH INITIAL FORM
0xfef5: 0x00f9, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM
0xfef6: 0x00fa, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM
0xfef7: 0x0099, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM
0xfef8: 0x009a, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM
0xfefb: 0x009d, # ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM
0xfefc: 0x009e, # ARABIC LIGATURE LAM WITH ALEF FINAL FORM
}
| lgpl-3.0 |
brianmoose/civet | client/tests/test_BaseClient_live.py | 2 | 8047 |
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
from client import JobGetter
from django.test import override_settings
from mock import patch
import os, subprocess
import threading
import time
from ci import views
from ci.tests import utils as test_utils
from client.tests import LiveClientTester, utils
@override_settings(INSTALLED_GITSERVERS=[test_utils.github_config()])
class Tests(LiveClientTester.LiveClientTester):
def create_client_and_job(self, recipe_dir, name, sleep=1):
c = utils.create_base_client()
os.environ["BUILD_ROOT"] = "/foo/bar"
c.client_info["single_shot"] = True
c.client_info["update_step_time"] = 1
c.client_info["ssl_cert"] = False # not needed but will get another line of coverage
c.client_info["server"] = self.live_server_url
c.client_info["servers"] = [self.live_server_url]
job = utils.create_client_job(recipe_dir, name=name, sleep=sleep)
c.client_info["build_configs"] = [job.config.name]
c.client_info["build_key"] = job.recipe.build_user.build_key
return c, job
def test_no_signals(self):
with test_utils.RecipeDir() as recipe_dir:
# This is just for coverage. We can't really
# test this because if we send a signal it will just quit
import signal
old_signal = signal.SIGUSR2
del signal.SIGUSR2
c, job = self.create_client_and_job(recipe_dir, "No signal", sleep=2)
signal.SIGUSR2 = old_signal
def test_run_success(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "RunSuccess", sleep=2)
self.set_counts()
c.run()
self.compare_counts(num_clients=1, num_events_completed=1, num_jobs_completed=1, active_branches=1)
utils.check_complete_job(self, job)
def test_run_graceful(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "Graceful", sleep=2)
self.set_counts()
c.client_info["single_shot"] = False
c.client_info["poll"] = 1
# graceful signal, should complete
script = "sleep 3 && kill -USR2 %s" % os.getpid()
proc = subprocess.Popen(script, shell=True, executable="/bin/bash", stdout=subprocess.PIPE)
c.run()
proc.wait()
self.compare_counts(num_clients=1, num_events_completed=1, num_jobs_completed=1, active_branches=1)
utils.check_complete_job(self, job)
self.assertEqual(c.graceful_signal.triggered, True)
self.assertEqual(c.cancel_signal.triggered, False)
def test_run_cancel(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "Cancel", sleep=4)
self.set_counts()
c.client_info["single_shot"] = False
c.client_info["poll"] = 1
# cancel signal, should stop
script = "sleep 3 && kill -USR1 %s" % os.getpid()
proc = subprocess.Popen(script, shell=True, executable="/bin/bash", stdout=subprocess.PIPE)
c.run()
proc.wait()
self.compare_counts(canceled=1,
num_clients=1,
num_events_completed=1,
num_jobs_completed=1,
active_branches=1,
events_canceled=1,
)
self.assertEqual(c.cancel_signal.triggered, True)
self.assertEqual(c.graceful_signal.triggered, False)
utils.check_canceled_job(self, job)
def test_run_job_cancel(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "JobCancel", sleep=4)
# cancel response, should cancel the job
self.set_counts()
thread = threading.Thread(target=c.run)
thread.start()
time.sleep(4)
job.refresh_from_db()
views.set_job_canceled(job)
thread.join()
self.compare_counts(canceled=1,
num_clients=1,
num_events_completed=1,
num_jobs_completed=1,
active_branches=1,
events_canceled=1,
)
self.assertEqual(c.cancel_signal.triggered, False)
self.assertEqual(c.graceful_signal.triggered, False)
utils.check_canceled_job(self, job)
def test_run_job_invalidated_basic(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "JobInvalidated", sleep=40)
# stop response, should stop the job
self.set_counts()
thread = threading.Thread(target=c.run)
thread.start()
start_time = time.time()
time.sleep(4)
job.refresh_from_db()
job.set_invalidated("Test invalidation", check_ready=True)
thread.join()
end_time = time.time()
self.assertGreater(15, end_time-start_time)
self.compare_counts(invalidated=1, num_clients=1, num_changelog=1)
utils.check_stopped_job(self, job)
def test_run_job_invalidated_nested_bash(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "JobInvalidated", sleep=40)
job.delete()
job = utils.create_job_with_nested_bash(recipe_dir, name="JobWithNestedBash", sleep=40)
# stop response, should stop the job
self.set_counts()
thread = threading.Thread(target=c.run)
start_time = time.time()
thread.start()
time.sleep(4)
job.refresh_from_db()
job.set_invalidated("Test invalidation", check_ready=True)
thread.join()
end_time = time.time()
self.assertGreater(15, end_time-start_time)
self.compare_counts(num_clients=1, invalidated=1, num_changelog=1)
utils.check_stopped_job(self, job)
@patch.object(JobGetter.JobGetter, 'find_job')
def test_exception(self, mock_getter):
with test_utils.RecipeDir() as recipe_dir:
# check exception handler
mock_getter.side_effect = Exception("oh no!")
c, job = self.create_client_and_job(recipe_dir, "JobStop", sleep=4)
self.set_counts()
c.run()
self.compare_counts()
@patch.object(JobGetter.JobGetter, 'find_job')
def test_runner_error(self, mock_getter):
with test_utils.RecipeDir() as recipe_dir:
mock_getter.return_value = None
c, job = self.create_client_and_job(recipe_dir, "JobError")
self.set_counts()
c.runner_error = True
c.run()
self.compare_counts()
def test_bad_thread_join(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "BadThreadJoin", sleep=2)
c.thread_join_wait = 0
self.set_counts()
c.run()
self.compare_counts(num_clients=1, num_events_completed=1, num_jobs_completed=1, active_branches=1)
utils.check_complete_job(self, job)
| apache-2.0 |
hbrunn/OCB | openerp/modules/module.py | 21 | 16212 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import functools
import imp
import importlib
import inspect
import itertools
import logging
import os
import re
import sys
import time
import unittest
import threading
from os.path import join as opj
import unittest2
import openerp
import openerp.tools as tools
import openerp.release as release
from openerp.tools.safe_eval import safe_eval as eval
MANIFEST = '__openerp__.py'
README = ['README.rst', 'README.md', 'README.txt']
_logger = logging.getLogger(__name__)
# addons path as a list
ad_paths = []
hooked = False
# Modules already loaded
loaded = []
class AddonsImportHook(object):
"""
Import hook to load OpenERP addons from multiple paths.
OpenERP implements its own import-hook to load its addons. OpenERP
addons are Python modules. Originally, they were each living in their
own top-level namespace, e.g. the sale module, or the hr module. For
backward compatibility, `import <module>` is still supported. Now they
are living in `openerp.addons`. The good way to import such modules is
thus `import openerp.addons.module`.
"""
def find_module(self, module_name, package_path=None):
module_parts = module_name.split('.')
if len(module_parts) == 3 and module_name.startswith('openerp.addons.'):
return self # We act as a loader too.
def load_module(self, module_name):
if module_name in sys.modules:
return sys.modules[module_name]
_1, _2, module_part = module_name.split('.')
# Note: we don't support circular import.
f, path, descr = imp.find_module(module_part, ad_paths)
mod = imp.load_module('openerp.addons.' + module_part, f, path, descr)
sys.modules['openerp.addons.' + module_part] = mod
return mod
def initialize_sys_path():
"""
Setup an import-hook to be able to import OpenERP addons from the different
addons paths.
This ensures something like ``import crm`` (or even
``import openerp.addons.crm``) works even if the addons are not in the
PYTHONPATH.
"""
global ad_paths
global hooked
dd = tools.config.addons_data_dir
if dd not in ad_paths:
ad_paths.append(dd)
for ad in tools.config['addons_path'].split(','):
ad = os.path.abspath(tools.ustr(ad.strip()))
if ad not in ad_paths:
ad_paths.append(ad)
# add base module path
base_path = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'addons'))
if base_path not in ad_paths:
ad_paths.append(base_path)
if not hooked:
sys.meta_path.append(AddonsImportHook())
hooked = True
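# Illustrative sketch only (assumes an addon named 'crm' exists on one of the
# configured addons paths): once initialize_sys_path() has installed the hook,
# the canonical spelling resolves through AddonsImportHook above.
#
# initialize_sys_path()
# import openerp.addons.crm # found on ad_paths, cached in sys.modules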
def get_module_path(module, downloaded=False, display_warning=True):
"""Return the path of the given module.
Search the addons paths and return the first path where the given
module is found. If downloaded is True, return the default addons
path if nothing else is found.
"""
initialize_sys_path()
for adp in ad_paths:
if os.path.exists(opj(adp, module)) or os.path.exists(opj(adp, '%s.zip' % module)):
return opj(adp, module)
if downloaded:
return opj(tools.config.addons_data_dir, module)
if display_warning:
_logger.warning('module %s: module not found', module)
return False
def get_module_filetree(module, dir='.'):
path = get_module_path(module)
if not path:
return False
dir = os.path.normpath(dir)
if dir == '.':
dir = ''
if dir.startswith('..') or (dir and dir[0] == '/'):
raise Exception('Cannot access file outside the module')
files = openerp.tools.osutil.listdir(path, True)
tree = {}
for f in files:
if not f.startswith(dir):
continue
if dir:
f = f[len(dir)+int(not dir.endswith('/')):]
lst = f.split(os.sep)
current = tree
while len(lst) != 1:
current = current.setdefault(lst.pop(0), {})
current[lst.pop(0)] = None
return tree
def get_module_resource(module, *args):
"""Return the full path of a resource of the given module.
:param module: module name
:param list(str) args: resource path components within module
:rtype: str
:return: absolute path to the resource
TODO name it get_resource_path
TODO make it available on the osv object (self.get_resource_path)
"""
mod_path = get_module_path(module)
if not mod_path: return False
resource_path = opj(mod_path, *args)
if os.path.isdir(mod_path):
# the module is a directory - ignore zip behavior
if os.path.exists(resource_path):
return resource_path
return False
def get_module_icon(module):
iconpath = ['static', 'description', 'icon.png']
if get_module_resource(module, *iconpath):
return ('/' + module + '/') + '/'.join(iconpath)
return '/base/' + '/'.join(iconpath)
def get_module_root(path):
"""
Get the closest module's root, beginning from path
# Given:
# /foo/bar/module_dir/static/src/...
get_module_root('/foo/bar/module_dir/static/')
# returns '/foo/bar/module_dir'
get_module_root('/foo/bar/module_dir/')
# returns '/foo/bar/module_dir'
get_module_root('/foo/bar')
# returns None
@param path: Path from which the lookup should start
@return: Module root path or None if not found
"""
while not os.path.exists(os.path.join(path, MANIFEST)):
new_path = os.path.abspath(os.path.join(path, os.pardir))
if path == new_path:
return None
path = new_path
return path
def load_information_from_description_file(module, mod_path=None):
"""
:param module: The name of the module (sale, purchase, ...)
:param mod_path: Physical path of the module; looked up from the module name if not provided
"""
if not mod_path:
mod_path = get_module_path(module)
terp_file = mod_path and opj(mod_path, MANIFEST) or False
if terp_file:
info = {}
if os.path.isfile(terp_file):
# default values for descriptor
info = {
'application': False,
'author': '',
'auto_install': False,
'category': 'Uncategorized',
'depends': [],
'description': '',
'icon': get_module_icon(module),
'installable': True,
'license': 'AGPL-3',
'post_load': None,
'version': '1.0',
'web': False,
'website': '',
'sequence': 100,
'summary': '',
}
info.update(itertools.izip(
'depends data demo test init_xml update_xml demo_xml'.split(),
iter(list, None)))
f = tools.file_open(terp_file)
try:
info.update(eval(f.read()))
finally:
f.close()
if not info.get('description'):
readme_path = [opj(mod_path, x) for x in README
if os.path.isfile(opj(mod_path, x))]
if readme_path:
readme_text = tools.file_open(readme_path[0]).read()
info['description'] = readme_text
if 'active' in info:
# 'active' has been renamed 'auto_install'
info['auto_install'] = info['active']
info['version'] = adapt_version(info['version'])
return info
#TODO: refactor the logger in this file to follow the logging guidelines
# for 6.0
_logger.debug('module %s: no %s file found.', module, MANIFEST)
return {}
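# For reference, a minimal __openerp__.py manifest that the function above can
# parse (illustrative values; omitted keys fall back to the defaults in `info`):
#
# {
# 'name': 'My Module',
# 'version': '1.0', # expanded by adapt_version() below
# 'depends': ['base'],
# 'data': ['views/my_view.xml'],
# }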
def init_module_models(cr, module_name, obj_list):
""" Initialize a list of models.
Call _auto_init and init on each model to create or update the
database tables supporting the models.
TODO better explanation of _auto_init and init.
"""
_logger.info('module %s: creating or updating database tables', module_name)
todo = []
for obj in obj_list:
result = obj._auto_init(cr, {'module': module_name})
if result:
todo += result
if hasattr(obj, 'init'):
obj.init(cr)
cr.commit()
for obj in obj_list:
obj._auto_end(cr, {'module': module_name})
cr.commit()
todo.sort(key=lambda x: x[0])
for t in todo:
t[1](cr, *t[2])
cr.commit()
def load_openerp_module(module_name):
""" Load an OpenERP module, if not already loaded.
This loads the module and register all of its models, thanks to either
the MetaModel metaclass, or the explicit instantiation of the model.
This is also used to load server-wide module (i.e. it is also used
when there is no model to register).
"""
global loaded
if module_name in loaded:
return
initialize_sys_path()
try:
mod_path = get_module_path(module_name)
__import__('openerp.addons.' + module_name)
# Call the module's post-load hook. This can done before any model or
# data has been initialized. This is ok as the post-load hook is for
# server-wide (instead of registry-specific) functionalities.
info = load_information_from_description_file(module_name)
if info['post_load']:
getattr(sys.modules['openerp.addons.' + module_name], info['post_load'])()
except Exception, e:
msg = "Couldn't load module %s" % (module_name)
_logger.critical(msg)
_logger.critical(e)
raise
else:
loaded.append(module_name)
def get_modules():
"""Returns the list of module names
"""
def listdir(dir):
def clean(name):
name = os.path.basename(name)
if name[-4:] == '.zip':
name = name[:-4]
return name
def is_really_module(name):
manifest_name = opj(dir, name, MANIFEST)
zipfile_name = opj(dir, name)
return os.path.isfile(manifest_name)
return map(clean, filter(is_really_module, os.listdir(dir)))
plist = []
initialize_sys_path()
for ad in ad_paths:
plist.extend(listdir(ad))
return list(set(plist))
def get_modules_with_version():
modules = get_modules()
res = dict.fromkeys(modules, adapt_version('1.0'))
for module in modules:
try:
info = load_information_from_description_file(module)
res[module] = info['version']
except Exception:
continue
return res
def adapt_version(version):
serie = release.major_version
if version == serie or not version.startswith(serie + '.'):
version = '%s.%s' % (serie, version)
return version
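# Example: with release.major_version == '8.0', adapt_version('1.0') returns
# '8.0.1.0', adapt_version('8.0') returns '8.0.8.0', and an already-prefixed
# '8.0.1.0' is returned unchanged.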
def get_test_modules(module):
""" Return a list of module for the addons potentially containing tests to
feed unittest2.TestLoader.loadTestsFromModule() """
# Try to import the module
modpath = 'openerp.addons.' + module
try:
mod = importlib.import_module('.tests', modpath)
except Exception, e:
# If module has no `tests` sub-module, no problem.
if str(e) != 'No module named tests':
_logger.exception('Can not `import %s`.', module)
return []
if hasattr(mod, 'fast_suite') or hasattr(mod, 'checks'):
_logger.warn(
"Found deprecated fast_suite or checks attribute in test module "
"%s. These have no effect in or after version 8.0.",
mod.__name__)
result = [mod_obj for name, mod_obj in inspect.getmembers(mod, inspect.ismodule)
if name.startswith('test_')]
return result
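# Sketch of the layout this expects (hypothetical addon name):
#
# openerp/addons/my_addon/tests/__init__.py -> from . import test_basic
# openerp/addons/my_addon/tests/test_basic.py -> TestCase subclasses
#
# get_test_modules('my_addon') # -> [<module '...tests.test_basic'>]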
# Use a custom stream object to log the test executions.
class TestStream(object):
def __init__(self, logger_name='openerp.tests'):
self.logger = logging.getLogger(logger_name)
self.r = re.compile(r'^-*$|^ *... *$|^ok$')
def flush(self):
pass
def write(self, s):
if self.r.match(s):
return
first = True
level = logging.ERROR if s.startswith(('ERROR', 'FAIL', 'Traceback')) else logging.INFO
for c in s.splitlines():
if not first:
c = '` ' + c
first = False
self.logger.log(level, c)
current_test = None
def runs_at(test, hook, default):
# by default, tests do not run post install
test_runs = getattr(test, hook, default)
# for a test suite, we're done
if not isinstance(test, unittest.TestCase):
return test_runs
# otherwise check the current test method to see it's been set to a
# different state
method = getattr(test, test._testMethodName)
return getattr(method, hook, test_runs)
runs_at_install = functools.partial(runs_at, hook='at_install', default=True)
runs_post_install = functools.partial(runs_at, hook='post_install', default=False)
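# Sketch of how a test opts out of the default phase (assumes the at_install /
# post_install decorators from openerp.tests.common, which set these hook
# attributes on the test method):
#
# from openerp.tests import common
#
# class TestAfterInstall(common.TransactionCase):
# @common.at_install(False)
# @common.post_install(True)
# def test_runs_late(self):
# ...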
def run_unit_tests(module_name, dbname, position=runs_at_install):
"""
:returns: ``True`` if all of ``module_name``'s tests succeeded, ``False``
if any of them failed.
:rtype: bool
"""
global current_test
current_test = module_name
mods = get_test_modules(module_name)
threading.currentThread().testing = True
r = True
for m in mods:
tests = unwrap_suite(unittest2.TestLoader().loadTestsFromModule(m))
suite = unittest2.TestSuite(itertools.ifilter(position, tests))
if suite.countTestCases():
t0 = time.time()
t0_sql = openerp.sql_db.sql_counter
_logger.info('%s running tests.', m.__name__)
result = unittest2.TextTestRunner(verbosity=2, stream=TestStream(m.__name__)).run(suite)
if time.time() - t0 > 5:
_logger.log(25, "%s tested in %.2fs, %s queries", m.__name__, time.time() - t0, openerp.sql_db.sql_counter - t0_sql)
if not result.wasSuccessful():
r = False
_logger.error("Module %s: %d failures, %d errors", module_name, len(result.failures), len(result.errors))
current_test = None
threading.currentThread().testing = False
return r
def unwrap_suite(test):
"""
Attempts to unpack testsuites (holding suites or cases) in order to
generate a single stream of terminals (either test cases or customized
test suites). These can then be checked for run/skip attributes
individually.
An alternative would be to use a variant of @unittest2.skipIf with a state
flag of some sort e.g. @unittest2.skipIf(common.runstate != 'at_install'),
but then things become weird with post_install as tests should *not* run
by default there
"""
if isinstance(test, unittest.TestCase):
yield test
return
subtests = list(test)
# custom test suite (no test cases)
if not len(subtests):
yield test
return
for item in itertools.chain.from_iterable(
itertools.imap(unwrap_suite, subtests)):
yield item
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mmoya/ansible | v2/ansible/module_utils/a10.py | 322 | 4194 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
AXAPI_PORT_PROTOCOLS = {
'tcp': 2,
'udp': 3,
}
AXAPI_VPORT_PROTOCOLS = {
'tcp': 2,
'udp': 3,
'fast-http': 9,
'http': 11,
'https': 12,
}
def a10_argument_spec():
return dict(
host=dict(type='str', required=True),
username=dict(type='str', aliases=['user', 'admin'], required=True),
password=dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True),
write_config=dict(type='bool', default=False)
)
def axapi_failure(result):
if 'response' in result and result['response'].get('status') == 'fail':
return True
return False
def axapi_call(module, url, post=None):
'''
Returns a data structure based on the result of the API call
'''
rsp, info = fetch_url(module, url, data=post)
if not rsp or info['status'] >= 400:
module.fail_json(msg="failed to connect (status code %s), error was %s" % (info['status'], info.get('msg', 'no error given')))
try:
raw_data = rsp.read()
data = json.loads(raw_data)
except ValueError:
# at least one API call (system.action.write_config) returns
# XML even when JSON is requested, so do some minimal handling
# here to prevent failing even when the call succeeded
if 'status="ok"' in raw_data.lower():
data = {"response": {"status": "OK"}}
else:
data = {"response": {"status": "fail", "err": {"msg": raw_data}}}
except:
module.fail_json(msg="could not read the result from the host")
finally:
rsp.close()
return data
def axapi_authenticate(module, base_url, username, password):
url = '%s&method=authenticate&username=%s&password=%s' % (base_url, username, password)
result = axapi_call(module, url)
if axapi_failure(result):
return module.fail_json(msg=result['response']['err']['msg'])
sessid = result['session_id']
return base_url + '&session_id=' + sessid
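# Typical call sequence (sketch; the host value and the V2.1 URL format are
# assumptions mirroring how the a10 modules build their base URL):
#
# base_url = 'https://%s/services/rest/V2.1/?format=json' % module.params['host']
# session_url = axapi_authenticate(module, base_url, username, password)
# result = axapi_call(module, session_url + '&method=slb.server.getAll')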
def axapi_enabled_disabled(flag):
'''
The axapi uses 0/1 integer values for flags, rather than strings
or booleans, so convert the given flag to a 0 or 1. For now, params
are specified as strings only so thats what we check.
'''
if flag == 'enabled':
return 1
else:
return 0
def axapi_get_port_protocol(protocol):
return AXAPI_PORT_PROTOCOLS.get(protocol.lower(), None)
def axapi_get_vport_protocol(protocol):
return AXAPI_VPORT_PROTOCOLS.get(protocol.lower(), None)
| gpl-3.0 |
r0balo/pelisalacarta | python/main-classic/servers/divxstage.py | 2 | 3145 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para divxstage/cloudtime
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------
import re
from core import httptools
from core import logger
from core import scrapertools
host = "http://www.cloudtime.to"
def test_video_exists( page_url ):
logger.info("[divxstage.py] test_video_exists(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url.replace('/embed/?v=', '/video/')).data
if "This file no longer exists" in data:
return False, "El archivo no existe<br/>en divxstage o ha sido borrado."
return True, ""
def get_video_url(page_url, premium = False, user="", password="", video_password=""):
logger.info("[divxstage.py] get_video_url(page_url='%s')" % page_url)
if "divxstage.net" in page_url:
page_url = page_url.replace("divxstage.net", "cloudtime.to")
data = httptools.downloadpage(page_url).data
video_urls = []
videourls = scrapertools.find_multiple_matches(data, 'src\s*:\s*[\'"]([^\'"]+)[\'"]')
if not videourls:
videourls = scrapertools.find_multiple_matches(data, '<source src=[\'"]([^\'"]+)[\'"]')
for videourl in videourls:
if videourl.endswith(".mpd"):
id = scrapertools.find_single_match(videourl, '/dash/(.*?)/')
videourl = "http://www.cloudtime.to/download.php%3Ffile=mm" + "%s.mp4" % id
videourl = re.sub(r'/dl(\d)*/', '/dl/', videourl)
ext = scrapertools.get_filename_from_url(videourl)[-4:]
videourl = videourl.replace("%3F", "?") + \
"|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0"
video_urls.append([ext + " [cloudtime]", videourl])
return video_urls
# Finds this server's videos in the given text
def find_videos(data):
encontrados = set()
devuelve = []
# divxstage http://www.divxstage.net/video/of7ww1tdv62gf"
patronvideos = 'divxstage[^/]+/video/(\w+)$'
logger.info("#" + patronvideos + "#")
matches = scrapertools.find_multiple_matches(data, patronvideos)
for match in matches:
titulo = "[Divxstage]"
url = host + "/embed/?v=" + match
if url not in encontrados:
logger.info("url=" + url)
devuelve.append([titulo, url, 'divxstage'])
encontrados.add(url)
else:
logger.info("url duplicada=" + url)
# divxstage http://www.cloudtime.to/video/of7ww1tdv62gf"
    patronvideos = 'cloudtime[^/]+/(?:video/|embed/\?v=)([A-Za-z0-9]+)'
logger.info("#" + patronvideos + "#")
matches = scrapertools.find_multiple_matches(data, patronvideos)
for match in matches:
titulo = "[Cloudtime]"
url = host + "/embed/?v=" + match
if url not in encontrados:
logger.info("url=" + url)
devuelve.append([titulo, url, 'divxstage'])
encontrados.add(url)
else:
logger.info("url duplicada=" + url)
return devuelve
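# Illustrative example (the page snippet is made up):
#
#     data = 'ver en http://www.cloudtime.to/video/of7ww1tdv62gf'
#     find_videos(data)
#     -> [['[Cloudtime]', 'http://www.cloudtime.to/embed/?v=of7ww1tdv62gf', 'divxstage']]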
| gpl-3.0 |
javierder/dogestart.me | south/logger.py | 129 | 1175 | import sys
import logging
from django.conf import settings
# Create a dummy handler to use for now.
class NullHandler(logging.Handler):
def emit(self, record):
pass
def get_logger():
"Attach a file handler to the logger if there isn't one already."
debug_on = getattr(settings, "SOUTH_LOGGING_ON", False)
logging_file = getattr(settings, "SOUTH_LOGGING_FILE", False)
if debug_on:
if logging_file:
if len(_logger.handlers) < 2:
_logger.addHandler(logging.FileHandler(logging_file))
_logger.setLevel(logging.DEBUG)
else:
raise IOError("SOUTH_LOGGING_ON is True. You also need a SOUTH_LOGGING_FILE setting.")
return _logger
def close_logger():
"Closes the logger handler for the file, so we can remove the file after a test."
for handler in _logger.handlers:
_logger.removeHandler(handler)
if isinstance(handler, logging.FileHandler):
handler.close()
def init_logger():
"Initialize the south logger"
logger = logging.getLogger("south")
logger.addHandler(NullHandler())
return logger
_logger = init_logger()
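# Example usage (a sketch; assumes a configured Django settings module):
#
#     # settings.py
#     SOUTH_LOGGING_ON = True
#     SOUTH_LOGGING_FILE = '/tmp/south-debug.log'
#
#     # anywhere in migration code
#     from south.logger import get_logger
#     get_logger().debug('applying migration 0006')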
| mit |
drawks/ansible | lib/ansible/modules/net_tools/nios/nios_zone.py | 86 | 6891 | #!/usr/bin/python
# Copyright (c) 2018 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: nios_zone
version_added: "2.5"
author: "Peter Sprygada (@privateip)"
short_description: Configure Infoblox NIOS DNS zones
description:
- Adds and/or removes instances of DNS zone objects from
Infoblox NIOS servers. This module manages NIOS C(zone_auth) objects
using the Infoblox WAPI interface over REST.
requirements:
- infoblox-client
extends_documentation_fragment: nios
options:
fqdn:
description:
- Specifies the qualified domain name to either add or remove from
the NIOS instance based on the configured C(state) value.
required: true
aliases:
- name
view:
description:
- Configures the DNS view name for the configured resource. The
specified DNS zone must already exist on the running NIOS instance
prior to configuring zones.
required: true
default: default
aliases:
- dns_view
grid_primary:
description:
- Configures the grid primary servers for this zone.
suboptions:
name:
description:
- The name of the grid primary server
grid_secondaries:
description:
- Configures the grid secondary servers for this zone.
suboptions:
name:
description:
- The name of the grid secondary server
ns_group:
version_added: "2.6"
description:
- Configures the name server group for this zone. Name server group is
mutually exclusive with grid primary and grid secondaries.
restart_if_needed:
version_added: "2.6"
description:
- If set to true, causes the NIOS DNS service to restart and load the
new zone configuration
type: bool
zone_format:
version_added: "2.7"
description:
      - Create an authoritative Reverse-Mapping Zone, which is an area of network
        space for which one or more name servers (primary and secondary) have the
responsibility to respond to address-to-name queries. It supports
reverse-mapping zones for both IPv4 and IPv6 addresses.
default: FORWARD
extattrs:
description:
- Allows for the configuration of Extensible Attributes on the
instance of the object. This argument accepts a set of key / value
pairs for configuration.
comment:
description:
- Configures a text string comment to be associated with the instance
of this object. The provided text string will be configured on the
object instance.
state:
description:
- Configures the intended state of the instance of the object on
the NIOS server. When this value is set to C(present), the object
is configured on the device and when this value is set to C(absent)
the value is removed (if necessary) from the device.
default: present
choices:
- present
- absent
'''
EXAMPLES = '''
- name: configure a zone on the system using grid primary and secondaries
nios_zone:
name: ansible.com
grid_primary:
- name: gridprimary.grid.com
grid_secondaries:
- name: gridsecondary1.grid.com
- name: gridsecondary2.grid.com
restart_if_needed: true
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: configure a zone on the system using a name server group
nios_zone:
name: ansible.com
ns_group: examplensg
restart_if_needed: true
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: configure a reverse mapping zone on the system using IPV4 zone format
nios_zone:
name: 10.10.10.0/24
zone_format: IPV4
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: configure a reverse mapping zone on the system using IPV6 zone format
nios_zone:
name: 100::1/128
zone_format: IPV6
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: update the comment and ext attributes for an existing zone
nios_zone:
name: ansible.com
comment: this is an example comment
extattrs:
Site: west-dc
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: remove the dns zone
nios_zone:
name: ansible.com
state: absent
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: remove the reverse mapping dns zone from the system with IPV4 zone format
nios_zone:
name: 10.10.10.0/24
zone_format: IPV4
state: absent
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.net_tools.nios.api import WapiModule
from ansible.module_utils.net_tools.nios.api import NIOS_ZONE
def main():
''' Main entry point for module execution
'''
grid_spec = dict(
name=dict(required=True),
)
ib_spec = dict(
fqdn=dict(required=True, aliases=['name'], ib_req=True, update=False),
zone_format=dict(default='FORWARD', aliases=['zone_format'], ib_req=False),
view=dict(default='default', aliases=['dns_view'], ib_req=True),
grid_primary=dict(type='list', elements='dict', options=grid_spec),
grid_secondaries=dict(type='list', elements='dict', options=grid_spec),
ns_group=dict(),
restart_if_needed=dict(type='bool'),
extattrs=dict(type='dict'),
comment=dict()
)
argument_spec = dict(
provider=dict(required=True),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(ib_spec)
argument_spec.update(WapiModule.provider_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['ns_group', 'grid_primary'],
['ns_group', 'grid_secondaries']
])
wapi = WapiModule(module)
result = wapi.run(NIOS_ZONE, ib_spec)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
imcsk8/packstack | packstack/installer/utils/strings.py | 3 | 2168 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
STR_MASK = '*' * 8
COLORS = {'nocolor': "\033[0m", 'red': "\033[0;31m",
'green': "\033[32m", 'blue': "\033[34m",
'yellow': "\033[33m"}
def color_text(text, color):
"""
Returns given text string with appropriate color tag. Allowed values
for color parameter are 'red', 'blue', 'green' and 'yellow'.
"""
return '%s%s%s' % (COLORS[color], text, COLORS['nocolor'])
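# Example (illustrative):
#
#     color_text('OK', 'green')  # -> '\033[32mOK\033[0m'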
def mask_string(unmasked, mask_list=None, replace_list=None):
"""
Replaces words from mask_list with MASK in unmasked string.
    If words need to be transformed before masking, the transformation
    can be described in replace_list. For example [("'","'\\''")]
replaces all ' characters with '\\''.
"""
mask_list = mask_list or []
replace_list = replace_list or []
masked = unmasked
    for word in sorted(mask_list, key=len, reverse=True):
if not word:
continue
for before, after in replace_list:
word = word.replace(before, after)
masked = masked.replace(word, STR_MASK)
return masked
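# Example (illustrative):
#
#     mask_string("password=s3cret", mask_list=["s3cret"])
#     -> 'password=********'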
def state_format(msg, state, color):
"""
Formats state with offset according to given message.
"""
_msg = '%s' % msg.strip()
for clr in COLORS.values():
        _msg = re.sub(re.escape(clr), '', _msg)
space = 70 - len(_msg)
state = '[ %s ]' % color_text(state, color)
return state.rjust(space)
def state_message(msg, state, color):
"""
Formats given message with colored state information.
"""
return '%s%s' % (msg, state_format(msg, state, color))
| apache-2.0 |
wangmingjob/OnlineJudge | problem/models.py | 3 | 1771 | # coding=utf-8
from django.db import models
from account.models import User
from utils.models import RichTextField
class ProblemTag(models.Model):
name = models.CharField(max_length=30)
class Meta:
db_table = "problem_tag"
class AbstractProblem(models.Model):
    # Title
title = models.CharField(max_length=50)
    # Problem description, in HTML format
description = RichTextField()
    # Input description
input_description = models.CharField(max_length=10000)
    # Output description
output_description = models.CharField(max_length=10000)
    # Sample input; may store JSON-formatted data
samples = models.TextField(blank=True)
    # Test case id; this id can be joined into a path to locate the stored test case files
test_case_id = models.CharField(max_length=40)
    # Hint
hint = models.TextField(blank=True, null=True)
    # Creation time
create_time = models.DateTimeField(auto_now_add=True)
    # Last update time
# last_update_time = models.DateTimeField(auto_now=True)
    # Who created this problem
created_by = models.ForeignKey(User)
    # Time limit, in milliseconds
time_limit = models.IntegerField()
    # Memory limit, in MB
memory_limit = models.IntegerField()
    # Whether visible; False is equivalent to deleted
visible = models.BooleanField(default=True)
    # Total number of submissions
total_submit_number = models.IntegerField(default=0)
    # Number of accepted submissions
total_accepted_number = models.IntegerField(default=0)
class Meta:
abstract = True
class Problem(AbstractProblem):
    # Difficulty, 0 - n
difficulty = models.IntegerField()
    # Tags
tags = models.ManyToManyField(ProblemTag)
    # Source
source = models.CharField(max_length=30, blank=True, null=True)
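# Illustrative creation of a problem (a sketch; the concrete user and tag
# objects are assumptions):
#
#     problem = Problem.objects.create(
#         title="A + B", description="<p>sum two numbers</p>",
#         input_description="two integers", output_description="their sum",
#         test_case_id="abc123", created_by=user,
#         time_limit=1000, memory_limit=64, difficulty=0)
#     problem.tags.add(tag)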
| mit |
SGSSGene/busy | extRepositories/yaml-cpp/test/gmock-1.7.0/scripts/generator/cpp/keywords.py | 1157 | 2004 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""C++ keywords and helper utilities for determining keywords."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
if not hasattr(builtins, 'set'):
# Nominal support for Python 2.3.
from sets import Set as set
TYPES = set('bool char int long short double float void wchar_t unsigned signed'.split())
TYPE_MODIFIERS = set('auto register const inline extern static virtual volatile mutable'.split())
ACCESS = set('public protected private friend'.split())
CASTS = set('static_cast const_cast dynamic_cast reinterpret_cast'.split())
OTHERS = set('true false asm class namespace using explicit this operator sizeof'.split())
OTHER_TYPES = set('new delete typedef struct union enum typeid typename template'.split())
CONTROL = set('case switch default if else return goto'.split())
EXCEPTION = set('try catch throw'.split())
LOOP = set('while do for break continue'.split())
ALL = TYPES | TYPE_MODIFIERS | ACCESS | CASTS | OTHERS | OTHER_TYPES | CONTROL | EXCEPTION | LOOP
def IsKeyword(token):
return token in ALL
def IsBuiltinType(token):
if token in ('virtual', 'inline'):
# These only apply to methods, they can't be types by themselves.
return False
return token in TYPES or token in TYPE_MODIFIERS
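# Illustrative checks:
#
#     IsKeyword('while')         # True
#     IsKeyword('main')          # False
#     IsBuiltinType('unsigned')  # True
#     IsBuiltinType('virtual')   # False (method-only modifier)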
| isc |
vthorsteinsson/tensor2tensor | tensor2tensor/data_generators/desc2code.py | 7 | 10081 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generators for the Description2Code OpenAI data-set."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import random
import re
import zipfile
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import text_problems
from tensor2tensor.utils import registry
import tensorflow as tf
# End-of-sentence marker.
EOS = text_encoder.EOS_ID
_DATASET_URL = "https://drive.google.com/uc?export=download&id=0Bz3fihKG133ceWNFQTQ5S0xhZUk"
_DATASET_FILENAME = "description2code_current.zip"
_DATASET_PB_PATH = "description2code_current/"
_DESC_DIR_NAME = "description"
_VOCAB_EN_FILENAME = "vocab.endefr"
_RE_CPP_INLINE_COMMENT = re.compile("//.*?\n") # Compiled once
# Constant defined for a language problem
CodingPbConstants = collections.namedtuple("CodingPbConstants", [
"code_dir_name",
"vocab_filename",
"filter_patterns",
"target_space",
])
PB_PY = CodingPbConstants(
code_dir_name="solutions_python",
vocab_filename="vocab.py",
filter_patterns=["#include", "# include", "import java."],
target_space=problem.SpaceID.PY_TOK,
)
PB_CPP = CodingPbConstants(
code_dir_name="solutions_c++",
vocab_filename="vocab.cpp",
filter_patterns=["import java."],
target_space=problem.SpaceID.CPP_TOK,
)
# Struct containing a coding problem (contains the paths to the descriptions
# and code files)
CodingPbInfo = collections.namedtuple("CodingPbInfo", "desc_file, code_files")
class Desc2CodeProblem(text_problems.Text2TextProblem):
"""Base class for Description2Code problems."""
@property
def dataset_splits(self):
return [{
"split": problem.DatasetSplit.TRAIN,
"shards": 10,
}, {
"split": problem.DatasetSplit.EVAL,
"shards": 1,
}]
@property
def input_vocab_size(self):
return 2**15 # 32k
@property
def target_vocab_size(self):
return 2**12 # 4k
@property
def vocab_input_filename(self):
return "{}.{}".format(_VOCAB_EN_FILENAME, self.input_vocab_size)
@property
def vocab_target_filename(self):
return "{}.{}".format(
self.pb_constants.vocab_filename, self.target_vocab_size)
def preprocess_target(self, target):
"""Apply some preprocessing to the target.
For instance, remove space/tabs.
Args:
target (str): code source content
Returns:
the pre-processed string content
"""
return target
def feature_encoders(self, data_dir):
source_vocab_filename = os.path.join(data_dir, self.vocab_input_filename)
target_vocab_filename = os.path.join(data_dir, self.vocab_target_filename)
source_token = text_encoder.SubwordTextEncoder(source_vocab_filename)
target_token = text_encoder.SubwordTextEncoder(target_vocab_filename)
return {
"inputs": source_token,
"targets": target_token,
}
  @property
  def is_generate_per_split(self):
    return True
def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):
train = dataset_split == problem.DatasetSplit.TRAIN
# Called twice: for train and test
# Get the list of the training samples (coding challenge samples)
samples = list(generator_samples(tmp_dir, self.pb_constants))
# Split between train and dev
# Shuffle to get problems from diverse sources (CodeChef and CodeForces) and
# difficulties in each set.
# Need to sort the samples first before shuffling (as walk() isn't
# deterministic)
samples.sort(key=lambda x: x.desc_file) # in-place
rng = random.Random(7531) # Local fixed seed
rng.shuffle(samples) # in-place
# Train: 5019/5228 problems
# Dev: 209/5228 problems
len_samples = len(samples)
split = len_samples // 25
samples = samples[split:] if train else samples[:split]
tf.logging.info("Number of samples for {}: {}/{}".format(
"train" if train else "dev",
len(samples),
len_samples
))
def generator_samples_content(get_source, get_target):
"""Generate samples."""
source, target = None, None
# Iterate over the coding samples
for sample in samples:
if get_source:
with tf.gfile.GFile(sample.desc_file, mode="r") as source_file:
source = source_file.read()
if get_target:
# Each challenge can have multiple implementations (or none)
for code_file in sample.code_files:
with tf.gfile.GFile(code_file, mode="r") as target_file:
target = target_file.read()
target = self.preprocess_target(target)
yield source, target
elif sample.code_files: # Only take the source if a target exists
yield source, target
def generator_target():
for _, target in generator_samples_content(False, True):
yield target.strip()
# Generate vocab for both source and target
# TODO(lukaszkaiser): Fix vocab generation call. No sources given.
assert not self.vocab_input_filename
source_vocab = None
# source_vocab = generator_utils.get_or_generate_vocab(
# data_dir, tmp_dir, self.vocab_input_filename, self.input_vocab_size)
target_vocab = generator_utils.get_or_generate_vocab_inner(
data_dir=data_dir,
vocab_filename=self.vocab_target_filename,
vocab_size=self.target_vocab_size,
generator=generator_target(),)
# Yield the training and testing samples
eos_list = [EOS]
for source, target in generator_samples_content(True, True):
source_ints = source_vocab.encode(source.strip()) + eos_list
target_ints = target_vocab.encode(target.strip()) + eos_list
yield {
"inputs": source_ints,
"targets": target_ints,
}
@registry.register_problem
class ProgrammingDesc2codePy(Desc2CodeProblem):
"""Description2Code for python problem."""
@property
def pb_constants(self):
return PB_PY
def preprocess_target(self, target):
"""Simple tab to space replacement."""
return target.replace("\t", " ")
@registry.register_problem
class ProgrammingDesc2codeCpp(Desc2CodeProblem):
"""Description2Code for C++ problem."""
@property
def pb_constants(self):
return PB_CPP
def preprocess_target(self, target):
"""Pre-process Cpp files."""
target = re.sub(_RE_CPP_INLINE_COMMENT, " ", target) # Remove comments
    # The regex rule is quite simple, so it will fail if a // appears inside a
    # string, and it does not remove /* */ comments
target = " ".join(target.split()) # Normalize all spaces
return target
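# Illustrative effect of the C++ pre-processing above:
#
#     preprocess_target('int x; // counter\nreturn x;\n')
#     -> 'int x; return x;'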
# Utils functions
def generator_samples(tmp_dir, pb_cst):
"""Generator for the dataset samples.
If not present, download and extract the dataset.
Args:
tmp_dir: path to the directory where to download the dataset.
pb_cst: CodingPbConstants object defining paths
Yields:
    A CodingPbInfo object containing information about the next challenge.
"""
# Step1: Download dataset (eventually)
data_zip_path = generator_utils.maybe_download_from_drive(
directory=tmp_dir,
filename=_DATASET_FILENAME,
url=_DATASET_URL,
)
tf.logging.info("Data downloaded in: {}".format(data_zip_path))
# Step2: Extract dataset
# We could deduce _DATASET_PB_PATH from the zip file (instead of
# hardcoded path)
data_rootdir = os.path.join(tmp_dir, _DATASET_PB_PATH)
if not tf.gfile.Exists(data_rootdir):
with zipfile.ZipFile(data_zip_path, "r") as corpus_zip:
corpus_zip.extractall(tmp_dir)
# We could remove the extracted __MACOSX folder
tf.logging.info("Data extracted in: {}".format(tmp_dir))
else:
tf.logging.info("Data already extracted in: {}".format(tmp_dir))
# Step3: Extract the problems list on the extracted folder
def contains_samples(subdir, dirs, files): # pylint: disable=unused-argument
"""Check that the folder contains a problem."""
return (
_DESC_DIR_NAME in dirs and
pb_cst.code_dir_name in dirs
)
def next_sample(subdir, dirs, files): # pylint: disable=unused-argument
"""Return the filenames of the problem."""
# More could be extracted (like the expected inputs/outputs
# pairs, the problem difficulty, the names of the algorithmic techniques
# needed)
desc_file = os.path.join(subdir, _DESC_DIR_NAME, "description.txt")
code_files = []
    # As the dataset is noisy, the program deduces the language from the file
# content.
code_pattern = os.path.join(subdir, pb_cst.code_dir_name, "*.txt")
for f in tf.gfile.Glob(code_pattern):
with tf.gfile.GFile(f, mode="r") as target_file:
# Hack to filter C++/Java files. In theory some python comments could
# make the file be considered as C++ but in practice the chance of
# getting a false negative is low.
content = target_file.read()
if not any(p in content for p in pb_cst.filter_patterns):
code_files.append(f)
return CodingPbInfo(
desc_file=desc_file,
code_files=code_files
)
# The dataset contains problem from two different sources (CodeChef
# and CodeForces). Due to the limited number of samples, all problems from
# both sources are merged
for w in tf.gfile.Walk(data_rootdir):
if contains_samples(*w):
yield next_sample(*w)
| apache-2.0 |
henrykironde/scikit-learn | sklearn/tests/test_grid_search.py | 83 | 28713 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
sushantgoel/Introduction-Programming-Python | Solutions/Module7TaxesChallengeSolution.py | 19 | 1637 | #Declare and initialize your variables
country = ""
province = ""
orderTotal = 0
totalWithTax = 0
#I am declaring variables to hold the tax values used in the calculations
#That way if a tax rate changes, I only have to change it in one place instead
#of searching through my code to see where I had a specific numeric value and updating it
GST = .05
HST = .13
PST = .06
#Ask the user what country they are from
country = input("What country are you from? " )
#if they are from Canada ask which province...don't forget they may enter Canada as CANADA, Canada, canada, CAnada
#so convert the string to lowercase before you do the comparison
if country.lower() == "canada" :
province = input("Which province are you from? ")
#ask for the order total
orderTotal = float(input("What is your order total? "))
#Now add the taxes
#first check if they are from canada
if country.lower() == "canada" :
#if they are from canada, we have to change the calculation based on the province they specified
if province.lower() == "alberta" :
orderTotal = orderTotal + orderTotal * GST
elif province.lower() == "ontario" or province.lower() == "new brunswick" or province.lower() == "nova scotia" :
orderTotal = orderTotal + orderTotal * HST
else :
orderTotal = orderTotal + orderTotal * PST + orderTotal * GST
#if they are not from Canada there is no tax, so the amount they entered is the total with tax
#and no modification to orderTotal is required
#Now display the total with taxes to the user, don't forget to format the number
print("Your total including taxes comes to $%.2f " % orderTotal)
| apache-2.0 |
gmimano/commcaretest | corehq/apps/hqcase/tests/test_bugs.py | 2 | 2194 | import uuid
from dimagi.utils.parsing import json_format_datetime
from django.contrib.auth.models import User
from django.test import TestCase
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.util import post_case_blocks
from casexml.apps.case.xml import V2
from casexml.apps.phone.restore import RestoreConfig
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.users.models import CommCareUser, CouchUser
from corehq.apps.users.util import format_username
class OtaRestoreBugTest(TestCase):
def setUp(self):
for user in CouchUser.all():
user.delete()
User.objects.all().delete()
def testCrossDomainAssignments(self):
good_domain = 'main-domain'
create_domain(good_domain)
bad_domain = 'bad-domain'
create_domain(bad_domain)
user = CommCareUser.create(good_domain, format_username('user', good_domain), 'secret')
def _submit_case(domain):
case_id = uuid.uuid4().hex
case_block = CaseBlock(
create=True,
case_id=case_id,
case_name='donald',
case_type='duck',
user_id=user._id,
owner_id=user._id,
version=V2,
).as_xml(format_datetime=json_format_datetime)
post_case_blocks([case_block], {'domain': domain})
return CommCareCase.get(case_id)
good_case = _submit_case(good_domain)
# create a case in the "wrong" domain
# in the future this should actually fail completely
bad_case = _submit_case(bad_domain)
self.assertEqual(good_domain, good_case.domain)
self.assertEqual(bad_domain, bad_case.domain)
for case in (good_case, bad_case):
self.assertEqual(user._id, case.user_id)
self.assertEqual(user._id, case.owner_id)
restore_config = RestoreConfig(
user.to_casexml_user(), version=V2,
)
payload = restore_config.get_payload()
self.assertTrue(good_case._id in payload)
self.assertFalse(bad_case._id in payload)
| bsd-3-clause |
DreamerKing/LightweightHtmlWidgets | publish-rc/v1.0/files/Ipy.Lib/io.py | 191 | 3624 | """The io module provides the Python interfaces to stream handling. The
builtin open function is defined in this module.
At the top of the I/O hierarchy is the abstract base class IOBase. It
defines the basic interface to a stream. Note, however, that there is no
separation between reading and writing to streams; implementations are
allowed to throw an IOError if they do not support a given operation.
Extending IOBase is RawIOBase which deals simply with the reading and
writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide
an interface to OS files.
BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its
subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer
streams that are readable, writable, and both respectively.
BufferedRandom provides a buffered interface to random access
streams. BytesIO is a simple stream of in-memory bytes.
Another IOBase subclass, TextIOBase, deals with the encoding and decoding
of streams into text. TextIOWrapper, which extends it, is a buffered text
interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO
is an in-memory stream for text.
Argument names are not part of the specification, and only the arguments
of open() are intended to be used as keyword arguments.
data:
DEFAULT_BUFFER_SIZE
An int containing the default buffer size used by the module's buffered
I/O classes. open() uses the file's blksize (as obtained by os.stat) if
possible.
"""
# New I/O library conforming to PEP 3116.
# XXX edge cases when switching between reading/writing
# XXX need to support 1 meaning line-buffered
# XXX whenever an argument is None, use the default value
# XXX read/write ops should check readable/writable
# XXX buffered readinto should work with arbitrary buffer objects
# XXX use incremental encoder for text output, at least for UTF-16 and UTF-8-SIG
# XXX check writable, readable and seekable in appropriate places
__author__ = ("Guido van Rossum <guido@python.org>, "
"Mike Verdone <mike.verdone@gmail.com>, "
"Mark Russell <mark.russell@zen.co.uk>, "
"Antoine Pitrou <solipsis@pitrou.net>, "
"Amaury Forgeot d'Arc <amauryfa@gmail.com>, "
"Benjamin Peterson <benjamin@python.org>")
__all__ = ["BlockingIOError", "open", "IOBase", "RawIOBase", "FileIO",
"BytesIO", "StringIO", "BufferedIOBase",
"BufferedReader", "BufferedWriter", "BufferedRWPair",
"BufferedRandom", "TextIOBase", "TextIOWrapper",
"UnsupportedOperation", "SEEK_SET", "SEEK_CUR", "SEEK_END"]
import _io
import abc
from _io import (DEFAULT_BUFFER_SIZE, BlockingIOError, UnsupportedOperation,
open, FileIO, BytesIO, StringIO, BufferedReader,
BufferedWriter, BufferedRWPair, BufferedRandom,
IncrementalNewlineDecoder, TextIOWrapper)
OpenWrapper = _io.open # for compatibility with _pyio
# for seek()
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
# Declaring ABCs in C is tricky so we do it here.
# Method descriptions and default implementations are inherited from the C
# version however.
class IOBase(_io._IOBase):
__metaclass__ = abc.ABCMeta
class RawIOBase(_io._RawIOBase, IOBase):
pass
class BufferedIOBase(_io._BufferedIOBase, IOBase):
pass
class TextIOBase(_io._TextIOBase, IOBase):
pass
RawIOBase.register(FileIO)
for klass in (BytesIO, BufferedReader, BufferedWriter, BufferedRandom,
BufferedRWPair):
BufferedIOBase.register(klass)
for klass in (StringIO, TextIOWrapper):
TextIOBase.register(klass)
del klass
| gpl-3.0 |
ramusus/django-facebook-ads | facebook_ads/migrations/0006_auto__add_targeting__add_field_adgroup_targeting.py | 1 | 11777 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Targeting'
db.create_table('facebook_ads_targeting', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('countries', self.gf('facebook_ads.fields.CommaSeparatedCharField')(max_length=100, blank=True)),
('cities', self.gf('django.db.models.fields.CharField')(max_length=100)),
('zips', self.gf('django.db.models.fields.CharField')(max_length=100)),
('regions', self.gf('django.db.models.fields.CharField')(max_length=100)),
('radius', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
('locales', self.gf('django.db.models.fields.CharField')(max_length=100)),
('keywords', self.gf('facebook_ads.fields.CommaSeparatedCharField')(max_length=100)),
('user_adclusters', self.gf('django.db.models.fields.CharField')(max_length=100)),
('interested_in', self.gf('django.db.models.fields.CharField')(max_length=100)),
('genders', self.gf('facebook_ads.fields.CommaSeparatedCharField')(max_length=5, null=True, blank=True)),
('age_min', self.gf('facebook_ads.fields.PositiveSmallIntegerRangeField')(null=True, blank=True)),
('age_max', self.gf('facebook_ads.fields.PositiveSmallIntegerRangeField')(null=True, blank=True)),
('broad_age', self.gf('django.db.models.fields.SmallIntegerField')(default=0)),
('relationship_statuses', self.gf('django.db.models.fields.CommaSeparatedIntegerField')(max_length=100, null=True, blank=True)),
('user_event', self.gf('django.db.models.fields.CommaSeparatedIntegerField')(max_length=100)),
('connections', self.gf('django.db.models.fields.CommaSeparatedIntegerField')(max_length=100)),
('excluded_connections', self.gf('django.db.models.fields.CommaSeparatedIntegerField')(max_length=100)),
('friends_of_connections', self.gf('django.db.models.fields.CommaSeparatedIntegerField')(max_length=100)),
('college_networks', self.gf('django.db.models.fields.CharField')(max_length=100)),
('work_networks', self.gf('django.db.models.fields.CharField')(max_length=100)),
('education_statuses', self.gf('facebook_ads.fields.CommaSeparatedCharField')(max_length=100)),
('college_years', self.gf('django.db.models.fields.CommaSeparatedIntegerField')(max_length=100)),
('college_majors', self.gf('facebook_ads.fields.CommaSeparatedCharField')(max_length=100)),
))
db.send_create_signal('facebook_ads', ['Targeting'])
# Adding field 'AdGroup.targeting'
db.add_column('facebook_ads_adgroup', 'targeting', self.gf('django.db.models.fields.related.OneToOneField')(default=0, to=orm['facebook_ads.Targeting'], unique=True), keep_default=False)
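# For reference (a South convention, not specific to this project): self.gf()
# merely resolves the dotted path to the field class, so the add_column above
# is roughly equivalent to attaching
#   models.OneToOneField(Targeting, unique=True, default=0)
# to AdGroup at the database level.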
def backwards(self, orm):
# Deleting model 'Targeting'
db.delete_table('facebook_ads_targeting')
# Deleting field 'AdGroup.targeting'
db.delete_column('facebook_ads_adgroup', 'targeting_id')
models = {
'facebook_ads.adaccount': {
'Meta': {'object_name': 'AdAccount'},
'account_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'account_status': ('django.db.models.fields.SmallIntegerField', [], {}),
'business_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'business_country_code': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'business_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'business_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'business_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'business_street2': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'business_zip': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'daily_spend_limit': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_personal': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'timezone_id': ('django.db.models.fields.IntegerField', [], {}),
'timezone_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'vat_status': ('django.db.models.fields.IntegerField', [], {})
},
'facebook_ads.adcampaign': {
'Meta': {'object_name': 'AdCampaign'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['facebook_ads.AdAccount']"}),
'campaign_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'campaign_status': ('django.db.models.fields.SmallIntegerField', [], {}),
'daily_budget': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'daily_imps': ('django.db.models.fields.IntegerField', [], {}),
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lifetime_budget': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {}),
'updated_time': ('django.db.models.fields.DateTimeField', [], {})
},
'facebook_ads.adcreative': {
'Meta': {'object_name': 'AdCreative'},
'auto_update': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '135'}),
'count_current_adgroups': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'creative_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_hash': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'image_url': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'link_url': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'object_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'preview_url': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'related_fan_page': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'run_status': ('django.db.models.fields.SmallIntegerField', [], {}),
'story_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'type': ('django.db.models.fields.IntegerField', [], {}),
'view_tag': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'facebook_ads.adgroup': {
'Meta': {'object_name': 'AdGroup'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['facebook_ads.AdAccount']"}),
'ad_id': ('django.db.models.fields.BigIntegerField', [], {}),
'ad_status': ('django.db.models.fields.IntegerField', [], {}),
'adgroup_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'adgroup_status': ('django.db.models.fields.IntegerField', [], {}),
'bid_type': ('django.db.models.fields.IntegerField', [], {}),
'campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['facebook_ads.AdCampaign']"}),
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_bid': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'targeting': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['facebook_ads.Targeting']", 'unique': 'True'}),
'updated_time': ('django.db.models.fields.DateTimeField', [], {})
},
'facebook_ads.targeting': {
'Meta': {'object_name': 'Targeting'},
'age_max': ('facebook_ads.fields.PositiveSmallIntegerRangeField', [], {'null': 'True', 'blank': 'True'}),
'age_min': ('facebook_ads.fields.PositiveSmallIntegerRangeField', [], {'null': 'True', 'blank': 'True'}),
'broad_age': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'cities': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'college_majors': ('facebook_ads.fields.CommaSeparatedCharField', [], {'max_length': '100'}),
'college_networks': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'college_years': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100'}),
'connections': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100'}),
'countries': ('facebook_ads.fields.CommaSeparatedCharField', [], {'max_length': '100', 'blank': 'True'}),
'education_statuses': ('facebook_ads.fields.CommaSeparatedCharField', [], {'max_length': '100'}),
'excluded_connections': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100'}),
'friends_of_connections': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100'}),
'genders': ('facebook_ads.fields.CommaSeparatedCharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interested_in': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'keywords': ('facebook_ads.fields.CommaSeparatedCharField', [], {'max_length': '100'}),
'locales': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'radius': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'regions': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'relationship_statuses': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'user_adclusters': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user_event': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100'}),
'work_networks': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'zips': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['facebook_ads']
| bsd-3-clause |
ltilve/chromium | tools/perf/page_sets/tough_image_cases.py | 20 | 1041 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class ToughImageCasesPage(page_module.Page):
def __init__(self, url, page_set):
super(ToughImageCasesPage, self).__init__(url=url, page_set=page_set)
self.user_agent_type = 'desktop'
class ToughImageCasesPageSet(page_set_module.PageSet):
""" A collection of image-heavy sites. """
def __init__(self):
super(ToughImageCasesPageSet, self).__init__(
user_agent_type='desktop')
urls_list = [
'http://www.free-pictures-photos.com/aviation/airplane-306.jpg',
('http://upload.wikimedia.org/wikipedia/commons/c/cb/'
'General_history%2C_Alaska_Yukon_Pacific_Exposition%'
'2C_fully_illustrated_-_meet_me_in_Seattle_1909_-_Page_78.jpg')
]
for url in urls_list:
self.AddUserStory(ToughImageCasesPage(url, self))
| bsd-3-clause |
solintegra/addons | account/wizard/account_report_common.py | 342 | 10353 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
from openerp.osv.orm import setup_modifiers
from openerp.tools.translate import _
class account_common_report(osv.osv_memory):
_name = "account.common.report"
_description = "Account Common Report"
def onchange_chart_id(self, cr, uid, ids, chart_account_id=False, context=None):
res = {}
if chart_account_id:
company_id = self.pool.get('account.account').browse(cr, uid, chart_account_id, context=context).company_id.id
now = time.strftime('%Y-%m-%d')
domain = [('company_id', '=', company_id), ('date_start', '<', now), ('date_stop', '>', now)]
fiscalyears = self.pool.get('account.fiscalyear').search(cr, uid, domain, limit=1)
res['value'] = {'company_id': company_id, 'fiscalyear_id': fiscalyears and fiscalyears[0] or False}
return res
_columns = {
'chart_account_id': fields.many2one('account.account', 'Chart of Account', help='Select Charts of Accounts', required=True, domain = [('parent_id','=',False)]),
'company_id': fields.related('chart_account_id', 'company_id', type='many2one', relation='res.company', string='Company', readonly=True),
'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', help='Keep empty for all open fiscal year'),
'filter': fields.selection([('filter_no', 'No Filters'), ('filter_date', 'Date'), ('filter_period', 'Periods')], "Filter by", required=True),
'period_from': fields.many2one('account.period', 'Start Period'),
'period_to': fields.many2one('account.period', 'End Period'),
'journal_ids': fields.many2many('account.journal', string='Journals', required=True),
'date_from': fields.date("Start Date"),
'date_to': fields.date("End Date"),
'target_move': fields.selection([('posted', 'All Posted Entries'),
('all', 'All Entries'),
], 'Target Moves', required=True),
}
def _check_company_id(self, cr, uid, ids, context=None):
for wiz in self.browse(cr, uid, ids, context=context):
company_id = wiz.company_id.id
if wiz.fiscalyear_id and company_id != wiz.fiscalyear_id.company_id.id:
return False
if wiz.period_from and company_id != wiz.period_from.company_id.id:
return False
if wiz.period_to and company_id != wiz.period_to.company_id.id:
return False
return True
_constraints = [
(_check_company_id, 'The fiscalyear, periods or chart of account chosen have to belong to the same company.', ['chart_account_id','fiscalyear_id','period_from','period_to']),
]
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
if context is None:context = {}
res = super(account_common_report, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
if context.get('active_model', False) == 'account.account':
doc = etree.XML(res['arch'])
nodes = doc.xpath("//field[@name='chart_account_id']")
for node in nodes:
node.set('readonly', '1')
node.set('help', 'If you print the report from Account list/form view it will not consider Charts of account')
setup_modifiers(node, res['fields']['chart_account_id'])
res['arch'] = etree.tostring(doc)
return res
def onchange_filter(self, cr, uid, ids, filter='filter_no', fiscalyear_id=False, context=None):
res = {'value': {}}
if filter == 'filter_no':
res['value'] = {'period_from': False, 'period_to': False, 'date_from': False ,'date_to': False}
if filter == 'filter_date':
res['value'] = {'period_from': False, 'period_to': False, 'date_from': time.strftime('%Y-01-01'), 'date_to': time.strftime('%Y-%m-%d')}
if filter == 'filter_period' and fiscalyear_id:
start_period = end_period = False
cr.execute('''
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
AND p.special = false
ORDER BY p.date_start ASC, p.special ASC
LIMIT 1) AS period_start
UNION ALL
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
AND p.date_start < NOW()
AND p.special = false
ORDER BY p.date_stop DESC
LIMIT 1) AS period_stop''', (fiscalyear_id, fiscalyear_id))
periods = [i[0] for i in cr.fetchall()]
if periods and len(periods) > 1:
start_period = periods[0]
end_period = periods[1]
res['value'] = {'period_from': start_period, 'period_to': end_period, 'date_from': False, 'date_to': False}
return res
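# Note on the query above: the two UNION ALL branches fetch, respectively, the
# earliest non-special period of the fiscal year and the latest one that has
# already started, so periods[0]/periods[1] map onto (period_from, period_to).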
def _get_account(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
accounts = self.pool.get('account.account').search(cr, uid, [('parent_id', '=', False), ('company_id', '=', user.company_id.id)], limit=1)
return accounts and accounts[0] or False
def _get_fiscalyear(self, cr, uid, context=None):
if context is None:
context = {}
now = time.strftime('%Y-%m-%d')
company_id = False
ids = context.get('active_ids', [])
if ids and context.get('active_model') == 'account.account':
company_id = self.pool.get('account.account').browse(cr, uid, ids[0], context=context).company_id.id
else: # use current company id
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
domain = [('company_id', '=', company_id), ('date_start', '<', now), ('date_stop', '>', now)]
fiscalyears = self.pool.get('account.fiscalyear').search(cr, uid, domain, limit=1)
return fiscalyears and fiscalyears[0] or False
def _get_all_journal(self, cr, uid, context=None):
return self.pool.get('account.journal').search(cr, uid ,[])
_defaults = {
'fiscalyear_id': _get_fiscalyear,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.common.report',context=c),
'journal_ids': _get_all_journal,
'filter': 'filter_no',
'chart_account_id': _get_account,
'target_move': 'posted',
}
def _build_contexts(self, cr, uid, ids, data, context=None):
if context is None:
context = {}
result = {}
result['fiscalyear'] = 'fiscalyear_id' in data['form'] and data['form']['fiscalyear_id'] or False
result['journal_ids'] = 'journal_ids' in data['form'] and data['form']['journal_ids'] or False
result['chart_account_id'] = 'chart_account_id' in data['form'] and data['form']['chart_account_id'] or False
result['state'] = 'target_move' in data['form'] and data['form']['target_move'] or ''
if data['form']['filter'] == 'filter_date':
result['date_from'] = data['form']['date_from']
result['date_to'] = data['form']['date_to']
elif data['form']['filter'] == 'filter_period':
if not data['form']['period_from'] or not data['form']['period_to']:
raise osv.except_osv(_('Error!'),_('Select a starting and an ending period.'))
result['period_from'] = data['form']['period_from']
result['period_to'] = data['form']['period_to']
return result
def _print_report(self, cr, uid, ids, data, context=None):
raise osv.except_osv(_('Error!'), _('Not implemented.'))
def check_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = {}
data['ids'] = context.get('active_ids', [])
data['model'] = context.get('active_model', 'ir.ui.menu')
data['form'] = self.read(cr, uid, ids, ['date_from', 'date_to', 'fiscalyear_id', 'journal_ids', 'period_from', 'period_to', 'filter', 'chart_account_id', 'target_move'], context=context)[0]
for field in ['fiscalyear_id', 'chart_account_id', 'period_from', 'period_to']:
if isinstance(data['form'][field], tuple):
data['form'][field] = data['form'][field][0]
used_context = self._build_contexts(cr, uid, ids, data, context=context)
data['form']['periods'] = used_context.get('periods', False) and used_context['periods'] or []
data['form']['used_context'] = dict(used_context, lang=context.get('lang', 'en_US'))
return self._print_report(cr, uid, ids, data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Rover-Yu/ali_kernel | scripts/gtp/add-ons/hotcode.py | 4 | 23660 | #!/usr/bin/python
# This script is used to find the hotcode in some tasks
# GPL
# Copyright(C) Hui Zhu (teawater@gmail.com), 2012
import gdb
import tempfile
import os
import signal
import sys
import traceback
import time
class hotcode_list:
def __init__(self):
self.function_list = {}
self.file_list = {}
self.line_list = {}
self.function_list_line = {}
self.file_list_line = {}
self.num = 0
class task:
def __init__(self, fid, user_dir):
self.fid = fid
self.user_dir = user_dir
self.kernel = hotcode_list()
self.user = hotcode_list()
debug_dir = "/usr/lib/debug/"
task_list = {}
no_task = False
kernel_hotcode_list = hotcode_list()
output_html = True
output_html_file = "./hotcode.html"
show_line_number_default = 20
show_line_number = show_line_number_default
#--------------------------------------------------------------------------------------------------
#For signal handler
from operator import itemgetter
def dict_sort(d, reverse=True):
#proposed in PEP 265, using the itemgetter
#default to reverse=True so callers get the hottest entries first
return sorted(d.iteritems(), key=itemgetter(1), reverse=reverse)
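# A small illustrative example (hypothetical data, not from a real trace):
#   >>> dict_sort({'foo.c:10': 3, 'bar.c:7': 9})
#   [('bar.c:7', 9), ('foo.c:10', 3)]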
def hotcode_show_code_list(string, code_list):
if len(code_list) > 0:
print "\t", string
i = 1
for c in dict_sort(code_list):
print "\t", c[0], "\t\t", c[1]
i += 1
if i > show_line_number:
break
print
def hotcode_show():
if no_task:
hotcode_show_code_list("Hotest function", kernel_hotcode_list.function_list)
hotcode_show_code_list("Hotest file", kernel_hotcode_list.file_list)
hotcode_show_code_list("Hotest line", kernel_hotcode_list.line_list)
else:
for pid in task_list:
print "task", str(pid), task_list[pid].user_dir
print "Kernel hotcode:"
hotcode_show_code_list("Hotest function", task_list[pid].kernel.function_list)
hotcode_show_code_list("Hotest file", task_list[pid].kernel.file_list)
hotcode_show_code_list("Hotest line", task_list[pid].kernel.line_list)
print "User hotcode:"
hotcode_show_code_list("Hotest function", task_list[pid].user.function_list)
hotcode_show_code_list("Hotest file", task_list[pid].user.file_list)
hotcode_show_code_list("Hotest line", task_list[pid].user.line_list)
print
html_id = 0
def hotcode_list_to_output_html_fd_1(llist, tlist, fd):
global html_id
i = 1
for c in dict_sort(llist):
if tlist != None:
fd.write('''<tr><td onclick='sh("'''+str(html_id)+'''");'>'''+str(c[0])+'''</td><td style=" width: 10%; text-align: right;">'''+str(c[1])+'''</td></tr>''')
fd.write('''<tr><td style="text-align: center; display: none;" colspan="2" id="''' + str(html_id) + '''"><table style="width: 100%;" border="1" cellpadding="0" cellspacing="0"><tbody>''')
for d in dict_sort(tlist[c[0]]):
fd.write("<tr><td>" + str(d[0]) + '''</td><td style=" width: 10%; text-align: right;">''' + str(d[1]) + "</td></tr>")
fd.write('</tbody></table>')
else:
fd.write('<tr><td>'+str(c[0])+'''</td><td style=" width: 10%; text-align: right;">'''+str(c[1])+'''</td></tr>''')
i += 1
html_id += 1
if i > show_line_number:
break
def hotcode_list_to_output_html_fd(hlist, fd):
global html_id
fd.write('''<tr><td style="text-align: center;" colspan="2">Hot functions list</td></tr>''')
hotcode_list_to_output_html_fd_1(hlist.function_list, hlist.function_list_line, fd)
fd.write('''<tr><td style="text-align: center;" colspan="2">Hot file list</td></tr>''')
hotcode_list_to_output_html_fd_1(hlist.file_list, hlist.file_list_line, fd)
fd.write('''<tr><td style="text-align: center;" colspan="2">Hot line list</td></tr>''')
hotcode_list_to_output_html_fd_1(hlist.line_list, None, fd)
def hotcode_to_output_html_file():
global html_id
html_id = 0
fd = open(output_html_file, "w")
fd.write('''
<html><head><title>Hotcode</title>
<script>
<!--
function sh(id)
{
if(document.getElementById(id).style.display=='none') {
document.getElementById(id).style.display='block';
}
else {
document.getElementById(id).style.display='none';
}
}
-->
</script></head>
<body>
<div style="text-align: center;">This file is generated by KGTP (<a href="http://code.google.com/p/kgtp/">http://code.google.com/p/kgtp/</a>) in ''' + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + '''.</div>
<div style="text-align: center;">Click the function name or file name to see the detailed info.</div>''')
if show_line_number > 0:
fd.write('''<div style="text-align: center;">Just show top 20 of each list.</div>''')
if no_task:
fd.write('<br><br>')
fd.write('''<table style="margin-left: auto; margin-right: auto;" border="1" cellpadding="2" cellspacing="0"><tbody>''')
fd.write('''<tr><td><strong>Kernel space hotcode list</strong></td><td style=" width: 10%; text-align: right;">'''+str(kernel_hotcode_list.num)+'''</td></tr>''')
hotcode_list_to_output_html_fd(kernel_hotcode_list, fd)
fd.write('</tbody></table>')
else:
for pid in task_list:
fd.write('<br><br>')
fd.write('''<table style="margin-left: auto; margin-right: auto;" border="1" cellpadding="2" cellspacing="0"><tbody>''')
fd.write('''<tr><td style="text-align: center;" colspan="2">pid:''' + str(pid) + " " + task_list[pid].user_dir + "</td></tr>")
if trace_user:
fd.write('''<tr><td style="text-align: center;" colspan="2">User space hotcode list </td></tr>''')
fd.write('''<tr><td><strong>User space hotcode list</strong></td><td style=" width: 10%; text-align: right;">'''+str(task_list[pid].user.num)+'''</td></tr>''')
hotcode_list_to_output_html_fd(task_list[pid].user, fd)
if trace_kernel:
if trace_user:
fd.write('''<tr><td style="text-align: center;" colspan="2"></td></tr>''')
fd.write('''<tr><td><strong>Kernel space hotcode list</strong></td><td style=" width: 10%; text-align: right;">'''+str(task_list[pid].kernel.num)+'''</td></tr>''')
hotcode_list_to_output_html_fd(task_list[pid].kernel, fd)
fd.write('</tbody></table>')
fd.write('</body></html>')
fd.close()
print "Save", html_id, "entries."
def sigint_handler(num, e):
if output_html:
hotcode_to_output_html_file()
else:
hotcode_show()
try:
s = raw_input('Continue? [(y)es], (n)o:')
except:
s = 'y'
finally:
if s[0:1] != 'n' and s[0:1] != 'N':
return;
#gdb.execute("inferior 1")
try:
gdb.execute("tfind -1", True, False)
gdb.execute("target remote /sys/kernel/debug/gtp", True, False)
gdb.execute("set disconnected-tracing off", True, False)
except:
print "Try to stop GTP got error, please use command \"sudo rmmod gtp.ko\" stop it."
exit(1);
#--------------------------------------------------------------------------------------------------
#init
def add_inferior():
fid = gdb.execute("add-inferior", False, True)
if fid.find("Added inferior ") != 0:
return -1
fid = int(fid[len("Added inferior "):])
return fid
gdb.execute("set target-async on", True, False)
gdb.execute("set pagination off", True, False)
gdb.execute("set confirm off", True, False)
gdb.execute("set circular-trace-buffer on", True, False)
gdb.execute("set debug-file-directory "+debug_dir, True, False)
try:
gdb.execute("kill", True, False)
except:
pass
trace_user = True
trace_kernel = True
while 1:
tmp = "both"
try:
tmp = raw_input('Which part of the code do you want to trace? [(b)oth], (u)ser, (k)ernel:')
except:
continue
if tmp[0:1] == 'U' or tmp[0:1] == 'u':
trace_kernel = False
elif tmp[0:1] == 'K' or tmp[0:1] == 'k':
trace_user = False
break
#Get the pids of the tasks we want to trace
print("Please input the pid of each task that you want to trace - one per line.")
print("If no task is set, all code in the Linux kernel will be traced.")
while 1:
pid = -1
try:
pid = input('task pid (use empty to stop pid input):')
except:
pass
if pid <= 0:
break
if pid in task_list:
print("This pid already in the list.")
continue
user_dir = ""
fid = 0
if trace_user:
try:
orig_user_dir = user_dir = os.path.realpath("/proc/"+str(pid)+"/exe")
except:
#maybe this is the kernel task
print "Cannot get the user code info of this pid, will not parse the user level code symbol"
task_list[pid] = task(fid, user_dir)
continue
if os.path.exists(debug_dir+user_dir):
user_dir = debug_dir+user_dir
while 1:
tmp = ""
try:
tmp = raw_input('Please input the debug binary of task if you want to change it ['+user_dir+']:')
except:
continue
if tmp != "":
user_dir = os.path.realpath(tmp)
break
if not os.path.exists(user_dir):
print "Cannot get the user code info of this pid, will not parse the user level code symbol"
task_list[pid] = task(fid, user_dir)
continue
print "Use "+user_dir+" as debug binary."
fid = add_inferior()
if fid < 0:
print "Try to load task got error."
continue
gdb.execute("inferior "+str(fid))
pfile = open("/proc/"+str(pid)+"/maps", "r")
tmplist = pfile.read().split(os.linesep)
pfile.close()
for c in tmplist:
c_list = c.split(" ")
filename = c_list[-1].strip()
if filename != orig_user_dir and os.path.exists(filename) and len(c_list) > 2 and len(c_list[1]) > 3 and c_list[1][2] == 'x':
addr = "0x"+c_list[0][0:c.find('-')]
gdb.execute("file "+filename)
info_files = gdb.execute("info files", True, True)
info_files_list = info_files.split(os.linesep)
text_offset = "0x0"
for line in info_files_list:
line_list = line.split(" is ")
if len(line_list) == 2 and line_list[1].strip() == ".text":
line_list[0] = line_list[0].strip()
text_offset = line_list[0][0:line_list[0].find(' - ')]
print ("add-symbol-file "+filename+" ("+addr+"+"+text_offset+")")
gdb.execute("add-symbol-file "+filename+" ("+addr+"+"+text_offset+")")
gdb.execute("file "+user_dir)
gdb.execute("inferior 1")
task_list[pid] = task(fid, user_dir)
def get_addr_range_list(fun):
buf = gdb.execute("info line "+fun, False, True)
line_list = buf.split(os.linesep)
ret = []
begin = -1
end = -1
for line in line_list:
addr_begin = line.find("starts at address ")
if addr_begin >= 0:
line = line[addr_begin + len("starts at address "):]
addr_end = line.find(" <"+fun)
if addr_end >= 0:
begin = int(line[:addr_end], 0)
line = line[addr_end + len(" <"+fun):]
addr_begin = line.find("ends at ")
if addr_begin >= 0:
line = line[addr_begin + len("ends at "):]
addr_end = line.find(" <"+fun)
if addr_end > 0:
end = int(line[:addr_end], 0)
if begin != -1:
ret.append([begin, end])
begin = -1
end = -1
if len(ret) > 0:
buf = gdb.execute("disassemble "+fun, False, True)
line_list = buf.split(os.linesep)
line_list.reverse()
end = 0
for line in line_list:
addr_begin = line.find("0x")
if addr_begin >= 0:
line = line[addr_begin:]
addr_end = line.find(" <+")
if addr_end > 0:
end = int(line[:addr_end], 0) + 1
break
if end != 0:
offset = 0
for c in ret:
if c[1] < end:
if offset == 0 or offset > (end - c[1]):
offset = end - c[1]
for c in ret:
c[1] += offset
return ret
def get_ignore_str(function):
ret = ""
try:
s = raw_input('Do you want to ignore function \"'+function+'\"? [(y)es], (n)o:')
except:
s = 'y'
if s[0:1] != 'n' and s[0:1] != 'N':
r_list = get_addr_range_list(function)
for r in r_list:
if ret != "":
ret += " && "
else:
ret += "&& ("
#(regs->ip < r[0] || regs->ip > r[1])
ret += "($p_ip < "+str(r[0])+" || $p_ip > "+str(r[1])+")"
if ret != "":
ret += ")"
return ret
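# A sketch of the condition string this produces (addresses are hypothetical):
# for a single ignored range [0x1000, 0x1080] the result is
#   && (($p_ip < 4096 || $p_ip > 4224))
# which is appended to the tracepoint condition, so samples whose saved
# instruction pointer falls inside the ignored function are dropped.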
if len(task_list) == 0:
trace_user = False
trace_kernel = True
no_task = True
try:
s = raw_input('How do you want to output the hotcode info on ctrl-c? [(h)tml], (t)ty:')
except:
s = 'h'
if s[0:1] == 't' or s[0:1] == 'T':
output_html = False
else:
output_html = True
if output_html:
while 1:
try:
s = raw_input('Which file do you want to save the html output to? [' + output_html_file + ']:')
if os.path.exists(s):
if os.path.isfile(s):
s = raw_input('File ' + s + ' exists, do you want to overwrite it? (y)es, [(n)o]:')
if s[0:1] != 'y' and s[0:1] != 'Y':
continue
else:
print 'File ' + s + ' exists, but it cannot be written. Please choose another file.'
continue
except:
continue
if len(s) > 0:
output_html_file = s
break
try:
show_line_number = input('Show line number (0 means all)? ['+str(show_line_number)+']:')
except:
show_line_number = show_line_number_default
#Set tracepoint
gdb.execute("target remote /sys/kernel/debug/gtp", True, False)
try:
gdb.execute("tstop", True, False)
gdb.execute("delete", True, False)
except:
pass
def getmod():
#following code is get from ../getmod.py
#use the code directly because sys.argv = [''] inside GDB
def format_file(name):
tmp = ""
for c in name:
if c == "_":
c = "-"
tmp += c
return tmp
#Check if the target is available
if str(gdb.selected_thread()) == "None":
raise gdb.error("Please connect to Linux Kernel before use the script.")
#Output the help
print "Use GDB command \"set $mod_search_dir=dir\" to set an directory for search the modules."
ignore_gtp_ko = gdb.parse_and_eval("$ignore_gtp_ko")
if ignore_gtp_ko.type.code == gdb.TYPE_CODE_INT:
ignore_gtp_ko = int(ignore_gtp_ko)
else:
ignore_gtp_ko = 1
#Get the mod_search_dir
mod_search_dir_list = []
#Get dir from $mod_search_dir
tmp_dir = gdb.parse_and_eval("$mod_search_dir")
if tmp_dir.type.code == gdb.TYPE_CODE_ARRAY:
tmp_dir = str(tmp_dir)
tmp_dir = tmp_dir[1:len(tmp_dir)]
tmp_dir = tmp_dir[0:tmp_dir.index("\"")]
mod_search_dir_list.append(tmp_dir)
#Get dir that same with current vmlinux
tmp_dir = str(gdb.execute("info files", False, True))
tmp_dir = tmp_dir[tmp_dir.index("Symbols from \"")+len("Symbols from \""):len(tmp_dir)]
tmp_dir = tmp_dir[0:tmp_dir.index("\"")]
tmp_dir = tmp_dir[0:tmp_dir.rindex("/")]
mod_search_dir_list.append(tmp_dir)
#Get the dir of current Kernel
tmp_dir = "/lib/modules/" + str(os.uname()[2])
if os.path.isdir(tmp_dir):
mod_search_dir_list.append(tmp_dir)
#Let user choice dir
mod_search_dir = ""
while mod_search_dir == "":
for i in range(0, len(mod_search_dir_list)):
print str(i)+". "+mod_search_dir_list[i]
try:
s = input('Select a directory to search for the modules [0]:')
except SyntaxError:
s = 0
except:
continue
if s < 0 or s >= len(mod_search_dir_list):
continue
mod_search_dir = mod_search_dir_list[s]
mod_list_offset = long(gdb.parse_and_eval("((size_t) &(((struct module *)0)->list))"))
mod_list = long(gdb.parse_and_eval("(&modules)"))
mod_list_current = mod_list
while 1:
mod_list_current = long(gdb.parse_and_eval("((struct list_head *) "+str(mod_list_current)+")->next"))
#check if we need to break the loop
if mod_list == mod_list_current:
break
mod = mod_list_current - mod_list_offset
#get mod_name
mod_name = str(gdb.parse_and_eval("((struct module *)"+str(mod)+")->name"))
mod_name = mod_name[mod_name.index("\"")+1:len(mod_name)]
mod_name = mod_name[0:mod_name.index("\"")]
if mod_name == "fglrx":
continue
mod_name += ".ko"
mod_name = format_file(mod_name)
#get mod_dir_name
mod_dir_name = ""
for root, dirs, files in os.walk(mod_search_dir):
for afile in files:
tmp_file = format_file(afile)
if tmp_file == mod_name:
mod_dir_name = os.path.join(root,afile)
break
if mod_dir_name != "":
break
command = " "
#Add module_core to command
command += str(gdb.parse_and_eval("((struct module *)"+str(mod)+")->module_core"))
#Add each sect_attrs->attrs to command
#get nsections
nsections = int(gdb.parse_and_eval("((struct module *)"+str(mod)+")->sect_attrs->nsections"))
sect_attrs = long(gdb.parse_and_eval("(u64)((struct module *)"+str(mod)+")->sect_attrs"))
for i in range(0, nsections):
command += " -s"
tmp = str(gdb.parse_and_eval("((struct module_sect_attrs *)"+str(sect_attrs)+")->attrs["+str(i)+"].name"))
tmp = tmp[tmp.index("\"")+1:len(tmp)]
tmp = tmp[0:tmp.index("\"")]
command += " "+tmp
tmp = str(gdb.parse_and_eval("((struct module_sect_attrs *)"+str(sect_attrs)+")->attrs["+str(i)+"].address"))
command += " "+tmp
if mod_dir_name == "":
print "Cannot find out",mod_name,"from directory."
print "Please use following command load the symbols from it:"
print "add-symbol-file some_dir/"+mod_name+command
else:
if ignore_gtp_ko and mod_name == "gtp.ko":
pass
else:
#print "add-symbol-file "+mod_dir_name+command
gdb.execute("add-symbol-file "+mod_dir_name+command, False, False)
if trace_kernel:
try:
s = raw_input('Do you want to load the symbols from LKMs? (y)es, [(n)o]:')
except:
s = 'n'
if s[0:1] == 'y' or s[0:1] == 'Y':
getmod()
cpu_number = int(gdb.parse_and_eval("$cpu_number"))
tempfilename = tempfile.mktemp()
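# Note: the next assignment shadows the 'tempfile' module imported above; this
# is safe only because mktemp() has already been called and the module itself
# is not needed again afterwards.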
tempfile = open(tempfilename, "w")
if no_task:
ignore_str = ""
#Setup first tracepoint
ignore_str += get_ignore_str("arch_local_irq_enable")
ignore_str += get_ignore_str("intel_idle")
# GDB have bug with long conditon so close them
#ignore_str += get_ignore_str("__do_softirq")
#ignore_str += get_ignore_str("_raw_spin_unlock_irqrestore")
tempfile.write("tvariable $p_ip\n")
tempfile.write("tvariable $p_cs\n")
tempfile.write("trace handle_irq\n")
tempfile.write("commands\n")
tempfile.write("teval $p_ip=(u64)regs->ip\n")
tempfile.write("teval $p_cs=(u64)regs->cs\n")
tempfile.write("end\n")
#Setup second tracepoint
tempfile.write("trace handle_irq\n")
cond_str = " (($p_cs & 3) == 0)"
tempfile.write("condition $bpnum "+cond_str+ignore_str+"\n")
tempfile.write("commands\n")
tempfile.write("collect $no_self_trace\n")
tempfile.write("collect $p_ip\n")
tempfile.write("end\n")
tempfile.write("trace smp_apic_timer_interrupt\n")
tempfile.write("commands\n")
tempfile.write("teval $p_ip=(u64)regs->ip\n")
tempfile.write("teval $p_cs=(u64)regs->cs\n")
tempfile.write("end\n")
#Setup second tracepoint
tempfile.write("trace smp_apic_timer_interrupt\n")
cond_str = " (($p_cs & 3) == 0)"
tempfile.write("condition $bpnum "+cond_str+ignore_str+"\n")
tempfile.write("commands\n")
tempfile.write("collect $no_self_trace\n")
tempfile.write("collect $p_ip\n")
tempfile.write("end\n")
else:
pid_str = ""
for pid in task_list:
if pid_str != "":
pid_str += " || "
else:
pid_str += "("
pid_str += "($current_task_pid == "+str(pid)+") "
if pid_str != "":
pid_str += ")"
cond_str = ""
if not trace_user:
if pid_str != "":
cond_str += " && "
cond_str += " ((regs->cs & 3) == 0)"
elif not trace_kernel:
if pid_str != "":
cond_str += "&&"
cond_str += " ((regs->cs & 3) == 3)"
tempfile.write("trace handle_irq\n")
tempfile.write("condition $bpnum "+pid_str+cond_str+"\n")
tempfile.write("commands\n")
tempfile.write("collect regs->ip\n")
if trace_user and trace_kernel:
tempfile.write("collect regs->cs\n")
tempfile.write("collect $current_task_pid\n")
tempfile.write("end\n")
tempfile.write("trace smp_apic_timer_interrupt\n")
tempfile.write("condition $bpnum "+pid_str+cond_str+"\n")
tempfile.write("commands\n")
tempfile.write("collect regs->ip\n")
if trace_user and trace_kernel:
tempfile.write("collect regs->cs\n")
tempfile.write("collect $current_task_pid\n")
tempfile.write("end\n")
tempfile.close()
tempfile = open(tempfilename, "r")
print "Tracepoint command:"
print tempfile.read()
tempfile.close()
gdb.execute("source "+tempfilename, True, False)
os.remove(tempfilename)
gdb.execute("set disconnected-tracing on", True, False)
gdb.execute("tstart")
gdb.execute("kill", True, False)
signal.signal(signal.SIGINT, sigint_handler);
signal.siginterrupt(signal.SIGINT, False);
#Connect to pipe
gdb.execute("target tfile /sys/kernel/debug/gtpframe_pipe")
#--------------------------------------------------------------------------------------------------
#cycle
def add_line_to_list(line, line_list):
if line in line_list:
line_list[line] += 1
else:
line_list[line] = 1
#info[0] line_num, info[1] file_name, info[2] function_name
def add_info_to_code_list(info, code_list):
line = str(info[1]) + ":" + str(info[0])
#function_list
if info[2] in code_list.function_list:
code_list.function_list[info[2]] += 1
else:
code_list.function_list[info[2]] = 1
code_list.function_list_line[info[2]] = {}
add_line_to_list(line, code_list.function_list_line[info[2]])
#file_list
if info[1] in code_list.file_list:
code_list.file_list[info[1]] += 1
else:
code_list.file_list[info[1]] = 1
code_list.file_list_line[info[1]] = {}
add_line_to_list(line, code_list.file_list_line[info[1]])
#line_list
add_line_to_list(line, code_list.line_list)
#num
code_list.num += 1
def task_list_add_line(is_user, pid, info):
global task_list
if no_task:
add_info_to_code_list (info, kernel_hotcode_list)
else:
if is_user:
add_info_to_code_list (info, task_list[pid].user)
else:
add_info_to_code_list (info, task_list[pid].kernel)
def get_line_from_sym(sym):
sym = sym.rstrip(os.linesep)
#Get line_num and file_name
begin = sym.find("Line ")
end = sym.find("\" starts at address")
line_num = None
file_name = None
if begin >= 0 and end > 0 and begin + len("Line ") < end:
line = sym[begin + len("Line "):end]
line = line.split(" of \"")
if len(line) == 2:
line_num = line[0]
file_name = line[1]
sym = sym[end:]
#Get function_name
begin = sym.find("<")
end = sym.find(">")
if begin >= 0 and end > 0 and begin + 1 < end:
function_name = sym[begin + 1:end]
end = function_name.rfind("+")
if end > 0:
function_name = function_name[:end]
sym = gdb.execute("info symbol "+function_name, True, True).rstrip(os.linesep)
begin = sym.rfind(" of ")
if begin > 0:
begin += len(" of ")
function_name = sym[begin:] + ":" + function_name
else:
function_name = None
return (line_num, file_name, function_name)
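# An illustrative (hypothetical) parse: for GDB output such as
#   Line 42 of "foo.c" starts at address 0xdeadbeef <bar+8> and ends at ...
# this returns roughly ('42', 'foo.c', '/path/to/binary:bar'), with None for
# any piece that cannot be recovered from the output.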
if no_task:
while 1:
try:
gdb.execute("tfind 0", False, True)
sym = gdb.execute("info line *($p_ip - 1)", True, True)
line = get_line_from_sym(sym)
task_list_add_line(False, 0, line)
except gdb.error, x:
print("Drop one entry because:")
for file, lineno, function, text in traceback.extract_tb(sys.exc_info()[2]):
print file, lineno, function, text
except gdb.MemoryError, x:
print("Drop one entry because:")
for file, lineno, function, text in traceback.extract_tb(sys.exc_info()[2]):
print file, lineno, function, text
try:
gdb.execute("tfind 1", False, True)
except:
pass
else:
while 1:
try:
gdb.execute("tfind 0", False, True)
is_user = False
pid = long(gdb.parse_and_eval("$current_task_pid"))
if not pid in task_list:
raise gdb.error ("Cannot find inferior for pid "+ str(pid) +", drop one entry.")
if trace_user and (not trace_kernel or long(gdb.parse_and_eval("regs->cs & 3")) == 3):
is_user = True
ip = long(gdb.parse_and_eval("regs->ip - 1"))
gdb.execute("inferior "+str(task_list[pid].fid), False, True)
sym = gdb.execute("info line *"+str(ip), True, True)
else:
sym = gdb.execute("info line *(regs->ip - 1)", True, True)
line = get_line_from_sym(sym)
if is_user:
gdb.execute("inferior 1", False, True)
task_list_add_line(is_user, pid, line)
except gdb.error, x:
print("Drop one entry because:")
for file, lineno, function, text in traceback.extract_tb(sys.exc_info()[2]):
print file, lineno, function, text
try:
gdb.execute("inferior 1", False, True)
except:
pass
except gdb.MemoryError, x:
print("Drop one entry because:")
for file, lineno, function, text in traceback.extract_tb(sys.exc_info()[2]):
print file, lineno, function, text
try:
gdb.execute("inferior 1", False, True)
except:
pass
try:
gdb.execute("tfind 1", False, True)
except:
pass
| gpl-2.0 |
bankonme/www.freedomsponsors.org | djangoproject/bitcoin_frespo/migrations/0002_auto__add_receiveaddress.py | 3 | 1231 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ReceiveAddress'
db.create_table('bitcoin_frespo_receiveaddress', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('address', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
('available', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('bitcoin_frespo', ['ReceiveAddress'])
def backwards(self, orm):
# Deleting model 'ReceiveAddress'
db.delete_table('bitcoin_frespo_receiveaddress')
models = {
'bitcoin_frespo.receiveaddress': {
'Meta': {'object_name': 'ReceiveAddress'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'available': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['bitcoin_frespo']
| agpl-3.0 |
NL66278/odoo | openerp/addons/base/tests/test_expression.py | 260 | 29589 | import unittest2
import openerp
from openerp.osv.expression import get_unaccent_wrapper
from openerp.osv.orm import BaseModel
import openerp.tests.common as common
class test_expression(common.TransactionCase):
def _reinit_mock(self):
self.query_list = list()
def _mock_base_model_where_calc(self, model, *args, **kwargs):
""" Mock build_email to be able to test its values. Store them into
some internal variable for latter processing. """
self.query_list.append(self._base_model_where_calc(model, *args, **kwargs))
# return the lastly stored query, the one the ORM wants to perform
return self.query_list[-1]
def setUp(self):
super(test_expression, self).setUp()
# Mock BaseModel._where_calc(), to be able to proceed to some tests about generated expression
self._reinit_mock()
self._base_model_where_calc = BaseModel._where_calc
BaseModel._where_calc = lambda model, cr, uid, args, context: self._mock_base_model_where_calc(model, cr, uid, args, context)
def tearDown(self):
# Remove mocks
BaseModel._where_calc = self._base_model_where_calc
super(test_expression, self).tearDown()
def test_00_in_not_in_m2m(self):
registry, cr, uid = self.registry, self.cr, self.uid
# Create 4 partners with no category, or one or two categories (out of two categories).
categories = registry('res.partner.category')
cat_a = categories.create(cr, uid, {'name': 'test_expression_category_A'})
cat_b = categories.create(cr, uid, {'name': 'test_expression_category_B'})
partners = registry('res.partner')
a = partners.create(cr, uid, {'name': 'test_expression_partner_A', 'category_id': [(6, 0, [cat_a])]})
b = partners.create(cr, uid, {'name': 'test_expression_partner_B', 'category_id': [(6, 0, [cat_b])]})
ab = partners.create(cr, uid, {'name': 'test_expression_partner_AB', 'category_id': [(6, 0, [cat_a, cat_b])]})
c = partners.create(cr, uid, {'name': 'test_expression_partner_C'})
# The tests.
# On a one2many or many2many field, `in` should be read `contains` (and
# `not in` should be read `doesn't contain`).
with_a = partners.search(cr, uid, [('category_id', 'in', [cat_a])])
self.assertEqual(set([a, ab]), set(with_a), "Search for category_id in cat_a failed.")
with_b = partners.search(cr, uid, [('category_id', 'in', [cat_b])])
self.assertEqual(set([ab, b]), set(with_b), "Search for category_id in cat_b failed.")
# Partners with the category A or the category B.
with_a_or_b = partners.search(cr, uid, [('category_id', 'in', [cat_a, cat_b])])
self.assertEqual(set([ab, a, b]), set(with_a_or_b), "Search for category_id contains cat_a or cat_b failed.")
# Show that `contains list` is really `contains element or contains element`.
with_a_or_with_b = partners.search(cr, uid, ['|', ('category_id', 'in', [cat_a]), ('category_id', 'in', [cat_b])])
self.assertEqual(set([ab, a, b]), set(with_a_or_with_b), "Search for category_id contains cat_a or contains cat_b failed.")
# If we change the OR in AND...
with_a_and_b = partners.search(cr, uid, [('category_id', 'in', [cat_a]), ('category_id', 'in', [cat_b])])
self.assertEqual(set([ab]), set(with_a_and_b), "Search for category_id contains cat_a and cat_b failed.")
# Partners without category A and without category B.
without_a_or_b = partners.search(cr, uid, [('category_id', 'not in', [cat_a, cat_b])])
self.assertTrue(all(i not in without_a_or_b for i in [a, b, ab]), "Search for category_id doesn't contain cat_a or cat_b failed (1).")
self.assertTrue(c in without_a_or_b, "Search for category_id doesn't contain cat_a or cat_b failed (2).")
# Show that `doesn't contain list` is really `doesn't contain element and doesn't contain element`.
without_a_and_without_b = partners.search(cr, uid, [('category_id', 'not in', [cat_a]), ('category_id', 'not in', [cat_b])])
self.assertTrue(all(i not in without_a_and_without_b for i in [a, b, ab]), "Search for category_id doesn't contain cat_a and cat_b failed (1).")
self.assertTrue(c in without_a_and_without_b, "Search for category_id doesn't contain cat_a and cat_b failed (2).")
# We can exclude any partner containing the category A.
without_a = partners.search(cr, uid, [('category_id', 'not in', [cat_a])])
self.assertTrue(a not in without_a, "Search for category_id doesn't contain cat_a failed (1).")
self.assertTrue(ab not in without_a, "Search for category_id doesn't contain cat_a failed (2).")
self.assertTrue(set([b, c]).issubset(set(without_a)), "Search for category_id doesn't contain cat_a failed (3).")
# (Obviously we can do the same for category B.)
without_b = partners.search(cr, uid, [('category_id', 'not in', [cat_b])])
self.assertTrue(b not in without_b, "Search for category_id doesn't contain cat_b failed (1).")
self.assertTrue(ab not in without_b, "Search for category_id doesn't contain cat_b failed (2).")
self.assertTrue(set([a, c]).issubset(set(without_b)), "Search for category_id doesn't contain cat_b failed (3).")
# We can't express the following: Partners with a category different than A.
# with_any_other_than_a = ...
# self.assertTrue(a not in with_any_other_than_a, "Search for category_id with any other than cat_a failed (1).")
# self.assertTrue(ab in with_any_other_than_a, "Search for category_id with any other than cat_a failed (2).")
def test_10_expression_parse(self):
# TDE note: those tests have been added when refactoring the expression.parse() method.
# They come in addition to the already existing test_osv_expression.yml; maybe some tests
# will be a bit redundant
registry, cr, uid = self.registry, self.cr, self.uid
users_obj = registry('res.users')
# Create users
a = users_obj.create(cr, uid, {'name': 'test_A', 'login': 'test_A'})
b1 = users_obj.create(cr, uid, {'name': 'test_B', 'login': 'test_B'})
b1_user = users_obj.browse(cr, uid, [b1])[0]
b2 = users_obj.create(cr, uid, {'name': 'test_B2', 'login': 'test_B2', 'parent_id': b1_user.partner_id.id})
# Test1: simple inheritance
user_ids = users_obj.search(cr, uid, [('name', 'like', 'test')])
self.assertEqual(set(user_ids), set([a, b1, b2]), 'searching through inheritance failed')
user_ids = users_obj.search(cr, uid, [('name', '=', 'test_B')])
self.assertEqual(set(user_ids), set([b1]), 'searching through inheritance failed')
# Test2: inheritance + relational fields
user_ids = users_obj.search(cr, uid, [('child_ids.name', 'like', 'test_B')])
self.assertEqual(set(user_ids), set([b1]), 'searching through inheritance failed')
# Special =? operator means "is equal if right is set, otherwise always True"
user_ids = users_obj.search(cr, uid, [('name', 'like', 'test'), ('parent_id', '=?', False)])
self.assertEqual(set(user_ids), set([a, b1, b2]), '(x =? False) failed')
user_ids = users_obj.search(cr, uid, [('name', 'like', 'test'), ('parent_id', '=?', b1_user.partner_id.id)])
self.assertEqual(set(user_ids), set([b2]), '(x =? id) failed')
def test_20_auto_join(self):
registry, cr, uid = self.registry, self.cr, self.uid
unaccent = get_unaccent_wrapper(cr)
# Get models
partner_obj = registry('res.partner')
state_obj = registry('res.country.state')
bank_obj = registry('res.partner.bank')
# Get test columns
partner_state_id_col = partner_obj._columns.get('state_id') # many2one on res.partner to res.country.state
partner_parent_id_col = partner_obj._columns.get('parent_id') # many2one on res.partner to res.partner
state_country_id_col = state_obj._columns.get('country_id') # many2one on res.country.state on res.country
partner_child_ids_col = partner_obj._columns.get('child_ids') # one2many on res.partner to res.partner
partner_bank_ids_col = partner_obj._columns.get('bank_ids') # one2many on res.partner to res.partner.bank
category_id_col = partner_obj._columns.get('category_id') # many2many on res.partner to res.partner.category
# Get the first bank account type to be able to create a res.partner.bank
bank_type = bank_obj._bank_type_get(cr, uid)[0]
# Get country/state data
country_us_id = registry('res.country').search(cr, uid, [('code', 'like', 'US')])[0]
state_ids = registry('res.country.state').search(cr, uid, [('country_id', '=', country_us_id)], limit=2)
# Create demo data: partners and bank object
p_a = partner_obj.create(cr, uid, {'name': 'test__A', 'state_id': state_ids[0]})
p_b = partner_obj.create(cr, uid, {'name': 'test__B', 'state_id': state_ids[1]})
p_aa = partner_obj.create(cr, uid, {'name': 'test__AA', 'parent_id': p_a, 'state_id': state_ids[0]})
p_ab = partner_obj.create(cr, uid, {'name': 'test__AB', 'parent_id': p_a, 'state_id': state_ids[1]})
p_ba = partner_obj.create(cr, uid, {'name': 'test__BA', 'parent_id': p_b, 'state_id': state_ids[0]})
b_aa = bank_obj.create(cr, uid, {'name': '__bank_test_a', 'state': bank_type[0], 'partner_id': p_aa, 'acc_number': '1234'})
b_ab = bank_obj.create(cr, uid, {'name': '__bank_test_b', 'state': bank_type[0], 'partner_id': p_ab, 'acc_number': '5678'})
b_ba = bank_obj.create(cr, uid, {'name': '__bank_test_b', 'state': bank_type[0], 'partner_id': p_ba, 'acc_number': '9876'})
# --------------------------------------------------
# Test1: basics about the attribute
# --------------------------------------------------
category_id_col._auto_join = True
self.assertRaises(NotImplementedError, partner_obj.search, cr, uid, [('category_id.name', '=', 'foo')])
category_id_col._auto_join = False
# --------------------------------------------------
# Test2: one2many
# --------------------------------------------------
name_test = 'test_a'
# Do: one2many without _auto_join
self._reinit_mock()
partner_ids = partner_obj.search(cr, uid, [('bank_ids.name', 'like', name_test)])
# Test result
self.assertEqual(set(partner_ids), set([p_aa]),
"_auto_join off: ('bank_ids.name', 'like', '..'): incorrect result")
# Test produced queries
self.assertEqual(len(self.query_list), 3,
"_auto_join off: ('bank_ids.name', 'like', '..') should produce 3 queries (1 in res_partner_bank, 2 on res_partner)")
sql_query = self.query_list[0].get_sql()
self.assertIn('res_partner_bank', sql_query[0],
"_auto_join off: ('bank_ids.name', 'like', '..') first query incorrect main table")
expected = "%s::text like %s" % (unaccent('"res_partner_bank"."name"'), unaccent('%s'))
self.assertIn(expected, sql_query[1],
"_auto_join off: ('bank_ids.name', 'like', '..') first query incorrect where condition")
self.assertEqual(set(['%' + name_test + '%']), set(sql_query[2]),
"_auto_join off: ('bank_ids.name', 'like', '..') first query incorrect parameter")
sql_query = self.query_list[2].get_sql()
self.assertIn('res_partner', sql_query[0],
"_auto_join off: ('bank_ids.name', 'like', '..') third query incorrect main table")
self.assertIn('"res_partner"."id" in (%s)', sql_query[1],
"_auto_join off: ('bank_ids.name', 'like', '..') third query incorrect where condition")
self.assertEqual(set([p_aa]), set(sql_query[2]),
"_auto_join off: ('bank_ids.name', 'like', '..') third query incorrect parameter")
# Do: cascaded one2many without _auto_join
self._reinit_mock()
partner_ids = partner_obj.search(cr, uid, [('child_ids.bank_ids.id', 'in', [b_aa, b_ba])])
# Test result
self.assertEqual(set(partner_ids), set([p_a, p_b]),
"_auto_join off: ('child_ids.bank_ids.id', 'in', [..]): incorrect result")
# Test produced queries
self.assertEqual(len(self.query_list), 5,
"_auto_join off: ('child_ids.bank_ids.id', 'in', [..]) should produce 5 queries (1 in res_partner_bank, 4 on res_partner)")
# Do: one2many with _auto_join
partner_bank_ids_col._auto_join = True
self._reinit_mock()
partner_ids = partner_obj.search(cr, uid, [('bank_ids.name', 'like', 'test_a')])
# Test result
self.assertEqual(set(partner_ids), set([p_aa]),
"_auto_join on: ('bank_ids.name', 'like', '..') incorrect result")
# Test produced queries
self.assertEqual(len(self.query_list), 1,
"_auto_join on: ('bank_ids.name', 'like', '..') should produce 1 query")
sql_query = self.query_list[0].get_sql()
self.assertIn('"res_partner"', sql_query[0],
"_auto_join on: ('bank_ids.name', 'like', '..') query incorrect main table")
self.assertIn('"res_partner_bank" as "res_partner__bank_ids"', sql_query[0],
"_auto_join on: ('bank_ids.name', 'like', '..') query incorrect join")
expected = "%s::text like %s" % (unaccent('"res_partner__bank_ids"."name"'), unaccent('%s'))
self.assertIn(expected, sql_query[1],
"_auto_join on: ('bank_ids.name', 'like', '..') query incorrect where condition")
self.assertIn('"res_partner"."id"="res_partner__bank_ids"."partner_id"', sql_query[1],
"_auto_join on: ('bank_ids.name', 'like', '..') query incorrect join condition")
self.assertEqual(set(['%' + name_test + '%']), set(sql_query[2]),
"_auto_join on: ('bank_ids.name', 'like', '..') query incorrect parameter")
# Do: one2many with _auto_join, test final leaf is an id
self._reinit_mock()
partner_ids = partner_obj.search(cr, uid, [('bank_ids.id', 'in', [b_aa, b_ab])])
# Test result
self.assertEqual(set(partner_ids), set([p_aa, p_ab]),
"_auto_join on: ('bank_ids.id', 'in', [..]) incorrect result")
# Test produced queries
self.assertEqual(len(self.query_list), 1,
"_auto_join on: ('bank_ids.id', 'in', [..]) should produce 1 query")
sql_query = self.query_list[0].get_sql()
self.assertIn('"res_partner"', sql_query[0],
"_auto_join on: ('bank_ids.id', 'in', [..]) query incorrect main table")
self.assertIn('"res_partner__bank_ids"."id" in (%s,%s)', sql_query[1],
"_auto_join on: ('bank_ids.id', 'in', [..]) query incorrect where condition")
self.assertEqual(set([b_aa, b_ab]), set(sql_query[2]),
"_auto_join on: ('bank_ids.id', 'in', [..]) query incorrect parameter")
# Do: 2 cascaded one2many with _auto_join, test final leaf is an id
partner_child_ids_col._auto_join = True
self._reinit_mock()
partner_ids = partner_obj.search(cr, uid, [('child_ids.bank_ids.id', 'in', [b_aa, b_ba])])
# Test result
self.assertEqual(set(partner_ids), set([p_a, p_b]),
"_auto_join on: ('child_ids.bank_ids.id', 'not in', [..]): incorrect result")
# Test produced queries
self.assertEqual(len(self.query_list), 1,
"_auto_join on: ('child_ids.bank_ids.id', 'in', [..]) should produce 1 query")
sql_query = self.query_list[0].get_sql()
self.assertIn('"res_partner"', sql_query[0],
"_auto_join on: ('child_ids.bank_ids.id', 'in', [..]) incorrect main table")
self.assertIn('"res_partner" as "res_partner__child_ids"', sql_query[0],
"_auto_join on: ('child_ids.bank_ids.id', 'in', [..]) query incorrect join")
self.assertIn('"res_partner_bank" as "res_partner__child_ids__bank_ids"', sql_query[0],
"_auto_join on: ('child_ids.bank_ids.id', 'in', [..]) query incorrect join")
self.assertIn('"res_partner__child_ids__bank_ids"."id" in (%s,%s)', sql_query[1],
"_auto_join on: ('child_ids.bank_ids.id', 'in', [..]) query incorrect where condition")
self.assertIn('"res_partner"."id"="res_partner__child_ids"."parent_id"', sql_query[1],
"_auto_join on: ('child_ids.bank_ids.id', 'in', [..]) query incorrect join condition")
self.assertIn('"res_partner__child_ids"."id"="res_partner__child_ids__bank_ids"."partner_id"', sql_query[1],
"_auto_join on: ('child_ids.bank_ids.id', 'in', [..]) query incorrect join condition")
self.assertEqual(set([b_aa, b_ba]), set(sql_query[2][-2:]),
"_auto_join on: ('child_ids.bank_ids.id', 'in', [..]) query incorrect parameter")
# --------------------------------------------------
# Test3: many2one
# --------------------------------------------------
name_test = 'US'
# Do: many2one without _auto_join
self._reinit_mock()
partner_ids = partner_obj.search(cr, uid, [('state_id.country_id.code', 'like', name_test)])
# Test result: at least our added data + demo data
self.assertTrue(set([p_a, p_b, p_aa, p_ab, p_ba]).issubset(set(partner_ids)),
"_auto_join off: ('state_id.country_id.code', 'like', '..') incorrect result")
# Test produced queries
self.assertEqual(len(self.query_list), 3,
"_auto_join off: ('state_id.country_id.code', 'like', '..') should produce 3 queries (1 on res_country, 1 on res_country_state, 1 on res_partner)")
# Do: many2one with 1 _auto_join on the first many2one
partner_state_id_col._auto_join = True
self._reinit_mock()
partner_ids = partner_obj.search(cr, uid, [('state_id.country_id.code', 'like', name_test)])
# Test result: at least our added data + demo data
self.assertTrue(set([p_a, p_b, p_aa, p_ab, p_ba]).issubset(set(partner_ids)),
"_auto_join on for state_id: ('state_id.country_id.code', 'like', '..') incorrect result")
# Test produced queries
self.assertEqual(len(self.query_list), 2,
"_auto_join on for state_id: ('state_id.country_id.code', 'like', '..') should produce 2 query")
sql_query = self.query_list[0].get_sql()
self.assertIn('"res_country"', sql_query[0],
"_auto_join on for state_id: ('state_id.country_id.code', 'like', '..') query 1 incorrect main table")
expected = "%s::text like %s" % (unaccent('"res_country"."code"'), unaccent('%s'))
self.assertIn(expected, sql_query[1],
"_auto_join on for state_id: ('state_id.country_id.code', 'like', '..') query 1 incorrect where condition")
self.assertEqual(['%' + name_test + '%'], sql_query[2],
"_auto_join on for state_id: ('state_id.country_id.code', 'like', '..') query 1 incorrect parameter")
sql_query = self.query_list[1].get_sql()
self.assertIn('"res_partner"', sql_query[0],
"_auto_join on for state_id: ('state_id.country_id.code', 'like', '..') query 2 incorrect main table")
self.assertIn('"res_country_state" as "res_partner__state_id"', sql_query[0],
"_auto_join on for state_id: ('state_id.country_id.code', 'like', '..') query 2 incorrect join")
self.assertIn('"res_partner__state_id"."country_id" in (%s)', sql_query[1],
"_auto_join on for state_id: ('state_id.country_id.code', 'like', '..') query 2 incorrect where condition")
self.assertIn('"res_partner"."state_id"="res_partner__state_id"."id"', sql_query[1],
"_auto_join on for state_id: ('state_id.country_id.code', 'like', '..') query 2 incorrect join condition")
# Do: many2one with 1 _auto_join on the second many2one
partner_state_id_col._auto_join = False
state_country_id_col._auto_join = True
self._reinit_mock()
partner_ids = partner_obj.search(cr, uid, [('state_id.country_id.code', 'like', name_test)])
# Test result: at least our added data + demo data
self.assertTrue(set([p_a, p_b, p_aa, p_ab, p_ba]).issubset(set(partner_ids)),
"_auto_join on for country_id: ('state_id.country_id.code', 'like', '..') incorrect result")
# Test produced queries
self.assertEqual(len(self.query_list), 2,
"_auto_join on for country_id: ('state_id.country_id.code', 'like', '..') should produce 2 query")
# -- first query
sql_query = self.query_list[0].get_sql()
self.assertIn('"res_country_state"', sql_query[0],
"_auto_join on for country_id: ('state_id.country_id.code', 'like', '..') query 1 incorrect main table")
self.assertIn('"res_country" as "res_country_state__country_id"', sql_query[0],
"_auto_join on for country_id: ('state_id.country_id.code', 'like', '..') query 1 incorrect join")
expected = "%s::text like %s" % (unaccent('"res_country_state__country_id"."code"'), unaccent('%s'))
self.assertIn(expected, sql_query[1],
"_auto_join on for country_id: ('state_id.country_id.code', 'like', '..') query 1 incorrect where condition")
self.assertIn('"res_country_state"."country_id"="res_country_state__country_id"."id"', sql_query[1],
"_auto_join on for country_id: ('state_id.country_id.code', 'like', '..') query 1 incorrect join condition")
self.assertEqual(['%' + name_test + '%'], sql_query[2],
"_auto_join on for country_id: ('state_id.country_id.code', 'like', '..') query 1 incorrect parameter")
# -- second query
sql_query = self.query_list[1].get_sql()
self.assertIn('"res_partner"', sql_query[0],
"_auto_join on for country_id: ('state_id.country_id.code', 'like', '..') query 2 incorrect main table")
self.assertIn('"res_partner"."state_id" in', sql_query[1],
"_auto_join on for country_id: ('state_id.country_id.code', 'like', '..') query 2 incorrect where condition")
# Do: many2one with 2 _auto_join
partner_state_id_col._auto_join = True
state_country_id_col._auto_join = True
self._reinit_mock()
partner_ids = partner_obj.search(cr, uid, [('state_id.country_id.code', 'like', name_test)])
# Test result: at least our added data + demo data
self.assertTrue(set([p_a, p_b, p_aa, p_ab, p_ba]).issubset(set(partner_ids)),
"_auto_join on: ('state_id.country_id.code', 'like', '..') incorrect result")
# Test produced queries
self.assertEqual(len(self.query_list), 1,
"_auto_join on: ('state_id.country_id.code', 'like', '..') should produce 1 query")
sql_query = self.query_list[0].get_sql()
self.assertIn('"res_partner"', sql_query[0],
"_auto_join on: ('state_id.country_id.code', 'like', '..') query incorrect main table")
self.assertIn('"res_country_state" as "res_partner__state_id"', sql_query[0],
"_auto_join on: ('state_id.country_id.code', 'like', '..') query incorrect join")
self.assertIn('"res_country" as "res_partner__state_id__country_id"', sql_query[0],
"_auto_join on: ('state_id.country_id.code', 'like', '..') query incorrect join")
expected = "%s::text like %s" % (unaccent('"res_partner__state_id__country_id"."code"'), unaccent('%s'))
self.assertIn(expected, sql_query[1],
"_auto_join on: ('state_id.country_id.code', 'like', '..') query incorrect where condition")
self.assertIn('"res_partner"."state_id"="res_partner__state_id"."id"', sql_query[1],
"_auto_join on: ('state_id.country_id.code', 'like', '..') query incorrect join condition")
self.assertIn('"res_partner__state_id"."country_id"="res_partner__state_id__country_id"."id"', sql_query[1],
"_auto_join on: ('state_id.country_id.code', 'like', '..') query incorrect join condition")
self.assertEqual(['%' + name_test + '%'], sql_query[2],
"_auto_join on: ('state_id.country_id.code', 'like', '..') query incorrect parameter")
# --------------------------------------------------
# Test4: domain attribute on one2many fields
# --------------------------------------------------
partner_child_ids_col._auto_join = True
partner_bank_ids_col._auto_join = True
partner_child_ids_col._domain = lambda self: ['!', ('name', '=', self._name)]
partner_bank_ids_col._domain = [('acc_number', 'like', '1')]
# Do: 2 cascaded one2many with _auto_join, test final leaf is an id
self._reinit_mock()
partner_ids = partner_obj.search(cr, uid, ['&', (1, '=', 1), ('child_ids.bank_ids.id', 'in', [b_aa, b_ba])])
# Test result: at least one of our added data
self.assertTrue(set([p_a]).issubset(set(partner_ids)),
"_auto_join on one2many with domains incorrect result")
self.assertFalse(set([p_ab, p_ba]) & set(partner_ids),
"_auto_join on one2many with domains incorrect result")
# Test produced queries to check that the domains are effectively present
sql_query = self.query_list[0].get_sql()
expected = "%s::text like %s" % (unaccent('"res_partner__child_ids__bank_ids"."acc_number"'), unaccent('%s'))
self.assertIn(expected, sql_query[1],
"_auto_join on one2many with domains incorrect result")
# TDE TODO: check first domain has a correct table name
self.assertIn('"res_partner__child_ids"."name" = %s', sql_query[1],
"_auto_join on one2many with domains incorrect result")
partner_child_ids_col._domain = lambda self: [('name', '=', '__%s' % self._name)]
self._reinit_mock()
partner_ids = partner_obj.search(cr, uid, ['&', (1, '=', 1), ('child_ids.bank_ids.id', 'in', [b_aa, b_ba])])
# Test result: no one
self.assertFalse(partner_ids,
"_auto_join on one2many with domains incorrect result")
# ----------------------------------------
# Test5: result-based tests
# ----------------------------------------
partner_bank_ids_col._auto_join = False
partner_child_ids_col._auto_join = False
partner_state_id_col._auto_join = False
partner_parent_id_col._auto_join = False
state_country_id_col._auto_join = False
partner_child_ids_col._domain = []
partner_bank_ids_col._domain = []
# Do: ('child_ids.state_id.country_id.code', 'like', '..') without _auto_join
self._reinit_mock()
partner_ids = partner_obj.search(cr, uid, [('child_ids.state_id.country_id.code', 'like', name_test)])
# Test result: at least our added data + demo data
self.assertTrue(set([p_a, p_b]).issubset(set(partner_ids)),
"_auto_join off: ('child_ids.state_id.country_id.code', 'like', '..') incorrect result")
# Test produced queries
self.assertEqual(len(self.query_list), 5,
"_auto_join off: ('child_ids.state_id.country_id.code', 'like', '..') number of queries incorrect")
# Do: ('child_ids.state_id.country_id.code', 'like', '..') with _auto_join
partner_child_ids_col._auto_join = True
partner_state_id_col._auto_join = True
state_country_id_col._auto_join = True
self._reinit_mock()
partner_ids = partner_obj.search(cr, uid, [('child_ids.state_id.country_id.code', 'like', name_test)])
# Test result: at least our added data + demo data
self.assertTrue(set([p_a, p_b]).issubset(set(partner_ids)),
"_auto_join on: ('child_ids.state_id.country_id.code', 'like', '..') incorrect result")
# Test produced queries
self.assertEqual(len(self.query_list), 1,
"_auto_join on: ('child_ids.state_id.country_id.code', 'like', '..') number of queries incorrect")
# Remove mocks and modifications
partner_bank_ids_col._auto_join = False
partner_child_ids_col._auto_join = False
partner_state_id_col._auto_join = False
partner_parent_id_col._auto_join = False
state_country_id_col._auto_join = False
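# Sketch of the kind of column definition exercised above (assumed, not part
# of this test file). With auto_join enabled, the ORM resolves a leaf such as
# ('bank_ids.name', 'like', ...) through a SQL JOIN instead of a sub-search
# on the co-model:
#
#   'bank_ids': fields.one2many('res.partner.bank', 'partner_id', 'Banks',
#                               auto_join=True),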
def test_30_normalize_domain(self):
expression = openerp.osv.expression
norm_domain = domain = ['&', (1, '=', 1), ('a', '=', 'b')]
assert norm_domain == expression.normalize_domain(domain), "Normalized domains should be left untouched"
domain = [('x', 'in', ['y', 'z']), ('a.v', '=', 'e'), '|', '|', ('a', '=', 'b'), '!', ('c', '>', 'd'), ('e', '!=', 'f'), ('g', '=', 'h')]
norm_domain = ['&', '&', '&'] + domain
assert norm_domain == expression.normalize_domain(domain), "Non-normalized domains should be properly normalized"
def test_translate_search(self):
Country = self.registry('res.country')
be = self.ref('base.be')
domains = [
[('name', '=', 'Belgium')],
[('name', 'ilike', 'Belgi')],
[('name', 'in', ['Belgium', 'Care Bears'])],
]
for domain in domains:
ids = Country.search(self.cr, self.uid, domain)
self.assertListEqual([be], ids)
if __name__ == '__main__':
unittest2.main()
| agpl-3.0 |
sergei-maertens/django | tests/admin_views/customadmin.py | 379 | 2366 | """
A second, custom AdminSite -- see tests.CustomAdminSiteTests.
"""
from __future__ import unicode_literals
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.http import HttpResponse
from . import admin as base_admin, forms, models
class Admin2(admin.AdminSite):
app_index_template = 'custom_admin/app_index.html'
login_form = forms.CustomAdminAuthenticationForm
login_template = 'custom_admin/login.html'
logout_template = 'custom_admin/logout.html'
index_template = ['custom_admin/index.html'] # a list, to test fix for #18697
password_change_template = 'custom_admin/password_change_form.html'
password_change_done_template = 'custom_admin/password_change_done.html'
# A custom index view.
def index(self, request, extra_context=None):
return super(Admin2, self).index(request, {'foo': '*bar*'})
def get_urls(self):
return [
url(r'^my_view/$', self.admin_view(self.my_view), name='my_view'),
] + super(Admin2, self).get_urls()
def my_view(self, request):
return HttpResponse("Django is a magical pony!")
def password_change(self, request, extra_context=None):
return super(Admin2, self).password_change(request, {'spam': 'eggs'})
class UserLimitedAdmin(UserAdmin):
# used for testing password change on a user not in queryset
def get_queryset(self, request):
qs = super(UserLimitedAdmin, self).get_queryset(request)
return qs.filter(is_superuser=False)
class CustomPwdTemplateUserAdmin(UserAdmin):
change_user_password_template = ['admin/auth/user/change_password.html'] # a list, to test fix for #18697
site = Admin2(name="admin2")
site.register(models.Article, base_admin.ArticleAdmin)
site.register(models.Section, inlines=[base_admin.ArticleInline])
site.register(models.Thing, base_admin.ThingAdmin)
site.register(models.Fabric, base_admin.FabricAdmin)
site.register(models.ChapterXtra1, base_admin.ChapterXtra1Admin)
site.register(User, UserLimitedAdmin)
site.register(models.UndeletableObject, base_admin.UndeletableObjectAdmin)
site.register(models.Simple, base_admin.AttributeErrorRaisingAdmin)
simple_site = Admin2(name='admin4')
simple_site.register(User, CustomPwdTemplateUserAdmin)
| bsd-3-clause |
jetskijoe/headphones | lib/requests/packages/urllib3/response.py | 64 | 17149 | from contextlib import contextmanager
import zlib
import io
from socket import timeout as SocketTimeout
from ._collections import HTTPHeaderDict
from .exceptions import (
ProtocolError, DecodeError, ReadTimeoutError, ResponseNotChunked
)
from .packages.six import string_types as basestring, binary_type, PY3
from .packages.six.moves import http_client as httplib
from .connection import HTTPException, BaseSSLError
from .util.response import is_fp_closed, is_response_to_head
class DeflateDecoder(object):
def __init__(self):
self._first_try = True
self._data = binary_type()
self._obj = zlib.decompressobj()
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not data:
return data
if not self._first_try:
return self._obj.decompress(data)
self._data += data
try:
return self._obj.decompress(data)
except zlib.error:
self._first_try = False
self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
try:
return self.decompress(self._data)
finally:
self._data = None
class GzipDecoder(object):
def __init__(self):
self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not data:
return data
return self._obj.decompress(data)
def _get_decoder(mode):
if mode == 'gzip':
return GzipDecoder()
return DeflateDecoder()
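# Illustrative sketch (not part of urllib3): the first-try/fallback logic in
# DeflateDecoder copes with servers that send raw DEFLATE streams without a
# zlib header. Feeding such a stream raises zlib.error on the first try, after
# which the decoder is rebuilt with -zlib.MAX_WBITS and the buffered data is
# replayed:
#
#   import zlib
#   raw = zlib.compress(b'payload')[2:-4]  # strip zlib header and checksum
#   decoder = DeflateDecoder()
#   assert decoder.decompress(raw) == b'payload'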
class HTTPResponse(io.IOBase):
"""
HTTP Response container.
Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed. This
class is also compatible with the Python standard library's :mod:`io`
module, and can hence be treated as a readable object in the context of that
framework.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, the response body will be decoded based on the
'content-encoding' header (e.g. 'gzip' and 'deflate'); if False,
decoding is skipped and the raw data is used instead.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
"""
CONTENT_DECODERS = ['gzip', 'deflate']
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
def __init__(self, body='', headers=None, status=0, version=0, reason=None,
strict=0, preload_content=True, decode_content=True,
original_response=None, pool=None, connection=None):
if isinstance(headers, HTTPHeaderDict):
self.headers = headers
else:
self.headers = HTTPHeaderDict(headers)
self.status = status
self.version = version
self.reason = reason
self.strict = strict
self.decode_content = decode_content
self._decoder = None
self._body = None
self._fp = None
self._original_response = original_response
self._fp_bytes_read = 0
if body and isinstance(body, (basestring, binary_type)):
self._body = body
self._pool = pool
self._connection = connection
if hasattr(body, 'read'):
self._fp = body
# Are we using the chunked-style of transfer encoding?
self.chunked = False
self.chunk_left = None
tr_enc = self.headers.get('transfer-encoding', '').lower()
# Don't incur the penalty of creating a list and then discarding it
encodings = (enc.strip() for enc in tr_enc.split(","))
if "chunked" in encodings:
self.chunked = True
# We certainly don't want to preload content when the response is chunked.
if not self.chunked and preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return False
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
@property
def data(self):
# For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body
if self._fp:
return self.read(cache_content=True)
def tell(self):
"""
Obtain the number of bytes pulled over the wire so far. May differ from
the amount of content returned by :meth:``HTTPResponse.read`` if bytes
are encoded on the wire (e.g., compressed).
"""
return self._fp_bytes_read
def _init_decoder(self):
"""
Set up the _decoder attribute if necessary.
"""
# Note: content-encoding value should be case-insensitive, per RFC 7230
# Section 3.2
content_encoding = self.headers.get('content-encoding', '').lower()
if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
def _decode(self, data, decode_content, flush_decoder):
"""
Decode the data passed in and potentially flush the decoder.
"""
try:
if decode_content and self._decoder:
data = self._decoder.decompress(data)
except (IOError, zlib.error) as e:
content_encoding = self.headers.get('content-encoding', '').lower()
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding, e)
if flush_decoder and decode_content and self._decoder:
buf = self._decoder.decompress(binary_type())
data += buf + self._decoder.flush()
return data
@contextmanager
def _error_catcher(self):
"""
Catch low-level python exceptions, instead re-raising urllib3
variants, so that low-level exceptions are not leaked in the
high-level api.
On exit, release the connection back to the pool.
"""
try:
try:
yield
except SocketTimeout:
# FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
# there is yet no clean way to get at it from this context.
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except BaseSSLError as e:
# FIXME: Is there a better way to differentiate between SSLErrors?
if 'read operation timed out' not in str(e): # Defensive:
# This shouldn't happen but just in case we're missing an edge
# case, let's avoid swallowing SSL errors.
raise
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except HTTPException as e:
# This includes IncompleteRead.
raise ProtocolError('Connection broken: %r' % e, e)
except Exception:
# The response may not be closed but we're not going to use it anymore
# so close it now to ensure that the connection is released back to the pool.
if self._original_response and not self._original_response.isclosed():
self._original_response.close()
raise
finally:
if self._original_response and self._original_response.isclosed():
self.release_conn()
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
returned regardless of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
self._init_decoder()
if decode_content is None:
decode_content = self.decode_content
if self._fp is None:
return
flush_decoder = False
data = None
with self._error_catcher():
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read()
flush_decoder = True
else:
cache_content = False
data = self._fp.read(amt)
if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do
# not properly close the connection in all cases. There is
# no harm in redundantly calling close.
self._fp.close()
flush_decoder = True
if data:
self._fp_bytes_read += len(data)
data = self._decode(data, decode_content, flush_decoder)
if cache_content:
self._body = data
return data
def stream(self, amt=2**16, decode_content=None):
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return up to
``amt`` bytes per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
if self.chunked:
for line in self.read_chunked(amt, decode_content=decode_content):
yield line
else:
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
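# Illustrative usage sketch (hypothetical URL, not part of urllib3):
#
#   import urllib3
#   http = urllib3.PoolManager()
#   r = http.request('GET', 'http://example.com/big.bin',
#                    preload_content=False)
#   for chunk in r.stream(2 ** 16):
#       handle(chunk)  # 'handle' is a placeholder for your own processing
#   r.release_conn()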
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
"""
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
headers = r.msg
if not isinstance(headers, HTTPHeaderDict):
if PY3: # Python 3
headers = HTTPHeaderDict(headers.items())
else: # Python 2
headers = HTTPHeaderDict.from_httplib(headers)
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
resp = ResponseCls(body=r,
headers=headers,
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw)
return resp
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
# Overrides from io.IOBase
def close(self):
if not self.closed:
self._fp.close()
@property
def closed(self):
if self._fp is None:
return True
elif hasattr(self._fp, 'closed'):
return self._fp.closed
elif hasattr(self._fp, 'isclosed'): # Python 2
return self._fp.isclosed()
else:
return True
def fileno(self):
if self._fp is None:
raise IOError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise IOError("The file-like object this HTTPResponse is wrapped "
"around has no file descriptor")
def flush(self):
if self._fp is not None and hasattr(self._fp, 'flush'):
return self._fp.flush()
def readable(self):
# This method is required for `io` module compatibility.
return True
def readinto(self, b):
# This method is required for `io` module compatibility.
temp = self.read(len(b))
if len(temp) == 0:
return 0
else:
b[:len(temp)] = temp
return len(temp)
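# Sketch of the io-module compatibility that readable()/readinto() provide
# (assumed usage; 'resp' is an HTTPResponse created with
# preload_content=False):
#
#   import io
#   reader = io.BufferedReader(resp, buffer_size=8192)
#   first_line = reader.readline()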
def _update_chunk_length(self):
# First, we'll figure out length of a chunk and then
# we'll try to read it from socket.
if self.chunk_left is not None:
return
line = self._fp.fp.readline()
line = line.split(b';', 1)[0]
try:
self.chunk_left = int(line, 16)
except ValueError:
# Invalid chunked protocol response, abort.
self.close()
raise httplib.IncompleteRead(line)
def _handle_chunk(self, amt):
returned_chunk = None
if amt is None:
chunk = self._fp._safe_read(self.chunk_left)
returned_chunk = chunk
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
elif amt < self.chunk_left:
value = self._fp._safe_read(amt)
self.chunk_left = self.chunk_left - amt
returned_chunk = value
elif amt == self.chunk_left:
value = self._fp._safe_read(amt)
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
returned_chunk = value
else: # amt > self.chunk_left
returned_chunk = self._fp._safe_read(self.chunk_left)
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
return returned_chunk
def read_chunked(self, amt=None, decode_content=None):
"""
Similar to :meth:`HTTPResponse.read`, but with an additional
parameter: ``decode_content``.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
self._init_decoder()
# FIXME: Rewrite this method and make it a class with a better structured logic.
if not self.chunked:
raise ResponseNotChunked("Response is not chunked. "
"Header 'transfer-encoding: chunked' is missing.")
# Don't bother reading the body of a HEAD request.
if self._original_response and is_response_to_head(self._original_response):
self._original_response.close()
return
with self._error_catcher():
while True:
self._update_chunk_length()
if self.chunk_left == 0:
break
chunk = self._handle_chunk(amt)
yield self._decode(chunk, decode_content=decode_content,
flush_decoder=True)
# Chunk content ends with \r\n: discard it.
while True:
line = self._fp.fp.readline()
if not line:
# Some sites may not end with '\r\n'.
break
if line == b'\r\n':
break
# We read everything; close the "file".
if self._original_response:
self._original_response.close()
| gpl-3.0 |
jss-emr/openerp-7-src | openerp/addons/account_asset/wizard/wizard_asset_compute.py | 5 | 2542 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class asset_depreciation_confirmation_wizard(osv.osv_memory):
_name = "asset.depreciation.confirmation.wizard"
_description = "asset.depreciation.confirmation.wizard"
_columns = {
'period_id': fields.many2one('account.period', 'Period', required=True, help="Choose the period for which you want to automatically post the depreciation lines of running assets"),
}
def _get_period(self, cr, uid, context=None):
periods = self.pool.get('account.period').find(cr, uid)
if periods:
return periods[0]
return False
_defaults = {
'period_id': _get_period,
}
def asset_compute(self, cr, uid, ids, context):
ass_obj = self.pool.get('account.asset.asset')
asset_ids = ass_obj.search(cr, uid, [('state','=','open')], context=context)
data = self.browse(cr, uid, ids, context=context)
period_id = data[0].period_id.id
created_move_ids = ass_obj._compute_entries(cr, uid, asset_ids, period_id, context=context)
return {
'name': _('Created Asset Moves'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.move',
'view_id': False,
'domain': "[('id','in',["+','.join(map(str,created_move_ids))+"])]",
'type': 'ir.actions.act_window',
}
asset_depreciation_confirmation_wizard()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
meska/motioncontrol | migrations/0001_initial.py | 1 | 4735 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='AlertSubscription',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('channel', models.CharField(max_length=80, choices=[['email', 'E-Mail'], ['telegram', 'Telegram Bot']])),
('destination', models.CharField(max_length=250)),
('alert_motion', models.BooleanField(default=False)),
('alert_nomotion', models.BooleanField(default=False)),
('alert_nomotion_length', models.TimeField(null=True, blank=True)),
('alert_from', models.DateTimeField(null=True, blank=True)),
('alert_to', models.DateTimeField(null=True, blank=True)),
('enabled', models.BooleanField(default=False)),
('sent', models.BooleanField(default=False)),
('pause', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Cam',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('name', models.CharField(max_length=100, null=True, blank=True)),
('slug', models.CharField(max_length=100, null=True, blank=True)),
('thread_number', models.IntegerField(null=True, blank=True)),
('output_pictures', models.BooleanField(default=True)),
('online', models.BooleanField(default=True)),
('last_event', models.DateTimeField(null=True, blank=True)),
('on_event_script', models.CharField(max_length=200, default='/etc/motion/on_event_webhook.py')),
],
options={
'managed': True,
},
),
migrations.CreateModel(
name='ConfigValue',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('name', models.CharField(max_length=100)),
('value', models.CharField(max_length=255)),
('cam', models.ForeignKey(to='motioncontrol.Cam')),
],
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('datetime', models.DateTimeField()),
('event_type', models.IntegerField()),
('filename', models.CharField(max_length=250)),
('cam', models.ForeignKey(to='motioncontrol.Cam')),
],
),
migrations.CreateModel(
name='Server',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('name', models.CharField(unique=True, max_length=100)),
('admin_url', models.CharField(unique=True, max_length=200, default='http://127.0.0.1:8000/')),
('stream_url', models.CharField(unique=True, max_length=200, help_text='Stream base url, requires nginx configuration', default='http://127.0.0.1/')),
('local_config_folder', models.CharField(max_length=200, null=True, blank=True, help_text='On motion server')),
('local_data_folder', models.CharField(max_length=200, null=True, blank=True, help_text='On motion server')),
('remote_config_folder', models.CharField(max_length=200, null=True, blank=True, help_text='On Django server')),
('remote_data_folder', models.CharField(max_length=200, null=True, blank=True, help_text='On Django server')),
],
options={
'managed': True,
},
),
migrations.AddField(
model_name='cam',
name='server',
field=models.ForeignKey(to='motioncontrol.Server'),
),
migrations.AddField(
model_name='alertsubscription',
name='cam',
field=models.ForeignKey(to='motioncontrol.Cam'),
),
migrations.AlterUniqueTogether(
name='event',
unique_together=set([('cam', 'filename')]),
),
migrations.AlterUniqueTogether(
name='alertsubscription',
unique_together=set([('channel', 'destination', 'cam', 'alert_motion', 'alert_nomotion')]),
),
]
| gpl-2.0 |
jazzband/silk | project/tests/test_silky_middleware.py | 1 | 4462 | from django.urls import reverse
from django.test import TestCase, override_settings
from unittest.mock import patch, Mock
from silk.config import SilkyConfig
from silk.middleware import SilkyMiddleware, _should_intercept
from silk.models import Request
from .util import mock_data_collector
def fake_get_response():
def fake_response():
return 'hello world'
return fake_response
class TestApplyDynamicMappings(TestCase):
def test_dynamic_decorator(self):
middleware = SilkyMiddleware(fake_get_response)
SilkyConfig().SILKY_DYNAMIC_PROFILING = [
{
'module': 'tests.data.dynamic',
'function': 'foo'
}
]
middleware._apply_dynamic_mappings()
from .data.dynamic import foo
mock = mock_data_collector()
with patch('silk.profiling.profiler.DataCollector', return_value=mock) as mock_DataCollector:
foo() # Should be wrapped in a decorator
self.assertTrue(mock_DataCollector.return_value.register_profile.call_count)
def test_dynamic_context_manager(self):
middleware = SilkyMiddleware(fake_get_response)
SilkyConfig().SILKY_DYNAMIC_PROFILING = [
{
'module': 'tests.data.dynamic',
'function': 'foo',
'start_line': 1,
'end_line': 2,
}
]
middleware._apply_dynamic_mappings()
from .data.dynamic import foo
mock = mock_data_collector()
with patch('silk.profiling.profiler.DataCollector', return_value=mock) as mock_DataCollector:
foo()
self.assertTrue(mock_DataCollector.return_value.register_profile.call_count)
def test_invalid_dynamic_context_manager(self):
middleware = SilkyMiddleware(fake_get_response)
SilkyConfig().SILKY_DYNAMIC_PROFILING = [
{
'module': 'tests.data.dynamic',
'function': 'foo2',
'start_line': 1,
'end_line': 7,
}
]
self.assertRaises(IndexError, middleware._apply_dynamic_mappings)
def test_invalid_dynamic_decorator_module(self):
middleware = SilkyMiddleware(fake_get_response)
SilkyConfig().SILKY_DYNAMIC_PROFILING = [
{
'module': 'tests.data.dfsdf',
'function': 'foo'
}
]
self.assertRaises(AttributeError, middleware._apply_dynamic_mappings)
def test_invalid_dynamic_decorator_function_name(self):
middleware = SilkyMiddleware(fake_get_response)
SilkyConfig().SILKY_DYNAMIC_PROFILING = [
{
'module': 'tests.data.dynamic',
'function': 'bar'
}
]
self.assertRaises(AttributeError, middleware._apply_dynamic_mappings)
def test_invalid_dynamic_mapping(self):
middleware = SilkyMiddleware(fake_get_response)
SilkyConfig().SILKY_DYNAMIC_PROFILING = [
{
'dfgdf': 'tests.data.dynamic',
'funcgdfgtion': 'bar'
}
]
self.assertRaises(KeyError, middleware._apply_dynamic_mappings)
def test_no_mappings(self):
middleware = SilkyMiddleware(fake_get_response)
SilkyConfig().SILKY_DYNAMIC_PROFILING = [
]
middleware._apply_dynamic_mappings() # Just checking no crash
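# Sketch of the settings shape these tests exercise (in a real project this
# lives in Django settings; the module and function names are placeholders):
#
#   SILKY_DYNAMIC_PROFILING = [
#       # decorator-style: profile the whole function
#       {'module': 'myapp.views', 'function': 'my_view'},
#       # context-manager-style: profile only the given line range
#       {'module': 'myapp.views', 'function': 'my_view',
#        'start_line': 1, 'end_line': 2},
#   ]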
class TestShouldIntercept(TestCase):
def test_should_intercept_non_silk_request(self):
request = Request()
request.path = '/myapp/foo'
should_intercept = _should_intercept(request)
self.assertTrue(should_intercept)
def test_should_intercept_silk_request(self):
request = Request()
request.path = reverse('silk:summary')
should_intercept = _should_intercept(request)
self.assertFalse(should_intercept)
@override_settings(ROOT_URLCONF='tests.urlconf_without_silk')
def test_should_intercept_without_silk_urls(self):
request = Request()
request.path = '/login'
_should_intercept(request) # Just checking no crash
def test_should_intercept_ignore_paths(self):
SilkyConfig().SILKY_IGNORE_PATHS = [
'/ignorethis'
]
request = Request()
request.path = '/ignorethis'
should_intercept = _should_intercept(request)
self.assertFalse(should_intercept)
| mit |
kikokubo/Sick-Beard-TPB | lib/requests/packages/charade/universaldetector.py | 70 | 6858 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber # ISO-2022, etc.
import re
MINIMUM_THRESHOLD = 0.20
ePureAscii = 0
eEscAscii = 1
eHighbyte = 2
class UniversalDetector:
def __init__(self):
self._highBitDetector = re.compile(b'[\x80-\xFF]')
self._escDetector = re.compile(b'(\033|~{)')
self._mEscCharSetProber = None
self._mCharSetProbers = []
self.reset()
def reset(self):
self.result = {'encoding': None, 'confidence': 0.0}
self.done = False
self._mStart = True
self._mGotData = False
self._mInputState = ePureAscii
self._mLastChar = b''
if self._mEscCharSetProber:
self._mEscCharSetProber.reset()
for prober in self._mCharSetProbers:
prober.reset()
def feed(self, aBuf):
if self.done:
return
aLen = len(aBuf)
if not aLen:
return
if not self._mGotData:
# If the data starts with BOM, we know it is UTF
if aBuf[:3] == codecs.BOM:
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_LE:
# FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_BE:
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
elif aBuf[:4] == b'\xFE\xFF\x00\x00':
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {
'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0
}
elif aBuf[:4] == b'\x00\x00\xFF\xFE':
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {
'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0
}
elif aBuf[:2] == codecs.BOM_LE:
# FF FE UTF-16, little endian BOM
self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
elif aBuf[:2] == codecs.BOM_BE:
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
self._mGotData = True
if self.result['encoding'] and (self.result['confidence'] > 0.0):
self.done = True
return
if self._mInputState == ePureAscii:
if self._highBitDetector.search(aBuf):
self._mInputState = eHighbyte
elif ((self._mInputState == ePureAscii) and
self._escDetector.search(self._mLastChar + aBuf)):
self._mInputState = eEscAscii
self._mLastChar = aBuf[-1:]
if self._mInputState == eEscAscii:
if not self._mEscCharSetProber:
self._mEscCharSetProber = EscCharSetProber()
if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
self.result = {
'encoding': self._mEscCharSetProber.get_charset_name(),
'confidence': self._mEscCharSetProber.get_confidence()
}
self.done = True
elif self._mInputState == eHighbyte:
if not self._mCharSetProbers:
self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
Latin1Prober()]
for prober in self._mCharSetProbers:
if prober.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': prober.get_charset_name(),
'confidence': prober.get_confidence()}
self.done = True
break
def close(self):
if self.done:
return
if not self._mGotData:
if constants._debug:
sys.stderr.write('no data received!\n')
return
self.done = True
if self._mInputState == ePureAscii:
self.result = {'encoding': 'ascii', 'confidence': 1.0}
return self.result
if self._mInputState == eHighbyte:
proberConfidence = None
maxProberConfidence = 0.0
maxProber = None
for prober in self._mCharSetProbers:
if not prober:
continue
proberConfidence = prober.get_confidence()
if proberConfidence > maxProberConfidence:
maxProberConfidence = proberConfidence
maxProber = prober
if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
self.result = {'encoding': maxProber.get_charset_name(),
'confidence': maxProber.get_confidence()}
return self.result
if constants._debug:
sys.stderr.write('no probers hit minimum threshold\n')
for prober in self._mCharSetProbers[0].mProbers:
if not prober:
continue
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(),
prober.get_confidence()))
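# Illustrative usage sketch (the file name is a placeholder):
#
#   detector = UniversalDetector()
#   with open('mystery.bin', 'rb') as f:
#       for chunk in iter(lambda: f.read(4096), b''):
#           detector.feed(chunk)
#           if detector.done:
#               break
#   detector.close()
#   print(detector.result)  # e.g. {'encoding': 'UTF-8', 'confidence': 0.99}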
| gpl-3.0 |
rezasafi/spark | examples/src/main/python/ml/kmeans_example.py | 52 | 1995 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An example demonstrating k-means clustering.
Run with:
bin/spark-submit examples/src/main/python/ml/kmeans_example.py
This example requires NumPy (http://www.numpy.org/).
"""
from __future__ import print_function
# $example on$
from pyspark.ml.clustering import KMeans
from pyspark.ml.evaluation import ClusteringEvaluator
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("KMeansExample")\
.getOrCreate()
# $example on$
# Loads data.
dataset = spark.read.format("libsvm").load("data/mllib/sample_kmeans_data.txt")
# Trains a k-means model.
kmeans = KMeans().setK(2).setSeed(1)
model = kmeans.fit(dataset)
# Make predictions
predictions = model.transform(dataset)
# Evaluate clustering by computing Silhouette score
evaluator = ClusteringEvaluator()
silhouette = evaluator.evaluate(predictions)
print("Silhouette with squared euclidean distance = " + str(silhouette))
# Shows the result.
centers = model.clusterCenters()
print("Cluster Centers: ")
for center in centers:
print(center)
# $example off$
spark.stop()
| apache-2.0 |
bopo/django-allauth | allauth/socialaccount/providers/soundcloud/tests.py | 71 | 1343 | from allauth.socialaccount.tests import create_oauth2_tests
from allauth.tests import MockedResponse
from allauth.socialaccount.providers import registry
from .provider import SoundCloudProvider
class SoundCloudTests(create_oauth2_tests(registry.by_id(SoundCloudProvider.id))):
def get_mocked_response(self):
return MockedResponse(200, """
{
"website": null,
"myspace_name": null,
"public_favorites_count": 0,
"followings_count": 1,
"full_name": "",
"id": 22341947,
"city": null,
"track_count": 0,
"playlist_count": 0,
"discogs_name": null,
"private_tracks_count": 0,
"followers_count": 0,
"online": true,
"username": "user187631676",
"description": null,
"kind": "user",
"website_title": null,
"primary_email_confirmed": false,
"permalink_url": "http://soundcloud.com/user187631676",
"private_playlists_count": 0,
"permalink": "user187631676",
"country": null,
"uri": "https://api.soundcloud.com/users/22341947",
"avatar_url": "https://a1.sndcdn.com/images/default_avatar_large.png?4b4189b",
"plan": "Free"
}""")
| mit |
atlassian/boto | boto/vpc/vpc.py | 135 | 7868 | # Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a Virtual Private Cloud.
"""
from boto.ec2.ec2object import TaggedEC2Object
class VPC(TaggedEC2Object):
def __init__(self, connection=None):
"""
Represents a VPC.
:ivar id: The unique ID of the VPC.
:ivar dhcp_options_id: The ID of the set of DHCP options you've associated with the VPC
(or default if the default options are associated with the VPC).
:ivar state: The current state of the VPC.
:ivar cidr_block: The CIDR block for the VPC.
:ivar is_default: Indicates whether the VPC is the default VPC.
:ivar instance_tenancy: The allowed tenancy of instances launched into the VPC.
:ivar classic_link_enabled: Indicates whether ClassicLink is enabled.
"""
super(VPC, self).__init__(connection)
self.id = None
self.dhcp_options_id = None
self.state = None
self.cidr_block = None
self.is_default = None
self.instance_tenancy = None
self.classic_link_enabled = None
def __repr__(self):
return 'VPC:%s' % self.id
def endElement(self, name, value, connection):
if name == 'vpcId':
self.id = value
elif name == 'dhcpOptionsId':
self.dhcp_options_id = value
elif name == 'state':
self.state = value
elif name == 'cidrBlock':
self.cidr_block = value
elif name == 'isDefault':
self.is_default = True if value == 'true' else False
elif name == 'instanceTenancy':
self.instance_tenancy = value
elif name == 'classicLinkEnabled':
self.classic_link_enabled = value
else:
setattr(self, name, value)
def delete(self):
return self.connection.delete_vpc(self.id)
def _update(self, updated):
self.__dict__.update(updated.__dict__)
def _get_status_then_update_vpc(self, get_status_method, validate=False,
dry_run=False):
vpc_list = get_status_method(
[self.id],
dry_run=dry_run
)
if len(vpc_list):
updated_vpc = vpc_list[0]
self._update(updated_vpc)
elif validate:
raise ValueError('%s is not a valid VPC ID' % (self.id,))
def update(self, validate=False, dry_run=False):
self._get_status_then_update_vpc(
self.connection.get_all_vpcs,
validate=validate,
dry_run=dry_run
)
return self.state
def update_classic_link_enabled(self, validate=False, dry_run=False):
"""
Updates instance's classic_link_enabled attribute
:rtype: bool
:return: self.classic_link_enabled after update has occurred.
"""
self._get_status_then_update_vpc(
self.connection.get_all_classic_link_vpcs,
validate=validate,
dry_run=dry_run
)
return self.classic_link_enabled
def disable_classic_link(self, dry_run=False):
"""
Disables ClassicLink for a VPC. You cannot disable ClassicLink for a
VPC that has EC2-Classic instances linked to it.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
return self.connection.disable_vpc_classic_link(self.id,
dry_run=dry_run)
def enable_classic_link(self, dry_run=False):
"""
Enables a VPC for ClassicLink. You can then link EC2-Classic instances
to your ClassicLink-enabled VPC to allow communication over private IP
addresses. You cannot enable your VPC for ClassicLink if any of your
VPC's route tables have existing routes for address ranges within the
10.0.0.0/8 IP address range, excluding local routes for VPCs in the
10.0.0.0/16 and 10.1.0.0/16 IP address ranges.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
return self.connection.enable_vpc_classic_link(self.id,
dry_run=dry_run)
def attach_classic_instance(self, instance_id, groups, dry_run=False):
"""
Links an EC2-Classic instance to a ClassicLink-enabled VPC through one
or more of the VPC's security groups. You cannot link an EC2-Classic
instance to more than one VPC at a time. You can only link an instance
that's in the running state. An instance is automatically unlinked from
a VPC when it's stopped. You can link it to the VPC again when you
restart it.
After you've linked an instance, you cannot change the VPC security
groups that are associated with it. To change the security groups, you
must first unlink the instance, and then link it again.
Linking your instance to a VPC is sometimes referred to as attaching
your instance.
:type instance_id: str
:param instance_id: The ID of the EC2-Classic instance to link.
:type groups: list
:param groups: The ID of one or more of the VPC's security groups.
You cannot specify security groups from a different VPC. The
members of the list can be
:class:`boto.ec2.securitygroup.SecurityGroup` objects or
strings of the id's of the security groups.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
return self.connection.attach_classic_link_vpc(
vpc_id=self.id,
instance_id=instance_id,
groups=groups,
dry_run=dry_run
)
def detach_classic_instance(self, instance_id, dry_run=False):
"""
Unlinks a linked EC2-Classic instance from a VPC. After the instance
has been unlinked, the VPC security groups are no longer associated
with it. An instance is automatically unlinked from a VPC when
it's stopped.
:type instance_id: str
:param instance_id: The ID of the instance to unlink from the VPC.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:rtype: bool
:return: True if successful
"""
return self.connection.detach_classic_link_vpc(
vpc_id=self.id,
instance_id=instance_id,
dry_run=dry_run
)
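# Illustrative usage sketch (region, instance and security group IDs are
# placeholders, not real resources):
#
#   import boto.vpc
#   conn = boto.vpc.connect_to_region('us-east-1')
#   vpc = conn.get_all_vpcs()[0]
#   if vpc.enable_classic_link():
#       vpc.attach_classic_instance('i-12345678', groups=['sg-12345678'])
#       vpc.detach_classic_instance('i-12345678')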
| mit |
github-account-because-they-want-it/django | django/shortcuts.py | 135 | 7957 | """
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
import warnings
from django.core import urlresolvers
from django.db.models.base import ModelBase
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
from django.http import (
Http404, HttpResponse, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.template import RequestContext, loader
from django.template.context import _current_app_undefined
from django.template.engine import (
_context_instance_undefined, _dictionary_undefined, _dirs_undefined,
)
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text
from django.utils.functional import Promise
def render_to_response(template_name, context=None,
context_instance=_context_instance_undefined,
content_type=None, status=None, dirs=_dirs_undefined,
dictionary=_dictionary_undefined, using=None):
"""
Returns a HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
"""
if (context_instance is _context_instance_undefined
and dirs is _dirs_undefined
and dictionary is _dictionary_undefined):
# No deprecated arguments were passed - use the new code path
content = loader.render_to_string(template_name, context, using=using)
else:
# Some deprecated arguments were passed - use the legacy code path
content = loader.render_to_string(
template_name, context, context_instance, dirs, dictionary,
using=using)
return HttpResponse(content, content_type, status)
def render(request, template_name, context=None,
context_instance=_context_instance_undefined,
content_type=None, status=None, current_app=_current_app_undefined,
dirs=_dirs_undefined, dictionary=_dictionary_undefined,
using=None):
"""
Returns a HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
Uses a RequestContext by default.
"""
if (context_instance is _context_instance_undefined
and current_app is _current_app_undefined
and dirs is _dirs_undefined
and dictionary is _dictionary_undefined):
# No deprecated arguments were passed - use the new code path
# In Django 1.10, request should become a positional argument.
content = loader.render_to_string(
template_name, context, request=request, using=using)
else:
# Some deprecated arguments were passed - use the legacy code path
if context_instance is not _context_instance_undefined:
if current_app is not _current_app_undefined:
raise ValueError('If you provide a context_instance you must '
'set its current_app before calling render()')
else:
context_instance = RequestContext(request)
if current_app is not _current_app_undefined:
warnings.warn(
"The current_app argument of render is deprecated. "
"Set the current_app attribute of request instead.",
RemovedInDjango110Warning, stacklevel=2)
request.current_app = current_app
# Directly set the private attribute to avoid triggering the
# warning in RequestContext.__init__.
context_instance._current_app = current_app
content = loader.render_to_string(
template_name, context, context_instance, dirs, dictionary,
using=using)
return HttpResponse(content, content_type, status)
def redirect(to, *args, **kwargs):
"""
Returns an HttpResponseRedirect to the appropriate URL for the arguments
passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be used as-is for the redirect location.
By default issues a temporary redirect; pass permanent=True to issue a
permanent redirect
"""
if kwargs.pop('permanent', False):
redirect_class = HttpResponsePermanentRedirect
else:
redirect_class = HttpResponseRedirect
return redirect_class(resolve_url(to, *args, **kwargs))
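# Illustrative usage sketch (view name and model instance are placeholders):
#
#   def post_comment(request, article):
#       ...
#       return redirect('article-detail', pk=article.pk)  # by view name
#       # return redirect(article)                    # via get_absolute_url()
#       # return redirect('/articles/', permanent=True)   # plain URL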
def _get_queryset(klass):
"""
Returns a QuerySet from a Model, Manager, or QuerySet. Created to make
get_object_or_404 and get_list_or_404 more DRY.
Raises a ValueError if klass is not a Model, Manager, or QuerySet.
"""
if isinstance(klass, QuerySet):
return klass
elif isinstance(klass, Manager):
manager = klass
elif isinstance(klass, ModelBase):
manager = klass._default_manager
else:
if isinstance(klass, type):
klass__name = klass.__name__
else:
klass__name = klass.__class__.__name__
raise ValueError("Object is of type '%s', but must be a Django Model, "
"Manager, or QuerySet" % klass__name)
return manager.all()
def get_object_or_404(klass, *args, **kwargs):
"""
Uses get() to return an object, or raises a Http404 exception if the object
does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
Note: Like with get(), a MultipleObjectsReturned exception will be raised if more than one
object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
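# Typical usage sketch ('Article' is a placeholder model):
#
#   def article_detail(request, pk):
#       article = get_object_or_404(Article, pk=pk)
#       return render(request, 'articles/detail.html', {'article': article})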
def get_list_or_404(klass, *args, **kwargs):
"""
Uses filter() to return a list of objects, or raise a Http404 exception if
the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
obj_list = list(queryset.filter(*args, **kwargs))
if not obj_list:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
return obj_list
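# Illustrative usage (hypothetical model): get_list_or_404(Post, published=True)
# returns a non-empty list or raises Http404, so the view body can iterate safely.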
def resolve_url(to, *args, **kwargs):
"""
Return a URL appropriate for the arguments passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be returned as-is.
"""
# If it's a model, use get_absolute_url()
if hasattr(to, 'get_absolute_url'):
return to.get_absolute_url()
if isinstance(to, Promise):
# Expand the lazy instance, as it can cause issues when it is passed
# further to some Python functions like urlparse.
to = force_text(to)
if isinstance(to, six.string_types):
# Handle relative URLs
if to.startswith(('./', '../')):
return to
# Next try a reverse URL resolution.
try:
return urlresolvers.reverse(to, args=args, kwargs=kwargs)
except urlresolvers.NoReverseMatch:
# If this is a callable, re-raise.
if callable(to):
raise
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return to
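# Illustrative behavior (hypothetical names): resolve_url('./thanks/') is returned
# unchanged; resolve_url('post-detail', pk=3) is reverse-resolved; on
# NoReverseMatch, a string containing '/' or '.' such as 'example.com/x' falls
# through and is returned as-is.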
| bsd-3-clause |
behanceops/moto | tests/test_swf/models/test_generic_type.py | 3 | 1866 | from sure import expect
from moto.swf.models import GenericType
# Tests for GenericType (ActivityType, WorkflowType)
class FooType(GenericType):
@property
def kind(self):
return "foo"
@property
def _configuration_keys(self):
return ["justAnExampleTimeout"]
def test_type_short_dict_representation():
_type = FooType("test-foo", "v1.0")
_type.to_short_dict().should.equal({"name": "test-foo", "version": "v1.0"})
def test_type_medium_dict_representation():
_type = FooType("test-foo", "v1.0")
_type.to_medium_dict()["fooType"].should.equal(_type.to_short_dict())
_type.to_medium_dict()["status"].should.equal("REGISTERED")
_type.to_medium_dict().should.contain("creationDate")
_type.to_medium_dict().should_not.contain("deprecationDate")
_type.to_medium_dict().should_not.contain("description")
_type.description = "foo bar"
_type.to_medium_dict()["description"].should.equal("foo bar")
_type.status = "DEPRECATED"
_type.to_medium_dict().should.contain("deprecationDate")
def test_type_full_dict_representation():
_type = FooType("test-foo", "v1.0")
_type.to_full_dict()["typeInfo"].should.equal(_type.to_medium_dict())
_type.to_full_dict()["configuration"].should.equal({})
_type.task_list = "foo"
    _type.to_full_dict()["configuration"]["defaultTaskList"].should.equal({"name": "foo"})
_type.just_an_example_timeout = "60"
_type.to_full_dict()["configuration"]["justAnExampleTimeout"].should.equal("60")
_type.non_whitelisted_property = "34"
keys = _type.to_full_dict()["configuration"].keys()
sorted(keys).should.equal(["defaultTaskList", "justAnExampleTimeout"])
def test_type_string_representation():
_type = FooType("test-foo", "v1.0")
str(_type).should.equal("FooType(name: test-foo, version: v1.0, status: REGISTERED)")
| apache-2.0 |
seppi91/CouchPotatoServer | libs/pyutil/observer.py | 106 | 3143 | # -*- test-case-name: allmydata.test.test_observer -*-
from twisted.internet import defer
try:
from foolscap.eventual import eventually
eventually # http://divmod.org/trac/ticket/1499
except ImportError:
from twisted.internet import reactor
def eventually(f, *args, **kwargs):
return reactor.callLater(0, f, *args, **kwargs)
"""The idiom we use is for the observed object to offer a method named
'when_something', which returns a deferred. That deferred will be fired when
something happens. The way this is typically implemented is that the observed
has an ObserverList whose when_fired method is called in the observed's
'when_something'."""
class OneShotObserverList:
"""A one-shot event distributor."""
def __init__(self):
self._fired = False
self._result = None
self._watchers = []
self.__repr__ = self._unfired_repr
def _unfired_repr(self):
return "<OneShotObserverList [%s]>" % (self._watchers, )
def _fired_repr(self):
return "<OneShotObserverList -> %s>" % (self._result, )
def _get_result(self):
return self._result
def when_fired(self):
if self._fired:
return defer.succeed(self._get_result())
d = defer.Deferred()
self._watchers.append(d)
return d
def fire(self, result):
assert not self._fired
self._fired = True
self._result = result
self._fire(result)
def _fire(self, result):
for w in self._watchers:
eventually(w.callback, result)
del self._watchers
self.__repr__ = self._fired_repr
def fire_if_not_fired(self, result):
if not self._fired:
self.fire(result)
class LazyOneShotObserverList(OneShotObserverList):
"""
    A variant of OneShotObserverList which does not retain
    the result it handles, but rather retains a callable()
    through which it retrieves the data if and when needed.
"""
def __init__(self):
OneShotObserverList.__init__(self)
def _get_result(self):
return self._result_producer()
def fire(self, result_producer):
"""
@param result_producer: a no-arg callable which
returns the data which is to be considered the
'result' for this observer list. note that this
function may be called multiple times - once
upon initial firing, and potentially once more
for each subsequent when_fired() deferred created
"""
assert not self._fired
self._fired = True
self._result_producer = result_producer
if self._watchers: # if not, don't call result_producer
self._fire(self._get_result())
class ObserverList:
"""A simple class to distribute events to a number of subscribers."""
def __init__(self):
self._watchers = []
def subscribe(self, observer):
self._watchers.append(observer)
def unsubscribe(self, observer):
self._watchers.remove(observer)
def notify(self, *args, **kwargs):
for o in self._watchers:
eventually(o, *args, **kwargs)
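# Illustrative sketch (not part of pyutil; names are hypothetical): the
# 'when_something' idiom from the module docstring above, backed by a
# OneShotObserverList.
class _ExampleDownload:
    def __init__(self):
        self._done = OneShotObserverList()
    def when_finished(self):
        # Every caller gets a Deferred that fires once with the result.
        return self._done.when_fired()
    def _download_complete(self, data):
        self._done.fire(data)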
| gpl-3.0 |
lzambella/Qyoutube-dl | youtube_dl/extractor/pbs.py | 8 | 27147 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
determine_ext,
int_or_none,
strip_jsonp,
unified_strdate,
US_RATINGS,
)
class PBSIE(InfoExtractor):
_STATIONS = (
(r'(?:video|www|player)\.pbs\.org', 'PBS: Public Broadcasting Service'), # http://www.pbs.org/
(r'video\.aptv\.org', 'APT - Alabama Public Television (WBIQ)'), # http://aptv.org/
(r'video\.gpb\.org', 'GPB/Georgia Public Broadcasting (WGTV)'), # http://www.gpb.org/
(r'video\.mpbonline\.org', 'Mississippi Public Broadcasting (WMPN)'), # http://www.mpbonline.org
(r'video\.wnpt\.org', 'Nashville Public Television (WNPT)'), # http://www.wnpt.org
(r'video\.wfsu\.org', 'WFSU-TV (WFSU)'), # http://wfsu.org/
(r'video\.wsre\.org', 'WSRE (WSRE)'), # http://www.wsre.org
(r'video\.wtcitv\.org', 'WTCI (WTCI)'), # http://www.wtcitv.org
(r'video\.pba\.org', 'WPBA/Channel 30 (WPBA)'), # http://pba.org/
(r'video\.alaskapublic\.org', 'Alaska Public Media (KAKM)'), # http://alaskapublic.org/kakm
# (r'kuac\.org', 'KUAC (KUAC)'), # http://kuac.org/kuac-tv/
# (r'ktoo\.org', '360 North (KTOO)'), # http://www.ktoo.org/
# (r'azpm\.org', 'KUAT 6 (KUAT)'), # http://www.azpm.org/
(r'video\.azpbs\.org', 'Arizona PBS (KAET)'), # http://www.azpbs.org
(r'portal\.knme\.org', 'KNME-TV/Channel 5 (KNME)'), # http://www.newmexicopbs.org/
(r'video\.vegaspbs\.org', 'Vegas PBS (KLVX)'), # http://vegaspbs.org/
(r'watch\.aetn\.org', 'AETN/ARKANSAS ETV NETWORK (KETS)'), # http://www.aetn.org/
(r'video\.ket\.org', 'KET (WKLE)'), # http://www.ket.org/
(r'video\.wkno\.org', 'WKNO/Channel 10 (WKNO)'), # http://www.wkno.org/
(r'video\.lpb\.org', 'LPB/LOUISIANA PUBLIC BROADCASTING (WLPB)'), # http://www.lpb.org/
(r'videos\.oeta\.tv', 'OETA (KETA)'), # http://www.oeta.tv
(r'video\.optv\.org', 'Ozarks Public Television (KOZK)'), # http://www.optv.org/
(r'watch\.wsiu\.org', 'WSIU Public Broadcasting (WSIU)'), # http://www.wsiu.org/
(r'video\.keet\.org', 'KEET TV (KEET)'), # http://www.keet.org
(r'pbs\.kixe\.org', 'KIXE/Channel 9 (KIXE)'), # http://kixe.org/
(r'video\.kpbs\.org', 'KPBS San Diego (KPBS)'), # http://www.kpbs.org/
(r'video\.kqed\.org', 'KQED (KQED)'), # http://www.kqed.org
(r'vids\.kvie\.org', 'KVIE Public Television (KVIE)'), # http://www.kvie.org
(r'video\.pbssocal\.org', 'PBS SoCal/KOCE (KOCE)'), # http://www.pbssocal.org/
(r'video\.valleypbs\.org', 'ValleyPBS (KVPT)'), # http://www.valleypbs.org/
(r'video\.cptv\.org', 'CONNECTICUT PUBLIC TELEVISION (WEDH)'), # http://cptv.org
(r'watch\.knpb\.org', 'KNPB Channel 5 (KNPB)'), # http://www.knpb.org/
(r'video\.soptv\.org', 'SOPTV (KSYS)'), # http://www.soptv.org
# (r'klcs\.org', 'KLCS/Channel 58 (KLCS)'), # http://www.klcs.org
# (r'krcb\.org', 'KRCB Television & Radio (KRCB)'), # http://www.krcb.org
# (r'kvcr\.org', 'KVCR TV/DT/FM :: Vision for the Future (KVCR)'), # http://kvcr.org
(r'video\.rmpbs\.org', 'Rocky Mountain PBS (KRMA)'), # http://www.rmpbs.org
(r'video\.kenw\.org', 'KENW-TV3 (KENW)'), # http://www.kenw.org
(r'video\.kued\.org', 'KUED Channel 7 (KUED)'), # http://www.kued.org
(r'video\.wyomingpbs\.org', 'Wyoming PBS (KCWC)'), # http://www.wyomingpbs.org
(r'video\.cpt12\.org', 'Colorado Public Television / KBDI 12 (KBDI)'), # http://www.cpt12.org/
(r'video\.kbyueleven\.org', 'KBYU-TV (KBYU)'), # http://www.kbyutv.org/
(r'video\.thirteen\.org', 'Thirteen/WNET New York (WNET)'), # http://www.thirteen.org
(r'video\.wgbh\.org', 'WGBH/Channel 2 (WGBH)'), # http://wgbh.org
(r'video\.wgby\.org', 'WGBY (WGBY)'), # http://www.wgby.org
(r'watch\.njtvonline\.org', 'NJTV Public Media NJ (WNJT)'), # http://www.njtvonline.org/
# (r'ripbs\.org', 'Rhode Island PBS (WSBE)'), # http://www.ripbs.org/home/
(r'watch\.wliw\.org', 'WLIW21 (WLIW)'), # http://www.wliw.org/
(r'video\.mpt\.tv', 'mpt/Maryland Public Television (WMPB)'), # http://www.mpt.org
(r'watch\.weta\.org', 'WETA Television and Radio (WETA)'), # http://www.weta.org
(r'video\.whyy\.org', 'WHYY (WHYY)'), # http://www.whyy.org
(r'video\.wlvt\.org', 'PBS 39 (WLVT)'), # http://www.wlvt.org/
(r'video\.wvpt\.net', 'WVPT - Your Source for PBS and More! (WVPT)'), # http://www.wvpt.net
(r'video\.whut\.org', 'Howard University Television (WHUT)'), # http://www.whut.org
(r'video\.wedu\.org', 'WEDU PBS (WEDU)'), # http://www.wedu.org
(r'video\.wgcu\.org', 'WGCU Public Media (WGCU)'), # http://www.wgcu.org/
# (r'wjct\.org', 'WJCT Public Broadcasting (WJCT)'), # http://www.wjct.org
(r'video\.wpbt2\.org', 'WPBT2 (WPBT)'), # http://www.wpbt2.org
(r'video\.wucftv\.org', 'WUCF TV (WUCF)'), # http://wucftv.org
(r'video\.wuft\.org', 'WUFT/Channel 5 (WUFT)'), # http://www.wuft.org
(r'watch\.wxel\.org', 'WXEL/Channel 42 (WXEL)'), # http://www.wxel.org/home/
(r'video\.wlrn\.org', 'WLRN/Channel 17 (WLRN)'), # http://www.wlrn.org/
(r'video\.wusf\.usf\.edu', 'WUSF Public Broadcasting (WUSF)'), # http://wusf.org/
(r'video\.scetv\.org', 'ETV (WRLK)'), # http://www.scetv.org
(r'video\.unctv\.org', 'UNC-TV (WUNC)'), # http://www.unctv.org/
# (r'pbsguam\.org', 'PBS Guam (KGTF)'), # http://www.pbsguam.org/
(r'video\.pbshawaii\.org', 'PBS Hawaii - Oceanic Cable Channel 10 (KHET)'), # http://www.pbshawaii.org/
(r'video\.idahoptv\.org', 'Idaho Public Television (KAID)'), # http://idahoptv.org
(r'video\.ksps\.org', 'KSPS (KSPS)'), # http://www.ksps.org/home/
(r'watch\.opb\.org', 'OPB (KOPB)'), # http://www.opb.org
(r'watch\.nwptv\.org', 'KWSU/Channel 10 & KTNW/Channel 31 (KWSU)'), # http://www.kwsu.org
(r'video\.will\.illinois\.edu', 'WILL-TV (WILL)'), # http://will.illinois.edu/
(r'video\.networkknowledge\.tv', 'Network Knowledge - WSEC/Springfield (WSEC)'), # http://www.wsec.tv
(r'video\.wttw\.com', 'WTTW11 (WTTW)'), # http://www.wttw.com/
# (r'wtvp\.org', 'WTVP & WTVP.org, Public Media for Central Illinois (WTVP)'), # http://www.wtvp.org/
(r'video\.iptv\.org', 'Iowa Public Television/IPTV (KDIN)'), # http://www.iptv.org/
(r'video\.ninenet\.org', 'Nine Network (KETC)'), # http://www.ninenet.org
(r'video\.wfwa\.org', 'PBS39 Fort Wayne (WFWA)'), # http://wfwa.org/
(r'video\.wfyi\.org', 'WFYI Indianapolis (WFYI)'), # http://www.wfyi.org
(r'video\.mptv\.org', 'Milwaukee Public Television (WMVS)'), # http://www.mptv.org
(r'video\.wnin\.org', 'WNIN (WNIN)'), # http://www.wnin.org/
(r'video\.wnit\.org', 'WNIT Public Television (WNIT)'), # http://www.wnit.org/
(r'video\.wpt\.org', 'WPT (WPNE)'), # http://www.wpt.org/
(r'video\.wvut\.org', 'WVUT/Channel 22 (WVUT)'), # http://wvut.org/
(r'video\.weiu\.net', 'WEIU/Channel 51 (WEIU)'), # http://www.weiu.net
(r'video\.wqpt\.org', 'WQPT-TV (WQPT)'), # http://www.wqpt.org
(r'video\.wycc\.org', 'WYCC PBS Chicago (WYCC)'), # http://www.wycc.org
# (r'lakeshorepublicmedia\.org', 'Lakeshore Public Television (WYIN)'), # http://lakeshorepublicmedia.org/
(r'video\.wipb\.org', 'WIPB-TV (WIPB)'), # http://wipb.org
(r'video\.indianapublicmedia\.org', 'WTIU (WTIU)'), # http://indianapublicmedia.org/tv/
(r'watch\.cetconnect\.org', 'CET (WCET)'), # http://www.cetconnect.org
(r'video\.thinktv\.org', 'ThinkTVNetwork (WPTD)'), # http://www.thinktv.org
(r'video\.wbgu\.org', 'WBGU-TV (WBGU)'), # http://wbgu.org
(r'video\.wgvu\.org', 'WGVU TV (WGVU)'), # http://www.wgvu.org/
(r'video\.netnebraska\.org', 'NET1 (KUON)'), # http://netnebraska.org
(r'video\.pioneer\.org', 'Pioneer Public Television (KWCM)'), # http://www.pioneer.org
(r'watch\.sdpb\.org', 'SDPB Television (KUSD)'), # http://www.sdpb.org
(r'video\.tpt\.org', 'TPT (KTCA)'), # http://www.tpt.org
(r'watch\.ksmq\.org', 'KSMQ (KSMQ)'), # http://www.ksmq.org/
(r'watch\.kpts\.org', 'KPTS/Channel 8 (KPTS)'), # http://www.kpts.org/
(r'watch\.ktwu\.org', 'KTWU/Channel 11 (KTWU)'), # http://ktwu.org
# (r'shptv\.org', 'Smoky Hills Public Television (KOOD)'), # http://www.shptv.org
# (r'kcpt\.org', 'KCPT Kansas City Public Television (KCPT)'), # http://kcpt.org/
# (r'blueridgepbs\.org', 'Blue Ridge PBS (WBRA)'), # http://www.blueridgepbs.org/
(r'watch\.easttennesseepbs\.org', 'East Tennessee PBS (WSJK)'), # http://easttennesseepbs.org
(r'video\.wcte\.tv', 'WCTE-TV (WCTE)'), # http://www.wcte.org
(r'video\.wljt\.org', 'WLJT, Channel 11 (WLJT)'), # http://wljt.org/
(r'video\.wosu\.org', 'WOSU TV (WOSU)'), # http://wosu.org/
(r'video\.woub\.org', 'WOUB/WOUC (WOUB)'), # http://woub.org/tv/index.php?section=5
(r'video\.wvpublic\.org', 'WVPB (WVPB)'), # http://wvpublic.org/
(r'video\.wkyupbs\.org', 'WKYU-PBS (WKYU)'), # http://www.wkyupbs.org
# (r'wyes\.org', 'WYES-TV/New Orleans (WYES)'), # http://www.wyes.org
(r'video\.kera\.org', 'KERA 13 (KERA)'), # http://www.kera.org/
(r'video\.mpbn\.net', 'MPBN (WCBB)'), # http://www.mpbn.net/
(r'video\.mountainlake\.org', 'Mountain Lake PBS (WCFE)'), # http://www.mountainlake.org/
(r'video\.nhptv\.org', 'NHPTV (WENH)'), # http://nhptv.org/
(r'video\.vpt\.org', 'Vermont PBS (WETK)'), # http://www.vpt.org
(r'video\.witf\.org', 'witf (WITF)'), # http://www.witf.org
(r'watch\.wqed\.org', 'WQED Multimedia (WQED)'), # http://www.wqed.org/
(r'video\.wmht\.org', 'WMHT Educational Telecommunications (WMHT)'), # http://www.wmht.org/home/
(r'video\.deltabroadcasting\.org', 'Q-TV (WDCQ)'), # http://www.deltabroadcasting.org
(r'video\.dptv\.org', 'WTVS Detroit Public TV (WTVS)'), # http://www.dptv.org/
(r'video\.wcmu\.org', 'CMU Public Television (WCMU)'), # http://www.wcmu.org
(r'video\.wkar\.org', 'WKAR-TV (WKAR)'), # http://wkar.org/
(r'wnmuvideo\.nmu\.edu', 'WNMU-TV Public TV 13 (WNMU)'), # http://wnmutv.nmu.edu
(r'video\.wdse\.org', 'WDSE - WRPT (WDSE)'), # http://www.wdse.org/
(r'video\.wgte\.org', 'WGTE TV (WGTE)'), # http://www.wgte.org
(r'video\.lptv\.org', 'Lakeland Public Television (KAWE)'), # http://www.lakelandptv.org
# (r'prairiepublic\.org', 'PRAIRIE PUBLIC (KFME)'), # http://www.prairiepublic.org/
(r'video\.kmos\.org', 'KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS)'), # http://www.kmos.org/
(r'watch\.montanapbs\.org', 'MontanaPBS (KUSM)'), # http://montanapbs.org
(r'video\.krwg\.org', 'KRWG/Channel 22 (KRWG)'), # http://www.krwg.org
(r'video\.kacvtv\.org', 'KACV (KACV)'), # http://www.panhandlepbs.org/home/
(r'video\.kcostv\.org', 'KCOS/Channel 13 (KCOS)'), # www.kcostv.org
(r'video\.wcny\.org', 'WCNY/Channel 24 (WCNY)'), # http://www.wcny.org
(r'video\.wned\.org', 'WNED (WNED)'), # http://www.wned.org/
(r'watch\.wpbstv\.org', 'WPBS (WPBS)'), # http://www.wpbstv.org
(r'video\.wskg\.org', 'WSKG Public TV (WSKG)'), # http://wskg.org
(r'video\.wxxi\.org', 'WXXI (WXXI)'), # http://wxxi.org
(r'video\.wpsu\.org', 'WPSU (WPSU)'), # http://www.wpsu.org
# (r'wqln\.org', 'WQLN/Channel 54 (WQLN)'), # http://www.wqln.org
(r'on-demand\.wvia\.org', 'WVIA Public Media Studios (WVIA)'), # http://www.wvia.org/
(r'video\.wtvi\.org', 'WTVI (WTVI)'), # http://www.wtvi.org/
# (r'whro\.org', 'WHRO (WHRO)'), # http://whro.org
(r'video\.westernreservepublicmedia\.org', 'Western Reserve PBS (WNEO)'), # http://www.WesternReservePublicMedia.org/
(r'video\.ideastream\.org', 'WVIZ/PBS ideastream (WVIZ)'), # http://www.wviz.org/
(r'video\.kcts9\.org', 'KCTS 9 (KCTS)'), # http://kcts9.org/
(r'video\.basinpbs\.org', 'Basin PBS (KPBT)'), # http://www.basinpbs.org
(r'video\.houstonpbs\.org', 'KUHT / Channel 8 (KUHT)'), # http://www.houstonpublicmedia.org/
# (r'tamu\.edu', 'KAMU - TV (KAMU)'), # http://KAMU.tamu.edu
# (r'kedt\.org', 'KEDT/Channel 16 (KEDT)'), # http://www.kedt.org
(r'video\.klrn\.org', 'KLRN (KLRN)'), # http://www.klrn.org
(r'video\.klru\.tv', 'KLRU (KLRU)'), # http://www.klru.org
# (r'kmbh\.org', 'KMBH-TV (KMBH)'), # http://www.kmbh.org
# (r'knct\.org', 'KNCT (KNCT)'), # http://www.knct.org
# (r'ktxt\.org', 'KTTZ-TV (KTXT)'), # http://www.ktxt.org
(r'video\.wtjx\.org', 'WTJX Channel 12 (WTJX)'), # http://www.wtjx.org/
(r'video\.ideastations\.org', 'WCVE PBS (WCVE)'), # http://ideastations.org/
(r'video\.kbtc\.org', 'KBTC Public Television (KBTC)'), # http://kbtc.org
)
IE_NAME = 'pbs'
IE_DESC = 'Public Broadcasting Service (PBS) and member stations: %s' % ', '.join(list(zip(*_STATIONS))[1])
_VALID_URL = r'''(?x)https?://
(?:
# Direct video URL
(?:%s)/(?:viralplayer|video)/(?P<id>[0-9]+)/? |
# Article with embedded player (or direct video)
(?:www\.)?pbs\.org/(?:[^/]+/){2,5}(?P<presumptive_id>[^/]+?)(?:\.html)?/?(?:$|[?\#]) |
# Player
(?:video|player)\.pbs\.org/(?:widget/)?partnerplayer/(?P<player_id>[^/]+)/
)
''' % '|'.join(list(zip(*_STATIONS))[0])
_TESTS = [
{
'url': 'http://www.pbs.org/tpt/constitution-usa-peter-sagal/watch/a-more-perfect-union/',
'md5': 'ce1888486f0908d555a8093cac9a7362',
'info_dict': {
'id': '2365006249',
'ext': 'mp4',
'title': 'Constitution USA with Peter Sagal - A More Perfect Union',
'description': 'md5:ba0c207295339c8d6eced00b7c363c6a',
'duration': 3190,
},
'params': {
'skip_download': True, # requires ffmpeg
},
},
{
'url': 'http://www.pbs.org/wgbh/pages/frontline/losing-iraq/',
'md5': '143c98aa54a346738a3d78f54c925321',
'info_dict': {
'id': '2365297690',
'ext': 'mp4',
'title': 'FRONTLINE - Losing Iraq',
'description': 'md5:f5bfbefadf421e8bb8647602011caf8e',
'duration': 5050,
},
'params': {
'skip_download': True, # requires ffmpeg
}
},
{
'url': 'http://www.pbs.org/newshour/bb/education-jan-june12-cyberschools_02-23/',
'md5': 'b19856d7f5351b17a5ab1dc6a64be633',
'info_dict': {
'id': '2201174722',
'ext': 'mp4',
'title': 'PBS NewsHour - Cyber Schools Gain Popularity, but Quality Questions Persist',
'description': 'md5:5871c15cba347c1b3d28ac47a73c7c28',
'duration': 801,
},
},
{
'url': 'http://www.pbs.org/wnet/gperf/dudamel-conducts-verdi-requiem-hollywood-bowl-full-episode/3374/',
'md5': 'c62859342be2a0358d6c9eb306595978',
'info_dict': {
'id': '2365297708',
'ext': 'mp4',
'description': 'md5:68d87ef760660eb564455eb30ca464fe',
'title': 'Great Performances - Dudamel Conducts Verdi Requiem at the Hollywood Bowl - Full',
'duration': 6559,
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True, # requires ffmpeg
},
},
{
'url': 'http://www.pbs.org/wgbh/nova/earth/killer-typhoon.html',
'md5': '908f3e5473a693b266b84e25e1cf9703',
'info_dict': {
'id': '2365160389',
'display_id': 'killer-typhoon',
'ext': 'mp4',
'description': 'md5:c741d14e979fc53228c575894094f157',
'title': 'NOVA - Killer Typhoon',
'duration': 3172,
'thumbnail': 're:^https?://.*\.jpg$',
'upload_date': '20140122',
'age_limit': 10,
},
'params': {
'skip_download': True, # requires ffmpeg
},
},
{
'url': 'http://www.pbs.org/wgbh/pages/frontline/united-states-of-secrets/',
'info_dict': {
'id': 'united-states-of-secrets',
},
'playlist_count': 2,
},
{
'url': 'http://www.pbs.org/wgbh/americanexperience/films/death/player/',
'info_dict': {
'id': '2276541483',
'display_id': 'player',
'ext': 'mp4',
'title': 'American Experience - Death and the Civil War, Chapter 1',
'description': 'American Experience, TV’s most-watched history series, brings to life the compelling stories from our past that inform our understanding of the world today.',
'duration': 682,
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True, # requires ffmpeg
},
},
{
'url': 'http://video.pbs.org/video/2365367186/',
'info_dict': {
'id': '2365367186',
'display_id': '2365367186',
'ext': 'mp4',
'title': 'To Catch A Comet - Full Episode',
'description': 'On November 12, 2014, billions of kilometers from Earth, spacecraft orbiter Rosetta and lander Philae did what no other had dared to attempt \u2014 land on the volatile surface of a comet as it zooms around the sun at 67,000 km/hr. The European Space Agency hopes this mission can help peer into our past and unlock secrets of our origins.',
'duration': 3342,
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True, # requires ffmpeg
},
'skip': 'Expired',
},
{
# Video embedded in iframe containing angle brackets as attribute's value (e.g.
# "<iframe style='position: absolute;<br />\ntop: 0; left: 0;' ...", see
# https://github.com/rg3/youtube-dl/issues/7059)
'url': 'http://www.pbs.org/food/features/a-chefs-life-season-3-episode-5-prickly-business/',
'info_dict': {
'id': '2365546844',
'display_id': 'a-chefs-life-season-3-episode-5-prickly-business',
'ext': 'mp4',
'title': "A Chef's Life - Season 3, Ep. 5: Prickly Business",
'description': 'md5:61db2ddf27c9912f09c241014b118ed1',
'duration': 1480,
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True, # requires ffmpeg
},
},
{
# Frontline video embedded via flp2012.js
'url': 'http://www.pbs.org/wgbh/pages/frontline/the-atomic-artists',
'info_dict': {
'id': '2070868960',
'display_id': 'the-atomic-artists',
'ext': 'mp4',
'title': 'FRONTLINE - The Atomic Artists',
'description': 'md5:f5bfbefadf421e8bb8647602011caf8e',
'duration': 723,
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True, # requires ffmpeg
},
},
{
'url': 'http://player.pbs.org/widget/partnerplayer/2365297708/?start=0&end=0&chapterbar=false&endscreen=false&topbar=true',
'only_matching': True,
},
{
'url': 'http://watch.knpb.org/video/2365616055/',
'only_matching': True,
}
]
_ERRORS = {
101: 'We\'re sorry, but this video is not yet available.',
403: 'We\'re sorry, but this video is not available in your region due to right restrictions.',
404: 'We are experiencing technical difficulties that are preventing us from playing the video at this time. Please check back again soon.',
410: 'This video has expired and is no longer available for online streaming.',
}
def _extract_webpage(self, url):
mobj = re.match(self._VALID_URL, url)
presumptive_id = mobj.group('presumptive_id')
display_id = presumptive_id
if presumptive_id:
webpage = self._download_webpage(url, display_id)
upload_date = unified_strdate(self._search_regex(
r'<input type="hidden" id="air_date_[0-9]+" value="([^"]+)"',
webpage, 'upload date', default=None))
# tabbed frontline videos
tabbed_videos = re.findall(
r'<div[^>]+class="videotab[^"]*"[^>]+vid="(\d+)"', webpage)
if tabbed_videos:
return tabbed_videos, presumptive_id, upload_date
MEDIA_ID_REGEXES = [
r"div\s*:\s*'videoembed'\s*,\s*mediaid\s*:\s*'(\d+)'", # frontline video embed
r'class="coveplayerid">([^<]+)<', # coveplayer
r'<section[^>]+data-coveid="(\d+)"', # coveplayer from http://www.pbs.org/wgbh/frontline/film/real-csi/
r'<input type="hidden" id="pbs_video_id_[0-9]+" value="([0-9]+)"/>', # jwplayer
]
media_id = self._search_regex(
MEDIA_ID_REGEXES, webpage, 'media ID', fatal=False, default=None)
if media_id:
return media_id, presumptive_id, upload_date
        # Frontline video embedded via flp
video_id = self._search_regex(
r'videoid\s*:\s*"([\d+a-z]{7,})"', webpage, 'videoid', default=None)
if video_id:
            # prg_id calculation is reverse engineered from
# http://www.pbs.org/wgbh/pages/frontline/js/flp2012.js
prg_id = self._search_regex(
r'videoid\s*:\s*"([\d+a-z]{7,})"', webpage, 'videoid')[7:]
if 'q' in prg_id:
prg_id = prg_id.split('q')[1]
prg_id = int(prg_id, 16)
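            # Illustrative walk-through (hypothetical videoid, not from a real
            # page): 'frontli331a3' drops its 7-character prefix to leave
            # '331a3', and int('331a3', 16) == 209315 feeds the getdir URL below.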
getdir = self._download_json(
'http://www.pbs.org/wgbh/pages/frontline/.json/getdir/getdir%d.json' % prg_id,
presumptive_id, 'Downloading getdir JSON',
transform_source=strip_jsonp)
return getdir['mid'], presumptive_id, upload_date
for iframe in re.findall(r'(?s)<iframe(.+?)></iframe>', webpage):
url = self._search_regex(
r'src=(["\'])(?P<url>.+?partnerplayer.+?)\1', iframe,
'player URL', default=None, group='url')
if url:
break
mobj = re.match(self._VALID_URL, url)
player_id = mobj.group('player_id')
if not display_id:
display_id = player_id
if player_id:
player_page = self._download_webpage(
url, display_id, note='Downloading player page',
errnote='Could not download player page')
video_id = self._search_regex(
r'<div\s+id="video_([0-9]+)"', player_page, 'video ID')
else:
video_id = mobj.group('id')
display_id = video_id
return video_id, display_id, None
def _real_extract(self, url):
video_id, display_id, upload_date = self._extract_webpage(url)
if isinstance(video_id, list):
entries = [self.url_result(
'http://video.pbs.org/video/%s' % vid_id, 'PBS', vid_id)
for vid_id in video_id]
return self.playlist_result(entries, display_id)
info = self._download_json(
'http://player.pbs.org/videoInfo/%s?format=json&type=partner' % video_id,
display_id)
formats = []
for encoding_name in ('recommended_encoding', 'alternate_encoding'):
redirect = info.get(encoding_name)
if not redirect:
continue
redirect_url = redirect.get('url')
if not redirect_url:
continue
redirect_info = self._download_json(
redirect_url + '?format=json', display_id,
'Downloading %s video url info' % encoding_name)
if redirect_info['status'] == 'error':
raise ExtractorError(
'%s said: %s' % (
self.IE_NAME,
self._ERRORS.get(redirect_info['http_code'], redirect_info['message'])),
expected=True)
format_url = redirect_info.get('url')
if not format_url:
continue
if determine_ext(format_url) == 'm3u8':
formats.extend(self._extract_m3u8_formats(
format_url, display_id, 'mp4', preference=1, m3u8_id='hls'))
else:
formats.append({
'url': format_url,
'format_id': redirect.get('eeid'),
})
self._sort_formats(formats)
rating_str = info.get('rating')
if rating_str is not None:
rating_str = rating_str.rpartition('-')[2]
age_limit = US_RATINGS.get(rating_str)
subtitles = {}
closed_captions_url = info.get('closed_captions_url')
if closed_captions_url:
subtitles['en'] = [{
'ext': 'ttml',
'url': closed_captions_url,
}]
# info['title'] is often incomplete (e.g. 'Full Episode', 'Episode 5', etc)
# Try turning it to 'program - title' naming scheme if possible
alt_title = info.get('program', {}).get('title')
if alt_title:
            info['title'] = alt_title + ' - ' + re.sub(r'^' + re.escape(alt_title) + r'[\s\-:]+', '', info['title'])
return {
'id': video_id,
'display_id': display_id,
'title': info['title'],
'description': info['program'].get('description'),
'thumbnail': info.get('image_url'),
'duration': int_or_none(info.get('duration')),
'age_limit': age_limit,
'upload_date': upload_date,
'formats': formats,
'subtitles': subtitles,
}
| gpl-3.0 |
davalb/Sylius | docs/_exts/sensio/sphinx/configurationblock.py | 65 | 2644 | # -*- coding: utf-8 -*-
"""
:copyright: (c) 2010-2012 Fabien Potencier
:license: MIT, see LICENSE for more details.
"""
from docutils.parsers.rst import Directive, directives
from docutils import nodes
class configurationblock(nodes.General, nodes.Element):
pass
class ConfigurationBlock(Directive):
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
formats = {
'html': 'HTML',
'xml': 'XML',
'php': 'PHP',
'yaml': 'YAML',
'jinja': 'Twig',
'html+jinja': 'Twig',
'jinja+html': 'Twig',
'php+html': 'PHP',
'html+php': 'PHP',
'ini': 'INI',
'php-annotations': 'Annotations',
'php-standalone': 'Standalone Use',
'php-symfony': 'Framework Use',
}
def run(self):
env = self.state.document.settings.env
node = nodes.Element()
node.document = self.state.document
self.state.nested_parse(self.content, self.content_offset, node)
entries = []
for i, child in enumerate(node):
if isinstance(child, nodes.literal_block):
# add a title (the language name) before each block
#targetid = "configuration-block-%d" % env.new_serialno('configuration-block')
#targetnode = nodes.target('', '', ids=[targetid])
#targetnode.append(child)
innernode = nodes.emphasis(self.formats[child['language']], self.formats[child['language']])
para = nodes.paragraph()
para += [innernode, child]
entry = nodes.list_item('')
entry.append(para)
entries.append(entry)
resultnode = configurationblock()
resultnode.append(nodes.bullet_list('', *entries))
return [resultnode]
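# Illustrative usage (not part of the extension; the option shown is
# hypothetical). In a .rst file, each code-block's language maps to a tab
# label via the `formats` dict above:
#
#     .. configuration-block::
#
#         .. code-block:: yaml
#
#             sylius_shop: { locale_switcher: storage }
#
#         .. code-block:: xml
#
#             <config locale-switcher="storage" />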
def visit_configurationblock_html(self, node):
self.body.append(self.starttag(node, 'div', CLASS='configuration-block'))
def depart_configurationblock_html(self, node):
self.body.append('</div>\n')
def visit_configurationblock_latex(self, node):
pass
def depart_configurationblock_latex(self, node):
pass
def setup(app):
app.add_node(configurationblock,
html=(visit_configurationblock_html, depart_configurationblock_html),
latex=(visit_configurationblock_latex, depart_configurationblock_latex))
app.add_directive('configuration-block', ConfigurationBlock)
| mit |