repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
|---|---|---|---|---|---|
jj46/fastdns
|
fastdns/resolver.py
|
1
|
10001
|
# -*- coding: utf-8 -*-
"""
resolver.py - A fast multi-threaded DNS resolver using the dnspython library
Examples:
# Importing the library
# from fastdns import resolver
# Resolving many DNS hosts
# >>> from pprint import pprint
# >>> r = resolver.Resolver(domain='cisco.com')
# >>> r.hostnames = {'www', 'wwwin', 'ds', 'release'}
# >>> cache = r.resolve()
# >>> pprint(cache)
# {'ds': {'171.71.198.38',
# '173.36.12.198',
# '173.36.129.230',
# '173.37.248.6',
# '64.100.37.70'},
# 'release': {'173.36.64.40', '173.37.253.56'},
# 'www': {'173.37.145.84', '72.163.4.161'},
# 'wwwin': {'173.36.27.110', '173.37.111.50'}}
# Resolving mixed hostnames and IPs (v4 and v6)
# >>> hostnames = {'www', '2001:420:210d::a', '8.8.8.8', 'www.purple.com'}
# >>> cache = resolver.Resolver(hostnames=hostnames, domain='cisco.com').resolve()
# >>> pprint(cache)
# {'dns-rtp': {'2001:420:210d::a'},
# 'google-public-dns-a.google.com': {'8.8.8.8'},
# 'www': {'173.37.145.84', '72.163.4.161'},
# 'www.purple.com': {'153.104.63.227'}}
# Performing a single DNS lookup
# >>> resolver.dns_lookup('wwwin')
# ['173.37.111.50']
# Performing a single reverse lookup
# >>> resolver.reverse_lookup('8.8.8.8')
# 'google-public-dns-a.google.com'
"""
from queue import Queue
from multiprocessing import Lock
from threading import Thread
import logging
import sys
import re
from ipaddress import ip_address
import traceback
import dns.resolver
import dns.exception
import dns.reversename
import requests
# Library-style logging: attach a NullHandler so that importing applications
# decide where (or whether) log records go (see the logging HOWTO for libraries).
logging.getLogger(__name__).addHandler(logging.NullHandler())
def get_public_dns_servers(ipv6=False, max_per_country=100, countries=('us', 'gb')):
    """
    Get DNS servers from https://public-dns.info/nameserver/<country_code>.txt

    Args:
        ipv6 (bool): Also include IPv6 servers in the result (Default: False)
        max_per_country (int): Maximum DNS servers per country to use (Default: 100)
        countries (iterable): Country codes to get DNS servers from (Default: ('us', 'gb'))

    Returns:
        set: DNS server IP addresses
    """
    logging.info('Getting public DNS servers')
    servers = set()
    for country in countries:
        try:
            response = requests.get('https://public-dns.info/nameserver/{0}.txt'.format(country))
            # De-duplicate and sort so the max_per_country cut is deterministic.
            candidates = sorted(set(response.text.split()))
            logging.debug('Got {0} servers for country code "{1}"'.format(len(candidates), country))
            candidates = candidates[:max_per_country]
        except requests.RequestException:
            # Narrowed from a bare except: only network/HTTP failures are expected here.
            logging.error('Unable to retrieve DNS servers for country "{0}"'.format(country))
            continue
        # Single validation loop; the original duplicated it per address family.
        for server in candidates:
            try:
                ip = ip_address(server)
            except ValueError:
                # ip_address raises ValueError for malformed addresses.
                logging.error('Invalid IP: {0}'.format(server))
                continue
            # Skip IPv6 servers unless explicitly requested.
            if ipv6 or ip.version != 6:
                servers.add(str(ip))
    logging.info('Got {0} public DNS servers'.format(len(servers)))
    return servers
def without_domain(host, domain):
    """
    Remove a trailing DNS domain (and its separating dot) from a host name.

    Args:
        host (str): hostname
        domain (str): dns domain

    Returns:
        str: hostname without the domain suffix

    Examples:
        >>> without_domain('www.google.com', 'google.com')
        'www'
    """
    if not host.endswith(domain):
        return host
    # Drop the domain suffix, then the separating dot. The original
    # implementation left the trailing '.' in place (returning 'www.'
    # instead of 'www', contradicting its own docstring example).
    stripped = host[:len(host) - len(domain)]
    if stripped.endswith('.'):
        stripped = stripped[:-1]
    return stripped
def reverse_lookup(ip, server=None):
    """
    Perform a reverse (PTR) lookup on an `ip` with a given DNS `server`.

    Args:
        ip (str): IP address of host
        server (str): IP address of DNS server (system resolver when None)

    Returns:
        str: hostname if found, None otherwise
    """
    if server is not None:
        r = dns.resolver.Resolver(configure=False)
        r.nameservers = [server]
    else:
        r = dns.resolver.Resolver()
    try:
        dns_name = dns.reversename.from_address(ip)
        # PTR answers are fully qualified; strip the trailing root dot.
        # (raw string fixes the invalid '\.' escape in the original)
        hostname = re.sub(r'\.$', '', r.query(dns_name, "PTR")[0].to_text())
        logging.debug('Reverse lookup for IP {0} using server {1} found name: {2}'.format(ip, server, hostname))
        return hostname
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt propagate.
        logging.debug('Reverse lookup for {0} using server {1} failed.'.format(ip, server))
        logging.error(traceback.format_exc())
        return None
def dns_lookup(hostname, server=None, timeout=3, domain=None):
    """
    Resolve a `hostname` to its A records using a given DNS `server`.

    Args:
        hostname (str): host to be resolved
        server (str): IP address of DNS server (system resolver when None)
        timeout (int): DNS timeout in seconds
        domain (str): Domain name ('cisco.com')

    Returns:
        set: IP address strings, or None when resolution fails
        (the original docstring claimed a list; a set has always been returned)
    """
    if server is not None:
        r = dns.resolver.Resolver(configure=False)
        r.nameservers = [server]
    else:
        r = dns.resolver.Resolver()
    r.lifetime = timeout
    if domain:
        r.domain = dns.name.from_text(domain)
    try:
        answers = r.query(hostname, 'A')
    except Exception:
        # Narrowed from a bare except; covers dns.exception.DNSException
        # (timeouts, NXDOMAIN, no-answer) without hiding interrupts.
        # Message fixed: any failure lands here, not only timeouts.
        logging.error('DNS lookup failed for {0} using server {1}'.format(hostname, server))
        return None
    ips = set(a.to_text() for a in answers)
    logging.debug('DNS lookup for {0} using server {1} found IPs: {2}'.format(hostname, server, ', '.join(ips)))
    return ips
class Resolver:
    """A fast multi-threaded DNS resolver built on top of dns_lookup()."""

    def __init__(self, **kwargs):
        """
        Args:
            hostnames (set): Hostnames to perform DNS resolutions (or reverse lookups for IPs)
            domain (str): DNS domain
            timeout (int): DNS timeout in seconds
            tries (int): number of DNS resolution attempts per (host, server) pair
            nameservers (list): DNS name servers to query
        """
        self.tries = kwargs.get('tries', 1)
        self.timeout = kwargs.get('timeout', 5)
        self.hostnames = kwargs.get('hostnames', ['www.google.com', 'www.cisco.com'])
        self.domain = kwargs.get('domain', 'google.com')
        self.nameservers = kwargs.get('nameservers', ['8.8.8.8', '8.8.4.4'])
        self.cache = dict()
        # Fix: previously this attribute only existed after
        # _process_dead_hosts() ran, so reading it earlier raised AttributeError.
        self.dead_hosts = set()
        self.q = None
        self.workers = None

    def clear(self, cache=True):
        """Reset hostnames/queue/workers (and optionally the cache) for reuse."""
        self.hostnames = []
        if cache:
            self.cache = dict()
        self.q = None
        self.workers = None

    def _run(self, q, lock):
        """
        Worker loop: consume (host, server) pairs from `q` forever,
        updating the DNS cache as hosts are resolved.

        Args:
            q (queue.Queue): work queue of (host, server) tuples
            lock: shared lock guarding self.cache
        """
        while True:
            host, server = q.get()
            ips = dns_lookup(host, server, domain=self.domain, timeout=self.timeout)
            # Failed lookups record an empty entry so dead hosts stay visible.
            self._update_cache(host, ips or set(), lock)
            q.task_done()

    def _update_cache(self, host, ips, lock):
        """
        Add `ips` to the cache entry for `host` (never removes existing IPs).
        Thread-safe via `lock`.

        Args:
            host (str): Hostname in the DNS cache
            ips (set): Set of IP addresses (str)
            lock: shared lock guarding self.cache
        """
        with lock:
            bucket = self.cache.get(host)
            if bucket is None:
                # Covers both a missing key and an explicit None entry.
                bucket = self.cache[host] = set()
            bucket.update(ip for ip in ips if ip is not None)

    def _create_workers(self, q, lock, num_workers):
        """
        Spawn `num_workers` daemon threads and enqueue every
        (host, nameserver) pair `self.tries` times.

        Args:
            num_workers (int): number of threads to spawn
        """
        logging.info('Creating {0} workers'.format(num_workers))
        for i in range(num_workers):
            worker = Thread(target=self._run, args=(q, lock), name='worker-{}'.format(i))
            # Fix: Thread.setDaemon() is deprecated; assign the attribute.
            worker.daemon = True
            worker.start()
        for _ in range(self.tries):
            for host in self.hostnames:
                for server in self.nameservers:
                    q.put((host, server))

    def _process_dead_hosts(self):
        """
        Collect hosts whose cache entry ended up empty (never resolved).

        Returns:
            set: self.dead_hosts
        """
        self.dead_hosts = {h for h, ips in self.cache.items() if not ips}
        return self.dead_hosts

    def resolve(self):
        """
        Resolve all of the hosts in `self.hostnames`, storing the results in `self.cache`.

        Returns:
            dict: self.cache
        """
        lock = Lock()
        max_workers = 510
        num_queries = len(self.hostnames) * self.tries * len(self.nameservers)
        if not self.workers:
            q = Queue(maxsize=0)
            if self.hostnames:
                # Never spawn more threads than there is work for.
                self._create_workers(q, lock, min(num_queries, max_workers))
            else:
                self._create_workers(q, lock, max_workers)
            self.workers = True
            self.q = q
        logging.info('Performing {0} DNS lookups'.format(num_queries))
        try:
            self.q.join()
        except KeyboardInterrupt:
            sys.exit(1)
        self._process_dead_hosts()
        return self.cache
|
mit
|
simonwydooghe/ansible
|
lib/ansible/modules/network/avi/avi_microservicegroup.py
|
28
|
3952
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Standard Ansible module metadata: a community-supported module in preview status.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_microservicegroup
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of MicroServiceGroup Avi RESTful Object
description:
- This module is used to configure MicroServiceGroup object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
created_by:
description:
- Creator name.
description:
description:
- User defined description for the object.
name:
description:
- Name of the microservice group.
required: true
service_refs:
description:
- Configure microservice(es).
- It is a reference to an object of type microservice.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the microservice group.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create a Microservice Group that can be used for setting up Network security policy
avi_microservicegroup:
controller: '{{ controller }}'
username: '{{ username }}'
password: '{{ password }}'
description: Group created by my Secure My App UI.
name: vs-msg-marketing
tenant_ref: admin
"""
RETURN = '''
obj:
description: MicroServiceGroup (api/microservicegroup) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
    from ansible.module_utils.network.avi.avi import (
        avi_common_argument_spec, avi_ansible_api, HAS_AVI)
except ImportError:
    # The Avi SDK is optional at import time; main() reports the missing
    # dependency via module.fail_json when HAS_AVI is False.
    HAS_AVI = False
def main():
    """Ansible entry point for the avi_microservicegroup module.

    Builds the argument spec, merges in the shared Avi connection options,
    and delegates CRUD handling to the common Avi API helper.
    """
    # Module-specific options; the shared Avi options are merged in below.
    field_specs = {
        'state': dict(default='present', choices=['absent', 'present']),
        'avi_api_update_method': dict(default='put', choices=['put', 'patch']),
        'avi_api_patch_op': dict(choices=['add', 'replace', 'delete']),
        'created_by': dict(type='str'),
        'description': dict(type='str'),
        'name': dict(type='str', required=True),
        'service_refs': dict(type='list'),
        'tenant_ref': dict(type='str'),
        'url': dict(type='str'),
        'uuid': dict(type='str'),
    }
    field_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=field_specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # No fields need special byte-sensitivity handling for this object type.
    return avi_ansible_api(module, 'microservicegroup',
                           set([]))


if __name__ == '__main__':
    main()
|
gpl-3.0
|
go-lab/labmanager
|
labmanager/rlms/ext/rest.py
|
5
|
9234
|
# -*-*- encoding: utf-8 -*-*-
#
# gateway4labs is free software: you can redistribute it and/or modify
# it under the terms of the BSD 2-Clause License
# gateway4labs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
import sys
import json
import requests
import traceback
from flask import current_app
from flask.ext.wtf import TextField, Required, URL, PasswordField, SelectField
from labmanager.forms import AddForm, RetrospectiveForm, GenericPermissionForm
from labmanager.rlms import register, Laboratory, BaseRLMS, BaseFormCreator, Versions, Capabilities
def get_module(version):
    """Return the module implementing the given RLMS `version`.

    Every supported version is handled by this very module, so the
    `version` argument is accepted for interface compatibility and ignored.
    """
    this_module = sys.modules[__name__]
    return this_module
class HttpAddForm(AddForm):
    # Admin-panel form shown when registering or editing an HTTP/REST RLMS.
    # NOTE(review): built on flask.ext.wtf imports, which were removed in
    # modern Flask-WTF — confirm the pinned dependency versions.
    base_url = TextField("Base URL", validators = [Required(), URL(False) ])
    login = TextField("Login", validators = [Required() ])
    password = PasswordField("Password", validators = [])
    extension = TextField("Extension", validators = [], description = "If required, provide an extension (e.g., .php) to the HTTP API")
    mode = SelectField("Mode", choices=[('json', 'Pure JSON requests and responses'), ('json+form', 'JSON for responses, HTML forms for requests')], default = "json")

    def __init__(self, add_or_edit, *args, **kwargs):
        # `add_or_edit` is part of the AddForm constructor interface but is
        # not used by this form.
        super(HttpAddForm, self).__init__(*args, **kwargs)

    @staticmethod
    def process_configuration(old_configuration, new_configuration):
        """Return the configuration to persist; no migration is performed."""
        return new_configuration
class HttpPermissionForm(RetrospectiveForm):
    # No HTTP-specific permission fields; inherits everything from RetrospectiveForm.
    pass
class HttpLmsPermissionForm(HttpPermissionForm, GenericPermissionForm):
    # Combines the HTTP permission form with the generic LMS permission fields.
    pass
class HttpFormCreator(BaseFormCreator):
    """Factory exposing the HTTP plug-in's admin forms to the RLMS framework."""

    def get_add_form(self):
        # Form used to add/edit an HTTP RLMS instance.
        return HttpAddForm

    def get_permission_form(self):
        # Form for laboratory-level permissions.
        return HttpPermissionForm

    def get_lms_permission_form(self):
        # Form for LMS-level permissions.
        return HttpLmsPermissionForm

# Singleton consumed by the RLMS registry.
FORM_CREATOR = HttpFormCreator()
class RLMS(BaseRLMS):
    """Generic HTTP/REST remote-laboratory bridge.

    Forwards every RLMS operation to a remote laboratory server over HTTP,
    using credentials and endpoints taken from a JSON configuration string.
    """

    def __init__(self, configuration):
        """Parse the JSON `configuration` string and validate required fields.

        Expected keys: base_url, login, password; optional: extension,
        context_id, mode ('json' or 'json+form').
        """
        self.configuration = configuration
        config = json.loads(configuration or '{}')
        self.base_url = config.get('base_url')
        # NOTE(review): if 'base_url' is absent this raises AttributeError
        # (None.endswith) before reaching the explicit check below — confirm
        # whether that is acceptable or should be reordered.
        if self.base_url.endswith('/'):
            self.base_url = self.base_url[:-1]
        self.login = config.get('login')
        self.password = config.get('password')
        self.extension = config.get('extension', '')
        self.context_id = str(config.get('context_id', ''))
        self.mode = config.get('mode', 'json')
        if not self.base_url or not self.login or not self.password:
            raise Exception("Laboratory misconfigured: fields missing" )

    def _inject_extension(self, remaining):
        """Insert the configured extension (e.g. '.php') before any query string."""
        method_and_get_query = remaining.split('?',1)
        if len(method_and_get_query) == 1:
            return method_and_get_query[0] + self.extension
        else: # 2
            method, get_query = method_and_get_query
            return method + self.extension + '?' + get_query

    def _request(self, remaining, headers = {}):
        """GET `remaining` (relative path) from the lab server; return parsed JSON.

        NOTE(review): mutable default `headers = {}` is shared across calls;
        it is only read here, but confirm no caller mutates it.
        """
        remaining = self._inject_extension(remaining)
        # The context_id is always forwarded as a query parameter.
        if '?' in remaining:
            context_remaining = remaining + '&context_id=' + self.context_id
        else:
            context_remaining = remaining + '?context_id=' + self.context_id
        url = '%s%s' % (self.base_url, context_remaining)
        r = HTTP_PLUGIN.cached_session.get(url, auth = (self.login, self.password), headers = headers)
        r.raise_for_status()
        try:
            return r.json()
        except ValueError:
            # Body was not valid JSON; re-raised unchanged (this handler is
            # effectively a no-op kept from the original code).
            raise

    def _request_post(self, remaining, data, headers = None):
        """POST `data` to the lab server; encoding depends on self.mode."""
        remaining = self._inject_extension(remaining)
        if headers is None:
            headers = {}
        if '?' in remaining:
            context_remaining = remaining + '&context_id=' + self.context_id
        else:
            context_remaining = remaining + '?context_id=' + self.context_id
        headers['Content-Type'] = 'application/json'
        if self.mode == 'json':
            data = json.dumps(data)
        elif self.mode == 'json+form':
            # Leave `data` as a dict so requests encodes it as an HTML form.
            data = data
        else:
            raise Exception("Misconfigured mode: %s" % self.mode)
        # Cached session will not cache anything in a post. But if the connection already exists to the server, we still use it, becoming faster
        r = HTTP_PLUGIN.cached_session.post('%s%s' % (self.base_url, context_remaining), data = data, auth = (self.login, self.password), headers = headers)
        return r.json()

    def get_version(self):
        """This plug-in implements version 1 of the RLMS interface."""
        return Versions.VERSION_1

    def get_capabilities(self):
        """Return the remote server's capability list (memoized in the plug-in cache)."""
        capabilities = HTTP_PLUGIN.rlms_cache.get('capabilities')
        if capabilities is not None:
            return capabilities
        capabilities = self._request('/capabilities')
        HTTP_PLUGIN.rlms_cache['capabilities'] = capabilities['capabilities']
        return capabilities['capabilities']

    def setup(self, back_url):
        """Ask the remote server for its setup URL, passing `back_url` through."""
        setup_url = self._request('/setup?back_url=%s' % back_url)
        return setup_url['url']

    def test(self):
        """Probe the remote plug-in; return error messages on failure, None on success."""
        response = self._request('/test_plugin')
        valid = response.get('valid', False)
        if not valid:
            return response.get('error_messages', ['Invalid error message'])

    def get_laboratories(self, **kwargs):
        """Return the remote laboratories as Laboratory objects (cached)."""
        labs = HTTP_PLUGIN.rlms_cache.get('labs')
        if labs is not None:
            return labs
        labs = self._request('/labs')['labs']
        laboratories = []
        for lab in labs:
            laboratory = Laboratory(name = lab['name'], laboratory_id = lab['laboratory_id'], description = lab.get('description'), autoload = lab.get('autoload'))
            laboratories.append(laboratory)
        HTTP_PLUGIN.rlms_cache['labs'] = laboratories
        return laboratories

    def get_translations(self, laboratory_id, **kwargs):
        """Fetch translation bundles for one lab (cached per laboratory_id)."""
        cache_key = 'translations-%s' % laboratory_id
        translations = HTTP_PLUGIN.rlms_cache.get(cache_key)
        if translations is not None:
            return translations
        try:
            translations_json = self._request('/translations?laboratory_id=%s' % requests.utils.quote(laboratory_id, ''))
        except:
            traceback.print_exc()
            # Dont store in cache if error
            return {'translations': {}, 'mails':[]}
        # Strip per-key 'namespace' metadata before caching.
        for lang, lang_data in translations_json['translations'].items():
            for key, data_value in lang_data.items():
                data_value.pop('namespace', None)
        HTTP_PLUGIN.rlms_cache[cache_key] = translations_json
        return translations_json

    def reserve(self, laboratory_id, username, institution, general_configuration_str, particular_configurations, request_payload, user_properties, *args, **kwargs):
        """POST a reservation request; return its reservation_id and load_url.

        When debug mode is active (kwargs['debug'] and the Flask app's debug
        flag), the request, response, or traceback is dumped to
        last_request.txt for inspection.
        """
        request = {
            'laboratory_id' : laboratory_id,
            'username' : username,
            'institution' : institution,
            'general_configuration_str' : general_configuration_str,
            'particular_configurations' : particular_configurations,
            'request_payload' : request_payload,
            'user_properties' : user_properties,
        }
        request.update(kwargs)
        debug_mode = kwargs.get('debug', False) and current_app.debug
        if debug_mode:
            open('last_request.txt','w').write(json.dumps(request, indent = 4))
        try:
            response = self._request_post('/reserve', request)
        except:
            if debug_mode:
                exc_info = traceback.format_exc()
                open('last_request.txt','a').write(exc_info)
            raise
        else:
            if debug_mode:
                open('last_request.txt','a').write(json.dumps(response, indent = 4))
        return {
            'reservation_id' : response['reservation_id'],
            'load_url' : response['load_url']
        }

    def load_widget(self, reservation_id, widget_name, **kwargs):
        """Resolve the URL of a named widget for an existing reservation."""
        response = self._request('/widget?widget_name=%s' % widget_name, headers = { 'X-G4L-reservation-id' : reservation_id })
        return {
            'url' : response['url']
        }

    def get_check_urls(self, laboratory_id):
        """Return the URLs the server exposes for health-checking a lab."""
        return self._request('/check_urls?laboratory_id={}'.format(laboratory_id))

    def list_widgets(self, laboratory_id, **kwargs):
        """List the widgets (name/description) available for a laboratory."""
        widgets_json = self._request('/widgets?laboratory_id=%s' % requests.utils.quote(laboratory_id))
        widgets = []
        for widget_json in widgets_json['widgets']:
            widget = {
                'name' : widget_json['name'],
                'description' : widget_json.get('description',''),
            }
            widgets.append(widget)
        return widgets
# Identity under which this plug-in registers with the RLMS framework.
PLUGIN_NAME = "HTTP plug-in"
PLUGIN_VERSIONS = ['1.0']

def populate_cache(rlms):
    """Warm the per-RLMS cache: laboratories, plus translations when supported."""
    capabilities = rlms.get_capabilities()
    for lab in rlms.get_laboratories():
        if Capabilities.TRANSLATIONS in capabilities:
            rlms.get_translations(lab.laboratory_id)
        if Capabilities.TRANSLATION_LIST in capabilities:
            # NOTE(review): get_translation_list is not defined on RLMS in this
            # file — confirm the base class provides it before relying on it.
            rlms.get_translation_list(lab.laboratory_id)

HTTP_PLUGIN = register(PLUGIN_NAME, PLUGIN_VERSIONS, __name__)
# Refresh the cache roughly hourly (55 minutes) per plug-in instance.
HTTP_PLUGIN.add_local_periodic_task('Populating cache', populate_cache, minutes = 55)
|
bsd-2-clause
|
hoangminhitvn/flask
|
flask/lib/python2.7/site-packages/pip/commands/zip.py
|
34
|
15782
|
from __future__ import absolute_import
import sys
import re
import fnmatch
import logging
import os
import shutil
import warnings
import zipfile
from pip.utils import display_path, backup_dir, rmtree
from pip.utils.deprecation import RemovedInPip7Warning
from pip.utils.logging import indent_log
from pip.exceptions import InstallationError
from pip.basecommand import Command
# Module-level logger (standard pip convention).
logger = logging.getLogger(__name__)
class ZipCommand(Command):
    """Zip individual packages."""
    # Deprecated 'pip zip' / 'pip unzip' command: converts an installed
    # package directory into a .zip importable via sys.path (or back).
    # NOTE: several locals below shadow the builtin `zip`; kept as-is.

    name = 'zip'
    usage = """
      %prog [options] <package> ..."""
    summary = 'DEPRECATED. Zip individual packages.'

    def __init__(self, *args, **kw):
        """Register zip/unzip-specific command-line options.

        The same class backs both 'zip' and 'unzip'; self.name decides
        which direction is the default and which flag inverts it.
        """
        super(ZipCommand, self).__init__(*args, **kw)
        if self.name == 'zip':
            self.cmd_opts.add_option(
                '--unzip',
                action='store_true',
                dest='unzip',
                help='Unzip (rather than zip) a package.')
        else:
            self.cmd_opts.add_option(
                '--zip',
                action='store_false',
                dest='unzip',
                default=True,
                help='Zip (rather than unzip) a package.')
        self.cmd_opts.add_option(
            '--no-pyc',
            action='store_true',
            dest='no_pyc',
            help=(
                'Do not include .pyc files in zip files (useful on Google App '
                'Engine).'),
        )
        self.cmd_opts.add_option(
            '-l', '--list',
            action='store_true',
            dest='list',
            help='List the packages available, and their zip status.')
        self.cmd_opts.add_option(
            '--sort-files',
            action='store_true',
            dest='sort_files',
            help=('With --list, sort packages according to how many files they'
                  ' contain.'),
        )
        self.cmd_opts.add_option(
            '--path',
            action='append',
            dest='paths',
            help=('Restrict operations to the given paths (may include '
                  'wildcards).'),
        )
        self.cmd_opts.add_option(
            '-n', '--simulate',
            action='store_true',
            help='Do not actually perform the zip/unzip operation.')
        self.parser.insert_option_group(0, self.cmd_opts)

    def paths(self):
        """All the entries of sys.path, possibly restricted by --path"""
        if not self.select_paths:
            return sys.path
        result = []
        match_any = set()
        for path in sys.path:
            path = os.path.normcase(os.path.abspath(path))
            for match in self.select_paths:
                match = os.path.normcase(os.path.abspath(match))
                if '*' in match:
                    # Wildcard pattern: translate to a regex prefix match.
                    if re.search(fnmatch.translate(match + '*'), path):
                        result.append(path)
                        match_any.add(match)
                        break
                else:
                    if path.startswith(match):
                        result.append(path)
                        match_any.add(match)
                        break
            else:
                # No --path entry matched this sys.path entry.
                logger.debug(
                    "Skipping path %s because it doesn't match %s",
                    path,
                    ', '.join(self.select_paths),
                )
        # Literal --path values that matched nothing are added verbatim.
        for match in self.select_paths:
            if match not in match_any and '*' not in match:
                result.append(match)
                logger.debug(
                    "Adding path %s because it doesn't match "
                    "anything already on sys.path",
                    match,
                )
        return result

    def run(self, options, args):
        """Dispatch to list/zip/unzip after validating the arguments."""
        warnings.warn(
            "'pip zip' and 'pip unzip` are deprecated, and will be removed in "
            "a future release.",
            RemovedInPip7Warning,
        )
        self.select_paths = options.paths
        self.simulate = options.simulate
        if options.list:
            return self.list(options, args)
        if not args:
            raise InstallationError(
                'You must give at least one package to zip or unzip')
        packages = []
        # Validate every package before touching any of them.
        for arg in args:
            module_name, filename = self.find_package(arg)
            if options.unzip and os.path.isdir(filename):
                raise InstallationError(
                    'The module %s (in %s) is not a zip file; cannot be '
                    'unzipped' % (module_name, filename)
                )
            elif not options.unzip and not os.path.isdir(filename):
                raise InstallationError(
                    'The module %s (in %s) is not a directory; cannot be '
                    'zipped' % (module_name, filename)
                )
            packages.append((module_name, filename))
        last_status = None
        for module_name, filename in packages:
            if options.unzip:
                last_status = self.unzip_package(module_name, filename)
            else:
                last_status = self.zip_package(
                    module_name, filename, options.no_pyc
                )
        return last_status

    def unzip_package(self, module_name, filename):
        """Extract `module_name` from its containing zip back onto the filesystem.

        Entries belonging to other modules are preserved by rewriting the
        zip; the zip is deleted (and its .pth reference removed) when it
        would become empty.
        """
        zip_filename = os.path.dirname(filename)
        # NOTE(review): this guard ('not isfile AND is_zipfile') can never be
        # true — is_zipfile() is False for non-files; likely meant
        # 'not isfile OR not is_zipfile'. Kept byte-identical here.
        if (not os.path.isfile(zip_filename) and
                zipfile.is_zipfile(zip_filename)):
            raise InstallationError(
                'Module %s (in %s) isn\'t located in a zip file in %s'
                % (module_name, filename, zip_filename))
        package_path = os.path.dirname(zip_filename)
        if package_path not in self.paths():
            logger.warning(
                'Unpacking %s into %s, but %s is not on sys.path',
                display_path(zip_filename),
                display_path(package_path),
                display_path(package_path),
            )
        logger.info(
            'Unzipping %s (in %s)', module_name, display_path(zip_filename),
        )
        if self.simulate:
            logger.info(
                'Skipping remaining operations because of --simulate'
            )
            return
        with indent_log():
            # FIXME: this should be undoable:
            zip = zipfile.ZipFile(zip_filename)
            to_save = []
            for info in zip.infolist():
                name = info.filename
                if name.startswith(module_name + os.path.sep):
                    # Entry belongs to the requested module: extract it.
                    content = zip.read(name)
                    dest = os.path.join(package_path, name)
                    if not os.path.exists(os.path.dirname(dest)):
                        os.makedirs(os.path.dirname(dest))
                    if not content and dest.endswith(os.path.sep):
                        # Empty content + trailing separator = directory entry.
                        if not os.path.exists(dest):
                            os.makedirs(dest)
                    else:
                        with open(dest, 'wb') as f:
                            f.write(content)
                else:
                    # Keep entries from other modules for the rewritten zip.
                    to_save.append((name, zip.read(name)))
            zip.close()
            if not to_save:
                logger.debug(
                    'Removing now-empty zip file %s',
                    display_path(zip_filename)
                )
                os.unlink(zip_filename)
                self.remove_filename_from_pth(zip_filename)
            else:
                logger.debug(
                    'Removing entries in %s/ from zip file %s',
                    module_name,
                    display_path(zip_filename),
                )
                zip = zipfile.ZipFile(zip_filename, 'w')
                for name, content in to_save:
                    zip.writestr(name, content)
                zip.close()

    def zip_package(self, module_name, filename, no_pyc):
        """Pack the package directory `filename` into <dir>.zip and register it.

        The original directory is moved aside (backup) before zipping, then
        removed; a .pth file pointing at the zip is appended so the zip is
        importable.
        """
        logger.info('Zip %s (in %s)', module_name, display_path(filename))
        orig_filename = filename
        if filename.endswith('.egg'):
            dest_filename = filename
        else:
            dest_filename = filename + '.zip'
        with indent_log():
            # FIXME: I think this needs to be undoable:
            if filename == dest_filename:
                filename = backup_dir(orig_filename)
                logger.info(
                    'Moving %s aside to %s', orig_filename, filename,
                )
                if not self.simulate:
                    shutil.move(orig_filename, filename)
            try:
                logger.debug(
                    'Creating zip file in %s', display_path(dest_filename),
                )
                if not self.simulate:
                    zip = zipfile.ZipFile(dest_filename, 'w')
                    zip.writestr(module_name + '/', '')
                    for dirpath, dirnames, filenames in os.walk(filename):
                        if no_pyc:
                            filenames = [f for f in filenames
                                         if not f.lower().endswith('.pyc')]
                        for fns, is_dir in [
                                (dirnames, True), (filenames, False)]:
                            for fn in fns:
                                full = os.path.join(dirpath, fn)
                                dest = os.path.join(
                                    module_name,
                                    dirpath[len(filename):].lstrip(
                                        os.path.sep
                                    ),
                                    fn,
                                )
                                if is_dir:
                                    zip.writestr(dest + '/', '')
                                else:
                                    zip.write(full, dest)
                    zip.close()
                logger.debug(
                    'Removing old directory %s', display_path(filename),
                )
                if not self.simulate:
                    rmtree(filename)
            except:
                # FIXME: need to do an undo here
                raise
            # FIXME: should also be undone:
            self.add_filename_to_pth(dest_filename)

    def remove_filename_from_pth(self, filename):
        """Delete the line referencing `filename` from the first .pth that has it."""
        for pth in self.pth_files():
            with open(pth, 'r') as f:
                lines = f.readlines()
            new_lines = [
                l for l in lines if l.strip() != filename]
            if lines != new_lines:
                logger.debug(
                    'Removing reference to %s from .pth file %s',
                    display_path(filename),
                    display_path(pth),
                )
                if not [line for line in new_lines if line]:
                    # The .pth would be empty afterwards: remove it entirely.
                    logger.debug(
                        '%s file would be empty: deleting', display_path(pth)
                    )
                    if not self.simulate:
                        os.unlink(pth)
                else:
                    if not self.simulate:
                        with open(pth, 'wb') as f:
                            f.writelines(new_lines)
                return
        logger.warning(
            'Cannot find a reference to %s in any .pth file',
            display_path(filename),
        )

    def add_filename_to_pth(self, filename):
        """Append `filename` to its sibling <filename>.pth so it is importable."""
        path = os.path.dirname(filename)
        dest = filename + '.pth'
        if path not in self.paths():
            logger.warning(
                'Adding .pth file %s, but it is not on sys.path',
                display_path(dest),
            )
        if not self.simulate:
            if os.path.exists(dest):
                with open(dest) as f:
                    lines = f.readlines()
                if lines and not lines[-1].endswith('\n'):
                    lines[-1] += '\n'
                lines.append(filename + '\n')
            else:
                lines = [filename + '\n']
            with open(dest, 'wb') as f:
                f.writelines(lines)

    def pth_files(self):
        """Yield every .pth file found in the selected sys.path directories."""
        for path in self.paths():
            if not os.path.exists(path) or not os.path.isdir(path):
                continue
            for filename in os.listdir(path):
                if filename.endswith('.pth'):
                    yield os.path.join(path, filename)

    def find_package(self, package):
        """Locate `package` on the selected paths (directory or zipped).

        Returns:
            (package, full_path) on success; raises InstallationError otherwise.
        """
        for path in self.paths():
            full = os.path.join(path, package)
            if os.path.exists(full):
                return package, full
            if not os.path.isdir(path) and zipfile.is_zipfile(path):
                # path itself is a zip file: look for the package inside it.
                zip = zipfile.ZipFile(path, 'r')
                try:
                    zip.read(os.path.join(package, '__init__.py'))
                except KeyError:
                    pass
                else:
                    zip.close()
                    return package, full
                zip.close()
        # FIXME: need special error for package.py case:
        raise InstallationError(
            'No package with the name %s found' % package)

    def list(self, options, args):
        """Print every package on the selected paths, grouped zipped/unzipped."""
        if args:
            raise InstallationError(
                'You cannot give an argument with --list')
        for path in sorted(self.paths()):
            if not os.path.exists(path):
                continue
            basename = os.path.basename(path.rstrip(os.path.sep))
            if os.path.isfile(path) and zipfile.is_zipfile(path):
                if os.path.dirname(path) not in self.paths():
                    logger.info('Zipped egg: %s', display_path(path))
                continue
            # Only scan conventional package directories.
            if (basename != 'site-packages' and
                    basename != 'dist-packages' and not
                    path.replace('\\', '/').endswith('lib/python')):
                continue
            logger.info('In %s:', display_path(path))
            with indent_log():
                zipped = []
                unzipped = []
                for filename in sorted(os.listdir(path)):
                    ext = os.path.splitext(filename)[1].lower()
                    if ext in ('.pth', '.egg-info', '.egg-link'):
                        continue
                    if ext == '.py':
                        logger.debug(
                            'Not displaying %s: not a package',
                            display_path(filename)
                        )
                        continue
                    full = os.path.join(path, filename)
                    if os.path.isdir(full):
                        unzipped.append((filename, self.count_package(full)))
                    elif zipfile.is_zipfile(full):
                        zipped.append(filename)
                    else:
                        logger.debug(
                            'Unknown file: %s', display_path(filename),
                        )
                if zipped:
                    logger.info('Zipped packages:')
                    with indent_log():
                        for filename in zipped:
                            logger.info(filename)
                else:
                    logger.info('No zipped packages.')
                if unzipped:
                    if options.sort_files:
                        unzipped.sort(key=lambda x: -x[1])
                    logger.info('Unzipped packages:')
                    with indent_log():
                        for filename, count in unzipped:
                            logger.info('%s (%i files)', filename, count)
                else:
                    logger.info('No unzipped packages.')

    def count_package(self, path):
        """Count the non-.pyc files under `path` (used for --sort-files)."""
        total = 0
        for dirpath, dirnames, filenames in os.walk(path):
            filenames = [f for f in filenames
                         if not f.lower().endswith('.pyc')]
            total += len(filenames)
        return total
|
bsd-3-clause
|
googleinterns/smart-content-summary
|
classifier/preprocess_cola_dataset_for_classifier.py
|
1
|
7832
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Preprocess the CoLA (The Corpus of Linguistic Acceptability) grammar
dataset classification task."""
import argparse
import csv
import os
import random
import preprocess_utils
# Output path for the preprocessed CoLA examples (one "sentence<TAB>label" row each).
PREPROCESSED_FILE_PATH = "~/classifier_preprocessed_cola_dataset.tsv"
# Output path for the CoLA + Microsoft mixed grammar training set.
MIXED_FILE_PATH = "~/classifier_mixed_training_set_grammar.tsv"
def main(args):
  """Preprocess the CoLA grammar dataset and mix it with the MS dataset.

  Reads the raw CoLA tsv (label in column 1, sentence in column 3), cleans
  and space-tokenizes both classes, writes them with binary labels to
  PREPROCESSED_FILE_PATH, then appends CoLA samples to the Microsoft dataset
  and writes the shuffled mix to MIXED_FILE_PATH.

  Args:
    args: command line arguments (raw_data_file, MS_data_file,
      goal_percentage_of_neg_samples).

  Raises:
    Exception: if either input file is missing, or if the requested
      negative-sample percentage is outside the achievable range.
  """
  data_file = os.path.expanduser(args.raw_data_file)
  if not os.path.isfile(data_file):
    raise Exception("Data file not found.")
  sentences_positive = []
  sentences_negative = []
  with open(data_file) as tsv_file:
    read_tsv = csv.reader(tsv_file, delimiter="\t")
    for line in read_tsv:
      # Column 1 is the grammaticality label (1 = acceptable), column 3 the
      # raw sentence.
      if int(line[1]) == 1:
        sentences_positive.append(line[3])
      else:
        sentences_negative.append(line[3])
  cleaned_sentences_positive = preprocess_utils.text_strip(sentences_positive)
  cleaned_sentences_negative = preprocess_utils.text_strip(sentences_negative)
  print("Number of samples is",
        len(cleaned_sentences_positive) + len(cleaned_sentences_negative))
  print("Number of incorrect sample is", len(cleaned_sentences_negative),
        "and number of correct sample is", len(cleaned_sentences_positive))
  spaced_sentences_positive = preprocess_utils.tokenize_with_space(
      cleaned_sentences_positive)
  spaced_sentences_negative = preprocess_utils.tokenize_with_space(
      cleaned_sentences_negative)
  with open(os.path.expanduser(PREPROCESSED_FILE_PATH), 'wt') as out_file:
    tsv_writer = csv.writer(out_file, delimiter='\t')
    for positive_sentence in spaced_sentences_positive:
      tsv_writer.writerow([positive_sentence, "1"])
    for negative_sentence in spaced_sentences_negative:
      tsv_writer.writerow([negative_sentence, "0"])
  print("-------Preprocessed data saved to", PREPROCESSED_FILE_PATH, "-------")
  print("-------Now mixing dataset with the MS dataset.-------")
  MS_data_file = os.path.expanduser(args.MS_data_file)
  if not os.path.isfile(MS_data_file):
    raise Exception("Microsoft data file not found.")
  MS_sentences = []
  MS_ratings = []
  number_of_MS_samples_in_each_category = [0, 0]
  with open(MS_data_file) as tsv_file:
    read_tsv = csv.reader(tsv_file, delimiter="\t")
    for line in read_tsv:
      MS_sentences.append(line[0])
      MS_ratings.append(int(line[1]))
      number_of_MS_samples_in_each_category[int(line[1])] += 1
  # Achievable bounds on the final negative-sample rate: the maximum is
  # reached by adding no CoLA positives, the minimum by adding all of them.
  # All CoLA negatives are always included.
  max_negative_rate = (number_of_MS_samples_in_each_category[0] +
                       len(cleaned_sentences_negative)) / \
      (sum(number_of_MS_samples_in_each_category) + len(cleaned_sentences_negative))
  min_negative_rate = (number_of_MS_samples_in_each_category[0] +
                       len(cleaned_sentences_negative)) / \
      (sum(number_of_MS_samples_in_each_category) + len(cleaned_sentences_positive) +
       len(cleaned_sentences_negative))
  goal_percentage = args.goal_percentage_of_neg_samples
  if goal_percentage is None:
    number_of_pos_sample_to_include = 0
  else:
    # Bug fix: the two adjacent string literals previously concatenated
    # without a space ("largestpossible" / "smallestpossible").
    if goal_percentage > max_negative_rate:
      raise Exception(
          "The goal negative sample percentage is greater than the largest "
          "possible value {:.2f}".format(max_negative_rate))
    if goal_percentage < min_negative_rate:
      raise Exception(
          "The goal negative sample percentage is smaller than the smallest "
          "possible value {:.2f}".format(min_negative_rate))
    # Solve neg / (neg + pos) == goal_percentage for the number of extra
    # positive samples needed on top of the MS positives.
    number_of_pos_sample_to_include = int(
        (1 - goal_percentage) / goal_percentage *
        (len(cleaned_sentences_negative) +
         number_of_MS_samples_in_each_category[0]) -
        number_of_MS_samples_in_each_category[1])
  print("------- Including", number_of_pos_sample_to_include,
        "samples from the cola dataset.")
  MS_sentences = MS_sentences + spaced_sentences_positive[0:number_of_pos_sample_to_include] + \
      spaced_sentences_negative
  MS_ratings = MS_ratings + [1] * number_of_pos_sample_to_include + [0] * len(
      spaced_sentences_negative)
  actual_negative_rate = (number_of_MS_samples_in_each_category[0] +
                          len(spaced_sentences_negative)) / \
      (sum(number_of_MS_samples_in_each_category) +
       len(spaced_sentences_negative) + number_of_pos_sample_to_include)
  print("-------The percentage of negative sample is",
        "{:.2f}".format(actual_negative_rate), "-------")
  # Shuffle once so the mixed training set interleaves both sources.
  shuffled_index = list(range(len(MS_sentences)))
  random.shuffle(shuffled_index)
  with open(os.path.expanduser(MIXED_FILE_PATH), 'wt') as out_file:
    tsv_writer = csv.writer(out_file, delimiter='\t')
    for index in shuffled_index:
      tsv_writer.writerow([MS_sentences[index], MS_ratings[index]])
  print("-------", len(MS_sentences), "samples saved to", MIXED_FILE_PATH,
        "-------")
if __name__ == "__main__":
  # Preprocess the CoLA grammar dataset (https://nyu-mll.github.io/CoLA) and
  # mix it with the preprocessed Microsoft dataset.  All CoLA negative
  # samples are mixed in; the number of CoLA positive samples is chosen to
  # reach -goal_percentage_of_neg_samples when given, otherwise none are
  # added (maximizing the negative-sample percentage).
  parser = argparse.ArgumentParser()
  parser.add_argument(
      "raw_data_file",
      help="Absolute path to the cola grammar dataset tsv file.")
  parser.add_argument(
      "MS_data_file",
      help="Absolute path to the preprocessed MS dataset tsv file.")
  parser.add_argument(
      '-goal_percentage_of_neg_samples',
      type=float,
      # Bug fix: the adjacent literals previously concatenated to
      # "samplepercentage" in the --help output.
      help="The goal of negative sample "
      "percentage after mixing the MS and cola dataset. If not provided, the mixing "
      "will maximize the percentage of negative samples.")
  arguments = parser.parse_args()
  main(arguments)
|
apache-2.0
|
palkeo/nebulosa
|
django_extensions/templatetags/widont.py
|
43
|
2034
|
import re
from django.template import Library
try:
from django.utils.encoding import force_text
except ImportError:
# Django 1.4 compatibility
from django.utils.encoding import force_unicode as force_text
register = Library()
# Matches the whitespace before the final word (plus any trailing spaces) of
# a plain-text string.
re_widont = re.compile(r'\s+(\S+\s*)$')
# Matches the last two words immediately before a block-level open/close tag
# (or end of string), without crossing '<'/'>' tag boundaries.
re_widont_html = re.compile(r'([^<>\s])\s+([^<>\s]+\s*)(</?(?:address|blockquote|br|dd|div|dt|fieldset|form|h[1-6]|li|noscript|p|td|th)[^>]*>|$)', re.IGNORECASE)
def widont(value, count=1):
    """
    Adds an HTML non-breaking space between the final two words of the string to
    avoid "widowed" words.
    Examples:
    >>> print(widont('Test me out'))
    Test me out
    >>> widont('It works with trailing spaces too ')
    u'It works with trailing spaces too '
    >>> print(widont('NoEffect'))
    NoEffect
    """
    def replace(matchobj):
        # NOTE(review): the character before %s in this literal may be a
        # non-breaking space (U+00A0 / &nbsp;) that renders as a plain space
        # -- confirm against the original source encoding before editing.
        return force_text(' %s' % matchobj.group(1))
    # Re-apply the substitution `count` times so that up to `count` trailing
    # words are glued to the word before them.
    for i in range(count):
        value = re_widont.sub(replace, force_text(value))
    return value
def widont_html(value):
    """
    Adds an HTML non-breaking space between the final two words at the end of
    (and in sentences just outside of) block level tags to avoid "widowed"
    words.
    Examples:
    >>> print(widont_html('<h2>Here is a simple example </h2> <p>Single</p>'))
    <h2>Here is a simple example </h2> <p>Single</p>
    >>> print(widont_html('<p>test me<br /> out</p><h2>Ok?</h2>Not in a p<p title="test me">and this</p>'))
    <p>test me<br /> out</p><h2>Ok?</h2>Not in a p<p title="test me">and this</p>
    >>> print(widont_html('leading text <p>test me out</p> trailing text'))
    leading text <p>test me out</p> trailing text
    """
    def replace(matchobj):
        # Rejoin the captured pieces: last-but-one char, final word, and the
        # tag (or empty end-of-string match).
        # NOTE(review): the separator between the first two %s may be a
        # non-breaking space (U+00A0 / &nbsp;) rendered as a plain space --
        # verify before editing this literal.
        return force_text('%s %s%s' % matchobj.groups())
    return re_widont_html.sub(replace, force_text(value))
# Expose both helpers as Django template filters.
register.filter(widont)
register.filter(widont_html)

if __name__ == "__main__":
    # Running this module directly executes the doctests embedded above.
    import doctest
    doctest.testmod()
|
unlicense
|
hexxter/home-assistant
|
homeassistant/components/climate/nest.py
|
2
|
6379
|
"""
Support for Nest thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.nest/
"""
import logging
import voluptuous as vol
import homeassistant.components.nest as nest
from homeassistant.components.climate import (
STATE_AUTO, STATE_COOL, STATE_HEAT, ClimateDevice,
PLATFORM_SCHEMA, ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW,
ATTR_TEMPERATURE)
from homeassistant.const import (
TEMP_CELSIUS, CONF_SCAN_INTERVAL, STATE_ON, STATE_OFF, STATE_UNKNOWN)
# This platform requires the shared `nest` component to be set up first.
DEPENDENCIES = ['nest']
_LOGGER = logging.getLogger(__name__)
# Extend the base climate platform schema with an optional integer
# scan_interval (seconds, at least 1).
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_SCAN_INTERVAL):
        vol.All(vol.Coerce(int), vol.Range(min=1)),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Nest thermostat."""
    temp_unit = hass.config.units.temperature_unit
    thermostats = []
    for structure, device in nest.devices():
        thermostats.append(NestThermostat(structure, device, temp_unit))
    add_devices(thermostats)
# pylint: disable=abstract-method,too-many-public-methods
class NestThermostat(ClimateDevice):
    """Representation of a Nest thermostat."""

    def __init__(self, structure, device, temp_unit):
        """Initialize the thermostat."""
        self._unit = temp_unit
        self.structure = structure
        self.device = device
        self._fan_list = [STATE_ON, STATE_AUTO]
        self._operation_list = [STATE_HEAT, STATE_COOL, STATE_AUTO,
                                STATE_OFF]

    @property
    def name(self):
        """Return the name of the nest, if any."""
        location = self.device.where
        name = self.device.name
        if location is None:
            return name
        else:
            if name == '':
                return location.capitalize()
            else:
                return location.capitalize() + '(' + name + ')'

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS

    @property
    def device_state_attributes(self):
        """Return the device specific state attributes."""
        # Move these to Thermostat Device and make them global
        return {
            "humidity": self.device.humidity,
            "target_humidity": self.device.target_humidity,
        }

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self.device.temperature

    @property
    def current_operation(self):
        """Return current operation ie. heat, cool, idle."""
        # Map the Nest API mode strings onto the HA climate states.
        if self.device.mode == 'cool':
            return STATE_COOL
        elif self.device.mode == 'heat':
            return STATE_HEAT
        elif self.device.mode == 'range':
            return STATE_AUTO
        elif self.device.mode == 'off':
            return STATE_OFF
        else:
            return STATE_UNKNOWN

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        # In 'range' mode (and while away) there is no single target; the
        # low/high properties below apply instead.
        if self.device.mode != 'range' and not self.is_away_mode_on:
            return self.device.target
        else:
            return None

    @property
    def target_temperature_low(self):
        """Return the lower bound temperature we try to reach."""
        if self.is_away_mode_on and self.device.away_temperature[0]:
            # away_temperature is always a low, high tuple
            return self.device.away_temperature[0]
        if self.device.mode == 'range':
            return self.device.target[0]
        else:
            return None

    @property
    def target_temperature_high(self):
        """Return the upper bound temperature we try to reach."""
        if self.is_away_mode_on and self.device.away_temperature[1]:
            # away_temperature is always a low, high tuple
            return self.device.away_temperature[1]
        if self.device.mode == 'range':
            return self.device.target[1]
        else:
            return None

    @property
    def is_away_mode_on(self):
        """Return if away mode is on."""
        return self.structure.away

    def set_temperature(self, **kwargs):
        """Set new target temperature.

        In 'range' mode a (low, high) tuple is written when both bounds are
        supplied; otherwise the single ATTR_TEMPERATURE value is used.
        """
        target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)
        target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
        # Bug fix: previously `temp` was left unbound (NameError) when both
        # bounds were supplied but the device was not in 'range' mode,
        # because the else-branch only paired with the outer condition.
        if self.device.mode == 'range' and \
                target_temp_low is not None and target_temp_high is not None:
            temp = (target_temp_low, target_temp_high)
        else:
            temp = kwargs.get(ATTR_TEMPERATURE)
        _LOGGER.debug("Nest set_temperature-output-value=%s", temp)
        self.device.target = temp

    def set_operation_mode(self, operation_mode):
        """Set operation mode."""
        if operation_mode == STATE_HEAT:
            self.device.mode = 'heat'
        elif operation_mode == STATE_COOL:
            self.device.mode = 'cool'
        elif operation_mode == STATE_AUTO:
            self.device.mode = 'range'
        elif operation_mode == STATE_OFF:
            self.device.mode = 'off'

    @property
    def operation_list(self):
        """List of available operation modes."""
        return self._operation_list

    def turn_away_mode_on(self):
        """Turn away on."""
        self.structure.away = True

    def turn_away_mode_off(self):
        """Turn away off."""
        self.structure.away = False

    @property
    def current_fan_mode(self):
        """Return whether the fan is on."""
        return STATE_ON if self.device.fan else STATE_AUTO

    @property
    def fan_list(self):
        """List of available fan modes."""
        return self._fan_list

    def set_fan_mode(self, fan):
        """Turn fan on/off."""
        self.device.fan = fan.lower()

    @property
    def min_temp(self):
        """Identify min_temp in Nest API or defaults if not available."""
        temp = self.device.away_temperature.low
        if temp is None:
            return super().min_temp
        else:
            return temp

    @property
    def max_temp(self):
        """Identify max_temp in Nest API or defaults if not available."""
        temp = self.device.away_temperature.high
        if temp is None:
            return super().max_temp
        else:
            return temp

    def update(self):
        """Python-nest has its own mechanism for staying up to date."""
        pass
|
mit
|
intersense/stencil-overlapped-tiling-ics
|
scripts/yaml/constructor.py
|
391
|
25145
|
__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
'ConstructorError']
from error import *
from nodes import *
import datetime
import binascii, re, sys, types
class ConstructorError(MarkedYAMLError):
    """Raised when a YAML node cannot be converted into a native object."""
    pass
class BaseConstructor(object):
    """Turns a tree of YAML nodes into native Python objects.

    Tag-to-constructor registries live in class attributes so subclasses can
    extend them via add_constructor/add_multi_constructor without mutating
    the parent class's tables (copy-on-first-write in the classmethods).
    """
    # Exact tag -> constructor function.
    yaml_constructors = {}
    # Tag prefix -> constructor taking (self, tag_suffix, node).
    yaml_multi_constructors = {}
    def __init__(self):
        self.constructed_objects = {}   # node -> finished object (memo)
        self.recursive_objects = {}     # nodes currently under construction
        self.state_generators = []      # deferred two-phase constructors
        self.deep_construct = False
    def check_data(self):
        # If there are more documents available?
        return self.check_node()
    def get_data(self):
        # Construct and return the next document.
        if self.check_node():
            return self.construct_document(self.get_node())
    def get_single_data(self):
        # Ensure that the stream contains a single document and construct it.
        node = self.get_single_node()
        if node is not None:
            return self.construct_document(node)
        return None
    def construct_document(self, node):
        """Construct one document, then drain any deferred generators."""
        data = self.construct_object(node)
        # Generators may register further generators while running, hence
        # the outer while loop over a snapshot of the list.
        while self.state_generators:
            state_generators = self.state_generators
            self.state_generators = []
            for generator in state_generators:
                for dummy in generator:
                    pass
        self.constructed_objects = {}
        self.recursive_objects = {}
        self.deep_construct = False
        return data
    def construct_object(self, node, deep=False):
        """Construct (or fetch the memoized) object for a single node.

        With deep=True, generator-based constructors are drained immediately
        instead of being deferred to construct_document.
        """
        if node in self.constructed_objects:
            return self.constructed_objects[node]
        if deep:
            old_deep = self.deep_construct
            self.deep_construct = True
        # A node revisited before its construction finished means an anchor
        # cycle that plain (non-generator) constructors cannot resolve.
        if node in self.recursive_objects:
            raise ConstructorError(None, None,
                    "found unconstructable recursive node", node.start_mark)
        self.recursive_objects[node] = None
        # Resolution order: exact tag match, then multi (prefix) match, then
        # the None fallbacks, then a default by node kind.
        constructor = None
        tag_suffix = None
        if node.tag in self.yaml_constructors:
            constructor = self.yaml_constructors[node.tag]
        else:
            for tag_prefix in self.yaml_multi_constructors:
                if node.tag.startswith(tag_prefix):
                    tag_suffix = node.tag[len(tag_prefix):]
                    constructor = self.yaml_multi_constructors[tag_prefix]
                    break
            else:
                if None in self.yaml_multi_constructors:
                    tag_suffix = node.tag
                    constructor = self.yaml_multi_constructors[None]
                elif None in self.yaml_constructors:
                    constructor = self.yaml_constructors[None]
                elif isinstance(node, ScalarNode):
                    constructor = self.__class__.construct_scalar
                elif isinstance(node, SequenceNode):
                    constructor = self.__class__.construct_sequence
                elif isinstance(node, MappingNode):
                    constructor = self.__class__.construct_mapping
        if tag_suffix is None:
            data = constructor(self, node)
        else:
            data = constructor(self, tag_suffix, node)
        if isinstance(data, types.GeneratorType):
            # Two-phase constructor: the first yield produces the (empty)
            # object so recursive references can resolve; the remainder of
            # the generator fills it in.
            generator = data
            data = generator.next()
            if self.deep_construct:
                for dummy in generator:
                    pass
            else:
                self.state_generators.append(generator)
        self.constructed_objects[node] = data
        del self.recursive_objects[node]
        if deep:
            self.deep_construct = old_deep
        return data
    def construct_scalar(self, node):
        """Default scalar constructor: return the node's raw string value."""
        if not isinstance(node, ScalarNode):
            raise ConstructorError(None, None,
                    "expected a scalar node, but found %s" % node.id,
                    node.start_mark)
        return node.value
    def construct_sequence(self, node, deep=False):
        """Default sequence constructor: a list of constructed children."""
        if not isinstance(node, SequenceNode):
            raise ConstructorError(None, None,
                    "expected a sequence node, but found %s" % node.id,
                    node.start_mark)
        return [self.construct_object(child, deep=deep)
                for child in node.value]
    def construct_mapping(self, node, deep=False):
        """Default mapping constructor: a dict of constructed pairs."""
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
        mapping = {}
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            # Keys must be hashable to land in a dict.
            try:
                hash(key)
            except TypeError, exc:
                raise ConstructorError("while constructing a mapping", node.start_mark,
                        "found unacceptable key (%s)" % exc, key_node.start_mark)
            value = self.construct_object(value_node, deep=deep)
            mapping[key] = value
        return mapping
    def construct_pairs(self, node, deep=False):
        """Like construct_mapping, but keeps order and allows dup keys."""
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
        pairs = []
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            value = self.construct_object(value_node, deep=deep)
            pairs.append((key, value))
        return pairs
    def add_constructor(cls, tag, constructor):
        # Copy-on-write so registration on a subclass does not leak into the
        # parent class's registry.
        if not 'yaml_constructors' in cls.__dict__:
            cls.yaml_constructors = cls.yaml_constructors.copy()
        cls.yaml_constructors[tag] = constructor
    add_constructor = classmethod(add_constructor)
    def add_multi_constructor(cls, tag_prefix, multi_constructor):
        if not 'yaml_multi_constructors' in cls.__dict__:
            cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
        cls.yaml_multi_constructors[tag_prefix] = multi_constructor
    add_multi_constructor = classmethod(add_multi_constructor)
class SafeConstructor(BaseConstructor):
    """Constructor for the standard YAML 1.1 tags only (no python/* tags)."""
    def construct_scalar(self, node):
        # A mapping with a '=' (value) key is treated as that value's scalar.
        if isinstance(node, MappingNode):
            for key_node, value_node in node.value:
                if key_node.tag == u'tag:yaml.org,2002:value':
                    return self.construct_scalar(value_node)
        return BaseConstructor.construct_scalar(self, node)
    def flatten_mapping(self, node):
        """Expand '<<' merge keys in-place; merged pairs go to the front."""
        merge = []
        index = 0
        while index < len(node.value):
            key_node, value_node = node.value[index]
            if key_node.tag == u'tag:yaml.org,2002:merge':
                del node.value[index]
                if isinstance(value_node, MappingNode):
                    self.flatten_mapping(value_node)
                    merge.extend(value_node.value)
                elif isinstance(value_node, SequenceNode):
                    # A sequence of mappings merges them all; later mappings
                    # in the list have lower priority (hence the reverse).
                    submerge = []
                    for subnode in value_node.value:
                        if not isinstance(subnode, MappingNode):
                            raise ConstructorError("while constructing a mapping",
                                    node.start_mark,
                                    "expected a mapping for merging, but found %s"
                                    % subnode.id, subnode.start_mark)
                        self.flatten_mapping(subnode)
                        submerge.append(subnode.value)
                    submerge.reverse()
                    for value in submerge:
                        merge.extend(value)
                else:
                    raise ConstructorError("while constructing a mapping", node.start_mark,
                            "expected a mapping or list of mappings for merging, but found %s"
                            % value_node.id, value_node.start_mark)
            elif key_node.tag == u'tag:yaml.org,2002:value':
                key_node.tag = u'tag:yaml.org,2002:str'
                index += 1
            else:
                index += 1
        if merge:
            # Explicit keys follow the merged ones, so they win on conflict.
            node.value = merge + node.value
    def construct_mapping(self, node, deep=False):
        if isinstance(node, MappingNode):
            self.flatten_mapping(node)
        return BaseConstructor.construct_mapping(self, node, deep=deep)
    def construct_yaml_null(self, node):
        self.construct_scalar(node)
        return None
    # Recognized boolean spellings (input is lowercased before lookup).
    bool_values = {
        u'yes':     True,
        u'no':      False,
        u'true':    True,
        u'false':   False,
        u'on':      True,
        u'off':     False,
    }
    def construct_yaml_bool(self, node):
        value = self.construct_scalar(node)
        return self.bool_values[value.lower()]
    def construct_yaml_int(self, node):
        """Parse YAML 1.1 ints: sign, 0b/0x/0o prefixes, and base-60."""
        value = str(self.construct_scalar(node))
        value = value.replace('_', '')
        sign = +1
        if value[0] == '-':
            sign = -1
        if value[0] in '+-':
            value = value[1:]
        if value == '0':
            return 0
        elif value.startswith('0b'):
            return sign*int(value[2:], 2)
        elif value.startswith('0x'):
            return sign*int(value[2:], 16)
        elif value[0] == '0':
            # Leading zero means octal in YAML 1.1.
            return sign*int(value, 8)
        elif ':' in value:
            # Sexagesimal (base 60), e.g. 1:30 == 90.
            digits = [int(part) for part in value.split(':')]
            digits.reverse()
            base = 1
            value = 0
            for digit in digits:
                value += digit*base
                base *= 60
            return sign*value
        else:
            return sign*int(value)
    # Compute +inf portably by squaring until overflow saturates.
    inf_value = 1e300
    while inf_value != inf_value*inf_value:
        inf_value *= inf_value
    nan_value = -inf_value/inf_value   # Trying to make a quiet NaN (like C99).
    def construct_yaml_float(self, node):
        """Parse YAML 1.1 floats, including .inf/.nan and base-60 forms."""
        value = str(self.construct_scalar(node))
        value = value.replace('_', '').lower()
        sign = +1
        if value[0] == '-':
            sign = -1
        if value[0] in '+-':
            value = value[1:]
        if value == '.inf':
            return sign*self.inf_value
        elif value == '.nan':
            return self.nan_value
        elif ':' in value:
            digits = [float(part) for part in value.split(':')]
            digits.reverse()
            base = 1
            value = 0.0
            for digit in digits:
                value += digit*base
                base *= 60
            return sign*value
        else:
            return sign*float(value)
    def construct_yaml_binary(self, node):
        value = self.construct_scalar(node)
        try:
            return str(value).decode('base64')
        except (binascii.Error, UnicodeEncodeError), exc:
            raise ConstructorError(None, None,
                    "failed to decode base64 data: %s" % exc, node.start_mark)
    # ISO-8601-ish timestamp; time and timezone parts are optional.
    timestamp_regexp = re.compile(
            ur'''^(?P<year>[0-9][0-9][0-9][0-9])
                -(?P<month>[0-9][0-9]?)
                -(?P<day>[0-9][0-9]?)
                (?:(?:[Tt]|[ \t]+)
                (?P<hour>[0-9][0-9]?)
                :(?P<minute>[0-9][0-9])
                :(?P<second>[0-9][0-9])
                (?:\.(?P<fraction>[0-9]*))?
                (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
                (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
    def construct_yaml_timestamp(self, node):
        """Return a date (no time part) or a UTC-normalized naive datetime."""
        value = self.construct_scalar(node)
        match = self.timestamp_regexp.match(node.value)
        values = match.groupdict()
        year = int(values['year'])
        month = int(values['month'])
        day = int(values['day'])
        if not values['hour']:
            return datetime.date(year, month, day)
        hour = int(values['hour'])
        minute = int(values['minute'])
        second = int(values['second'])
        fraction = 0
        if values['fraction']:
            # Pad/truncate to microseconds.
            fraction = values['fraction'][:6]
            while len(fraction) < 6:
                fraction += '0'
            fraction = int(fraction)
        delta = None
        if values['tz_sign']:
            tz_hour = int(values['tz_hour'])
            tz_minute = int(values['tz_minute'] or 0)
            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
            if values['tz_sign'] == '-':
                delta = -delta
        data = datetime.datetime(year, month, day, hour, minute, second, fraction)
        if delta:
            # Normalize to UTC by subtracting the offset.
            data -= delta
        return data
    def construct_yaml_omap(self, node):
        # Note: we do not check for duplicate keys, because it's too
        # CPU-expensive.
        # Two-phase: yield the empty list first so cycles can resolve.
        omap = []
        yield omap
        if not isinstance(node, SequenceNode):
            raise ConstructorError("while constructing an ordered map", node.start_mark,
                    "expected a sequence, but found %s" % node.id, node.start_mark)
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError("while constructing an ordered map", node.start_mark,
                        "expected a mapping of length 1, but found %s" % subnode.id,
                        subnode.start_mark)
            if len(subnode.value) != 1:
                raise ConstructorError("while constructing an ordered map", node.start_mark,
                        "expected a single mapping item, but found %d items" % len(subnode.value),
                        subnode.start_mark)
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            value = self.construct_object(value_node)
            omap.append((key, value))
    def construct_yaml_pairs(self, node):
        # Note: the same code as `construct_yaml_omap`.
        pairs = []
        yield pairs
        if not isinstance(node, SequenceNode):
            raise ConstructorError("while constructing pairs", node.start_mark,
                    "expected a sequence, but found %s" % node.id, node.start_mark)
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError("while constructing pairs", node.start_mark,
                        "expected a mapping of length 1, but found %s" % subnode.id,
                        subnode.start_mark)
            if len(subnode.value) != 1:
                raise ConstructorError("while constructing pairs", node.start_mark,
                        "expected a single mapping item, but found %d items" % len(subnode.value),
                        subnode.start_mark)
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            value = self.construct_object(value_node)
            pairs.append((key, value))
    def construct_yaml_set(self, node):
        data = set()
        yield data
        value = self.construct_mapping(node)
        data.update(value)
    def construct_yaml_str(self, node):
        # Prefer a byte string when the value is pure ASCII (Python 2).
        value = self.construct_scalar(node)
        try:
            return value.encode('ascii')
        except UnicodeEncodeError:
            return value
    def construct_yaml_seq(self, node):
        data = []
        yield data
        data.extend(self.construct_sequence(node))
    def construct_yaml_map(self, node):
        data = {}
        yield data
        value = self.construct_mapping(node)
        data.update(value)
    def construct_yaml_object(self, node, cls):
        # Build without calling __init__, then restore state (pickle-style).
        data = cls.__new__(cls)
        yield data
        if hasattr(data, '__setstate__'):
            state = self.construct_mapping(node, deep=True)
            data.__setstate__(state)
        else:
            state = self.construct_mapping(node)
            data.__dict__.update(state)
    def construct_undefined(self, node):
        raise ConstructorError(None, None,
                "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
                node.start_mark)
# Register constructors for the standard YAML 1.1 tags on SafeConstructor.
# The final None registration is the fallback for unknown tags and raises
# ConstructorError.
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:null',
        SafeConstructor.construct_yaml_null)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:bool',
        SafeConstructor.construct_yaml_bool)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:int',
        SafeConstructor.construct_yaml_int)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:float',
        SafeConstructor.construct_yaml_float)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:binary',
        SafeConstructor.construct_yaml_binary)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:timestamp',
        SafeConstructor.construct_yaml_timestamp)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:omap',
        SafeConstructor.construct_yaml_omap)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:pairs',
        SafeConstructor.construct_yaml_pairs)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:set',
        SafeConstructor.construct_yaml_set)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:str',
        SafeConstructor.construct_yaml_str)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:seq',
        SafeConstructor.construct_yaml_seq)
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:map',
        SafeConstructor.construct_yaml_map)
SafeConstructor.add_constructor(None,
        SafeConstructor.construct_undefined)
class Constructor(SafeConstructor):
def construct_python_str(self, node):
return self.construct_scalar(node).encode('utf-8')
def construct_python_unicode(self, node):
return self.construct_scalar(node)
def construct_python_long(self, node):
return long(self.construct_yaml_int(node))
def construct_python_complex(self, node):
return complex(self.construct_scalar(node))
def construct_python_tuple(self, node):
return tuple(self.construct_sequence(node))
def find_python_module(self, name, mark):
if not name:
raise ConstructorError("while constructing a Python module", mark,
"expected non-empty name appended to the tag", mark)
try:
__import__(name)
except ImportError, exc:
raise ConstructorError("while constructing a Python module", mark,
"cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
return sys.modules[name]
def find_python_name(self, name, mark):
if not name:
raise ConstructorError("while constructing a Python object", mark,
"expected non-empty name appended to the tag", mark)
if u'.' in name:
module_name, object_name = name.rsplit('.', 1)
else:
module_name = '__builtin__'
object_name = name
try:
__import__(module_name)
except ImportError, exc:
raise ConstructorError("while constructing a Python object", mark,
"cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
module = sys.modules[module_name]
if not hasattr(module, object_name):
raise ConstructorError("while constructing a Python object", mark,
"cannot find %r in the module %r" % (object_name.encode('utf-8'),
module.__name__), mark)
return getattr(module, object_name)
def construct_python_name(self, suffix, node):
value = self.construct_scalar(node)
if value:
raise ConstructorError("while constructing a Python name", node.start_mark,
"expected the empty value, but found %r" % value.encode('utf-8'),
node.start_mark)
return self.find_python_name(suffix, node.start_mark)
def construct_python_module(self, suffix, node):
value = self.construct_scalar(node)
if value:
raise ConstructorError("while constructing a Python module", node.start_mark,
"expected the empty value, but found %r" % value.encode('utf-8'),
node.start_mark)
return self.find_python_module(suffix, node.start_mark)
class classobj: pass
def make_python_instance(self, suffix, node,
args=None, kwds=None, newobj=False):
if not args:
args = []
if not kwds:
kwds = {}
cls = self.find_python_name(suffix, node.start_mark)
if newobj and isinstance(cls, type(self.classobj)) \
and not args and not kwds:
instance = self.classobj()
instance.__class__ = cls
return instance
elif newobj and isinstance(cls, type):
return cls.__new__(cls, *args, **kwds)
else:
return cls(*args, **kwds)
def set_python_instance_state(self, instance, state):
if hasattr(instance, '__setstate__'):
instance.__setstate__(state)
else:
slotstate = {}
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
if hasattr(instance, '__dict__'):
instance.__dict__.update(state)
elif state:
slotstate.update(state)
for key, value in slotstate.items():
setattr(object, key, value)
def construct_python_object(self, suffix, node):
# Format:
# !!python/object:module.name { ... state ... }
instance = self.make_python_instance(suffix, node, newobj=True)
yield instance
deep = hasattr(instance, '__setstate__')
state = self.construct_mapping(node, deep=deep)
self.set_python_instance_state(instance, state)
def construct_python_object_apply(self, suffix, node, newobj=False):
# Format:
# !!python/object/apply # (or !!python/object/new)
# args: [ ... arguments ... ]
# kwds: { ... keywords ... }
# state: ... state ...
# listitems: [ ... listitems ... ]
# dictitems: { ... dictitems ... }
# or short format:
# !!python/object/apply [ ... arguments ... ]
# The difference between !!python/object/apply and !!python/object/new
# is how an object is created, check make_python_instance for details.
if isinstance(node, SequenceNode):
args = self.construct_sequence(node, deep=True)
kwds = {}
state = {}
listitems = []
dictitems = {}
else:
value = self.construct_mapping(node, deep=True)
args = value.get('args', [])
kwds = value.get('kwds', {})
state = value.get('state', {})
listitems = value.get('listitems', [])
dictitems = value.get('dictitems', {})
instance = self.make_python_instance(suffix, node, args, kwds, newobj)
if state:
self.set_python_instance_state(instance, state)
if listitems:
instance.extend(listitems)
if dictitems:
for key in dictitems:
instance[key] = dictitems[key]
return instance
def construct_python_object_new(self, suffix, node):
return self.construct_python_object_apply(suffix, node, newobj=True)
# Register the python/* tags on the (unsafe) Constructor.  The prefix
# (multi) registrations dispatch on the tag suffix after the colon.
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/none',
    Constructor.construct_yaml_null)
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/bool',
    Constructor.construct_yaml_bool)
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/str',
    Constructor.construct_python_str)
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/unicode',
    Constructor.construct_python_unicode)
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/int',
    Constructor.construct_yaml_int)
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/long',
    Constructor.construct_python_long)
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/float',
    Constructor.construct_yaml_float)
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/complex',
    Constructor.construct_python_complex)
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/list',
    Constructor.construct_yaml_seq)
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/tuple',
    Constructor.construct_python_tuple)
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/dict',
    Constructor.construct_yaml_map)
Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/name:',
    Constructor.construct_python_name)
Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/module:',
    Constructor.construct_python_module)
Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/object:',
    Constructor.construct_python_object)
Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/object/apply:',
    Constructor.construct_python_object_apply)
Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/object/new:',
    Constructor.construct_python_object_new)
|
mit
|
balister/gnuradio
|
gnuradio-runtime/python/gnuradio/gr/gr_threading_24.py
|
94
|
25507
|
"""Thread module emulating a subset of Java's threading model."""
# This started life as the threading.py module of Python 2.4
# It's been patched to fix a problem with join, where a KeyboardInterrupt
# caused a lock to be left in the acquired state.
import sys as _sys
try:
import thread
except ImportError:
del _sys.modules[__name__]
raise
from time import time as _time, sleep as _sleep
from traceback import format_exc as _format_exc
from collections import deque
# Rename some stuff so "from threading import *" is safe
__all__ = ['activeCount', 'Condition', 'currentThread', 'enumerate', 'Event',
           'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread',
           'Timer', 'setprofile', 'settrace', 'local']

# Bind the primitives from the low-level `thread` module under private
# names, then delete the module reference so it is not re-exported.
_start_new_thread = thread.start_new_thread
_allocate_lock = thread.allocate_lock
_get_ident = thread.get_ident
ThreadError = thread.error
del thread
# Debug support (adapted from ihooks.py).
# All the major classes here derive from _Verbose. We force that to
# be a new-style class so that all the major classes here are new-style.
# This helps debugging (type(instance) is more revealing for instances
# of new-style classes).
# Module-wide default for per-instance debug output.
_VERBOSE = False

if __debug__:

    class _Verbose(object):
        """Mixin giving subclasses a gated debug-print helper (_note)."""

        def __init__(self, verbose=None):
            # Fall back to the module default when no explicit
            # per-instance setting is given.
            if verbose is None:
                verbose = _VERBOSE
            self.__verbose = verbose

        def _note(self, format, *args):
            # Emit a debug line to stderr, prefixed with the current
            # thread's name, but only when verbosity is enabled.
            if self.__verbose:
                format = format % args
                format = "%s: %s\n" % (
                    currentThread().getName(), format)
                _sys.stderr.write(format)

else:
    # Disable this when using "python -O": _note becomes a no-op.
    class _Verbose(object):
        def __init__(self, verbose=None):
            pass
        def _note(self, *args):
            pass
# Support for profile and trace hooks.  The hooks are installed into
# each new thread by Thread.__bootstrap via sys.setprofile/sys.settrace.
_profile_hook = None
_trace_hook = None

def setprofile(func):
    # Install *func* as the profile hook for threads started afterwards.
    global _profile_hook
    _profile_hook = func

def settrace(func):
    # Install *func* as the trace hook for threads started afterwards.
    global _trace_hook
    _trace_hook = func
# Synchronization classes
# A plain Lock is just the thread module's primitive lock.
Lock = _allocate_lock

def RLock(*args, **kwargs):
    # Factory function: the implementation class is kept private.
    return _RLock(*args, **kwargs)
class _RLock(_Verbose):
def __init__(self, verbose=None):
_Verbose.__init__(self, verbose)
self.__block = _allocate_lock()
self.__owner = None
self.__count = 0
def __repr__(self):
return "<%s(%s, %d)>" % (
self.__class__.__name__,
self.__owner and self.__owner.getName(),
self.__count)
def acquire(self, blocking=1):
me = currentThread()
if self.__owner is me:
self.__count = self.__count + 1
if __debug__:
self._note("%s.acquire(%s): recursive success", self, blocking)
return 1
rc = self.__block.acquire(blocking)
if rc:
self.__owner = me
self.__count = 1
if __debug__:
self._note("%s.acquire(%s): initial succes", self, blocking)
else:
if __debug__:
self._note("%s.acquire(%s): failure", self, blocking)
return rc
def release(self):
me = currentThread()
assert self.__owner is me, "release() of un-acquire()d lock"
self.__count = count = self.__count - 1
if not count:
self.__owner = None
self.__block.release()
if __debug__:
self._note("%s.release(): final release", self)
else:
if __debug__:
self._note("%s.release(): non-final release", self)
# Internal methods used by condition variables
def _acquire_restore(self, (count, owner)):
self.__block.acquire()
self.__count = count
self.__owner = owner
if __debug__:
self._note("%s._acquire_restore()", self)
def _release_save(self):
if __debug__:
self._note("%s._release_save()", self)
count = self.__count
self.__count = 0
owner = self.__owner
self.__owner = None
self.__block.release()
return (count, owner)
def _is_owned(self):
return self.__owner is currentThread()
def Condition(*args, **kwargs):
    # Factory function: the implementation class is kept private.
    return _Condition(*args, **kwargs)
class _Condition(_Verbose):
    """Condition variable layered on top of a lock (default: a new RLock).

    wait() and notify() must only be called while the underlying lock
    is held.  The release/reacquire hand-off below is order-critical.
    """

    def __init__(self, lock=None, verbose=None):
        _Verbose.__init__(self, verbose)
        if lock is None:
            lock = RLock()
        self.__lock = lock
        # Export the lock's acquire() and release() methods
        self.acquire = lock.acquire
        self.release = lock.release
        # If the lock defines _release_save() and/or _acquire_restore(),
        # these override the default implementations (which just call
        # release() and acquire() on the lock).  Ditto for _is_owned().
        try:
            self._release_save = lock._release_save
        except AttributeError:
            pass
        try:
            self._acquire_restore = lock._acquire_restore
        except AttributeError:
            pass
        try:
            self._is_owned = lock._is_owned
        except AttributeError:
            pass
        self.__waiters = []

    def __repr__(self):
        return "<Condition(%s, %d)>" % (self.__lock, len(self.__waiters))

    # Fallbacks used when the wrapped lock is a plain (non-reentrant)
    # lock without the _release_save/_acquire_restore protocol.
    def _release_save(self):
        self.__lock.release()           # No state to save
    def _acquire_restore(self, x):
        self.__lock.acquire()           # Ignore saved state
    def _is_owned(self):
        # Return True if lock is owned by currentThread.
        # This method is called only if __lock doesn't have _is_owned().
        if self.__lock.acquire(0):
            self.__lock.release()
            return False
        else:
            return True

    def wait(self, timeout=None):
        """Release the lock, block until notified (or timed out), then
        reacquire the lock before returning."""
        assert self._is_owned(), "wait() of un-acquire()d lock"
        # Each waiter blocks on its own freshly-acquired primitive lock;
        # notify() releases that lock to wake the waiter up.
        waiter = _allocate_lock()
        waiter.acquire()
        self.__waiters.append(waiter)
        saved_state = self._release_save()
        try:    # restore state no matter what (e.g., KeyboardInterrupt)
            if timeout is None:
                waiter.acquire()
                if __debug__:
                    self._note("%s.wait(): got it", self)
            else:
                # Balancing act:  We can't afford a pure busy loop, so we
                # have to sleep; but if we sleep the whole timeout time,
                # we'll be unresponsive.  The scheme here sleeps very
                # little at first, longer as time goes on, but never longer
                # than 20 times per second (or the timeout time remaining).
                endtime = _time() + timeout
                delay = 0.0005 # 500 us -> initial delay of 1 ms
                while True:
                    gotit = waiter.acquire(0)
                    if gotit:
                        break
                    remaining = endtime - _time()
                    if remaining <= 0:
                        break
                    delay = min(delay * 2, remaining, .05)
                    _sleep(delay)
                if not gotit:
                    if __debug__:
                        self._note("%s.wait(%s): timed out", self, timeout)
                    # Timed out: withdraw our waiter lock; it may already
                    # have been removed by a racing notify().
                    try:
                        self.__waiters.remove(waiter)
                    except ValueError:
                        pass
                else:
                    if __debug__:
                        self._note("%s.wait(%s): got it", self, timeout)
        finally:
            self._acquire_restore(saved_state)

    def notify(self, n=1):
        """Wake up to *n* threads waiting on this condition."""
        assert self._is_owned(), "notify() of un-acquire()d lock"
        __waiters = self.__waiters
        waiters = __waiters[:n]
        if not waiters:
            if __debug__:
                self._note("%s.notify(): no waiters", self)
            return
        # NOTE(review): unlike the other debug notes in this module this
        # call is not guarded by `if __debug__` -- harmless, since
        # _note() checks its own verbosity flag, but inconsistent.
        self._note("%s.notify(): notifying %d waiter%s", self, n,
                   n!=1 and "s" or "")
        for waiter in waiters:
            waiter.release()
            try:
                __waiters.remove(waiter)
            except ValueError:
                pass

    def notifyAll(self):
        """Wake up all threads waiting on this condition."""
        self.notify(len(self.__waiters))
def Semaphore(*args, **kwargs):
    # Factory function: the implementation class is kept private.
    return _Semaphore(*args, **kwargs)
class _Semaphore(_Verbose):
    """Counting semaphore: acquire() blocks while the counter is zero.

    After Tim Peters' semaphore class, but not quite the same (no maximum).
    """

    def __init__(self, value=1, verbose=None):
        assert value >= 0, "Semaphore initial value must be >= 0"
        _Verbose.__init__(self, verbose)
        self.__cond = Condition(Lock())
        self.__value = value

    def acquire(self, blocking=1):
        """Decrement the counter, blocking while it is zero.

        Returns True on success; with blocking=0, returns False instead
        of waiting when the counter is exhausted.
        """
        rc = False
        self.__cond.acquire()
        # while/else: the else branch runs only when the loop exits via
        # its condition (counter nonzero), not via the non-blocking break.
        while self.__value == 0:
            if not blocking:
                break
            if __debug__:
                self._note("%s.acquire(%s): blocked waiting, value=%s",
                           self, blocking, self.__value)
            self.__cond.wait()
        else:
            self.__value = self.__value - 1
            if __debug__:
                self._note("%s.acquire: success, value=%s",
                           self, self.__value)
            rc = True
        self.__cond.release()
        return rc

    def release(self):
        """Increment the counter and wake one blocked acquirer, if any."""
        self.__cond.acquire()
        self.__value = self.__value + 1
        if __debug__:
            self._note("%s.release: success, value=%s",
                       self, self.__value)
        self.__cond.notify()
        self.__cond.release()
def BoundedSemaphore(*args, **kwargs):
    # Factory function: the implementation class is kept private.
    return _BoundedSemaphore(*args, **kwargs)
class _BoundedSemaphore(_Semaphore):
    """Semaphore that checks that # releases is <= # acquires"""
    def __init__(self, value=1, verbose=None):
        _Semaphore.__init__(self, value, verbose)
        self._initial_value = value

    def release(self):
        # Guard against more release() than acquire() calls: the counter
        # may never exceed its initial value.  (Reads the base class's
        # name-mangled __value attribute directly.)
        if self._Semaphore__value >= self._initial_value:
            raise ValueError, "Semaphore released too many times"
        return _Semaphore.release(self)
def Event(*args, **kwargs):
    # Factory function: the implementation class is kept private.
    return _Event(*args, **kwargs)
class _Event(_Verbose):
    """A boolean flag that threads can wait on until it is set.

    After Tim Peters' event class (without is_posted()).
    """

    def __init__(self, verbose=None):
        _Verbose.__init__(self, verbose)
        # A condition variable over a plain lock guards the flag.
        self.__monitor = Condition(Lock())
        self.__is_set = False

    def isSet(self):
        """Return True if and only if the internal flag is set."""
        return self.__is_set

    def set(self):
        """Set the flag and wake every thread waiting on it."""
        monitor = self.__monitor
        monitor.acquire()
        try:
            self.__is_set = True
            monitor.notifyAll()
        finally:
            monitor.release()

    def clear(self):
        """Reset the flag; subsequent wait() calls will block."""
        monitor = self.__monitor
        monitor.acquire()
        try:
            self.__is_set = False
        finally:
            monitor.release()

    def wait(self, timeout=None):
        """Block until the flag is set, or until *timeout* seconds pass."""
        monitor = self.__monitor
        monitor.acquire()
        try:
            # Already set: return immediately without waiting.
            if not self.__is_set:
                monitor.wait(timeout)
        finally:
            monitor.release()
# Helper to generate new thread names
_counter = 0
def _newname(template="Thread-%d"):
global _counter
_counter = _counter + 1
return template % _counter
# Active thread administration.  _active maps thread id -> Thread for
# fully bootstrapped threads; _limbo holds threads between start() and
# the point where __bootstrap registers them in _active.
_active_limbo_lock = _allocate_lock()
_active = {}
_limbo = {}
# Main class for threads
class Thread(_Verbose):
    """An activity run in a separate thread of control.

    Either pass a callable as *target* or subclass and override run();
    then call start(), which arranges for run() to be invoked in a new
    thread via the private __bootstrap() method.
    """

    __initialized = False
    # Need to store a reference to sys.exc_info for printing
    # out exceptions when a thread tries to use a global var. during interp.
    # shutdown and thus raises an exception about trying to perform some
    # operation on/with a NoneType
    __exc_info = _sys.exc_info

    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs={}, verbose=None):
        # NOTE(review): kwargs={} is a shared mutable default; harmless
        # only as long as callers never mutate the received dict.
        assert group is None, "group argument must be None for now"
        _Verbose.__init__(self, verbose)
        self.__target = target
        self.__name = str(name or _newname())
        self.__args = args
        self.__kwargs = kwargs
        self.__daemonic = self._set_daemon()
        self.__started = False
        self.__stopped = False
        self.__block = Condition(Lock())
        self.__initialized = True
        # sys.stderr is not stored in the class like
        # sys.exc_info since it can be changed between instances
        self.__stderr = _sys.stderr

    def _set_daemon(self):
        # Overridden in _MainThread and _DummyThread
        return currentThread().isDaemon()

    def __repr__(self):
        assert self.__initialized, "Thread.__init__() was not called"
        status = "initial"
        if self.__started:
            status = "started"
        if self.__stopped:
            status = "stopped"
        if self.__daemonic:
            status = status + " daemon"
        return "<%s(%s, %s)>" % (self.__class__.__name__, self.__name, status)

    def start(self):
        """Arrange for run() to be invoked in a separate thread."""
        assert self.__initialized, "Thread.__init__() not called"
        assert not self.__started, "thread already started"
        if __debug__:
            self._note("%s.start(): starting thread", self)
        # Park the thread in _limbo until __bootstrap registers it.
        _active_limbo_lock.acquire()
        _limbo[self] = self
        _active_limbo_lock.release()
        _start_new_thread(self.__bootstrap, ())
        self.__started = True
        _sleep(0.000001)    # 1 usec, to let the thread run (Solaris hack)

    def run(self):
        """Default run(): call *target* with the given args, if any."""
        if self.__target:
            self.__target(*self.__args, **self.__kwargs)

    def __bootstrap(self):
        # Entry point executed in the new thread: register ourselves,
        # install hooks, run(), report stray exceptions, then clean up.
        try:
            self.__started = True
            _active_limbo_lock.acquire()
            _active[_get_ident()] = self
            del _limbo[self]
            _active_limbo_lock.release()
            if __debug__:
                self._note("%s.__bootstrap(): thread started", self)

            if _trace_hook:
                self._note("%s.__bootstrap(): registering trace hook", self)
                _sys.settrace(_trace_hook)
            if _profile_hook:
                self._note("%s.__bootstrap(): registering profile hook", self)
                _sys.setprofile(_profile_hook)

            try:
                self.run()
            except SystemExit:
                if __debug__:
                    self._note("%s.__bootstrap(): raised SystemExit", self)
            except:
                if __debug__:
                    self._note("%s.__bootstrap(): unhandled exception", self)
                # If sys.stderr is no more (most likely from interpreter
                # shutdown) use self.__stderr.  Otherwise still use sys (as in
                # _sys) in case sys.stderr was redefined since the creation of
                # self.
                if _sys:
                    _sys.stderr.write("Exception in thread %s:\n%s\n" %
                                      (self.getName(), _format_exc()))
                else:
                    # Do the best job possible w/o a huge amt. of code to
                    # approximate a traceback (code ideas from
                    # Lib/traceback.py)
                    exc_type, exc_value, exc_tb = self.__exc_info()
                    try:
                        print>>self.__stderr, (
                            "Exception in thread " + self.getName() +
                            " (most likely raised during interpreter shutdown):")
                        print>>self.__stderr, (
                            "Traceback (most recent call last):")
                        while exc_tb:
                            print>>self.__stderr, (
                                '  File "%s", line %s, in %s' %
                                (exc_tb.tb_frame.f_code.co_filename,
                                    exc_tb.tb_lineno,
                                    exc_tb.tb_frame.f_code.co_name))
                            exc_tb = exc_tb.tb_next
                        print>>self.__stderr, ("%s: %s" % (exc_type, exc_value))
                    # Make sure that exc_tb gets deleted since it is a memory
                    # hog; deleting everything else is just for thoroughness
                    finally:
                        del exc_type, exc_value, exc_tb
            else:
                if __debug__:
                    self._note("%s.__bootstrap(): normal return", self)
        finally:
            self.__stop()
            try:
                self.__delete()
            except:
                pass

    def __stop(self):
        # Mark the thread stopped and wake any join()ers.
        self.__block.acquire()
        self.__stopped = True
        self.__block.notifyAll()
        self.__block.release()

    def __delete(self):
        "Remove current thread from the dict of currently running threads."

        # Notes about running with dummy_thread:
        #
        # Must take care to not raise an exception if dummy_thread is being
        # used (and thus this module is being used as an instance of
        # dummy_threading).  dummy_thread.get_ident() always returns -1 since
        # there is only one thread if dummy_thread is being used.  Thus
        # len(_active) is always <= 1 here, and any Thread instance created
        # overwrites the (if any) thread currently registered in _active.
        #
        # An instance of _MainThread is always created by 'threading'.  This
        # gets overwritten the instant an instance of Thread is created; both
        # threads return -1 from dummy_thread.get_ident() and thus have the
        # same key in the dict.  So when the _MainThread instance created by
        # 'threading' tries to clean itself up when atexit calls this method
        # it gets a KeyError if another Thread instance was created.
        #
        # This all means that KeyError from trying to delete something from
        # _active if dummy_threading is being used is a red herring.  But
        # since it isn't if dummy_threading is *not* being used then don't
        # hide the exception.
        _active_limbo_lock.acquire()
        try:
            try:
                del _active[_get_ident()]
            except KeyError:
                if 'dummy_threading' not in _sys.modules:
                    raise
        finally:
            _active_limbo_lock.release()

    def join(self, timeout=None):
        """Wait until the thread terminates, or *timeout* seconds pass."""
        assert self.__initialized, "Thread.__init__() not called"
        assert self.__started, "cannot join thread before it is started"
        assert self is not currentThread(), "cannot join current thread"
        if __debug__:
            if not self.__stopped:
                self._note("%s.join(): waiting until thread stops", self)
        self.__block.acquire()
        try:
            if timeout is None:
                while not self.__stopped:
                    self.__block.wait()
                if __debug__:
                    self._note("%s.join(): thread stopped", self)
            else:
                deadline = _time() + timeout
                while not self.__stopped:
                    delay = deadline - _time()
                    if delay <= 0:
                        if __debug__:
                            self._note("%s.join(): timed out", self)
                        break
                    self.__block.wait(delay)
                else:
                    if __debug__:
                        self._note("%s.join(): thread stopped", self)
        finally:
            self.__block.release()

    def getName(self):
        assert self.__initialized, "Thread.__init__() not called"
        return self.__name

    def setName(self, name):
        assert self.__initialized, "Thread.__init__() not called"
        self.__name = str(name)

    def isAlive(self):
        # Alive means started but not yet stopped.
        assert self.__initialized, "Thread.__init__() not called"
        return self.__started and not self.__stopped

    def isDaemon(self):
        assert self.__initialized, "Thread.__init__() not called"
        return self.__daemonic

    def setDaemon(self, daemonic):
        # Daemon status can only be changed before the thread starts.
        assert self.__initialized, "Thread.__init__() not called"
        assert not self.__started, "cannot set daemon status of active thread"
        self.__daemonic = daemonic
# The timer class was contributed by Itamar Shtull-Trauring
def Timer(*args, **kwargs):
    # Factory function mirroring Condition/Semaphore/Event above.
    return _Timer(*args, **kwargs)
class _Timer(Thread):
    """Call a function after a specified number of seconds:

        t = Timer(30.0, f, args=[], kwargs={})
        t.start()
        t.cancel() # stop the timer's action if it's still waiting
    """

    def __init__(self, interval, function, args=None, kwargs=None):
        Thread.__init__(self)
        # None sentinels instead of mutable defaults: a shared []/{}
        # default object would be reused (and possibly mutated) across
        # every Timer created without explicit args/kwargs.
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}
        self.interval = interval
        self.function = function
        self.args = args
        self.kwargs = kwargs
        self.finished = Event()

    def cancel(self):
        """Stop the timer if it hasn't finished yet"""
        self.finished.set()

    def run(self):
        # Wait out the interval (or an early cancel()); only invoke the
        # callback if cancel() did not fire first.
        self.finished.wait(self.interval)
        if not self.finished.isSet():
            self.function(*self.args, **self.kwargs)
        self.finished.set()
# Special thread class to represent the main thread
# This is garbage collected through an exit handler
class _MainThread(Thread):
    """Thread object representing the interpreter's main thread.

    Registered in _active at import time; its atexit handler waits for
    all remaining non-daemon threads before interpreter exit.
    """

    def __init__(self):
        Thread.__init__(self, name="MainThread")
        # The main thread is already running, so mark it started and
        # register it directly (bypassing start()/__bootstrap()).
        self._Thread__started = True
        _active_limbo_lock.acquire()
        _active[_get_ident()] = self
        _active_limbo_lock.release()
        import atexit
        atexit.register(self.__exitfunc)

    def _set_daemon(self):
        return False

    def __exitfunc(self):
        # Mark the main thread stopped, then wait for every remaining
        # non-daemon thread before letting the interpreter exit.
        self._Thread__stop()
        t = _pickSomeNonDaemonThread()
        if t:
            if __debug__:
                self._note("%s: waiting for other threads", self)
        while t:
            t.join()
            t = _pickSomeNonDaemonThread()
        if __debug__:
            self._note("%s: exiting", self)
        self._Thread__delete()
def _pickSomeNonDaemonThread():
    """Return some live non-daemon thread, or None if none remain."""
    for candidate in enumerate():
        if candidate.isDaemon():
            continue
        if not candidate.isAlive():
            continue
        return candidate
    return None
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die,
# nor can they be waited for.
# Their purpose is to return *something* from currentThread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conform previous semantics).
class _DummyThread(Thread):
    """Placeholder for threads not started through this module.

    Created on demand by currentThread(); daemonic so shutdown never
    waits on it, and never garbage collected once registered.
    """

    def __init__(self):
        Thread.__init__(self, name=_newname("Dummy-%d"))
        # Already running, so register directly like _MainThread does.
        self._Thread__started = True
        _active_limbo_lock.acquire()
        _active[_get_ident()] = self
        _active_limbo_lock.release()

    def _set_daemon(self):
        return True

    def join(self, timeout=None):
        # A foreign thread's termination cannot be detected, so joining
        # it is meaningless.
        assert False, "cannot join a dummy thread"
# Global API functions
def currentThread():
    """Return the Thread object for the caller's thread of control.

    Threads not started through this module get a fresh _DummyThread
    registered on first lookup.
    """
    try:
        return _active[_get_ident()]
    except KeyError:
        ##print "currentThread(): no current thread for", _get_ident()
        return _DummyThread()
def activeCount():
    """Return the number of Thread objects currently alive,
    including threads still in limbo (start() in progress)."""
    _active_limbo_lock.acquire()
    try:
        return len(_active) + len(_limbo)
    finally:
        _active_limbo_lock.release()
def enumerate():
    """Return a list of all Thread objects currently alive, including
    those still in limbo (start() in progress).  Shadows the builtin
    enumerate() inside this module, by design of the original API.
    """
    _active_limbo_lock.acquire()
    # Python 2: dict.values() returns lists, so + concatenates them.
    active = _active.values() + _limbo.values()
    _active_limbo_lock.release()
    return active
# Create the main thread object (registers itself in _active and hooks
# atexit so interpreter exit waits for non-daemon threads).
_MainThread()

# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
    from thread import _local as local
except ImportError:
    from _threading_local import local
# Self-test code
def _test():
    """Self-test: run a small bounded producer/consumer exercise using
    the monitor classes defined above."""

    class BoundedQueue(_Verbose):
        # Classic bounded buffer guarded by one RLock and two condition
        # variables (rc: "readable", wc: "writable").

        def __init__(self, limit):
            _Verbose.__init__(self)
            self.mon = RLock()
            self.rc = Condition(self.mon)
            self.wc = Condition(self.mon)
            self.limit = limit
            self.queue = deque()

        def put(self, item):
            self.mon.acquire()
            while len(self.queue) >= self.limit:
                self._note("put(%s): queue full", item)
                self.wc.wait()
            self.queue.append(item)
            self._note("put(%s): appended, length now %d",
                       item, len(self.queue))
            self.rc.notify()
            self.mon.release()

        def get(self):
            self.mon.acquire()
            while not self.queue:
                self._note("get(): queue empty")
                self.rc.wait()
            item = self.queue.popleft()
            self._note("get(): got %s, %d left", item, len(self.queue))
            self.wc.notify()
            self.mon.release()
            return item

    class ProducerThread(Thread):

        def __init__(self, queue, quota):
            Thread.__init__(self, name="Producer")
            self.queue = queue
            self.quota = quota

        def run(self):
            from random import random
            counter = 0
            while counter < self.quota:
                counter = counter + 1
                self.queue.put("%s.%d" % (self.getName(), counter))
                _sleep(random() * 0.00001)

    class ConsumerThread(Thread):

        def __init__(self, queue, count):
            Thread.__init__(self, name="Consumer")
            self.queue = queue
            self.count = count

        def run(self):
            while self.count > 0:
                item = self.queue.get()
                print item
                self.count = self.count - 1

    # NP producers each emit NI items through a queue bounded at QL;
    # one consumer drains all NP*NI of them.
    NP = 3
    QL = 4
    NI = 5

    Q = BoundedQueue(QL)
    P = []
    for i in range(NP):
        t = ProducerThread(Q, NI)
        t.setName("Producer-%d" % (i+1))
        P.append(t)
    C = ConsumerThread(Q, NI*NP)
    for t in P:
        t.start()
        _sleep(0.000001)
    C.start()
    for t in P:
        t.join()
    C.join()

if __name__ == '__main__':
    _test()
|
gpl-3.0
|
MaximNevrov/neutron
|
neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py
|
38
|
4380
|
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import oslo_messaging
import six
from neutron.common import constants
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron.db import agentschedulers_db
from neutron import manager
from neutron.plugins.common import constants as service_constants
LOG = logging.getLogger(__name__)
class MeteringAgentNotifyAPI(object):
    """API for plugin to notify L3 metering agent."""

    def __init__(self, topic=topics.METERING_AGENT):
        self.topic = topic
        target = oslo_messaging.Target(topic=topic, version='1.0')
        self.client = n_rpc.get_client(target)

    def _agent_notification(self, context, method, routers):
        """Notify l3 metering agents hosted by l3 agent hosts."""
        adminContext = context if context.is_admin else context.elevated()
        plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        # Group routers by the host of each l3 agent hosting them, so
        # every agent host receives a single cast carrying all of its
        # routers.  (Previously the final loop rebound the `routers`
        # parameter, shadowing the argument.)
        routers_by_host = {}
        state = agentschedulers_db.get_admin_state_up_filter()
        for router in routers:
            l3_agents = plugin.get_l3_agents_hosting_routers(
                adminContext, [router['id']],
                admin_state_up=state,
                active=True)
            for l3_agent in l3_agents:
                LOG.debug('Notify metering agent at %(topic)s.%(host)s '
                          'the message %(method)s',
                          {'topic': self.topic,
                           'host': l3_agent.host,
                           'method': method})
                routers_by_host.setdefault(l3_agent.host, []).append(router)
        for host, host_routers in six.iteritems(routers_by_host):
            cctxt = self.client.prepare(server=host)
            cctxt.cast(context, method, routers=host_routers)

    def _notification_fanout(self, context, method, router_id):
        """Fanout the router-id-based notification to every agent."""
        LOG.debug('Fanout notify metering agent at %(topic)s the message '
                  '%(method)s on router %(router_id)s',
                  {'topic': self.topic,
                   'method': method,
                   'router_id': router_id})
        cctxt = self.client.prepare(fanout=True)
        cctxt.cast(context, method, router_id=router_id)

    def _notification(self, context, method, routers):
        """Notify all the agents that are hosting the routers."""
        plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        # Target specific hosts when the l3 agent scheduler is available;
        # otherwise fall back to a fanout cast to every metering agent.
        if utils.is_extension_supported(
            plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
            self._agent_notification(context, method, routers)
        else:
            cctxt = self.client.prepare(fanout=True)
            cctxt.cast(context, method, routers=routers)

    def router_deleted(self, context, router_id):
        self._notification_fanout(context, 'router_deleted', router_id)

    def routers_updated(self, context, routers):
        if routers:
            self._notification(context, 'routers_updated', routers)

    def update_metering_label_rules(self, context, routers):
        self._notification(context, 'update_metering_label_rules', routers)

    def add_metering_label_rule(self, context, routers):
        self._notification(context, 'add_metering_label_rule', routers)

    def remove_metering_label_rule(self, context, routers):
        self._notification(context, 'remove_metering_label_rule', routers)

    def add_metering_label(self, context, routers):
        self._notification(context, 'add_metering_label', routers)

    def remove_metering_label(self, context, routers):
        self._notification(context, 'remove_metering_label', routers)
|
apache-2.0
|
RiccardoPecora/MP
|
Lib/email/feedparser.py
|
55
|
21090
|
# Copyright (C) 2004-2006 Python Software Foundation
# Authors: Baxter, Wouters and Warsaw
# Contact: email-sig@python.org
"""FeedParser - An email feed parser.
The feed parser implements an interface for incrementally parsing an email
message, line by line. This has advantages for certain applications, such as
those reading email messages off a socket.
FeedParser.feed() is the primary interface for pushing new data into the
parser. It returns when there's nothing more it can do with the available
data. When you have no more data to push into the parser, call .close().
This completes the parsing and returns the root message object.
The other advantage of this parser is that it will never throw a parsing
exception. Instead, when it finds something unexpected, it adds a 'defect' to
the current message. Defects are just instances that live on the message
object's .defects attribute.
"""
__all__ = ['FeedParser']
import re
from email import errors
from email import message
# Line-ending patterns.  NLCRE matches any of the three conventional
# newline sequences; the _bol/_eol/_crack variants add a capturing
# group so re.split() keeps the terminator (used when re-attaching
# newlines after cracking data into lines).  Raw strings keep \Z
# literal for the regex engine instead of relying on Python passing an
# unrecognized string escape through unchanged.
NLCRE = re.compile(r'\r\n|\r|\n')
NLCRE_bol = re.compile(r'(\r\n|\r|\n)')
NLCRE_eol = re.compile(r'(\r\n|\r|\n)\Z')
NLCRE_crack = re.compile(r'(\r\n|\r|\n)')
# RFC 2822 $3.6.8 Optional fields.  ftext is %d33-57 / %d59-126, Any character
# except controls, SP, and ":".
headerRE = re.compile(r'^(From |[\041-\071\073-\176]{1,}:|[\t ])')
EMPTYSTRING = ''
NL = '\n'
# Sentinel yielded by the parser generator when it needs more data.
NeedMoreData = object()
class BufferedSubFile(object):
    """A file-ish object that can have new data loaded into it.

    You can also push and pop line-matching predicates onto a stack.  When the
    current predicate matches the current line, a false EOF response
    (i.e. empty string) is returned instead.  This lets the parser adhere to a
    simple abstraction -- it parses until EOF closes the current message.
    """
    def __init__(self):
        # The last partial line pushed into this object.
        self._partial = ''
        # The list of full, pushed lines, in reverse order
        self._lines = []
        # The stack of false-EOF checking predicates.
        self._eofstack = []
        # A flag indicating whether the file has been closed or not.
        self._closed = False

    def push_eof_matcher(self, pred):
        # Install *pred*; readline() reports EOF on lines it matches.
        self._eofstack.append(pred)

    def pop_eof_matcher(self):
        return self._eofstack.pop()

    def close(self):
        # Don't forget any trailing partial line.
        self._lines.append(self._partial)
        self._partial = ''
        self._closed = True

    def readline(self):
        """Return the next line, '' at (possibly false) EOF, or the
        NeedMoreData sentinel when no complete line is buffered yet."""
        if not self._lines:
            if self._closed:
                return ''
            return NeedMoreData
        # Pop the line off the stack and see if it matches the current
        # false-EOF predicate.
        line = self._lines.pop()
        # RFC 2046, section 5.1.2 requires us to recognize outer level
        # boundaries at any level of inner nesting.  Do this, but be sure it's
        # in the order of most to least nested.
        for ateof in self._eofstack[::-1]:
            if ateof(line):
                # We're at the false EOF.  But push the last line back first.
                self._lines.append(line)
                return ''
        return line

    def unreadline(self, line):
        # Let the consumer push a line back into the buffer.
        assert line is not NeedMoreData
        self._lines.append(line)

    def push(self, data):
        """Push some new data into this object."""
        # Handle any previous leftovers
        data, self._partial = self._partial + data, ''
        # Crack into lines, but preserve the newlines on the end of each
        parts = NLCRE_crack.split(data)
        # The *ahem* interesting behaviour of re.split when supplied grouping
        # parentheses is that the last element of the resulting list is the
        # data after the final RE.  In the case of a NL/CR terminated string,
        # this is the empty string.
        self._partial = parts.pop()
        #GAN 29Mar09  bugs 1555570, 1721862  Confusion at 8K boundary ending with \r:
        # is there a \n to follow later?
        if not self._partial and parts and parts[-1].endswith('\r'):
            self._partial = parts.pop(-2)+parts.pop()
        # parts is a list of strings, alternating between the line contents
        # and the eol character(s).  Gather up a list of lines after
        # re-attaching the newlines.
        lines = []
        for i in range(len(parts) // 2):
            lines.append(parts[i*2] + parts[i*2+1])
        self.pushlines(lines)

    def pushlines(self, lines):
        # Reverse and insert at the front of the lines.
        self._lines[:0] = lines[::-1]

    def is_closed(self):
        return self._closed

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol; '' (EOF, possibly false) ends
        # iteration, NeedMoreData is passed through to the caller.
        line = self.readline()
        if line == '':
            raise StopIteration
        return line
class FeedParser:
"""A feed-style parser of email."""
    def __init__(self, _factory=message.Message):
        """_factory is called with no arguments to create a new message obj"""
        self._factory = _factory
        self._input = BufferedSubFile()
        self._msgstack = []     # stack of nested (multipart) messages
        # _parsegen is a generator; parsing advances by calling its
        # .next() until it raises StopIteration or yields NeedMoreData.
        self._parse = self._parsegen().next
        self._cur = None        # current (innermost) message object
        self._last = None       # most recently created message object
        self._headersonly = False
    # Non-public interface for supporting Parser's headersonly flag;
    # when set, everything after the headers is stored as a raw payload.
    def _set_headersonly(self):
        self._headersonly = True
    def feed(self, data):
        """Push more data into the parser."""
        self._input.push(data)
        # Drive the parser generator as far as the new data allows.
        self._call_parse()
    def _call_parse(self):
        # Advance the parser generator; StopIteration simply means the
        # generator ran to completion (all input consumed).
        try:
            self._parse()
        except StopIteration:
            pass
    def close(self):
        """Parse all remaining data and return the root message object."""
        self._input.close()
        self._call_parse()
        root = self._pop_message()
        assert not self._msgstack
        # Look for final set of defects: a multipart content type whose
        # body never produced any subparts violates the MIME invariant.
        if root.get_content_maintype() == 'multipart' \
               and not root.is_multipart():
            root.defects.append(errors.MultipartInvariantViolationDefect())
        return root
def _new_message(self):
msg = self._factory()
if self._cur and self._cur.get_content_type() == 'multipart/digest':
msg.set_default_type('message/rfc822')
if self._msgstack:
self._msgstack[-1].attach(msg)
self._msgstack.append(msg)
self._cur = msg
self._last = msg
def _pop_message(self):
retval = self._msgstack.pop()
if self._msgstack:
self._cur = self._msgstack[-1]
else:
self._cur = None
return retval
def _parsegen(self):
# Create a new message and start by parsing headers.
self._new_message()
headers = []
# Collect the headers, searching for a line that doesn't match the RFC
# 2822 header or continuation pattern (including an empty line).
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
if not headerRE.match(line):
# If we saw the RFC defined header/body separator
# (i.e. newline), just throw it away. Otherwise the line is
# part of the body so push it back.
if not NLCRE.match(line):
self._input.unreadline(line)
break
headers.append(line)
# Done with the headers, so parse them and figure out what we're
# supposed to see in the body of the message.
self._parse_headers(headers)
# Headers-only parsing is a backwards compatibility hack, which was
# necessary in the older parser, which could throw errors. All
# remaining lines in the input are thrown into the message body.
if self._headersonly:
lines = []
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
if line == '':
break
lines.append(line)
self._cur.set_payload(EMPTYSTRING.join(lines))
return
if self._cur.get_content_type() == 'message/delivery-status':
# message/delivery-status contains blocks of headers separated by
# a blank line. We'll represent each header block as a separate
# nested message object, but the processing is a bit different
# than standard message/* types because there is no body for the
# nested messages. A blank line separates the subparts.
while True:
self._input.push_eof_matcher(NLCRE.match)
for retval in self._parsegen():
if retval is NeedMoreData:
yield NeedMoreData
continue
break
msg = self._pop_message()
# We need to pop the EOF matcher in order to tell if we're at
# the end of the current file, not the end of the last block
# of message headers.
self._input.pop_eof_matcher()
# The input stream must be sitting at the newline or at the
# EOF. We want to see if we're at the end of this subpart, so
# first consume the blank line, then test the next line to see
# if we're at this subpart's EOF.
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
break
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
break
if line == '':
break
# Not at EOF so this is a line we're going to need.
self._input.unreadline(line)
return
if self._cur.get_content_maintype() == 'message':
# The message claims to be a message/* type, then what follows is
# another RFC 2822 message.
for retval in self._parsegen():
if retval is NeedMoreData:
yield NeedMoreData
continue
break
self._pop_message()
return
if self._cur.get_content_maintype() == 'multipart':
boundary = self._cur.get_boundary()
if boundary is None:
# The message /claims/ to be a multipart but it has not
# defined a boundary. That's a problem which we'll handle by
# reading everything until the EOF and marking the message as
# defective.
self._cur.defects.append(errors.NoBoundaryInMultipartDefect())
lines = []
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
lines.append(line)
self._cur.set_payload(EMPTYSTRING.join(lines))
return
# Create a line match predicate which matches the inter-part
# boundary as well as the end-of-multipart boundary. Don't push
# this onto the input stream until we've scanned past the
# preamble.
separator = '--' + boundary
boundaryre = re.compile(
'(?P<sep>' + re.escape(separator) +
r')(?P<end>--)?(?P<ws>[ \t]*)(?P<linesep>\r\n|\r|\n)?$')
capturing_preamble = True
preamble = []
linesep = False
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
if line == '':
break
mo = boundaryre.match(line)
if mo:
# If we're looking at the end boundary, we're done with
# this multipart. If there was a newline at the end of
# the closing boundary, then we need to initialize the
# epilogue with the empty string (see below).
if mo.group('end'):
linesep = mo.group('linesep')
break
# We saw an inter-part boundary. Were we in the preamble?
if capturing_preamble:
if preamble:
# According to RFC 2046, the last newline belongs
# to the boundary.
lastline = preamble[-1]
eolmo = NLCRE_eol.search(lastline)
if eolmo:
preamble[-1] = lastline[:-len(eolmo.group(0))]
self._cur.preamble = EMPTYSTRING.join(preamble)
capturing_preamble = False
self._input.unreadline(line)
continue
# We saw a boundary separating two parts. Consume any
# multiple boundary lines that may be following. Our
# interpretation of RFC 2046 BNF grammar does not produce
# body parts within such double boundaries.
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
mo = boundaryre.match(line)
if not mo:
self._input.unreadline(line)
break
# Recurse to parse this subpart; the input stream points
# at the subpart's first line.
self._input.push_eof_matcher(boundaryre.match)
for retval in self._parsegen():
if retval is NeedMoreData:
yield NeedMoreData
continue
break
# Because of RFC 2046, the newline preceding the boundary
# separator actually belongs to the boundary, not the
# previous subpart's payload (or epilogue if the previous
# part is a multipart).
if self._last.get_content_maintype() == 'multipart':
epilogue = self._last.epilogue
if epilogue == '':
self._last.epilogue = None
elif epilogue is not None:
mo = NLCRE_eol.search(epilogue)
if mo:
end = len(mo.group(0))
self._last.epilogue = epilogue[:-end]
else:
payload = self._last.get_payload()
if isinstance(payload, basestring):
mo = NLCRE_eol.search(payload)
if mo:
payload = payload[:-len(mo.group(0))]
self._last.set_payload(payload)
self._input.pop_eof_matcher()
self._pop_message()
# Set the multipart up for newline cleansing, which will
# happen if we're in a nested multipart.
self._last = self._cur
else:
# I think we must be in the preamble
assert capturing_preamble
preamble.append(line)
# We've seen either the EOF or the end boundary. If we're still
# capturing the preamble, we never saw the start boundary. Note
# that as a defect and store the captured text as the payload.
# Everything from here to the EOF is epilogue.
if capturing_preamble:
self._cur.defects.append(errors.StartBoundaryNotFoundDefect())
self._cur.set_payload(EMPTYSTRING.join(preamble))
epilogue = []
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
self._cur.epilogue = EMPTYSTRING.join(epilogue)
return
# If the end boundary ended in a newline, we'll need to make sure
# the epilogue isn't None
if linesep:
epilogue = ['']
else:
epilogue = []
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
epilogue.append(line)
# Any CRLF at the front of the epilogue is not technically part of
# the epilogue. Also, watch out for an empty string epilogue,
# which means a single newline.
if epilogue:
firstline = epilogue[0]
bolmo = NLCRE_bol.match(firstline)
if bolmo:
epilogue[0] = firstline[len(bolmo.group(0)):]
self._cur.epilogue = EMPTYSTRING.join(epilogue)
return
# Otherwise, it's some non-multipart type, so the entire rest of the
# file contents becomes the payload.
lines = []
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
lines.append(line)
self._cur.set_payload(EMPTYSTRING.join(lines))
def _parse_headers(self, lines):
# Passed a list of lines that make up the headers for the current msg
lastheader = ''
lastvalue = []
for lineno, line in enumerate(lines):
# Check for continuation
if line[0] in ' \t':
if not lastheader:
# The first line of the headers was a continuation. This
# is illegal, so let's note the defect, store the illegal
# line, and ignore it for purposes of headers.
defect = errors.FirstHeaderLineIsContinuationDefect(line)
self._cur.defects.append(defect)
continue
lastvalue.append(line)
continue
if lastheader:
# XXX reconsider the joining of folded lines
lhdr = EMPTYSTRING.join(lastvalue)[:-1].rstrip('\r\n')
self._cur[lastheader] = lhdr
lastheader, lastvalue = '', []
# Check for envelope header, i.e. unix-from
if line.startswith('From '):
if lineno == 0:
# Strip off the trailing newline
mo = NLCRE_eol.search(line)
if mo:
line = line[:-len(mo.group(0))]
self._cur.set_unixfrom(line)
continue
elif lineno == len(lines) - 1:
# Something looking like a unix-from at the end - it's
# probably the first line of the body, so push back the
# line and stop.
self._input.unreadline(line)
return
else:
# Weirdly placed unix-from line. Note this as a defect
# and ignore it.
defect = errors.MisplacedEnvelopeHeaderDefect(line)
self._cur.defects.append(defect)
continue
# Split the line on the colon separating field name from value.
i = line.find(':')
if i < 0:
defect = errors.MalformedHeaderDefect(line)
self._cur.defects.append(defect)
continue
lastheader = line[:i]
lastvalue = [line[i+1:].lstrip()]
# Done with all the lines, so handle the last header.
if lastheader:
# XXX reconsider the joining of folded lines
self._cur[lastheader] = EMPTYSTRING.join(lastvalue).rstrip('\r\n')
|
gpl-3.0
|
shashank19gaurav/tatoeba2
|
docs/tatoeba2-django/tatoeba2/tests/conftest.py
|
1
|
7611
|
from tatoeba2.models import Sentences, SentenceComments, SentencesTranslations, Users, TagsSentences, SentencesSentencesLists, FavoritesUsers, SentenceAnnotations, Contributions, Wall
from datetime import datetime
from tatoeba2.management.commands.deduplicate import Dedup
from django.db import connections
from django.db.models.loading import get_model
import pytest
import os
def pytest_addoption(parser):
    """Register the --mysql flag with pytest's option parser.

    When set, the test fixtures perform MySQL-specific cleanup that plain
    transaction rollback cannot undo.
    """
    parser.addoption(
        '--mysql',
        action='store_true',
        help='handles mysql-specific resets that even transactions can\'t roll back...(gg mysql, gg)',
    )
@pytest.fixture
def sents(db, request):
    # Seed fixture: creates 21 sentences (IDs 1-21, relied on by the numeric
    # comments below) covering owner/audio/correctness combinations, plus
    # comments, links, tags, lists, favorites, annotations, contribution
    # logs, and one wall post.  Python 2 code (uses xrange).
    # no owner, no audio, no correctness 1-4
    Sentences(text='Normal, not duplicated.', lang='eng', modified=datetime(2014, 1, 1)).save()
    for i in xrange(3): Sentences(text='Normal, duplicated.', lang='eng', modified=datetime(2014, 1, 1)).save()
    # has owner 5-8
    Sentences(text='Has owner, not duplicated.', lang='eng', user_id=1, modified=datetime(2014, 1, 2)).save()
    for i in xrange(2): Sentences(text='Has owner, duplicated.', lang='eng', modified=datetime(2014, 1, 2)).save()
    Sentences(text='Has owner, duplicated.', lang='eng', user_id=1, modified=datetime(2014, 1, 2)).save()
    # has audio 9-12
    Sentences(text='Has audio, not duplicated.', lang='eng', hasaudio='shtooka', modified=datetime(2014, 1, 3)).save()
    for i in xrange(2): Sentences(text='Has audio, duplicated.', lang='eng', modified=datetime(2014, 1, 3)).save()
    Sentences(text='Has audio, duplicated.', lang='eng', hasaudio='shtooka', modified=datetime(2014, 1, 3)).save()
    # correctness -1 13-16
    Sentences(text='Correctness -1, not duplicated.', lang='eng', correctness=-1, modified=datetime(2014, 1, 4)).save()
    for i in xrange(2): Sentences(text='Correctness -1, duplicated.', lang='eng', modified=datetime(2014, 1, 4)).save()
    Sentences(text='Correctness -1, duplicated.', lang='eng', correctness=-1, modified=datetime(2014, 1, 4)).save()
    # has owner, has audio, correctness -1 17-21
    Sentences(text='Has owner, Has audio, Correctness -1, not duplicated.', lang='eng', user_id=1, hasaudio='shtooka', correctness=-1, modified=datetime(2014, 1, 5)).save()
    Sentences(text='Has owner, Has audio, Correctness -1 duplicated.', lang='eng', modified=datetime(2014, 1, 5)).save()
    Sentences(text='Has owner, Has audio, Correctness -1 duplicated.', lang='eng', user_id=1, modified=datetime(2014, 1, 5)).save()
    Sentences(text='Has owner, Has audio, Correctness -1 duplicated.', lang='eng', hasaudio='shtooka', modified=datetime(2014, 1, 5)).save()
    Sentences(text='Has owner, Has audio, Correctness -1 duplicated.', lang='eng', correctness=-1, modified=datetime(2014, 1, 5)).save()
    # Comments, reciprocal links, tags, list memberships, favorites and
    # annotations attached to the "has owner" duplicates (IDs 6-8).
    for i in xrange(6, 8+1): SentenceComments(sentence_id=i, text='Comment on '+str(i), user_id=1, created=datetime.now(), hidden=0).save()
    SentencesTranslations(sentence_id=6, translation_id=9, distance=1).save()
    SentencesTranslations(sentence_id=9, translation_id=6, distance=1).save()
    SentencesTranslations(sentence_id=7, translation_id=10, distance=1).save()
    SentencesTranslations(sentence_id=10, translation_id=7, distance=1).save()
    TagsSentences(tag_id=1, sentence_id=6, user_id=1, added_time=datetime.now()).save()
    TagsSentences(tag_id=2, sentence_id=7, user_id=1, added_time=datetime.now()).save()
    TagsSentences(tag_id=3, sentence_id=8, user_id=1, added_time=datetime.now()).save()
    SentencesSentencesLists(sentences_list_id=1, sentence_id=6).save()
    SentencesSentencesLists(sentences_list_id=2, sentence_id=7).save()
    SentencesSentencesLists(sentences_list_id=3, sentence_id=8).save()
    FavoritesUsers(user_id=1, favorite_id=6).save()
    FavoritesUsers(user_id=2, favorite_id=7).save()
    FavoritesUsers(user_id=3, favorite_id=8).save()
    SentenceAnnotations(meaning_id=1, text='', modified=datetime.now(), user_id=1, sentence_id=6).save()
    SentenceAnnotations(meaning_id=2, text='', modified=datetime.now(), user_id=1, sentence_id=7).save()
    SentenceAnnotations(meaning_id=3, text='', modified=datetime.now(), user_id=1, sentence_id=8).save()
    SentenceAnnotations(meaning_id=10, text='', modified=datetime.now(), user_id=1, sentence_id=13).save()
    SentenceAnnotations(meaning_id=11, text='', modified=datetime.now(), user_id=1, sentence_id=14).save()
    SentenceAnnotations(meaning_id=12, text='', modified=datetime.now(), user_id=1, sentence_id=15).save()
    Contributions(text='Logs for 6', action='update', user_id=1, datetime=datetime.now(), type='sentence', sentence_id=6).save()
    Contributions(text='Logs for 6', action='insert', user_id=1, datetime=datetime.now(), type='link', sentence_id=6, translation_id=9).save()
    Contributions(text='Logs for 7', action='insert', user_id=1, datetime=datetime.now(), type='sentence', sentence_id=7).save()
    Contributions(text='', action='insert', user_id=1, datetime=datetime.now(), type='sentence', sentence_id=8).save()
    Contributions(text='Unknown datetime record', action='update', user_id=1, datetime=None, type='sentence', sentence_id=8).save()
    Wall(owner=1, content='test post', date=datetime.utcnow(), title='', hidden=0, lft=1, rght=2).save()
    if request.config.option.mysql:
        # MySQL resets that transactions can't roll back: at teardown,
        # delete rows, TRUNCATE, and reset AUTO_INCREMENT for every table
        # this fixture touched.
        def fin():
            conn = connections['default']
            def clean_up(model):
                Model = get_model('tatoeba2.'+model)
                Model.objects.all().delete()
                conn.cursor().execute('TRUNCATE TABLE '+Model._meta.db_table+';')
                conn.cursor().execute('ALTER TABLE '+Model._meta.db_table+' AUTO_INCREMENT = 1;')
            clean_up('Sentences')
            clean_up('SentencesTranslations')
            clean_up('SentenceComments')
            clean_up('TagsSentences')
            clean_up('SentencesSentencesLists')
            clean_up('FavoritesUsers')
            clean_up('Contributions')
            clean_up('Users')
            clean_up('Wall')
            clean_up('SentenceAnnotations')
        request.addfinalizer(fin)
@pytest.fixture
def bot(db):
    # Bot user ("Horus") used by the dedup fixture below (assigned to
    # Dedup.bot there).
    return Users.objects.create(
        username='Horus', password='', email='bot@bots.com',
        since=datetime.now(), last_time_active=datetime.now().strftime('%Y%m%d'),
        level=1, is_public=1, send_notifications=0, group_id=1
    )
@pytest.fixture
def dedup(request, bot):
    # Configured Dedup command class: timers and logger initialized, real
    # (non-dry) runs, edits attributed to the bot user.  The log file the
    # logger creates is removed again at teardown.
    Dedup.time_init()
    Dedup.logger_init()
    Dedup.dry = False
    Dedup.bot = bot
    def fin():
        os.remove(Dedup.log_file_path)
    request.addfinalizer(fin)
    return Dedup
def bidirect_link(a, b):
    """Create a reciprocal translation link between sentences *a* and *b*.

    Saves two SentencesTranslations rows (a->b first, then b->a), both
    with distance=1.
    """
    for src, dst in ((a, b), (b, a)):
        SentencesTranslations(sentence_id=src, translation_id=dst, distance=1).save()
@pytest.fixture
def linked_dups():
    # Cross-link the seeded duplicate sentences (see the sents fixture) so
    # tests have reciprocal links to merge/rewrite.
    bidirect_link(1, 2)
    bidirect_link(1, 3)
    bidirect_link(2, 3)
    bidirect_link(2, 5)
    bidirect_link(3, 6)
    bidirect_link(4, 6)
@pytest.fixture
def dups_in_list():
    # Put two duplicate sentences (IDs 3 and 4) into the same list (ID 4).
    SentencesSentencesLists(sentences_list_id=4, sentence_id=3).save()
    SentencesSentencesLists(sentences_list_id=4, sentence_id=4).save()
@pytest.fixture
def dups_in_fav():
    # Have one user (ID 1) favorite both duplicate sentences (IDs 3 and 4).
    FavoritesUsers(user_id=1, favorite_id=3).save()
    FavoritesUsers(user_id=1, favorite_id=4).save()
@pytest.fixture
def linked_dups_depth():
    # Two extra duplicates plus a chain of links (10-11-12-13-14) for
    # exercising link traversal over several hops.
    Sentences(text='Has audio, duplicated.', lang='eng', created=datetime(2014, 1, 3)).save()
    Sentences(text='Has audio, duplicated.', lang='eng', created=datetime(2014, 1, 3)).save()
    bidirect_link(10, 11)
    bidirect_link(11, 12)
    bidirect_link(12, 13)
    bidirect_link(13, 14)
|
agpl-3.0
|
fluxw42/youtube-dl
|
youtube_dl/extractor/weiqitv.py
|
64
|
1681
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class WeiqiTVIE(InfoExtractor):
    # Extractor for weiqitv.com video pages.  It only scrapes the page for
    # metadata and the embedded LetvCloud playback URL, then hands off to
    # the LetvCloud extractor via a url_transparent result.
    IE_DESC = 'WQTV'
    _VALID_URL = r'https?://(?:www\.)?weiqitv\.com/index/video_play\?videoId=(?P<id>[A-Za-z0-9]+)'
    _TESTS = [{
        'url': 'http://www.weiqitv.com/index/video_play?videoId=53c744f09874f0e76a8b46f3',
        'md5': '26450599afd64c513bc77030ad15db44',
        'info_dict': {
            'id': '53c744f09874f0e76a8b46f3',
            'ext': 'mp4',
            'title': '2013年度盘点',
        },
    }, {
        'url': 'http://www.weiqitv.com/index/video_play?videoId=567379a2d4c36cca518b4569',
        'info_dict': {
            'id': '567379a2d4c36cca518b4569',
            'ext': 'mp4',
            'title': '民国围棋史',
        },
    }, {
        'url': 'http://www.weiqitv.com/index/video_play?videoId=5430220a9874f088658b4567',
        'info_dict': {
            'id': '5430220a9874f088658b4567',
            'ext': 'mp4',
            'title': '二路托过的手段和运用',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Inline "var video = {...};" JSON blob carries the title metadata.
        video_info = self._parse_json(
            self._search_regex(
                r'var\s+video\s*=\s*(.+});', webpage, 'info json str'),
            video_id)
        # The actual playback URL lives in "var letvurl = "...";".
        letvcloud_url = self._search_regex(
            r'var\s+letvurl\s*=\s*"([^"]+)', webpage, 'letvcloud url')
        return {
            '_type': 'url_transparent',
            'ie_key': 'LetvCloud',
            'url': letvcloud_url,
            'title': video_info['name'],
            'id': video_id,
        }
|
unlicense
|
XiaosongWei/blink-crosswalk
|
Tools/Scripts/webkitpy/style/checkers/python_unittest.py
|
33
|
2675
|
# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for python.py."""
import os
import unittest
from python import PythonChecker
class PythonCheckerTest(unittest.TestCase):
    """Tests the PythonChecker class."""

    def test_init(self):
        """Test __init__() method."""
        def _mock_handle_style_error(self):
            pass

        # The constructor should simply record the path and the handler.
        checker = PythonChecker("foo.txt", _mock_handle_style_error)
        self.assertEqual(checker._file_path, "foo.txt")
        self.assertEqual(checker._handle_style_error, _mock_handle_style_error)

    def test_check(self):
        """Test check() method."""
        errors = []

        def _mock_handle_style_error(line_number, category, confidence,
                                     message):
            errors.append((line_number, category, confidence, message))

        # python_unittest_input.py is a fixture file with known violations.
        file_path = os.path.join(os.path.dirname(__file__),
                                 "python_unittest_input.py")
        checker = PythonChecker(file_path, _mock_handle_style_error)
        checker.check(lines=[])
        self.assertEqual(errors, [
            (4, "pep8/W291", 5, "trailing whitespace"),
            (4, "pylint/E1601(print-statement)", 5, "[] print statement used"),
            (4, "pylint/E0602(undefined-variable)", 5, "[] Undefined variable 'error'"),
        ])
|
bsd-3-clause
|
arthurSena/processors
|
tests/processors/euctr/test_extractor.py
|
2
|
4326
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import datetime
import pytest
import processors.euctr.extractors as extractors
class TestEUCTRExtractors(object):
    # Unit tests for processors.euctr.extractors (extract_trial and
    # extract_documents), driven by the module-level stub_record fixture.
    def test_stub_record_is_valid(self, stub_record):
        # Smoke test: the stub fixture must be accepted without raising.
        extractors.extract_trial(stub_record)
    @pytest.mark.parametrize('male, female, expected_gender', [
        (True, None, 'male'),
        (True, True, 'both'),
        (True, False, 'male'),
    ])
    def test_extract_gender_for_trial(self, stub_record, male, female, expected_gender):
        stub_record.update({
            'subject_male': male,
            'subject_female': female,
        })
        trial = extractors.extract_trial(stub_record)
        assert trial['gender'] == expected_gender
    @pytest.mark.parametrize('identifiers, expected_identifiers', [
        (
            {
                'eudract_number': '2013-030180-02',
                'us_nct_clinicaltrials_gov_registry_number': 'NCT00020500',
            },
            {
                'nct': 'NCT00020500',
                'euctr': 'EUCTR2013-030180-02',
            }
        ),
    ])
    def test_extracted_identifiers_for_trial(self, stub_record, identifiers, expected_identifiers):
        # The EudraCT number is expected to come back prefixed with "EUCTR".
        stub_record.update(identifiers)
        extracted_trial = extractors.extract_trial(stub_record)
        assert extracted_trial['identifiers'] == expected_identifiers
    @pytest.mark.parametrize('status, expected_status, expected_rec_status', [
        (None, None, None),
        ('Completed', 'complete', 'not_recruiting'),
    ])
    def test_extract_status_for_trial(self, stub_record, status, expected_status, expected_rec_status):
        stub_record.update({ 'trial_status': status })
        extracted_trial = extractors.extract_trial(stub_record)
        assert extracted_trial['status'] == expected_status
        assert extracted_trial['recruitment_status'] == expected_rec_status
    @pytest.mark.parametrize('trial_results_url, has_published_results', [
        (None, False),
        ('https://www.clinicaltrialsregister.eu/ctr-search/trial/2015-004907-22/results', True),
    ])
    def test_trial_has_published_results(self, stub_record, trial_results_url, has_published_results):
        stub_record.update({ 'trial_results_url': trial_results_url })
        extracted_trial = extractors.extract_trial(stub_record)
        assert extracted_trial['has_published_results'] == has_published_results
    @pytest.mark.parametrize('trial_results_url', [
        'https://www.clinicaltrialsregister.eu/ctr-search/trial/2015-004907-22/result',
    ])
    def test_extract_document_if_trial_results_url(self, stub_record, trial_results_url):
        # A results URL should yield a document pointing at that URL.
        stub_record.update({ 'trial_results_url': trial_results_url })
        extracted_documents = extractors.extract_documents(stub_record)
        assert extracted_documents[0]['source_url'] == trial_results_url
@pytest.fixture
def stub_record():
    # Minimal-but-complete EUCTR register record, as the scraper would
    # produce it; individual tests update() single fields before extracting.
    return {
        'eudract_number': '2013-030180-02',
        'us_nct_clinicaltrials_gov_registry_number': 'NCT00020500',
        'isrctn_international_standard_randomised_controlled_trial_numbe': None,
        'who_universal_trial_reference_number_utrn': None,
        'title_of_the_trial_for_lay_people_in_easily_understood_i_e_non_': 'Title',
        'eudract_number_with_country': '2013-030180-02/IT',
        'full_title_of_the_trial': 'Full title of trial',
        'trial_status': 'Completed',
        'subject_female': True,
        'subject_male': True,
        'trial_results': 'View results',
        'trial_results_url': 'https://www.clinicaltrialsregister.eu/ctr-search/trial/2015-004907-22/results',
        'trial_main_objective_of_the_trial': 'Heal everybody',
        'date_on_which_this_record_was_first_entered_in_the_eudract_data': datetime.datetime(2012, 1, 1),
        'date_of_the_global_end_of_the_trial': datetime.datetime(2016, 1, 1),
        'public_title': 'Public title of trial',
        'trial_principal_inclusion_criteria': 'everybody',
        'trial_principal_exclusion_criteria': 'nobody',
        'subject_in_the_whole_clinical_trial': 12,
        'target_sample_size': 10000000,
        'first_enrollment_date': None,
    }
|
mit
|
chrisfilda/edx_platform
|
lms/djangoapps/instructor/tests/test_hint_manager.py
|
31
|
9180
|
import json
from django.test.client import Client, RequestFactory
from django.test.utils import override_settings
from mock import patch, MagicMock
from courseware.models import XModuleUserStateSummaryField
from courseware.tests.factories import UserStateSummaryFactory
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
import instructor.hint_manager as view
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class HintManagerTest(ModuleStoreTestCase):
    # Tests for the instructor hint-manager view (instructor.hint_manager).
    # Python 2 code: uses bare `print` statements for debug output.
    def setUp(self):
        """
        Makes a course, which will be the same for all tests.
        Set up mako middleware, which is necessary for template rendering to happen.
        """
        self.course = CourseFactory.create(org='Me', number='19.002', display_name='test_course')
        self.url = '/courses/Me/19.002/test_course/hint_manager'
        self.user = UserFactory.create(username='robot', email='robot@edx.org', password='test', is_staff=True)
        self.c = Client()
        self.c.login(username='robot', password='test')
        self.problem_id = 'i4x://Me/19.002/crowdsource_hinter/crowdsource_hinter_001'
        self.course_id = 'Me/19.002/test_course'
        # Seed the three summary fields the view reads: approved hints,
        # the moderation queue, and the hint primary-key counter.
        UserStateSummaryFactory.create(field_name='hints',
                                       usage_id=self.problem_id,
                                       value=json.dumps({'1.0': {'1': ['Hint 1', 2],
                                                                 '3': ['Hint 3', 12]},
                                                         '2.0': {'4': ['Hint 4', 3]}
                                                         }))
        UserStateSummaryFactory.create(field_name='mod_queue',
                                       usage_id=self.problem_id,
                                       value=json.dumps({'2.0': {'2': ['Hint 2', 1]}}))
        UserStateSummaryFactory.create(field_name='hint_pk',
                                       usage_id=self.problem_id,
                                       value=5)
        # Mock out location_to_problem_name, which ordinarily accesses the modulestore.
        # (I can't figure out how to get fake structures into the modulestore.)
        view.location_to_problem_name = lambda course_id, loc: "Test problem"
    def test_student_block(self):
        """
        Makes sure that students cannot see the hint management view.
        """
        c = Client()
        UserFactory.create(username='student', email='student@edx.org', password='test')
        c.login(username='student', password='test')
        out = c.get(self.url)
        print out
        self.assertTrue('Sorry, but students are not allowed to access the hint manager!' in out.content)
    def test_staff_access(self):
        """
        Makes sure that staff can access the hint management view.
        """
        out = self.c.get('/courses/Me/19.002/test_course/hint_manager')
        print out
        self.assertTrue('Hints Awaiting Moderation' in out.content)
    def test_invalid_field_access(self):
        """
        Makes sure that field names other than 'mod_queue' and 'hints' are
        rejected.
        """
        out = self.c.post(self.url, {'op': 'delete hints', 'field': 'all your private data'})
        print out
        self.assertTrue('an invalid field was accessed' in out.content)
    def test_switchfields(self):
        """
        Checks that the op: 'switch fields' POST request works.
        """
        out = self.c.post(self.url, {'op': 'switch fields', 'field': 'mod_queue'})
        print out
        self.assertTrue('Hint 2' in out.content)
    def test_gethints(self):
        """
        Checks that gethints returns the right data.
        """
        request = RequestFactory()
        post = request.post(self.url, {'field': 'mod_queue'})
        out = view.get_hints(post, self.course_id, 'mod_queue')
        print out
        self.assertTrue(out['other_field'] == 'hints')
        expected = {self.problem_id: [(u'2.0', {u'2': [u'Hint 2', 1]})]}
        self.assertTrue(out['all_hints'] == expected)
    def test_gethints_other(self):
        """
        Same as above, with hints instead of mod_queue
        """
        request = RequestFactory()
        post = request.post(self.url, {'field': 'hints'})
        out = view.get_hints(post, self.course_id, 'hints')
        print out
        self.assertTrue(out['other_field'] == 'mod_queue')
        expected = {self.problem_id: [('1.0', {'1': ['Hint 1', 2],
                                               '3': ['Hint 3', 12]}),
                                      ('2.0', {'4': ['Hint 4', 3]})
                                      ]}
        self.assertTrue(out['all_hints'] == expected)
    def test_deletehints(self):
        """
        Checks that delete_hints deletes the right stuff.
        """
        request = RequestFactory()
        post = request.post(self.url, {'field': 'hints',
                                       'op': 'delete hints',
                                       1: [self.problem_id, '1.0', '1']})
        view.delete_hints(post, self.course_id, 'hints')
        problem_hints = XModuleUserStateSummaryField.objects.get(field_name='hints', usage_id=self.problem_id).value
        self.assertTrue('1' not in json.loads(problem_hints)['1.0'])
    def test_changevotes(self):
        """
        Checks that vote changing works.
        """
        request = RequestFactory()
        post = request.post(self.url, {'field': 'hints',
                                       'op': 'change votes',
                                       1: [self.problem_id, '1.0', '1', 5]})
        view.change_votes(post, self.course_id, 'hints')
        problem_hints = XModuleUserStateSummaryField.objects.get(field_name='hints', usage_id=self.problem_id).value
        # hints[answer][hint_pk (string)] = [hint text, vote count]
        print json.loads(problem_hints)['1.0']['1']
        self.assertTrue(json.loads(problem_hints)['1.0']['1'][1] == 5)
    def test_addhint(self):
        """
        Check that instructors can add new hints.
        """
        # Because add_hint accesses the xmodule, this test requires a bunch
        # of monkey patching.
        hinter = MagicMock()
        hinter.validate_answer = lambda string: True
        request = RequestFactory()
        post = request.post(self.url, {'field': 'mod_queue',
                                       'op': 'add hint',
                                       'problem': self.problem_id,
                                       'answer': '3.14',
                                       'hint': 'This is a new hint.'})
        post.user = 'fake user'
        with patch('courseware.module_render.get_module', MagicMock(return_value=hinter)):
            with patch('courseware.model_data.FieldDataCache', MagicMock(return_value=None)):
                view.add_hint(post, self.course_id, 'mod_queue')
        problem_hints = XModuleUserStateSummaryField.objects.get(field_name='mod_queue', usage_id=self.problem_id).value
        self.assertTrue('3.14' in json.loads(problem_hints))
    def test_addbadhint(self):
        """
        Check that instructors cannot add hints with unparsable answers.
        """
        # Patching.
        hinter = MagicMock()
        hinter.validate_answer = lambda string: False
        request = RequestFactory()
        post = request.post(self.url, {'field': 'mod_queue',
                                       'op': 'add hint',
                                       'problem': self.problem_id,
                                       'answer': 'fish',
                                       'hint': 'This is a new hint.'})
        post.user = 'fake user'
        with patch('courseware.module_render.get_module', MagicMock(return_value=hinter)):
            with patch('courseware.model_data.FieldDataCache', MagicMock(return_value=None)):
                view.add_hint(post, self.course_id, 'mod_queue')
        problem_hints = XModuleUserStateSummaryField.objects.get(field_name='mod_queue', usage_id=self.problem_id).value
        self.assertTrue('fish' not in json.loads(problem_hints))
    def test_approve(self):
        """
        Check that instructors can approve hints. (Move them
        from the mod_queue to the hints.)
        """
        request = RequestFactory()
        post = request.post(self.url, {'field': 'mod_queue',
                                       'op': 'approve',
                                       1: [self.problem_id, '2.0', '2']})
        view.approve(post, self.course_id, 'mod_queue')
        # The approved hint must be gone from the queue...
        problem_hints = XModuleUserStateSummaryField.objects.get(field_name='mod_queue', usage_id=self.problem_id).value
        self.assertTrue('2.0' not in json.loads(problem_hints) or len(json.loads(problem_hints)['2.0']) == 0)
        # ...and present in the approved hints for that answer.
        problem_hints = XModuleUserStateSummaryField.objects.get(field_name='hints', usage_id=self.problem_id).value
        self.assertTrue(json.loads(problem_hints)['2.0']['2'] == ['Hint 2', 1])
        self.assertTrue(len(json.loads(problem_hints)['2.0']) == 2)
|
agpl-3.0
|
FireWRT/OpenWrt-Firefly-Libraries
|
staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/distutils/text_file.py
|
136
|
12467
|
"""text_file
provides the TextFile class, which gives an interface to text files
that (optionally) takes care of stripping comments, ignoring blank
lines, and joining lines with backslashes."""
import sys, os, io
class TextFile:
    """Provides a file-like object that takes care of all the things you
    commonly want to do when processing a text file that has some
    line-by-line syntax: strip comments (as long as "#" is your
    comment character), skip blank lines, join adjacent lines by
    escaping the newline (ie. backslash at end of line), strip
    leading and/or trailing whitespace. All of these are optional
    and independently controllable.
    Provides a 'warn()' method so you can generate warning messages that
    report physical line number, even if the logical line in question
    spans multiple physical lines. Also provides 'unreadline()' for
    implementing line-at-a-time lookahead.
    Constructor is called as:
        TextFile (filename=None, file=None, **options)
    It bombs (RuntimeError) if both 'filename' and 'file' are None;
    'filename' should be a string, and 'file' a file object (or
    something that provides 'readline()' and 'close()' methods). It is
    recommended that you supply at least 'filename', so that TextFile
    can include it in warning messages. If 'file' is not supplied,
    TextFile creates its own using 'io.open()'.
    The options are all boolean, and affect the value returned by
    'readline()':
      strip_comments [default: true]
        strip from "#" to end-of-line, as well as any whitespace
        leading up to the "#" -- unless it is escaped by a backslash
      lstrip_ws [default: false]
        strip leading whitespace from each line before returning it
      rstrip_ws [default: true]
        strip trailing whitespace (including line terminator!) from
        each line before returning it
      skip_blanks [default: true]
        skip lines that are empty *after* stripping comments and
        whitespace. (If both lstrip_ws and rstrip_ws are false,
        then some lines may consist of solely whitespace: these will
        *not* be skipped, even if 'skip_blanks' is true.)
      join_lines [default: false]
        if a backslash is the last non-newline character on a line
        after stripping comments and whitespace, join the following line
        to it to form one "logical line"; if N consecutive lines end
        with a backslash, then N+1 physical lines will be joined to
        form one logical line.
      collapse_join [default: false]
        strip leading whitespace from lines that are joined to their
        predecessor; only matters if (join_lines and not lstrip_ws)
      errors [default: 'strict']
        error handler used to decode the file content
    Note that since 'rstrip_ws' can strip the trailing newline, the
    semantics of 'readline()' must differ from those of the builtin file
    object's 'readline()' method! In particular, 'readline()' returns
    None for end-of-file: an empty string might just be a blank line (or
    an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is
    not."""

    # Fallback values for every recognized option (ints used as booleans,
    # except 'errors' which is a codec error-handler name).
    default_options = { 'strip_comments': 1,
                        'skip_blanks': 1,
                        'lstrip_ws': 0,
                        'rstrip_ws': 1,
                        'join_lines': 0,
                        'collapse_join': 0,
                        'errors': 'strict',
                      }

    def __init__(self, filename=None, file=None, **options):
        """Construct a new TextFile object. At least one of 'filename'
        (a string) and 'file' (a file-like object) must be supplied.
        The keyword argument options are described above and affect
        the values returned by 'readline()'."""
        if filename is None and file is None:
            raise RuntimeError("you must supply either or both of 'filename' and 'file'")

        # set values for all options -- either from client option hash
        # or fallback to default_options
        for opt in self.default_options.keys():
            if opt in options:
                setattr(self, opt, options[opt])
            else:
                setattr(self, opt, self.default_options[opt])

        # sanity check client option hash
        for opt in options.keys():
            if opt not in self.default_options:
                raise KeyError("invalid TextFile option '%s'" % opt)

        if file is None:
            self.open(filename)
        else:
            self.filename = filename
            self.file = file
            self.current_line = 0       # assuming that file is at BOF!

        # 'linebuf' is a stack of lines that will be emptied before we
        # actually read from the file; it's only populated by an
        # 'unreadline()' operation
        self.linebuf = []

    def open(self, filename):
        """Open a new file named 'filename'. This overrides both the
        'filename' and 'file' arguments to the constructor."""
        self.filename = filename
        self.file = io.open(self.filename, 'r', errors=self.errors)
        self.current_line = 0

    def close(self):
        """Close the current file and forget everything we know about it
        (filename, current line number)."""
        self.file.close()
        self.file = None
        self.filename = None
        self.current_line = None

    def gen_error(self, msg, line=None):
        """Build an error/warning string of the form
        "<filename>, line <N>: <msg>" (or "lines <M>-<N>:" when 'line'
        is a pair describing a joined logical line)."""
        outmsg = []
        if line is None:
            line = self.current_line
        outmsg.append(self.filename + ", ")
        if isinstance(line, (list, tuple)):
            outmsg.append("lines %d-%d: " % tuple(line))
        else:
            outmsg.append("line %d: " % line)
        outmsg.append(str(msg))
        return "".join(outmsg)

    def error(self, msg, line=None):
        """Raise ValueError with a message tied to the current (or given)
        physical line(s), formatted as in 'gen_error()'."""
        raise ValueError("error: " + self.gen_error(msg, line))

    def warn(self, msg, line=None):
        """Print (to stderr) a warning message tied to the current logical
        line in the current file. If the current logical line in the
        file spans multiple physical lines, the warning refers to the
        whole range, eg. "lines 3-5". If 'line' supplied, it overrides
        the current line number; it may be a list or tuple to indicate a
        range of physical lines, or an integer for a single physical
        line."""
        sys.stderr.write("warning: " + self.gen_error(msg, line) + "\n")

    def readline(self):
        """Read and return a single logical line from the current file (or
        from an internal buffer if lines have previously been "unread"
        with 'unreadline()'). If the 'join_lines' option is true, this
        may involve reading multiple physical lines concatenated into a
        single string. Updates the current line number, so calling
        'warn()' after 'readline()' emits a warning about the physical
        line(s) just read. Returns None on end-of-file, since the empty
        string can occur if 'rstrip_ws' is true but 'skip_blanks' is
        not."""
        # If any "unread" lines waiting in 'linebuf', return the top
        # one. (We don't actually buffer read-ahead data -- lines only
        # get put in 'linebuf' if the client explicitly does an
        # 'unreadline()'.
        if self.linebuf:
            line = self.linebuf[-1]
            del self.linebuf[-1]
            return line

        buildup_line = ''

        while True:
            # read the line, make it None if EOF
            line = self.file.readline()
            if line == '':
                line = None

            if self.strip_comments and line:

                # Look for the first "#" in the line. If none, never
                # mind. If we find one and it's the first character, or
                # is not preceded by "\", then it starts a comment --
                # strip the comment, strip whitespace before it, and
                # carry on. Otherwise, it's just an escaped "#", so
                # unescape it (and any other escaped "#"'s that might be
                # lurking in there) and otherwise leave the line alone.

                pos = line.find("#")
                if pos == -1:           # no "#" -- no comments
                    pass

                # It's definitely a comment -- either "#" is the first
                # character, or it's elsewhere and unescaped.
                elif pos == 0 or line[pos-1] != "\\":
                    # Have to preserve the trailing newline, because it's
                    # the job of a later step (rstrip_ws) to remove it --
                    # and if rstrip_ws is false, we'd better preserve it!
                    # (NB. this means that if the final line is all comment
                    # and has no trailing newline, we will think that it's
                    # EOF; I think that's OK.)
                    eol = (line[-1] == '\n') and '\n' or ''
                    line = line[0:pos] + eol

                    # If all that's left is whitespace, then skip line
                    # *now*, before we try to join it to 'buildup_line' --
                    # that way constructs like
                    #   hello \\
                    #   # comment that should be ignored
                    #   there
                    # result in "hello there".
                    if line.strip() == "":
                        continue
                else:                   # it's an escaped "#"
                    line = line.replace("\\#", "#")

            # did previous line end with a backslash? then accumulate
            if self.join_lines and buildup_line:
                # oops: end of file
                if line is None:
                    self.warn("continuation line immediately precedes "
                              "end-of-file")
                    return buildup_line

                if self.collapse_join:
                    line = line.lstrip()
                line = buildup_line + line

                # careful: pay attention to line number when incrementing it
                if isinstance(self.current_line, list):
                    self.current_line[1] = self.current_line[1] + 1
                else:
                    self.current_line = [self.current_line,
                                         self.current_line + 1]
            # just an ordinary line, read it as usual
            else:
                if line is None:        # eof
                    return None

                # still have to be careful about incrementing the line number!
                if isinstance(self.current_line, list):
                    self.current_line = self.current_line[1] + 1
                else:
                    self.current_line = self.current_line + 1

            # strip whitespace however the client wants (leading and
            # trailing, or one or the other, or neither)
            if self.lstrip_ws and self.rstrip_ws:
                line = line.strip()
            elif self.lstrip_ws:
                line = line.lstrip()
            elif self.rstrip_ws:
                line = line.rstrip()

            # blank line (whether we rstrip'ed or not)? skip to next line
            # if appropriate
            if (line == '' or line == '\n') and self.skip_blanks:
                continue

            if self.join_lines:
                if line[-1] == '\\':
                    buildup_line = line[:-1]
                    continue

                if line[-2:] == '\\\n':
                    buildup_line = line[0:-2] + '\n'
                    continue

            # well, I guess there's some actual content there: return it
            return line

    def readlines(self):
        """Read and return the list of all logical lines remaining in the
        current file."""
        lines = []
        while True:
            line = self.readline()
            if line is None:
                return lines
            lines.append(line)

    def unreadline(self, line):
        """Push 'line' (a string) onto an internal buffer that will be
        checked by future 'readline()' calls. Handy for implementing
        a parser with line-at-a-time lookahead."""
        self.linebuf.append(line)
|
gpl-2.0
|
anthonyfok/frescobaldi
|
frescobaldi_app/midifile/output.py
|
1
|
5282
|
# Python midifile package -- parse, load and play MIDI files.
# Copyright (c) 2011 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Writes MIDI events to a MIDI output.
"""
import contextlib
from . import event
class Output(object):
    """Abstract base class for a MIDI output.
    Inherit to implement the actual writing to MIDI ports.
    The midiplayer.Player calls midi_event and all_notes_off.
    """
    def midi_event(self, midi):
        """Handles a list or dict of MIDI events from a Song (midisong.py)."""
        if isinstance(midi, dict):
            # dict mapping track to events? flatten to one list, in
            # ascending track order.
            midi = sum(map(midi.get, sorted(midi)), [])
        self.send_events(midi)

    def reset(self):
        """Restores the MIDI output to an initial state.
        Sets the program to 0, the volume to 90 and sends reset_controllers
        messages to all channels.
        """
        self.reset_controllers()
        self.set_main_volume(90)
        self.set_program_change(0)

    def set_main_volume(self, volume, channel=-1):
        """Sends a main-volume controller message to a channel.
        If the channel is -1 (the default), sends it to all channels.
        """
        channels = range(16) if channel == -1 else (channel,)
        with self.sender() as send:
            for c in channels:
                send(event.ControllerEvent(c, event.MIDI_CTL_MSB_MAIN_VOLUME, volume))

    def set_program_change(self, program, channel=-1):
        """Sends a program-change message to a channel.
        If the channel is -1 (the default), sends it to all channels.
        """
        channels = range(16) if channel == -1 else (channel,)
        with self.sender() as send:
            for c in channels:
                send(event.ProgramChangeEvent(c, program))

    def reset_controllers(self, channel=-1):
        """Sends a reset_controllers message to a channel.
        If the channel is -1 (the default), sends the message to all channels.
        """
        channels = range(16) if channel == -1 else (channel,)
        with self.sender() as send:
            for c in channels:
                send(event.ControllerEvent(c, event.MIDI_CTL_RESET_CONTROLLERS, 0))

    def all_sounds_off(self, channel=-1):
        """Sends all_notes_off and all_sounds_off messages to a channel.
        If the channel is -1 (the default), sends the messages to all channels.
        """
        channels = range(16) if channel == -1 else (channel,)
        with self.sender() as send:
            for c in channels:
                send(event.ControllerEvent(c, event.MIDI_CTL_ALL_NOTES_OFF, 0))
                send(event.ControllerEvent(c, event.MIDI_CTL_ALL_SOUNDS_OFF, 0))

    def send_events(self, events):
        """Writes the list of events to the output port.
        Each event is one of the event types in event.py
        Implement to do the actual writing.
        """
        pass

    @contextlib.contextmanager
    def sender(self):
        """Returns a context manager to call for each event to send.
        When the context manager exits, the events are sent using the
        send_events() method.
        """
        l = []
        yield l.append
        if l:
            self.send_events(l)
class PortMidiOutput(Output):
    """Sends events to a PortMIDI Output instance.

    Assign the PortMIDI Output instance to the ``output`` attribute.
    """
    output = None

    def send_events(self, events):
        """Converts the events to raw messages and writes them to the port."""
        buffered = []
        for ev in events:
            message = self.convert_event(ev)
            if message:
                buffered.append([message, 0])
        # PortMIDI writes are done in chunks of at most 1024 messages.
        while len(buffered) > 1024:
            self.output.write(buffered[:1024])
            buffered = buffered[1024:]
        if buffered:
            self.output.write(buffered)

    def convert_event(self, e):
        """Returns the raw MIDI message (a list of ints) for a supported
        event type, or None for anything else."""
        converters = {
            event.NoteEvent: self.convert_note_event,
            event.PitchBendEvent: self.convert_pitchbend_event,
            event.ProgramChangeEvent: self.convert_programchange_event,
            event.ControllerEvent: self.convert_controller_event,
        }
        convert = converters.get(type(e))
        if convert is not None:
            return convert(e)

    def convert_note_event(self, e):
        """Note message: status byte is the event-type nibble plus channel."""
        return [e.type * 16 + e.channel, e.note, e.value]

    def convert_programchange_event(self, e):
        """Program change (0xC0 | channel)."""
        return [0xC0 + e.channel, e.number]

    def convert_controller_event(self, e):
        """Control change (0xB0 | channel)."""
        return [0xB0 + e.channel, e.number, e.value]

    def convert_pitchbend_event(self, e):
        """Pitch bend (0xE0 | channel), 14-bit value split into two 7-bit bytes."""
        return [0xE0 + e.channel, e.value & 0x7F, e.value >> 7]
|
gpl-2.0
|
mattholl/three.js
|
utils/exporters/maya/plug-ins/threeJsFileTranslator.py
|
210
|
16160
|
__author__ = 'Sean Griffin'
__version__ = '1.0.0'
__email__ = 'sean@thoughtbot.com'
import sys
import os.path
import json
import shutil
from pymel.core import *
from maya.OpenMaya import *
from maya.OpenMayaMPx import *
# Translator registration data (used in initializePlugin below).
kPluginTranslatorTypeName = 'Three.js'
kOptionScript = 'ThreeJsExportScript'
kDefaultOptionsString = '0'
# Number of decimal places kept when rounding exported float values.
FLOAT_PRECISION = 8
class ThreeJsWriter(object):
    """Collects geometry, material, bone and animation data from the Maya
    scene (via pymel) and serializes it to the three.js JSON model
    format (formatVersion 3.1).

    NOTE(review): this module mixes Python 2-only constructs (``file()``,
    ``dict.has_key``) with Python 3 style ``print()`` calls -- it appears
    to target Maya's Python 2 interpreter; confirm before running on
    Python 3.
    """

    def __init__(self):
        # Option names recognized in the export option string.
        self.componentKeys = ['vertices', 'normals', 'colors', 'uvs', 'faces',
            'materials', 'diffuseMaps', 'specularMaps', 'bumpMaps', 'copyTextures',
            'bones', 'skeletalAnim', 'bakeAnimations', 'prettyOutput']

    def write(self, path, optionString, accessMode):
        """Exports the scene to 'path', honoring the flags in 'optionString'.
        'accessMode' is accepted for the Maya translator interface but unused.
        """
        self.path = path
        self._parseOptions(optionString)
        # Running offsets so indices of later meshes point past the data of
        # earlier ones in the flat output arrays.
        self.verticeOffset = 0
        self.uvOffset = 0
        self.normalOffset = 0
        self.vertices = []
        self.materials = []
        self.faces = []
        self.normals = []
        self.uvs = []
        self.morphTargets = []
        self.bones = []
        self.animations = []
        self.skinIndices = []
        self.skinWeights = []

        if self.options["bakeAnimations"]:
            print("exporting animations")
            self._exportAnimations()
            self._goToFrame(self.options["startFrame"])
        if self.options["materials"]:
            print("exporting materials")
            self._exportMaterials()
        if self.options["bones"]:
            print("exporting bones")
            # Bind pose must be active so bone transforms match the skin.
            select(map(lambda m: m.getParent(), ls(type='mesh')))
            runtime.GoToBindPose()
            self._exportBones()
            print("exporting skins")
            self._exportSkins()
        print("exporting meshes")
        self._exportMeshes()
        if self.options["skeletalAnim"]:
            print("exporting keyframe animations")
            self._exportKeyframeAnimations()

        print("writing file")
        output = {
            'metadata': {
                'formatVersion': 3.1,
                'generatedBy': 'Maya Exporter'
            },

            'vertices': self.vertices,
            'uvs': [self.uvs],
            'faces': self.faces,
            'normals': self.normals,
            'materials': self.materials,
        }

        if self.options['bakeAnimations']:
            output['morphTargets'] = self.morphTargets

        if self.options['bones']:
            output['bones'] = self.bones
            output['skinIndices'] = self.skinIndices
            output['skinWeights'] = self.skinWeights
            output['influencesPerVertex'] = self.options["influencesPerVertex"]

        if self.options['skeletalAnim']:
            output['animations'] = self.animations

        # NOTE(review): file() is Python 2 only; open() on Python 3.
        with file(path, 'w') as f:
            if self.options['prettyOutput']:
                f.write(json.dumps(output, sort_keys=True, indent=4, separators=(',', ': ')))
            else:
                f.write(json.dumps(output, separators=(",",":")))

    def _allMeshes(self):
        # Returns all meshes connected to something (i.e. actually in use).
        # NOTE(review): 'self.__allMeshes' is name-mangled to
        # '_ThreeJsWriter__allMeshes' when assigned, but hasattr() checks the
        # unmangled name, so this cache test never succeeds and the scene is
        # re-scanned on every call -- confirm intent.
        if not hasattr(self, '__allMeshes'):
            self.__allMeshes = filter(lambda m: len(m.listConnections()) > 0, ls(type='mesh'))
        return self.__allMeshes

    def _parseOptions(self, optionsString):
        """Parses the space-separated option string into self.options.
        Each component key becomes True when its name occurs in the string;
        'bones' and 'bakeAnimations' carry trailing numeric arguments.
        """
        self.options = dict([(x, False) for x in self.componentKeys])
        for key in self.componentKeys:
            self.options[key] = key in optionsString

        if self.options["bones"]:
            boneOptionsString = optionsString[optionsString.find("bones"):]
            boneOptions = boneOptionsString.split(' ')
            self.options["influencesPerVertex"] = int(boneOptions[1])

        if self.options["bakeAnimations"]:
            bakeAnimOptionsString = optionsString[optionsString.find("bakeAnimations"):]
            bakeAnimOptions = bakeAnimOptionsString.split(' ')
            self.options["startFrame"] = int(bakeAnimOptions[1])
            self.options["endFrame"] = int(bakeAnimOptions[2])
            self.options["stepFrame"] = int(bakeAnimOptions[3])

    def _exportMeshes(self):
        # Vertices are exported once for all meshes; per-mesh data follows.
        if self.options['vertices']:
            self._exportVertices()
        for mesh in self._allMeshes():
            self._exportMesh(mesh)

    def _exportMesh(self, mesh):
        """Exports faces, normals and UVs of one mesh, advancing the offsets."""
        print("Exporting " + mesh.name())
        if self.options['faces']:
            print("Exporting faces")
            self._exportFaces(mesh)
            self.verticeOffset += len(mesh.getPoints())
            self.uvOffset += mesh.numUVs()
            self.normalOffset += mesh.numNormals()
        if self.options['normals']:
            print("Exporting normals")
            self._exportNormals(mesh)
        if self.options['uvs']:
            print("Exporting UVs")
            self._exportUVs(mesh)

    def _getMaterialIndex(self, face, mesh):
        """Returns the index into self.materials for the material assigned to
        'face' (or the whole mesh), or -1 when none is found."""
        if not hasattr(self, '_materialIndices'):
            self._materialIndices = dict([(mat['DbgName'], i) for i, mat in enumerate(self.materials)])

        if self.options['materials']:
            for engine in mesh.listConnections(type='shadingEngine'):
                if sets(engine, isMember=face) or sets(engine, isMember=mesh):
                    for material in engine.listConnections(type='lambert'):
                        # NOTE(review): dict.has_key is Python 2 only.
                        if self._materialIndices.has_key(material.name()):
                            return self._materialIndices[material.name()]
        return -1

    def _exportVertices(self):
        self.vertices += self._getVertices()

    def _exportAnimations(self):
        # Bakes one morph target per sampled frame.
        for frame in self._framesToExport():
            self._exportAnimationForFrame(frame)

    def _framesToExport(self):
        return range(self.options["startFrame"], self.options["endFrame"], self.options["stepFrame"])

    def _exportAnimationForFrame(self, frame):
        print("exporting frame " + str(frame))
        self._goToFrame(frame)
        self.morphTargets.append({
            'name': "frame_" + str(frame),
            'vertices': self._getVertices()
        })

    def _getVertices(self):
        # Flat [x, y, z, x, y, z, ...] list over all meshes, in world space.
        return [coord for mesh in self._allMeshes() for point in mesh.getPoints(space='world') for coord in [round(point.x, FLOAT_PRECISION), round(point.y, FLOAT_PRECISION), round(point.z, FLOAT_PRECISION)]]

    def _goToFrame(self, frame):
        currentTime(frame)

    def _exportFaces(self, mesh):
        """Appends each face in the three.js packed-face format: a bitmask
        followed by vertex indices and optional material/uv/normal indices."""
        typeBitmask = self._getTypeBitmask()

        for face in mesh.faces:
            materialIndex = self._getMaterialIndex(face, mesh)
            hasMaterial = materialIndex != -1
            self._exportFaceBitmask(face, typeBitmask, hasMaterial=hasMaterial)
            self.faces += map(lambda x: x + self.verticeOffset, face.getVertices())
            if self.options['materials']:
                if hasMaterial:
                    self.faces.append(materialIndex)
            if self.options['uvs'] and face.hasUVs():
                self.faces += map(lambda v: face.getUVIndex(v) + self.uvOffset, range(face.polygonVertexCount()))
            if self.options['normals']:
                self._exportFaceVertexNormals(face)

    def _exportFaceBitmask(self, face, typeBitmask, hasMaterial=True):
        # Bit 0: quad (vs. triangle); bit 1: face material; bit 3: face UVs.
        if face.polygonVertexCount() == 4:
            faceBitmask = 1
        else:
            faceBitmask = 0
        if hasMaterial:
            faceBitmask |= (1 << 1)
        if self.options['uvs'] and face.hasUVs():
            faceBitmask |= (1 << 3)
        self.faces.append(typeBitmask | faceBitmask)

    def _exportFaceVertexNormals(self, face):
        for i in range(face.polygonVertexCount()):
            self.faces.append(face.normalIndex(i) + self.normalOffset)

    def _exportNormals(self, mesh):
        for normal in mesh.getNormals():
            self.normals += [round(normal.x, FLOAT_PRECISION), round(normal.y, FLOAT_PRECISION), round(normal.z, FLOAT_PRECISION)]

    def _exportUVs(self, mesh):
        # Interleaves u and v into a flat [u, v, u, v, ...] list.
        us, vs = mesh.getUVs()
        for i, u in enumerate(us):
            self.uvs.append(u)
            self.uvs.append(vs[i])

    def _getTypeBitmask(self):
        # Bit 5 marks faces carrying per-vertex normal indices.
        bitmask = 0
        if self.options['normals']:
            bitmask |= 32
        return bitmask

    def _exportMaterials(self):
        for mat in ls(type='lambert'):
            self.materials.append(self._exportMaterial(mat))

    def _exportMaterial(self, mat):
        """Builds the three.js material dict for one Maya lambert/phong node."""
        result = {
            "DbgName": mat.name(),
            "blending": "NormalBlending",
            "colorDiffuse": map(lambda i: i * mat.getDiffuseCoeff(), mat.getColor().rgb),
            "colorAmbient": mat.getAmbientColor().rgb,
            "depthTest": True,
            "depthWrite": True,
            "shading": mat.__class__.__name__,
            "transparency": mat.getTransparency().a,
            "transparent": mat.getTransparency().a != 1.0,
            "vertexColors": False
        }
        # Phong adds specular parameters on top of the lambert ones.
        if isinstance(mat, nodetypes.Phong):
            result["colorSpecular"] = mat.getSpecularColor().rgb
            result["specularCoef"] = mat.getCosPower()
            if self.options["specularMaps"]:
                self._exportSpecularMap(result, mat)
        if self.options["bumpMaps"]:
            self._exportBumpMap(result, mat)
        if self.options["diffuseMaps"]:
            self._exportDiffuseMap(result, mat)

        return result

    def _exportBumpMap(self, result, mat):
        for bump in mat.listConnections(type='bump2d'):
            for f in bump.listConnections(type='file'):
                result["mapNormalFactor"] = 1
                self._exportFile(result, f, "Normal")

    def _exportDiffuseMap(self, result, mat):
        for f in mat.attr('color').inputs():
            result["colorDiffuse"] = f.attr('defaultColor').get()
            self._exportFile(result, f, "Diffuse")

    def _exportSpecularMap(self, result, mat):
        for f in mat.attr('specularColor').inputs():
            result["colorSpecular"] = f.attr('defaultColor').get()
            self._exportFile(result, f, "Specular")

    def _exportFile(self, result, mapFile, mapType):
        """Records texture-map metadata on 'result'; optionally copies the
        texture file next to the exported model."""
        src = mapFile.ftn.get()
        targetDir = os.path.dirname(self.path)
        fName = os.path.basename(src)
        if self.options['copyTextures']:
            shutil.copy2(src, os.path.join(targetDir, fName))
        result["map" + mapType] = fName
        result["map" + mapType + "Repeat"] = [1, 1]
        result["map" + mapType + "Wrap"] = ["repeat", "repeat"]
        result["map" + mapType + "Anistropy"] = 4

    def _exportBones(self):
        """Appends one entry per joint: parent index, name, position and
        orientation quaternion (in the current, i.e. bind, pose)."""
        for joint in ls(type='joint'):
            if joint.getParent():
                parentIndex = self._indexOfJoint(joint.getParent().name())
            else:
                parentIndex = -1
            rotq = joint.getRotation(quaternion=True) * joint.getOrientation()
            pos = joint.getTranslation()

            self.bones.append({
                "parent": parentIndex,
                "name": joint.name(),
                "pos": self._roundPos(pos),
                "rotq": self._roundQuat(rotq)
            })

    def _indexOfJoint(self, name):
        # Lazily builds the joint-name -> index map on first use.
        if not hasattr(self, '_jointNames'):
            self._jointNames = dict([(joint.name(), i) for i, joint in enumerate(ls(type='joint'))])

        if name in self._jointNames:
            return self._jointNames[name]
        else:
            return -1

    def _exportKeyframeAnimations(self):
        """Samples every joint's keyframes into one skeletal animation clip."""
        hierarchy = []
        # NOTE(review): 'parent' here is simply the previous joint's index,
        # not the true skeleton parent as in _exportBones -- confirm intent.
        i = -1
        frameRate = FramesPerSecond(currentUnit(query=True, time=True)).value()
        for joint in ls(type='joint'):
            hierarchy.append({
                "parent": i,
                "keys": self._getKeyframes(joint, frameRate)
            })
            i += 1

        self.animations.append({
            "name": "skeletalAction.001",
            "length": (playbackOptions(maxTime=True, query=True) - playbackOptions(minTime=True, query=True)) / frameRate,
            "fps": 1,
            "hierarchy": hierarchy
        })

    def _getKeyframes(self, joint, frameRate):
        # Sample at every keyframe, always including the first and last frame.
        firstFrame = playbackOptions(minTime=True, query=True)
        lastFrame = playbackOptions(maxTime=True, query=True)
        frames = sorted(list(set(keyframe(joint, query=True) + [firstFrame, lastFrame])))
        keys = []

        print("joint " + joint.name() + " has " + str(len(frames)) + " keyframes")
        for frame in frames:
            self._goToFrame(frame)
            keys.append(self._getCurrentKeyframe(joint, frame, frameRate))
        return keys

    def _getCurrentKeyframe(self, joint, frame, frameRate):
        pos = joint.getTranslation()
        rot = joint.getRotation(quaternion=True) * joint.getOrientation()

        return {
            'time': (frame - playbackOptions(minTime=True, query=True)) / frameRate,
            'pos': self._roundPos(pos),
            'rot': self._roundQuat(rot),
            'scl': [1,1,1]
        }

    def _roundPos(self, pos):
        # NOTE(review): on Python 3 map() returns an iterator, which
        # json.dumps cannot serialize -- Python 2 assumed here.
        return map(lambda x: round(x, FLOAT_PRECISION), [pos.x, pos.y, pos.z])

    def _roundQuat(self, rot):
        return map(lambda x: round(x, FLOAT_PRECISION), [rot.x, rot.y, rot.z, rot.w])

    def _exportSkins(self):
        """Exports per-vertex skin weights/joint indices, padded or truncated
        to exactly options['influencesPerVertex'] entries per vertex."""
        for mesh in self._allMeshes():
            print("exporting skins for mesh: " + mesh.name())
            # NOTE(review): len() on a filter() result is Python 2 only.
            skins = filter(lambda skin: mesh in skin.getOutputGeometry(), ls(type='skinCluster'))
            if len(skins) > 0:
                print("mesh has " + str(len(skins)) + " skins")
                skin = skins[0]
                joints = skin.influenceObjects()
                for weights in skin.getWeights(mesh.vtx):
                    numWeights = 0

                    for i in range(0, len(weights)):
                        if weights[i] > 0:
                            self.skinWeights.append(weights[i])
                            self.skinIndices.append(self._indexOfJoint(joints[i].name()))
                            numWeights += 1

                    if numWeights > self.options["influencesPerVertex"]:
                        raise Exception("More than " + str(self.options["influencesPerVertex"]) + " influences on a vertex in " + mesh.name() + ".")

                    # Pad with zero weights up to the fixed influence count.
                    for i in range(0, self.options["influencesPerVertex"] - numWeights):
                        self.skinWeights.append(0)
                        self.skinIndices.append(0)
            else:
                print("mesh has no skins, appending 0")
                for i in range(0, len(mesh.getPoints()) * self.options["influencesPerVertex"]):
                    self.skinWeights.append(0)
                    self.skinIndices.append(0)
class NullAnimCurve(object):
    """Stand-in animation curve that evaluates to zero everywhere."""

    def getValue(self, index):
        """Returns 0.0 regardless of the sampled index."""
        return 0.0
class ThreeJsTranslator(MPxFileTranslator):
    """Maya file-translator plug-in that exports scenes via ThreeJsWriter."""

    def __init__(self):
        MPxFileTranslator.__init__(self)

    def haveWriteMethod(self):
        # Tells Maya this translator supports exporting (writing).
        return True

    def filter(self):
        # File-browser filter pattern shown in the export dialog.
        return '*.js'

    def defaultExtension(self):
        return 'js'

    def writer(self, fileObject, optionString, accessMode):
        """Entry point Maya calls to perform the export."""
        path = fileObject.fullName()
        writer = ThreeJsWriter()
        writer.write(path, optionString, accessMode)
def translatorCreator():
    """Factory handed to Maya for creating translator instances."""
    return asMPxPtr(ThreeJsTranslator())
def initializePlugin(mobject):
    """Maya plug-in entry point: registers the Three.js file translator."""
    mplugin = MFnPlugin(mobject)
    try:
        mplugin.registerFileTranslator(kPluginTranslatorTypeName, None, translatorCreator, kOptionScript, kDefaultOptionsString)
    except:
        # Report the failure but re-raise so Maya marks the plug-in as failed.
        sys.stderr.write('Failed to register translator: %s' % kPluginTranslatorTypeName)
        raise
def uninitializePlugin(mobject):
    """Maya plug-in exit point: deregisters the Three.js file translator."""
    mplugin = MFnPlugin(mobject)
    try:
        mplugin.deregisterFileTranslator(kPluginTranslatorTypeName)
    except:
        # Report the failure but re-raise so Maya sees the error.
        sys.stderr.write('Failed to deregister translator: %s' % kPluginTranslatorTypeName)
        raise
class FramesPerSecond(object):
    """Maps a Maya time-unit string (e.g. 'film', 'ntsc', '25fps') to an
    integer frame rate."""

    # Named time units Maya reports from currentUnit(query=True, time=True).
    MAYA_VALUES = {
        'game': 15,
        'film': 24,
        'pal': 25,
        'ntsc': 30,
        'show': 48,
        'palf': 50,
        'ntscf': 60
    }

    def __init__(self, fpsString):
        # fpsString: Maya time-unit name, or a custom rate like '48fps'.
        self.fpsString = fpsString

    def value(self):
        """Returns the frame rate as an int.

        Known unit names are looked up in MAYA_VALUES; otherwise the digits
        embedded in the string are concatenated and parsed (e.g. '48fps' -> 48).
        Raises ValueError if the string contains no digits.
        """
        if self.fpsString in FramesPerSecond.MAYA_VALUES:
            return FramesPerSecond.MAYA_VALUES[self.fpsString]
        # BUG FIX: int(filter(...)) worked on Python 2, where filter() on a
        # str returns a str; on Python 3 filter() returns an iterator and
        # int() raises TypeError. Joining the digits works on both.
        return int(''.join(c for c in self.fpsString if c.isdigit()))
|
mit
|
terkkila/scikit-learn
|
benchmarks/bench_lasso.py
|
297
|
3305
|
"""
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
    """Time Lasso and LassoLars fits over a grid of sample/feature counts.

    Args:
        alpha: regularization strength passed to both estimators.
        n_samples: iterable of training-set sizes to benchmark.
        n_features: iterable of feature counts to benchmark.
        precompute: whether the estimators precompute the Gram matrix.

    Returns:
        (lasso_results, lars_lasso_results): fit times in seconds, one
        entry per (ns, nf) combination in iteration order.

    NOTE(review): relies on Lasso and LassoLars existing as module globals;
    they are only imported in the __main__ block below -- confirm before
    reusing this function from another module.
    """
    lasso_results = []
    lars_lasso_results = []

    it = 0

    for ns in n_samples:
        for nf in n_features:
            it += 1
            print('==================')
            print('Iteration %s of %s' % (it, max(len(n_samples),
                                          len(n_features))))
            print('==================')
            # 10% of the features carry signal.
            n_informative = nf // 10
            X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
                                          n_informative=n_informative,
                                          noise=0.1, coef=True)

            X /= np.sqrt(np.sum(X ** 2, axis=0))  # Normalize data

            # Force collection before timing so GC pauses don't skew results.
            gc.collect()
            print("- benchmarking Lasso")
            clf = Lasso(alpha=alpha, fit_intercept=False,
                        precompute=precompute)
            tstart = time()
            clf.fit(X, Y)
            lasso_results.append(time() - tstart)

            gc.collect()
            print("- benchmarking LassoLars")
            clf = LassoLars(alpha=alpha, fit_intercept=False,
                            normalize=False, precompute=precompute)
            tstart = time()
            clf.fit(X, Y)
            lars_lasso_results.append(time() - tstart)

    return lasso_results, lars_lasso_results
if __name__ == '__main__':
    from sklearn.linear_model import Lasso, LassoLars
    import pylab as pl

    alpha = 0.01  # regularization parameter

    # First benchmark: fixed feature count, growing sample count.
    n_features = 10
    # BUG FIX: the deprecated alias np.int was removed in NumPy 1.24; the
    # builtin int is the documented replacement and behaves identically here.
    list_n_samples = np.linspace(100, 1000000, 5).astype(int)
    lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
                                                      [n_features], precompute=True)
    pl.figure('scikit-learn LASSO benchmark results')
    pl.subplot(211)
    pl.plot(list_n_samples, lasso_results, 'b-',
            label='Lasso')
    pl.plot(list_n_samples, lars_lasso_results, 'r-',
            label='LassoLars')
    pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
    pl.legend(loc='upper left')
    pl.xlabel('number of samples')
    pl.ylabel('Time (s)')
    pl.axis('tight')

    # Second benchmark: fixed sample count, growing feature count.
    n_samples = 2000
    list_n_features = np.linspace(500, 3000, 5).astype(int)
    lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
                                                      list_n_features, precompute=False)
    pl.subplot(212)
    pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
    pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
    pl.title('%d samples, alpha=%s' % (n_samples, alpha))
    pl.legend(loc='upper left')
    pl.xlabel('number of features')
    pl.ylabel('Time (s)')
    pl.axis('tight')
    pl.show()
|
bsd-3-clause
|
tobinjt/Flexget
|
flexget/plugin.py
|
4
|
22382
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.error import HTTPError, URLError
from future.utils import python_2_unicode_compatible
import logging
import os
import re
import time
import pkg_resources
from functools import total_ordering
from http.client import BadStatusLine
from importlib import import_module
from path import Path
from requests import RequestException
from flexget import plugins as plugins_pkg
from flexget import components as components_pkg
from flexget import config_schema
from flexget.event import add_event_handler as add_phase_handler
from flexget.event import fire_event, remove_event_handlers
log = logging.getLogger('plugin')

# Priority constants for plugin phase handlers (used with the priority()
# decorator below). NOTE(review): presumably higher values run earlier --
# confirm against the event-handler ordering.
PRIORITY_DEFAULT = 128
PRIORITY_LAST = -255
PRIORITY_FIRST = 255
@python_2_unicode_compatible
class DependencyError(Exception):
    """Plugin depends on other plugin, but it cannot be loaded.
    Args:
        issued_by: name of the plugin trying to do the import
        missing: name of the plugin or library that is missing
        message: customized user readable error message
    All args are optional.
    """

    def __init__(self, issued_by=None, missing=None, message=None, silent=False):
        super(DependencyError, self).__init__()
        self.issued_by = issued_by
        self.missing = missing
        self._message = message
        self.silent = silent

    def _get_message(self):
        # Falls back to a generated message when none was supplied.
        if self._message:
            return self._message
        else:
            return 'Plugin `%s` requires dependency `%s`' % (self.issued_by, self.missing)

    def _set_message(self, message):
        self._message = message

    def has_message(self):
        # True only when an explicit message was given (not the generated one).
        return self._message is not None

    # 'message' reads as the explicit or generated text; writing stores
    # an explicit message.
    message = property(_get_message, _set_message)

    def __str__(self):
        return '<DependencyError(issued_by=%r,missing=%r,message=%r,silent=%r)>' % (
            self.issued_by,
            self.missing,
            self.message,
            self.silent,
        )
class RegisterException(Exception):
    """Raised when plugin registration fails; wraps the offending value."""

    def __init__(self, value):
        super(RegisterException, self).__init__()
        self.value = value

    def __str__(self):
        return '%r' % (self.value,)
@python_2_unicode_compatible
class PluginWarning(Warning):
    """Warning raised by plugins; carries a logger and extra context kwargs."""

    def __init__(self, value, logger=log, **kwargs):
        super(PluginWarning, self).__init__()
        self.value = value
        self.log = logger
        self.kwargs = kwargs

    def __str__(self):
        return self.value
@python_2_unicode_compatible
class PluginError(Exception):
    """Error raised by plugins; the task handles it by aborting.
    Carries a logger and extra context kwargs alongside the message."""

    def __init__(self, value, logger=log, **kwargs):
        super(PluginError, self).__init__()
        # Value is expected to be a string
        if not isinstance(value, str):
            value = str(value)
        self.value = value
        self.log = logger
        self.kwargs = kwargs

    def __str__(self):
        return self.value
# TODO: move to utils or somewhere more appropriate
class internet(object):
    """@internet decorator for plugin phase methods.
    Catches all internet related exceptions and raises PluginError with relevant message.
    Task handles PluginErrors by aborting the task.
    """

    def __init__(self, logger=None):
        if logger:
            self.log = logger
        else:
            self.log = logging.getLogger('@internet')

    def __call__(self, func):
        def wrapped_func(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            # NOTE(review): RequestException must be handled before IOError --
            # requests' RequestException derives from IOError, so the IOError
            # clause below would otherwise swallow it; confirm ordering when
            # editing.
            except RequestException as e:
                log.debug('decorator caught RequestException. handled traceback:', exc_info=True)
                raise PluginError('RequestException: %s' % e)
            except HTTPError as e:
                raise PluginError('HTTPError %s' % e.code, self.log)
            except URLError as e:
                log.debug('decorator caught urlerror. handled traceback:', exc_info=True)
                raise PluginError('URLError %s' % e.reason, self.log)
            except BadStatusLine:
                log.debug('decorator caught badstatusline. handled traceback:', exc_info=True)
                raise PluginError('Got BadStatusLine', self.log)
            except ValueError as e:
                log.debug('decorator caught ValueError. handled traceback:', exc_info=True)
                raise PluginError(e)
            except IOError as e:
                log.debug('decorator caught ioerror. handled traceback:', exc_info=True)
                # urllib-style errors expose either 'reason' or 'code'.
                if hasattr(e, 'reason'):
                    raise PluginError('Failed to reach server. Reason: %s' % e.reason, self.log)
                elif hasattr(e, 'code'):
                    raise PluginError(
                        'The server couldn\'t fulfill the request. Error code: %s' % e.code,
                        self.log,
                    )
                raise PluginError('IOError when connecting to server: %s' % e, self.log)

        return wrapped_func
def priority(value):
    """Decorator assigning an execution priority to a phase handler method."""

    def _tag(target):
        # The attribute is picked up later by build_phase_handlers().
        target.priority = value
        return target

    return _tag
# task phases, in order of their execution; note that this can be extended by
# registering new phases at runtime
task_phases = [
    'prepare',
    'start',
    'input',
    'metainfo',
    'filter',
    'download',
    'modify',
    'output',
    'learn',
    'exit',
]
# map phase names to method names
phase_methods = {
    # task
    'abort': 'on_task_abort'  # special; not a task phase that gets called normally
}
phase_methods.update((_phase, 'on_task_' + _phase) for _phase in task_phases)  # DRY
# Mapping of plugin name to PluginInfo instance (logical singletons)
plugins = {}
# Loading done?
plugins_loaded = False
# NOTE(review): the three registries below are populated elsewhere; they are
# not referenced in this chunk.
_loaded_plugins = {}
_plugin_options = []
# Phases whose before/after anchor was not yet registered: name -> [before, after]
_new_phase_queue = {}
def register_task_phase(name, before=None, after=None):
    """
    Adds a new task phase to the available phases.

    :param string name: Name of the new phase.
    :param string before: Existing phase to place the new phase before.
    :param string after: Existing phase to place the new phase after.

    Exactly one of `before`/`after` must be given. If the anchor phase does
    not exist yet, the addition is queued until the anchor is registered.
    """
    if before and after:
        raise RegisterException('You can only give either before or after for a phase.')
    if not before and not after:
        raise RegisterException('You must specify either a before or after phase.')
    if name in task_phases or name in _new_phase_queue:
        raise RegisterException('Phase %s already exists.' % name)

    def add_phase(phase_name, before, after):
        # Returns False when the anchor phase is not registered yet.
        if before is not None and before not in task_phases:
            return False
        if after is not None and after not in task_phases:
            return False
        # add method name to phase -> method lookup table
        phase_methods[phase_name] = 'on_task_' + phase_name
        # place phase in phase list
        if before is None:
            task_phases.insert(task_phases.index(after) + 1, phase_name)
        if after is None:
            task_phases.insert(task_phases.index(before), phase_name)
        return True

    # if can't add yet (dependencies) queue addition
    if not add_phase(name, before, after):
        _new_phase_queue[name] = [before, after]
    # A newly added phase may unblock previously queued phases; retry them.
    for phase_name, args in list(_new_phase_queue.items()):
        if add_phase(phase_name, *args):
            del _new_phase_queue[phase_name]
@total_ordering
class PluginInfo(dict):
    """
    Allows accessing key/value pairs of this dictionary subclass via
    attributes. Also instantiates a plugin and initializes properties.
    """

    # Counts duplicate registrations
    dupe_counter = 0

    def __init__(
        self,
        plugin_class,
        name=None,
        interfaces=None,
        builtin=False,
        debug=False,
        api_ver=1,
        category=None,
    ):
        """
        Register a plugin.

        :param plugin_class: The plugin factory.
        :param string name: Name of the plugin (if not given, default to factory class name in underscore form).
        :param list interfaces: Interfaces this plugin implements.
        :param bool builtin: Auto-activated?
        :param bool debug: True if plugin is for debugging purposes.
        :param int api_ver: Signature of callback hooks (1=task; 2=task,config).
        :param string category: The type of plugin. Can be one of the task phases.
            Defaults to the package name containing the plugin.
        """
        dict.__init__(self)
        if interfaces is None:
            interfaces = ['task']
        if name is None:
            # Convention is to take camel-case class name and rewrite it to an underscore form,
            # e.g. 'PluginName' to 'plugin_name'
            name = re.sub(
                '[A-Z]+', lambda i: '_' + i.group(0).lower(), plugin_class.__name__
            ).lstrip('_')
        if category is None and plugin_class.__module__.startswith('flexget.plugins'):
            # By default look at the containing package of the plugin.
            category = plugin_class.__module__.split('.')[-2]
        # Check for unsupported api versions
        if api_ver < 2:
            raise PluginError('Api versions <2 are no longer supported. Plugin %s' % name)
        # Set basic info attributes (stored as dict items via __setattr__)
        self.api_ver = api_ver
        self.name = name
        self.interfaces = interfaces
        self.builtin = builtin
        self.debug = debug
        self.category = category
        self.phase_handlers = {}
        self.plugin_class = plugin_class
        self.instance = None
        if self.name in plugins:
            PluginInfo.dupe_counter += 1
            log.critical(
                'Error while registering plugin %s. '
                'A plugin with the same name is already registered',
                self.name,
            )
        else:
            plugins[self.name] = self

    def initialize(self):
        """Instantiate the plugin class (once), register its schema and phase handlers."""
        if self.instance is not None:
            # We already initialized
            return
        # Create plugin instance
        self.instance = self.plugin_class()
        self.instance.plugin_info = self  # give plugin easy access to its own info
        self.instance.log = logging.getLogger(
            getattr(self.instance, "LOGGER_NAME", None) or self.name
        )
        if hasattr(self.instance, 'schema'):
            self.schema = self.instance.schema
        elif hasattr(self.instance, 'validator'):
            self.schema = self.instance.validator().schema()
        else:
            # TODO: I think plugins without schemas should not be allowed in config, maybe rethink this
            self.schema = {}
        if self.schema is not None:
            location = '/schema/plugin/%s' % self.name
            self.schema['id'] = location
            config_schema.register_schema(location, self.schema)
        self.build_phase_handlers()

    def reset_phase_handlers(self):
        """Temporary utility method"""
        self.phase_handlers = {}
        self.build_phase_handlers()
        # TODO: should unregister events (from flexget.event)
        # this method is not used at the moment anywhere ...
        raise NotImplementedError

    def build_phase_handlers(self):
        """(Re)build phase_handlers in this plugin"""
        for phase, method_name in phase_methods.items():
            if phase in self.phase_handlers:
                continue
            if hasattr(self.instance, method_name):
                method = getattr(self.instance, method_name)
                if not callable(method):
                    continue
                # check for priority decorator
                if hasattr(method, 'priority'):
                    handler_prio = method.priority
                else:
                    handler_prio = PRIORITY_DEFAULT
                event = add_phase_handler(
                    'plugin.%s.%s' % (self.name, phase), method, handler_prio
                )
                # provides backwards compatibility
                event.plugin = self
                self.phase_handlers[phase] = event

    def __getattr__(self, attr):
        if attr in self:
            return self[attr]
        return dict.__getattribute__(self, attr)

    def __setattr__(self, attr, value):
        self[attr] = value

    def __str__(self):
        return '<PluginInfo(name=%s)>' % self.name

    def _is_valid_operand(self, other):
        return hasattr(other, 'name')

    def __eq__(self, other):
        # BUGFIX: previously compared self.name == other.name unconditionally,
        # raising AttributeError for operands without a `name` attribute.
        # The comparison protocol requires returning NotImplemented instead;
        # `_is_valid_operand` existed for this but was never called.
        if not self._is_valid_operand(other):
            return NotImplemented
        return self.name == other.name

    def __lt__(self, other):
        if not self._is_valid_operand(other):
            return NotImplemented
        return self.name < other.name

    __repr__ = __str__
# Convenience alias: plugin modules register themselves by instantiating
# PluginInfo, e.g. ``register(MyPlugin, 'my_plugin', api_ver=2)``.
register = PluginInfo
def _strip_trailing_sep(path):
return path.rstrip("\\/")
def _get_standard_plugins_path():
    """
    :returns: List of directories where traditional plugins should be tried to load from.
    """
    # FLEXGET_PLUGIN_PATH may list several directories separated by os.pathsep.
    env_path = os.environ.get('FLEXGET_PLUGIN_PATH')
    paths = env_path.split(os.pathsep) if env_path else []
    # The bundled flexget.plugins package is always searched last.
    paths.append(os.path.abspath(os.path.dirname(plugins_pkg.__file__)))
    return paths
def _get_standard_components_path():
    """
    :returns: List of directories where component plugins should be tried to load from.
    """
    # FLEXGET_COMPONENT_PATH may list several directories separated by os.pathsep.
    env_path = os.environ.get('FLEXGET_COMPONENT_PATH')
    paths = env_path.split(os.pathsep) if env_path else []
    # The bundled flexget.components package is always searched last.
    paths.append(os.path.abspath(os.path.dirname(components_pkg.__file__)))
    return paths
def _check_phase_queue():
    """Log an error for queued phases whose before/after anchor never appeared.

    BUGFIX: the previous message formatted ``args[0]`` (the *before* anchor
    phase) as the plugin name — the queue stores only ``[before, after]`` and
    never records the requesting plugin, so the old text was misleading.
    """
    if _new_phase_queue:
        for phase, args in _new_phase_queue.items():
            log.error(
                'Phase %s could not be created at requested point (before=%s, after=%s). '
                'The plugin that requested it is not working properly.',
                phase,
                args[0],
                args[1],
            )
def _import_plugin(module_name, plugin_path):
    """Import one plugin module, logging (but swallowing) known load failures."""
    try:
        import_module(module_name)
    except DependencyError as e:
        if e.has_message():
            msg = e.message
        else:
            msg = 'Plugin `%s` requires plugin `%s` to load.' % (
                e.issued_by or module_name,
                e.missing or 'N/A',
            )
        # Silent dependency errors are an expected condition; log them quietly.
        emit = log.debug if e.silent else log.warning
        emit(msg)
    except ImportError:
        log.critical('Plugin `%s` failed to import dependencies', module_name, exc_info=True)
    except ValueError as e:
        # Debugging #2755
        log.error(
            'ValueError attempting to import `%s` (from %s): %s', module_name, plugin_path, e
        )
    except Exception:
        # Unknown failures are fatal: log and propagate.
        log.critical('Exception while loading plugin %s', module_name, exc_info=True)
        raise
    else:
        log.trace('Loaded module %s from %s', module_name, plugin_path)
def _load_plugins_from_dirs(dirs):
    """
    Import every ``*.py`` module found (recursively) under the given directories.

    :param list dirs: Directories from where plugins are loaded from
    """
    log.debug('Trying to load plugins from: %s', dirs)
    # NOTE(review): `Path` here exposes walkfiles/relpath/splitall, i.e. the
    # path.py API rather than stdlib pathlib — confirm against the imports.
    dirs = [Path(d) for d in dirs if os.path.isdir(d)]
    # add all dirs to plugins_pkg load path so that imports work properly from any of the plugin dirs
    plugins_pkg.__path__ = list(map(_strip_trailing_sep, dirs))
    for plugins_dir in dirs:
        for plugin_path in plugins_dir.walkfiles('*.py'):
            if plugin_path.name == '__init__.py':
                continue
            # Split the relative path from the plugins dir to current file's parent dir to find subpackage names
            plugin_subpackages = [
                _f for _f in plugin_path.relpath(plugins_dir).parent.splitall() if _f
            ]
            # Dotted module name relative to the plugins package.
            module_name = '.'.join(
                [plugins_pkg.__name__] + plugin_subpackages + [plugin_path.stem]
            )
            _import_plugin(module_name, plugin_path)
    # Report any queued custom phases whose anchors never materialized.
    _check_phase_queue()
# TODO: this is now identical to _load_plugins_from_dirs, REMOVE
# NOTE(review): not quite identical — this variant does not rewrite
# ``__path__`` of the target package; confirm before merging the two.
def _load_components_from_dirs(dirs):
    """
    :param list dirs: Directories where plugin components are loaded from
    """
    log.debug('Trying to load components from: %s', dirs)
    dirs = [Path(d) for d in dirs if os.path.isdir(d)]
    for component_dir in dirs:
        for component_path in component_dir.walkfiles('*.py'):
            if component_path.name == '__init__.py':
                continue
            # Split the relative path from the plugins dir to current file's parent dir to find subpackage names
            plugin_subpackages = [
                _f for _f in component_path.relpath(component_dir).parent.splitall() if _f
            ]
            # Dotted module name relative to the components package.
            package_name = '.'.join(
                [components_pkg.__name__] + plugin_subpackages + [component_path.stem]
            )
            _import_plugin(package_name, component_path)
    # Report any queued custom phases whose anchors never materialized.
    _check_phase_queue()
def _load_plugins_from_packages():
    """Load plugins installed via PIP (``FlexGet.plugins`` entry points)."""
    for entrypoint in pkg_resources.iter_entry_points('FlexGet.plugins'):
        try:
            plugin_module = entrypoint.load()
        except DependencyError as e:
            if e.has_message():
                msg = e.message
            else:
                # BUGFIX: the `%` operator was missing here, so `msg` was
                # built as a tuple (format string, args) instead of the
                # formatted message, and the tuple was handed to the logger.
                msg = 'Plugin `%s` requires `%s` to load.' % (
                    e.issued_by or entrypoint.module_name,
                    e.missing or 'N/A',
                )
            if not e.silent:
                log.warning(msg)
            else:
                log.debug(msg)
        except ImportError:
            log.critical(
                'Plugin `%s` failed to import dependencies', entrypoint.module_name, exc_info=True
            )
        except Exception:
            # Unknown failures are fatal: log and propagate.
            log.critical(
                'Exception while loading plugin %s', entrypoint.module_name, exc_info=True
            )
            raise
        else:
            log.trace(
                'Loaded packaged module %s from %s', entrypoint.module_name, plugin_module.__file__
            )
    # Report any queued custom phases whose anchors never materialized.
    _check_phase_queue()
def load_plugins(extra_plugins=None, extra_components=None):
    """
    Load plugins from the standard plugin and component paths.

    :param list extra_plugins: Extra directories from where plugins are loaded.
    :param list extra_components: Extra directories from where components are loaded.
    """
    global plugins_loaded
    if extra_plugins is None:
        extra_plugins = []
    if extra_components is None:
        extra_components = []
    # Add flexget.plugins and flexget.components directories (core dist)
    # NOTE(review): extend() mutates the caller-supplied lists in place.
    extra_plugins.extend(_get_standard_plugins_path())
    extra_components.extend(_get_standard_components_path())
    start_time = time.time()
    # Import all the plugins
    _load_plugins_from_dirs(extra_plugins)
    _load_components_from_dirs(extra_components)
    _load_plugins_from_packages()
    # Register them
    fire_event('plugin.register')
    # Plugins should only be registered once, remove their handlers after
    remove_event_handlers('plugin.register')
    # After they have all been registered, instantiate them
    for plugin in list(plugins.values()):
        plugin.initialize()
    took = time.time() - start_time
    plugins_loaded = True
    log.debug(
        'Plugins took %.2f seconds to load. %s plugins in registry.', took, len(plugins.keys())
    )
def get_plugins(phase=None, interface=None, category=None, name=None, min_api=None):
    """
    Query other plugins characteristics.

    :param string phase: Require phase
    :param string interface: Plugin must implement this interface.
    :param string category: Type of plugin, phase names.
    :param string name: Name of the plugin.
    :param int min_api: Minimum api version.
    :return: Iterable of matching PluginInfo instances.
    :rtype: filter
    """
    # BUGFIX: validate the phase eagerly. Previously the check lived inside
    # `matches`, so an unknown phase raised only when (and each time) the
    # returned lazy filter was iterated — and never if it was discarded.
    if phase is not None and phase not in phase_methods:
        raise ValueError('Unknown phase %s' % phase)

    def matches(plugin):
        if phase and phase not in plugin.phase_handlers:
            return False
        if interface and interface not in plugin.interfaces:
            return False
        if category and not category == plugin.category:
            return False
        if name is not None and name != plugin.name:
            return False
        if min_api is not None and plugin.api_ver < min_api:
            return False
        return True

    return filter(matches, iter(plugins.values()))
def plugin_schemas(**kwargs):
    """Create a dict schema that matches plugins specified by `kwargs`"""
    return {
        'type': 'object',
        # One `$ref` per matching plugin, keyed by plugin name.
        'properties': {p.name: {'$ref': p.schema['id']} for p in get_plugins(**kwargs)},
        'additionalProperties': False,
        'error_additionalProperties': '{{message}} Only known plugin names are valid keys.',
        'patternProperties': {'^_': {'title': 'Disabled Plugin'}},
    }
# Expose the full plugin-name -> schema mapping at a well-known schema path.
config_schema.register_schema('/schema/plugins', plugin_schemas)
def get_phases_by_plugin(name):
    """Return the names of every phase that plugin *name* hooks."""
    info = get_plugin_by_name(name)
    return list(info.phase_handlers)
def get_plugin_by_name(name, issued_by='???'):
    """
    Get plugin by name, preferred way since this structure may be changed at some point.

    For normal use prefer the module-level ``get`` function, which returns the
    plugin *instance* directly and yields shorter code::

        plugin.get('parsing', self).parse_movie(data=entry['title'])

    This function remains useful when you need the PluginInfo itself.

    :returns: PluginInfo instance
    :raises DependencyError: when no plugin called *name* is registered
    """
    if name in plugins:
        return plugins[name]
    raise DependencyError(issued_by=issued_by, missing=name)
def get(name, requested_by):
    """
    :param str name: Name of the requested plugin
    :param requested_by: Plugin class instance OR string value who is making the request.
    :return: Instance of Plugin class
    :raises DependencyError: when no plugin called *name* is registered
    """
    try:
        info = plugins[name]
    except KeyError:
        # Attribute the failure to the requesting plugin when possible.
        who = requested_by.plugin_info.name if hasattr(requested_by, 'plugin_info') else requested_by
        raise DependencyError(issued_by=who, missing=name)
    if info.instance is None:
        raise Exception('Plugin referred before system initialized?')
    return info.instance
|
mit
|
agry/NGECore2
|
scripts/mobiles/corellia/drall_guard.py
|
2
|
1581
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
    """Register the 'drall_guard' mobile template with the spawn service."""
    template = MobileTemplate()
    template.setCreatureName('drall_guard')
    template.setLevel(36)
    template.setDifficulty(Difficulty.NORMAL)
    template.setMinSpawnDistance(4)
    template.setMaxSpawnDistance(8)
    template.setDeathblow(False)
    template.setScale(1)
    template.setSocialGroup("drall")
    template.setAssistRange(6)
    template.setStalker(True)
    template.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)

    # Appearance variants: female and male drall models.
    appearances = Vector()
    appearances.add('object/mobile/shared_drall_female.iff')
    appearances.add('object/mobile/shared_drall_male.iff')
    template.setTemplates(appearances)

    # Single ranged weapon: CDEF carbine dealing energy damage.
    weapons = Vector()
    weapons.add(WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_cdef.iff', WeaponType.CARBINE, 1.0, 15, 'energy'))
    template.setWeaponTemplateVector(weapons)

    attacks = Vector()
    template.setDefaultAttack('rangedShot')
    template.setAttacks(attacks)

    # Loot: guaranteed roll on the 'Junk' pool.
    template.addToLootGroups(['Junk'], [100], 100)
    core.spawnService.addMobileTemplate('drall_guard', template)
    return
|
lgpl-3.0
|
hotpxl/mxnet
|
python/mxnet/module/module.py
|
1
|
30717
|
# pylint: disable=too-many-instance-attributes, too-many-arguments, protected-access, too-many-branches
# pylint: disable=too-many-public-methods
"""A `Module` implement the `BaseModule` API by wrapping a `Symbol` and one or
more `Executor` for data parallelization.
"""
import logging
import warnings
from .. import context as ctx
from .. import ndarray as nd
from .. import optimizer as opt
from .executor_group import DataParallelExecutorGroup
from ..model import _create_kvstore, _initialize_kvstore, _update_params, _update_params_on_kvstore
from ..model import load_checkpoint
from ..initializer import Uniform, InitDesc
from ..io import DataDesc
from .base_module import BaseModule, _check_input_names, _parse_data_desc
class Module(BaseModule):
"""Module is a basic module that wrap a `Symbol`. It is functionally the same
as the `FeedForward` model, except under the module API.
Parameters
----------
symbol : Symbol
data_names : list of str
Defaults to `('data')` for a typical model used in image classification.
label_names : list of str
Defaults to `('softmax_label')` for a typical model used in image
classification.
logger : Logger
Defaults to `logging`.
context : Context or list of Context
Defaults to ``mx.cpu()``.
work_load_list : list of number
Default ``None``, indicating uniform workload.
fixed_param_names: list of str
Default ``None``, indicating no network parameters are fixed.
state_names : list of str
states are similar to data and label, but not provided by data iterator.
Instead they are initialized to 0 and can be set by `set_states()`.
"""
    def __init__(self, symbol, data_names=('data',), label_names=('softmax_label',),
                 logger=logging, context=ctx.cpu(), work_load_list=None,
                 fixed_param_names=None, state_names=None):
        """Validate inputs against the symbol and set up empty training state.

        See the class docstring for parameter descriptions.
        """
        super(Module, self).__init__(logger=logger)
        # Normalize a single context to a one-element list.
        if isinstance(context, ctx.Context):
            context = [context]
        self._context = context
        if work_load_list is None:
            # Uniform workload across all devices by default.
            work_load_list = [1] * len(self._context)
        assert len(work_load_list) == len(self._context)
        self._work_load_list = work_load_list
        self._symbol = symbol
        # Copy the name collections so callers' sequences are never aliased.
        data_names = list(data_names) if data_names is not None else []
        label_names = list(label_names) if label_names is not None else []
        state_names = list(state_names) if state_names is not None else []
        fixed_param_names = list(fixed_param_names) if fixed_param_names is not None else []
        # Third argument is the human-readable kind; last argument is whether
        # every listed name must exist among the symbol's arguments.
        _check_input_names(symbol, data_names, "data", True)
        _check_input_names(symbol, label_names, "label", False)
        _check_input_names(symbol, state_names, "state", True)
        _check_input_names(symbol, fixed_param_names, "fixed_param", True)
        arg_names = symbol.list_arguments()
        input_names = data_names + label_names + state_names
        # Parameters are all symbol arguments that are not inputs.
        self._param_names = [x for x in arg_names if x not in input_names]
        self._fixed_param_names = fixed_param_names
        self._aux_names = symbol.list_auxiliary_states()
        self._data_names = data_names
        self._label_names = label_names
        self._state_names = state_names
        self._output_names = symbol.list_outputs()
        # The remaining state is populated by bind()/init_params()/init_optimizer().
        self._arg_params = None
        self._aux_params = None
        self._params_dirty = False
        self._optimizer = None
        self._kvstore = None
        self._update_on_kvstore = None
        self._updater = None
        self._preload_opt_states = None
        self._grad_req = None
        self._exec_group = None
        self._data_shapes = None
        self._label_shapes = None
@staticmethod
def load(prefix, epoch, load_optimizer_states=False, **kwargs):
"""Creates a model from previously saved checkpoint.
Parameters
----------
prefix : str
path prefix of saved model files. You should have
"prefix-symbol.json", "prefix-xxxx.params", and
optionally "prefix-xxxx.states", where xxxx is the
epoch number.
epoch : int
epoch to load.
load_optimizer_states : bool
whether to load optimizer states. Checkpoint needs
to have been made with save_optimizer_states=True.
data_names : list of str
Default is `('data')` for a typical model used in image classification.
label_names : list of str
Default is `('softmax_label')` for a typical model used in image
classification.
logger : Logger
Default is `logging`.
context : Context or list of Context
Default is ``cpu()``.
work_load_list : list of number
Default ``None``, indicating uniform workload.
fixed_param_names: list of str
Default ``None``, indicating no network parameters are fixed.
"""
sym, args, auxs = load_checkpoint(prefix, epoch)
mod = Module(symbol=sym, **kwargs)
mod._arg_params = args
mod._aux_params = auxs
mod.params_initialized = True
if load_optimizer_states:
mod._preload_opt_states = '%s-%04d.states'%(prefix, epoch)
return mod
    def save_checkpoint(self, prefix, epoch, save_optimizer_states=False):
        """Saves current progress to checkpoint.

        Use `mx.callback.module_checkpoint` as `epoch_end_callback` to save during training.

        Parameters
        ----------
        prefix : str
            The file prefix to checkpoint to.
        epoch : int
            The current epoch number.
        save_optimizer_states : bool
            Whether to save optimizer states to continue training.
        """
        self._symbol.save('%s-symbol.json'%prefix)
        param_name = '%s-%04d.params' % (prefix, epoch)
        self.save_params(param_name)
        # NOTE(review): logs via the root `logging` module rather than
        # self.logger — confirm whether that is intended.
        logging.info('Saved checkpoint to \"%s\"', param_name)
        if save_optimizer_states:
            state_name = '%s-%04d.states' % (prefix, epoch)
            self.save_optimizer_states(state_name)
            logging.info('Saved optimizer state to \"%s\"', state_name)
def _reset_bind(self):
"""Internal function to reset binded state."""
self.binded = False
self._exec_group = None
self._data_shapes = None
self._label_shapes = None
    @property
    def data_names(self):
        """A list of names for data required by this module."""
        return self._data_names

    @property
    def label_names(self):
        """A list of names for labels required by this module."""
        return self._label_names

    @property
    def output_names(self):
        """A list of names for the outputs of this module."""
        return self._output_names

    @property
    def data_shapes(self):
        """Gets data shapes.

        Returns
        -------
        A list of `(name, shape)` pairs.
        """
        assert self.binded  # shapes are only known after bind()
        return self._data_shapes

    @property
    def label_shapes(self):
        """Gets label shapes.

        Returns
        -------
        A list of `(name, shape)` pairs.
        The return value could be ``None`` if
        the module does not need labels, or if the module is not bound for
        training (in this case, label information is not available).
        """
        assert self.binded  # shapes are only known after bind()
        return self._label_shapes

    @property
    def output_shapes(self):
        """Gets output shapes.

        Returns
        -------
        A list of `(name, shape)` pairs.
        """
        assert self.binded  # delegated to the executor group created by bind()
        return self._exec_group.get_output_shapes()
def get_params(self):
"""Gets current parameters.
Returns
-------
`(arg_params, aux_params)`
A pair of dictionaries each mapping parameter names to NDArray values.
"""
assert self.binded and self.params_initialized
if self._params_dirty:
self._sync_params_from_devices()
return (self._arg_params, self._aux_params)
    def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
                    allow_missing=False, force_init=False, allow_extra=False):
        """Initializes the parameters and auxiliary states.

        Parameters
        ----------
        initializer : Initializer
            Called to initialize parameters if needed.
        arg_params : dict
            If not ``None``, should be a dictionary of existing arg_params. Initialization
            will be copied from that.
        aux_params : dict
            If not ``None``, should be a dictionary of existing aux_params. Initialization
            will be copied from that.
        allow_missing : bool
            If ``True``, params could contain missing values, and the initializer will be
            called to fill those missing params.
        force_init : bool
            If ``True``, will force re-initialize even if already initialized.
        allow_extra : boolean, optional
            Whether allow extra parameters that are not needed by symbol.
            If this is True, no error will be thrown when arg_params or aux_params
            contain extra parameters that is not needed by the executor.
        """
        # NOTE(review): the default Uniform(0.01) is constructed once at def
        # time and shared across all calls — confirm Initializer is stateless.
        if self.params_initialized and not force_init:
            warnings.warn("Parameters already initialized and force_init=False. "
                          "init_params call ignored.", stacklevel=2)
            return
        assert self.binded, 'call bind before initializing the parameters'

        def _impl(name, arr, cache):
            """Internal helper for parameter initialization"""
            if cache is not None:
                if name in cache:
                    cache_arr = cache[name]
                    # just in case the cached array is just the target itself
                    if cache_arr is not arr:
                        cache_arr.copyto(arr)
                else:
                    if not allow_missing:
                        raise RuntimeError("%s is not presented" % name)
                    if initializer is not None:
                        initializer(name, arr)
            else:
                # No cache supplied: always initialize from scratch.
                initializer(name, arr)

        attrs = self._symbol.attr_dict()
        for name, arr in self._arg_params.items():
            desc = InitDesc(name, attrs.get(name, None))
            _impl(desc, arr, arg_params)
        for name, arr in self._aux_params.items():
            desc = InitDesc(name, attrs.get(name, None))
            _impl(desc, arr, aux_params)
        self.params_initialized = True
        self._params_dirty = False
        # copy the initialized parameters to devices
        self._exec_group.set_params(self._arg_params, self._aux_params,
                                    allow_extra=allow_extra)
    def set_params(self, arg_params, aux_params, allow_missing=False, force_init=True,
                   allow_extra=False):
        """Assigns parameter and aux state values.

        Parameters
        ----------
        arg_params : dict
            Dictionary of name to `NDArray`.
        aux_params : dict
            Dictionary of name to `NDArray`.
        allow_missing : bool
            If ``True``, params could contain missing values, and the initializer will be
            called to fill those missing params.
        force_init : bool
            If ``True``, will force re-initialize even if already initialized.
        allow_extra : boolean, optional
            Whether allow extra parameters that are not needed by symbol.
            If this is True, no error will be thrown when arg_params or aux_params
            contain extra parameters that is not needed by the executor.

        Examples
        --------
        >>> # An example of setting module parameters.
        >>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, n_epoch_load)
        >>> mod.set_params(arg_params=arg_params, aux_params=aux_params)
        """
        if not allow_missing:
            # Delegate to init_params, which also syncs the copies to devices.
            self.init_params(initializer=None, arg_params=arg_params, aux_params=aux_params,
                             allow_missing=allow_missing, force_init=force_init,
                             allow_extra=allow_extra)
            return
        if self.params_initialized and not force_init:
            warnings.warn("Parameters already initialized and force_init=False. "
                          "set_params call ignored.", stacklevel=2)
            return
        self._exec_group.set_params(arg_params, aux_params, allow_extra=allow_extra)
        # because we didn't update self._arg_params, they are dirty now.
        self._params_dirty = True
        self.params_initialized = True
    def bind(self, data_shapes, label_shapes=None, for_training=True,
             inputs_need_grad=False, force_rebind=False, shared_module=None,
             grad_req='write'):
        """Binds the symbols to construct executors. This is necessary before one
        can perform computation with the module.

        Parameters
        ----------
        data_shapes : list of (str, tuple)
            Typically is ``data_iter.provide_data``.
        label_shapes : list of (str, tuple)
            Typically is ``data_iter.provide_label``.
        for_training : bool
            Default is ``True``. Whether the executors should be bound for training.
        inputs_need_grad : bool
            Default is ``False``. Whether the gradients to the input data need to be computed.
            Typically this is not needed. But this might be needed when implementing composition
            of modules.
        force_rebind : bool
            Default is ``False``. This function does nothing if the executors are already
            bound. But with this ``True``, the executors will be forced to rebind.
        shared_module : Module
            Default is ``None``. This is used in bucketing. When not ``None``, the shared module
            essentially corresponds to a different bucket -- a module with different symbol
            but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
        grad_req : str
            Default is ``'write'``. Gradient request passed through to the executor group.
        """
        # force rebinding is typically used when one want to switch from
        # training to prediction phase.
        if force_rebind:
            self._reset_bind()
        if self.binded:
            self.logger.warning('Already bound, ignoring bind()')
            return
        self.for_training = for_training
        self.inputs_need_grad = inputs_need_grad
        self.binded = True
        self._grad_req = grad_req
        if not for_training:
            # Input gradients only make sense when training.
            assert not inputs_need_grad
        else:
            pass
            # this is not True, as some module might not contains a loss function
            # that consumes the labels
            # assert label_shapes is not None
        self._data_shapes, self._label_shapes = _parse_data_desc(
            self.data_names, self.label_names, data_shapes, label_shapes)
        if shared_module is not None:
            # Bucketing: reuse the executor group of the shared bucket.
            assert isinstance(shared_module, Module) and \
                    shared_module.binded and shared_module.params_initialized
            shared_group = shared_module._exec_group
        else:
            shared_group = None
        self._exec_group = DataParallelExecutorGroup(self._symbol, self._context,
                                                     self._work_load_list, self._data_shapes,
                                                     self._label_shapes, self._param_names,
                                                     for_training, inputs_need_grad,
                                                     shared_group, logger=self.logger,
                                                     fixed_param_names=self._fixed_param_names,
                                                     grad_req=grad_req,
                                                     state_names=self._state_names)
        self._total_exec_bytes = self._exec_group._total_exec_bytes
        if shared_module is not None:
            # Parameters are shared with (aliased to) the other bucket's module.
            self.params_initialized = True
            self._arg_params = shared_module._arg_params
            self._aux_params = shared_module._aux_params
        elif self.params_initialized:
            # if the parameters are already initialized, we are re-binding
            # so automatically copy the already initialized params
            self._exec_group.set_params(self._arg_params, self._aux_params)
        else:
            # Allocate zeroed CPU-side copies; real values come from init_params().
            assert self._arg_params is None and self._aux_params is None
            param_arrays = [
                nd.zeros(x[0].shape, dtype=x[0].dtype)
                for x in self._exec_group.param_arrays
            ]
            self._arg_params = {name:arr for name, arr in zip(self._param_names, param_arrays)}
            aux_arrays = [
                nd.zeros(x[0].shape, dtype=x[0].dtype)
                for x in self._exec_group.aux_arrays
            ]
            self._aux_params = {name:arr for name, arr in zip(self._aux_names, aux_arrays)}
        if shared_module is not None and shared_module.optimizer_initialized:
            self.borrow_optimizer(shared_module)
def reshape(self, data_shapes, label_shapes=None):
"""Reshapes the module for new input shapes.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is ``data_iter.provide_data``.
label_shapes : list of (str, tuple)
Typically is ``data_iter.provide_label``.
"""
assert self.binded
self._data_shapes, self._label_shapes = _parse_data_desc(
self.data_names, self.label_names, data_shapes, label_shapes)
self._exec_group.reshape(self._data_shapes, self._label_shapes)
    def init_optimizer(self, kvstore='local', optimizer='sgd',
                       optimizer_params=(('learning_rate', 0.01),), force_init=False):
        """Installs and initializes optimizers.

        Parameters
        ----------
        kvstore : str or KVStore
            Default `'local'`.
        optimizer : str or Optimizer
            Default `'sgd'`
        optimizer_params : dict
            Default `(('learning_rate', 0.01),)`. The default value is not a dictionary,
            just to avoid pylint warning of dangerous default values.
        force_init : bool
            Default ``False``, indicating whether we should force re-initializing the
            optimizer in the case an optimizer is already installed.
        """
        assert self.binded and self.params_initialized
        if self.optimizer_initialized and not force_init:
            self.logger.warning('optimizer already initialized, ignoring...')
            return
        if self._params_dirty:
            self._sync_params_from_devices()
        (kvstore, update_on_kvstore) = \
                _create_kvstore(kvstore, len(self._context), self._arg_params)
        batch_size = self._exec_group.batch_size
        # Distributed synchronous training: gradients are summed over workers.
        if kvstore and 'dist' in kvstore.type and '_sync' in kvstore.type:
            batch_size *= kvstore.num_workers
        rescale_grad = 1.0/batch_size
        if isinstance(optimizer, str):
            idx2name = {}
            if update_on_kvstore:
                idx2name.update(enumerate(self._exec_group.param_names))
            else:
                # Local updates keep one slot per (parameter, device) pair.
                for k in range(len(self._context)):
                    idx2name.update({i*len(self._context)+k: n
                                     for i, n in enumerate(self._exec_group.param_names)})
            optimizer_params = dict(optimizer_params)
            if 'rescale_grad' not in optimizer_params:
                optimizer_params['rescale_grad'] = rescale_grad
            optimizer = opt.create(optimizer,
                                   sym=self.symbol, param_idx2name=idx2name,
                                   **optimizer_params)
        else:
            assert isinstance(optimizer, opt.Optimizer)
            if optimizer.rescale_grad != rescale_grad:
                #pylint: disable=no-member
                warnings.warn(
                    "Optimizer created manually outside Module but rescale_grad " +
                    "is not normalized to 1.0/batch_size/num_workers (%s vs. %s). "%(
                        optimizer.rescale_grad, rescale_grad) +
                    "Is this intended?", stacklevel=2)
        self._optimizer = optimizer
        self._kvstore = kvstore
        self._update_on_kvstore = update_on_kvstore
        self._updater = None
        if kvstore:
            # copy initialized local parameters to kvstore
            _initialize_kvstore(kvstore=kvstore,
                                param_arrays=self._exec_group.param_arrays,
                                arg_params=self._arg_params,
                                param_names=self._param_names,
                                update_on_kvstore=update_on_kvstore)
        if update_on_kvstore:
            kvstore.set_optimizer(self._optimizer)
        else:
            self._updater = opt.get_updater(optimizer)
        self.optimizer_initialized = True
        if self._preload_opt_states is not None:
            # States queued by Module.load() are applied on first init.
            self.load_optimizer_states(self._preload_opt_states)
            self._preload_opt_states = None
def borrow_optimizer(self, shared_module):
"""Borrows optimizer from a shared module. Used in bucketing, where exactly the same
optimizer (esp. kvstore) is used.
Parameters
----------
shared_module : Module
"""
assert shared_module.optimizer_initialized
self._optimizer = shared_module._optimizer
self._kvstore = shared_module._kvstore
self._update_on_kvstore = shared_module._update_on_kvstore
self._updater = shared_module._updater
self.optimizer_initialized = True
    def forward(self, data_batch, is_train=None):
        """Forward computation. It supports data batches with different shapes, such as
        different batch sizes or different image sizes.

        If reshaping of data batch relates to modification of symbol or module, such as
        changing image layout ordering or switching from training to predicting, module
        rebinding is required.

        See Also
        ----------
        :meth:`BaseModule.forward`.

        Parameters
        ----------
        data_batch : DataBatch
            Could be anything with similar API implemented.
        is_train : bool
            Default is ``None``, which means ``is_train`` takes the value of ``self.for_training``.
        """
        assert self.binded and self.params_initialized
        # Implicit reshape support: if the incoming batch's data shapes differ
        # from those the module is currently bound with, reshape first.
        curr_data_shapes = tuple(i.shape for i in self._data_shapes)
        new_data_shapes = tuple(i.shape for i in data_batch.data)
        if curr_data_shapes != new_data_shapes:
            # Prefer the batch's own descriptors when available; otherwise keep
            # the bound name/dtype/layout and substitute only the new shapes.
            if hasattr(data_batch, "provide_data") and data_batch.provide_data:
                new_dshape = data_batch.provide_data
            else:
                new_dshape = [DataDesc(i.name, shape, i.dtype, i.layout) \
                              for i, shape in zip(self._data_shapes, new_data_shapes)]
            if hasattr(data_batch, "provide_label") and data_batch.provide_label:
                new_lshape = data_batch.provide_label
            elif hasattr(data_batch, "label") and data_batch.label:
                new_lshape = [DataDesc(i.name, j.shape, i.dtype, i.layout) \
                              for i, j in zip(self._label_shapes, data_batch.label)]
            else:
                # No label information in this batch (e.g. prediction).
                new_lshape = None
            self.reshape(new_dshape, new_lshape)
        self._exec_group.forward(data_batch, is_train)
def backward(self, out_grads=None):
"""Backward computation.
See Also
----------
:meth:`BaseModule.backward`.
Parameters
----------
out_grads : NDArray or list of NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
"""
assert self.binded and self.params_initialized
self._exec_group.backward(out_grads=out_grads)
    def update(self):
        """Updates parameters according to the installed optimizer and the gradients computed
        in the previous forward-backward batch.

        See Also
        ----------
        :meth:`BaseModule.update`.
        """
        assert self.binded and self.params_initialized and self.optimizer_initialized
        # Device-side parameters are about to diverge from the CPU copies in
        # ``self._arg_params``; mark them stale so readers re-sync first.
        self._params_dirty = True
        if self._update_on_kvstore:
            # The kvstore applies the optimizer server-side: push gradients,
            # pull back the updated parameters.
            _update_params_on_kvstore(self._exec_group.param_arrays,
                                      self._exec_group.grad_arrays,
                                      self._kvstore, self._exec_group.param_names)
        else:
            # Local-updater path; the kvstore (if any) only aggregates grads.
            _update_params(self._exec_group.param_arrays,
                           self._exec_group.grad_arrays,
                           updater=self._updater,
                           num_device=len(self._context),
                           kvstore=self._kvstore,
                           param_names=self._exec_group.param_names)
def get_outputs(self, merge_multi_context=True):
"""Gets outputs of the previous forward computation.
If ``merge_multi_context`` is ``True``, it is like ``[out1, out2]``. Otherwise, it
is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are `NDArray`. When `merge_multi_context` is `False`, those `NDArray`
might live on different devices.
Parameters
----------
merge_multi_context : bool
Default is ``True``. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A ``True`` value indicate that we
should merge the collected results so that they look like from a single
executor.
Returns
-------
list of NDArray or list of list of NDArray
Output.
"""
assert self.binded and self.params_initialized
return self._exec_group.get_outputs(merge_multi_context=merge_multi_context)
def get_input_grads(self, merge_multi_context=True):
"""Gets the gradients with respect to the inputs of the module.
If ``merge_multi_context`` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it
is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output
elements are `NDArray`.
Parameters
----------
merge_multi_context : bool
Default is ``True``. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A ``True`` value indicate that we
should merge the collected results so that they look like from a single
executor.
Returns
-------
list of NDArray or list of list of NDArray
Input gradients
"""
assert self.binded and self.params_initialized and self.inputs_need_grad
return self._exec_group.get_input_grads(merge_multi_context=merge_multi_context)
def get_states(self, merge_multi_context=True):
"""Gets states from all devices.
If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it
is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are `NDArray`.
Parameters
----------
merge_multi_context : bool
Default is ``True``. In the case when data-parallelism is used, the states
will be collected from multiple devices. A ``True`` value indicate that we
should merge the collected results so that they look like from a single
executor.
Returns
-------
list of NDArray or list of list of NDArray
States
"""
assert self.binded and self.params_initialized
return self._exec_group.get_states(merge_multi_context=merge_multi_context)
def set_states(self, states=None, value=None):
"""Sets value for states. Only one of the states & value can be specified.
Parameters
----------
states : list of list of NDArrays
source states arrays formatted like ``[[state1_dev1, state1_dev2],
[state2_dev1, state2_dev2]]``.
value : number
a single scalar value for all state arrays.
"""
assert self.binded and self.params_initialized
self._exec_group.set_states(states, value)
def update_metric(self, eval_metric, labels):
"""Evaluates and accumulates evaluation metric on outputs of the last forward computation.
See Also
----------
:meth:`BaseModule.update_metric`.
Parameters
----------
eval_metric : EvalMetric
labels : list of NDArray
Typically ``data_batch.label``.
"""
self._exec_group.update_metric(eval_metric, labels)
def _sync_params_from_devices(self):
"""Synchronizes parameters from devices to CPU. This function should be called after
calling `update` that updates the parameters on the devices, before one can read the
latest parameters from ``self._arg_params`` and ``self._aux_params``.
"""
self._exec_group.get_params(self._arg_params, self._aux_params)
self._params_dirty = False
def save_optimizer_states(self, fname):
"""Saves optimizer (updater) state to a file.
Parameters
----------
fname : str
Path to output states file.
"""
assert self.optimizer_initialized
if self._update_on_kvstore:
self._kvstore.save_optimizer_states(fname)
else:
with open(fname, 'wb') as fout:
fout.write(self._updater.get_states())
def load_optimizer_states(self, fname):
"""Loads optimizer (updater) state from a file.
Parameters
----------
fname : str
Path to input states file.
"""
assert self.optimizer_initialized
if self._update_on_kvstore:
self._kvstore.load_optimizer_states(fname)
else:
self._updater.set_states(open(fname, 'rb').read())
def install_monitor(self, mon):
"""Installs monitor on all executors. """
assert self.binded
self._exec_group.install_monitor(mon)
|
apache-2.0
|
yograterol/flask-bundle-system
|
setup.py
|
1
|
1161
|
"""
flask-bundle-system
-------------------
Links
`````
* `documentation <http://packages.python.org/flask-bundle-system>`_
* `development version
<http://github.com/yograterol/flask-bundle-system/zipball/master>`_
"""
from setuptools import setup
setup(
name='flask-bundle-system',
version='0.2',
license='BSD',
author='yograterol',
author_email='yograterol@fedoraproject.org',
url="http://www.yograterol.me",
download_url='https://github.com/yograterol/flask-bundle-system',
description='Flask extension for work with blueprints as bundles',
long_description=__doc__,
packages=['flask_bundlesystem'],
zip_safe=False,
platforms='any',
install_requires=[
'Flask'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
bsd-3-clause
|
HydrelioxGitHub/home-assistant
|
homeassistant/components/proximity/__init__.py
|
3
|
9248
|
"""Support for tracking the proximity of a device."""
import logging
import voluptuous as vol
from homeassistant.const import (
CONF_DEVICES, CONF_UNIT_OF_MEASUREMENT, CONF_ZONE)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import track_state_change
from homeassistant.util.distance import convert
from homeassistant.util.location import distance
_LOGGER = logging.getLogger(__name__)
# Entity attribute names exposed by a Proximity entity.
ATTR_DIR_OF_TRAVEL = 'dir_of_travel'
ATTR_DIST_FROM = 'dist_to_zone'
ATTR_NEAREST = 'nearest'
# configuration.yaml option keys specific to this component.
CONF_IGNORED_ZONES = 'ignored_zones'
CONF_TOLERANCE = 'tolerance'
# Placeholder values used until the first real proximity calculation runs.
DEFAULT_DIR_OF_TRAVEL = 'not set'
DEFAULT_DIST_TO_ZONE = 'not set'
DEFAULT_NEAREST = 'not set'
DEFAULT_PROXIMITY_ZONE = 'home'
# Minimum movement (same unit as the distance helper, i.e. metres) before a
# device is considered to be moving rather than stationary.
DEFAULT_TOLERANCE = 1
DEPENDENCIES = ['zone', 'device_tracker']
DOMAIN = 'proximity'
# Distance units accepted for CONF_UNIT_OF_MEASUREMENT.
UNITS = ['km', 'm', 'mi', 'ft']
# Schema for one monitored zone entry under the 'proximity:' key.
ZONE_SCHEMA = vol.Schema({
    vol.Optional(CONF_ZONE, default=DEFAULT_PROXIMITY_ZONE): cv.string,
    vol.Optional(CONF_DEVICES, default=[]):
        vol.All(cv.ensure_list, [cv.entity_id]),
    vol.Optional(CONF_IGNORED_ZONES, default=[]):
        vol.All(cv.ensure_list, [cv.string]),
    vol.Optional(CONF_TOLERANCE, default=DEFAULT_TOLERANCE): cv.positive_int,
    vol.Optional(CONF_UNIT_OF_MEASUREMENT): vol.All(cv.string, vol.In(UNITS)),
})
# Top-level schema: a mapping of slug -> ZONE_SCHEMA entries.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: cv.schema_with_slug_keys(ZONE_SCHEMA),
}, extra=vol.ALLOW_EXTRA)
def setup_proximity_component(hass, name, config):
    """Create one Proximity entity from its validated config and start
    listening for state changes of the devices it tracks."""
    zone_id = 'zone.{}'.format(config.get(CONF_ZONE))
    devices = config.get(CONF_DEVICES)
    unit = config.get(
        CONF_UNIT_OF_MEASUREMENT, hass.config.units.length_unit)
    entity = Proximity(hass, name, DEFAULT_DIST_TO_ZONE,
                       DEFAULT_DIR_OF_TRAVEL, DEFAULT_NEAREST,
                       config.get(CONF_IGNORED_ZONES), devices,
                       config.get(CONF_TOLERANCE), zone_id, unit)
    entity.entity_id = '{}.{}'.format(DOMAIN, name)
    # Publish the initial ('not set') state immediately.
    entity.schedule_update_ha_state()
    track_state_change(
        hass, devices, entity.check_proximity_state_change)
    return True
def setup(hass, config):
    """Set up one proximity component per zone in configuration.yaml."""
    domain_config = config[DOMAIN]
    for zone_name in domain_config:
        setup_proximity_component(hass, zone_name, domain_config[zone_name])
    return True
class Proximity(Entity):
    """Representation of a Proximity.

    For one monitored zone, tracks the distance of the nearest (non-ignored)
    device, that device's name and its direction of travel, and exposes them
    as entity state and attributes.
    """

    def __init__(self, hass, zone_friendly_name, dist_to, dir_of_travel,
                 nearest, ignored_zones, proximity_devices, tolerance,
                 proximity_zone, unit_of_measurement):
        """Initialize the proximity.

        hass: Home Assistant core object.
        zone_friendly_name: display name of the monitored zone.
        dist_to / dir_of_travel / nearest: initial values (typically 'not set').
        ignored_zones: names of zones whose occupants are not tracked.
        proximity_devices: entity_ids of tracked device_tracker entities.
        tolerance: movement below this (in metres) counts as stationary.
        proximity_zone: entity_id of the monitored zone ('zone.<name>').
        unit_of_measurement: unit used for the reported distance.
        """
        self.hass = hass
        self.friendly_name = zone_friendly_name
        self.dist_to = dist_to
        self.dir_of_travel = dir_of_travel
        self.nearest = nearest
        self.ignored_zones = ignored_zones
        self.proximity_devices = proximity_devices
        self.tolerance = tolerance
        self.proximity_zone = proximity_zone
        self._unit_of_measurement = unit_of_measurement

    @property
    def name(self):
        """Return the name of the entity."""
        return self.friendly_name

    @property
    def state(self):
        """Return the state (distance to the monitored zone)."""
        return self.dist_to

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity."""
        return self._unit_of_measurement

    @property
    def state_attributes(self):
        """Return the state attributes."""
        return {
            ATTR_DIR_OF_TRAVEL: self.dir_of_travel,
            ATTR_NEAREST: self.nearest,
        }

    def check_proximity_state_change(self, entity, old_state, new_state):
        """Perform the proximity checking.

        Called by track_state_change whenever one of the tracked devices
        changes state; recomputes distance, nearest device and direction.
        """
        entity_name = new_state.name
        devices_to_calculate = False
        devices_in_zone = ''
        zone_state = self.hass.states.get(self.proximity_zone)
        proximity_latitude = zone_state.attributes.get('latitude')
        proximity_longitude = zone_state.attributes.get('longitude')
        # Check for devices in the monitored zone.
        for device in self.proximity_devices:
            device_state = self.hass.states.get(device)
            if device_state is None:
                devices_to_calculate = True
                continue
            if device_state.state not in self.ignored_zones:
                devices_to_calculate = True
            # Check the location of all devices.
            # A device_tracker's state equals the name of the zone it is in,
            # so state == zone name means "inside the monitored zone".
            if (device_state.state).lower() == (self.friendly_name).lower():
                device_friendly = device_state.name
                if devices_in_zone != '':
                    devices_in_zone = devices_in_zone + ', '
                devices_in_zone = devices_in_zone + device_friendly
        # No-one to track so reset the entity.
        if not devices_to_calculate:
            self.dist_to = 'not set'
            self.dir_of_travel = 'not set'
            self.nearest = 'not set'
            self.schedule_update_ha_state()
            return
        # At least one device is in the monitored zone so update the entity.
        if devices_in_zone != '':
            self.dist_to = 0
            self.dir_of_travel = 'arrived'
            self.nearest = devices_in_zone
            self.schedule_update_ha_state()
            return
        # We can't check proximity because latitude and longitude don't exist.
        if 'latitude' not in new_state.attributes:
            return
        # Collect distances to the zone for all devices.
        distances_to_zone = {}
        for device in self.proximity_devices:
            # Ignore devices in an ignored zone.
            # NOTE(review): device_state may be None here if a tracker has no
            # state yet; the first loop tolerates that but this access would
            # raise AttributeError -- confirm upstream guarantees.
            device_state = self.hass.states.get(device)
            if device_state.state in self.ignored_zones:
                continue
            # Ignore devices if proximity cannot be calculated.
            if 'latitude' not in device_state.attributes:
                continue
            # Calculate the distance to the proximity zone.
            dist_to_zone = distance(proximity_latitude,
                                    proximity_longitude,
                                    device_state.attributes['latitude'],
                                    device_state.attributes['longitude'])
            # Add the device and distance to a dictionary.
            # ``distance`` returns metres; convert to the configured unit.
            distances_to_zone[device] = round(
                convert(dist_to_zone, 'm', self.unit_of_measurement), 1)
        # Loop through each of the distances collected and work out the
        # closest.
        closest_device = None  # type: str
        dist_to_zone = None  # type: float
        for device in distances_to_zone:
            if not dist_to_zone or distances_to_zone[device] < dist_to_zone:
                closest_device = device
                dist_to_zone = distances_to_zone[device]
        # If the closest device is one of the other devices.
        # Direction of travel can only be derived for the device whose state
        # change triggered this callback, so it is 'unknown' otherwise.
        if closest_device != entity:
            self.dist_to = round(distances_to_zone[closest_device])
            self.dir_of_travel = 'unknown'
            device_state = self.hass.states.get(closest_device)
            self.nearest = device_state.name
            self.schedule_update_ha_state()
            return
        # Stop if we cannot calculate the direction of travel (i.e. we don't
        # have a previous state and a current LAT and LONG).
        if old_state is None or 'latitude' not in old_state.attributes:
            self.dist_to = round(distances_to_zone[entity])
            self.dir_of_travel = 'unknown'
            self.nearest = entity_name
            self.schedule_update_ha_state()
            return
        # Reset the variables
        distance_travelled = 0
        # Calculate the distance travelled.
        old_distance = distance(proximity_latitude, proximity_longitude,
                                old_state.attributes['latitude'],
                                old_state.attributes['longitude'])
        new_distance = distance(proximity_latitude, proximity_longitude,
                                new_state.attributes['latitude'],
                                new_state.attributes['longitude'])
        distance_travelled = round(new_distance - old_distance, 1)
        # Check for tolerance: small movements count as stationary.
        if distance_travelled < self.tolerance * -1:
            direction_of_travel = 'towards'
        elif distance_travelled > self.tolerance:
            direction_of_travel = 'away_from'
        else:
            direction_of_travel = 'stationary'
        # Update the proximity entity
        self.dist_to = round(dist_to_zone)
        self.dir_of_travel = direction_of_travel
        self.nearest = entity_name
        self.schedule_update_ha_state()
        _LOGGER.debug('proximity.%s update entity: distance=%s: direction=%s: '
                      'device=%s', self.friendly_name, round(dist_to_zone),
                      direction_of_travel, entity_name)
        _LOGGER.info('%s: proximity calculation complete', entity_name)
|
apache-2.0
|
derekforeman/Wox
|
PythonHome/Lib/site-packages/pip/commands/zip.py
|
393
|
14821
|
import sys
import re
import fnmatch
import os
import shutil
import zipfile
from pip.util import display_path, backup_dir, rmtree
from pip.log import logger
from pip.exceptions import InstallationError
from pip.basecommand import Command
class ZipCommand(Command):
    """Zip individual packages.

    DEPRECATED pip command backing both ``pip zip`` and ``pip unzip``:
    converts an installed package directory on sys.path into a zip archive
    (or back again), maintaining ``.pth`` entries as needed.
    """
    name = 'zip'
    usage = """
      %prog [options] <package> ..."""
    summary = 'DEPRECATED. Zip individual packages.'

    def __init__(self, *args, **kw):
        super(ZipCommand, self).__init__(*args, **kw)
        # The same class serves both 'zip' and 'unzip'; the spelling of the
        # direction toggle depends on which command name is active.
        if self.name == 'zip':
            self.cmd_opts.add_option(
                '--unzip',
                action='store_true',
                dest='unzip',
                help='Unzip (rather than zip) a package.')
        else:
            self.cmd_opts.add_option(
                '--zip',
                action='store_false',
                dest='unzip',
                default=True,
                help='Zip (rather than unzip) a package.')
        self.cmd_opts.add_option(
            '--no-pyc',
            action='store_true',
            dest='no_pyc',
            help='Do not include .pyc files in zip files (useful on Google App Engine).')
        self.cmd_opts.add_option(
            '-l', '--list',
            action='store_true',
            dest='list',
            help='List the packages available, and their zip status.')
        self.cmd_opts.add_option(
            '--sort-files',
            action='store_true',
            dest='sort_files',
            help='With --list, sort packages according to how many files they contain.')
        self.cmd_opts.add_option(
            '--path',
            action='append',
            dest='paths',
            help='Restrict operations to the given paths (may include wildcards).')
        self.cmd_opts.add_option(
            '-n', '--simulate',
            action='store_true',
            help='Do not actually perform the zip/unzip operation.')
        self.parser.insert_option_group(0, self.cmd_opts)

    def paths(self):
        """All the entries of sys.path, possibly restricted by --path"""
        if not self.select_paths:
            return sys.path
        result = []
        match_any = set()
        for path in sys.path:
            path = os.path.normcase(os.path.abspath(path))
            for match in self.select_paths:
                match = os.path.normcase(os.path.abspath(match))
                if '*' in match:
                    # Wildcard restriction: translate with fnmatch semantics.
                    if re.search(fnmatch.translate(match + '*'), path):
                        result.append(path)
                        match_any.add(match)
                        break
                else:
                    # Plain prefix restriction.
                    if path.startswith(match):
                        result.append(path)
                        match_any.add(match)
                        break
            else:
                logger.debug("Skipping path %s because it doesn't match %s"
                             % (path, ', '.join(self.select_paths)))
        # Non-wildcard --path values that matched nothing on sys.path are
        # still included so the user can operate on them explicitly.
        for match in self.select_paths:
            if match not in match_any and '*' not in match:
                result.append(match)
                logger.debug("Adding path %s because it doesn't match "
                             "anything already on sys.path" % match)
        return result

    def run(self, options, args):
        """Entry point: dispatch to --list, zip, or unzip processing."""
        logger.deprecated('1.7', "DEPRECATION: 'pip zip' and 'pip unzip` are deprecated, and will be removed in a future release.")
        self.select_paths = options.paths
        self.simulate = options.simulate
        if options.list:
            return self.list(options, args)
        if not args:
            raise InstallationError(
                'You must give at least one package to zip or unzip')
        packages = []
        # Validate every requested package before touching anything.
        for arg in args:
            module_name, filename = self.find_package(arg)
            if options.unzip and os.path.isdir(filename):
                raise InstallationError(
                    'The module %s (in %s) is not a zip file; cannot be unzipped'
                    % (module_name, filename))
            elif not options.unzip and not os.path.isdir(filename):
                raise InstallationError(
                    'The module %s (in %s) is not a directory; cannot be zipped'
                    % (module_name, filename))
            packages.append((module_name, filename))
        last_status = None
        for module_name, filename in packages:
            if options.unzip:
                last_status = self.unzip_package(module_name, filename)
            else:
                last_status = self.zip_package(module_name, filename, options.no_pyc)
        return last_status

    def unzip_package(self, module_name, filename):
        """Extract *module_name*'s entries from the zip file containing it,
        removing the zip (or rewriting it without those entries)."""
        zip_filename = os.path.dirname(filename)
        # BUGFIX: the original condition was
        #     if not os.path.isfile(zip_filename) and zipfile.is_zipfile(zip_filename):
        # which, by operator precedence, reads (not isfile) AND is_zipfile --
        # a contradiction that can never be true, so this sanity check never
        # fired.  The intent is to reject anything that is not an existing
        # zip file.
        if not (os.path.isfile(zip_filename) and zipfile.is_zipfile(zip_filename)):
            raise InstallationError(
                'Module %s (in %s) isn\'t located in a zip file in %s'
                % (module_name, filename, zip_filename))
        package_path = os.path.dirname(zip_filename)
        if package_path not in self.paths():
            logger.warn(
                'Unpacking %s into %s, but %s is not on sys.path'
                % (display_path(zip_filename), display_path(package_path),
                   display_path(package_path)))
        logger.notify('Unzipping %s (in %s)' % (module_name, display_path(zip_filename)))
        if self.simulate:
            logger.notify('Skipping remaining operations because of --simulate')
            return
        logger.indent += 2
        try:
            ## FIXME: this should be undoable:
            zf = zipfile.ZipFile(zip_filename)
            to_save = []
            for info in zf.infolist():
                name = info.filename
                if name.startswith(module_name + os.path.sep):
                    # Entry belongs to the requested module: extract it.
                    content = zf.read(name)
                    dest = os.path.join(package_path, name)
                    if not os.path.exists(os.path.dirname(dest)):
                        os.makedirs(os.path.dirname(dest))
                    if not content and dest.endswith(os.path.sep):
                        if not os.path.exists(dest):
                            os.makedirs(dest)
                    else:
                        f = open(dest, 'wb')
                        f.write(content)
                        f.close()
                else:
                    # Entry belongs to another module: keep it for rewriting.
                    to_save.append((name, zf.read(name)))
            zf.close()
            if not to_save:
                logger.info('Removing now-empty zip file %s' % display_path(zip_filename))
                os.unlink(zip_filename)
                self.remove_filename_from_pth(zip_filename)
            else:
                # Rewrite the archive with only the entries we kept.
                logger.info('Removing entries in %s/ from zip file %s' % (module_name, display_path(zip_filename)))
                zf = zipfile.ZipFile(zip_filename, 'w')
                for name, content in to_save:
                    zf.writestr(name, content)
                zf.close()
        finally:
            logger.indent -= 2

    def zip_package(self, module_name, filename, no_pyc):
        """Zip the package directory *filename* into <dir>.zip (or back into
        an .egg file), then register the archive on a .pth file."""
        orig_filename = filename
        logger.notify('Zip %s (in %s)' % (module_name, display_path(filename)))
        logger.indent += 2
        if filename.endswith('.egg'):
            dest_filename = filename
        else:
            dest_filename = filename + '.zip'
        try:
            ## FIXME: I think this needs to be undoable:
            if filename == dest_filename:
                # Destination collides with the source (.egg case): move the
                # original directory out of the way first.
                filename = backup_dir(orig_filename)
                logger.notify('Moving %s aside to %s' % (orig_filename, filename))
                if not self.simulate:
                    shutil.move(orig_filename, filename)
            try:
                logger.info('Creating zip file in %s' % display_path(dest_filename))
                if not self.simulate:
                    zf = zipfile.ZipFile(dest_filename, 'w')
                    zf.writestr(module_name + '/', '')
                    for dirpath, dirnames, filenames in os.walk(filename):
                        if no_pyc:
                            filenames = [f for f in filenames
                                         if not f.lower().endswith('.pyc')]
                        for fns, is_dir in [(dirnames, True), (filenames, False)]:
                            for fn in fns:
                                full = os.path.join(dirpath, fn)
                                dest = os.path.join(module_name, dirpath[len(filename):].lstrip(os.path.sep), fn)
                                if is_dir:
                                    zf.writestr(dest + '/', '')
                                else:
                                    zf.write(full, dest)
                    zf.close()
                logger.info('Removing old directory %s' % display_path(filename))
                if not self.simulate:
                    rmtree(filename)
            except:
                ## FIXME: need to do an undo here
                # Bare except kept intentionally: everything is re-raised.
                raise
            ## FIXME: should also be undone:
            self.add_filename_to_pth(dest_filename)
        finally:
            logger.indent -= 2

    def remove_filename_from_pth(self, filename):
        """Remove the line referencing *filename* from the first .pth file
        that contains it, deleting the .pth file if it becomes empty."""
        for pth in self.pth_files():
            f = open(pth, 'r')
            lines = f.readlines()
            f.close()
            new_lines = [
                l for l in lines if l.strip() != filename]
            if lines != new_lines:
                logger.info('Removing reference to %s from .pth file %s'
                            % (display_path(filename), display_path(pth)))
                if not [line for line in new_lines if line]:
                    logger.info('%s file would be empty: deleting' % display_path(pth))
                    if not self.simulate:
                        os.unlink(pth)
                else:
                    if not self.simulate:
                        f = open(pth, 'wb')
                        f.writelines(new_lines)
                        f.close()
                return
        logger.warn('Cannot find a reference to %s in any .pth file' % display_path(filename))

    def add_filename_to_pth(self, filename):
        """Append *filename* to a sibling <filename>.pth so the archive ends
        up on sys.path."""
        path = os.path.dirname(filename)
        dest = filename + '.pth'
        if path not in self.paths():
            logger.warn('Adding .pth file %s, but it is not on sys.path' % display_path(dest))
        if not self.simulate:
            if os.path.exists(dest):
                f = open(dest)
                lines = f.readlines()
                f.close()
                if lines and not lines[-1].endswith('\n'):
                    lines[-1] += '\n'
                lines.append(filename + '\n')
            else:
                lines = [filename + '\n']
            f = open(dest, 'wb')
            f.writelines(lines)
            f.close()

    def pth_files(self):
        """Yield every .pth file found in the directories of self.paths()."""
        for path in self.paths():
            if not os.path.exists(path) or not os.path.isdir(path):
                continue
            for filename in os.listdir(path):
                if filename.endswith('.pth'):
                    yield os.path.join(path, filename)

    def find_package(self, package):
        """Locate *package* on self.paths(); return (name, full_path).

        The returned path may point inside a zip file when the package is
        already zipped."""
        for path in self.paths():
            full = os.path.join(path, package)
            if os.path.exists(full):
                return package, full
            if not os.path.isdir(path) and zipfile.is_zipfile(path):
                # sys.path entry is itself a zip: look for the package inside.
                zf = zipfile.ZipFile(path, 'r')
                try:
                    zf.read(os.path.join(package, '__init__.py'))
                except KeyError:
                    pass
                else:
                    zf.close()
                    return package, full
                zf.close()
        ## FIXME: need special error for package.py case:
        raise InstallationError(
            'No package with the name %s found' % package)

    def list(self, options, args):
        """Implement --list: show zipped/unzipped status of every package.

        (Method name shadows the builtin ``list``; it is part of the public
        command interface and cannot be renamed.)"""
        if args:
            raise InstallationError(
                'You cannot give an argument with --list')
        for path in sorted(self.paths()):
            if not os.path.exists(path):
                continue
            basename = os.path.basename(path.rstrip(os.path.sep))
            if os.path.isfile(path) and zipfile.is_zipfile(path):
                if os.path.dirname(path) not in self.paths():
                    logger.notify('Zipped egg: %s' % display_path(path))
                continue
            # Only report conventional site/dist-packages style directories.
            if (basename != 'site-packages' and basename != 'dist-packages'
                    and not path.replace('\\', '/').endswith('lib/python')):
                continue
            logger.notify('In %s:' % display_path(path))
            logger.indent += 2
            zipped = []
            unzipped = []
            try:
                for filename in sorted(os.listdir(path)):
                    ext = os.path.splitext(filename)[1].lower()
                    if ext in ('.pth', '.egg-info', '.egg-link'):
                        continue
                    if ext == '.py':
                        logger.info('Not displaying %s: not a package' % display_path(filename))
                        continue
                    full = os.path.join(path, filename)
                    if os.path.isdir(full):
                        unzipped.append((filename, self.count_package(full)))
                    elif zipfile.is_zipfile(full):
                        zipped.append(filename)
                    else:
                        logger.info('Unknown file: %s' % display_path(filename))
                if zipped:
                    logger.notify('Zipped packages:')
                    logger.indent += 2
                    try:
                        for filename in zipped:
                            logger.notify(filename)
                    finally:
                        logger.indent -= 2
                else:
                    logger.notify('No zipped packages.')
                if unzipped:
                    if options.sort_files:
                        unzipped.sort(key=lambda x: -x[1])
                    logger.notify('Unzipped packages:')
                    logger.indent += 2
                    try:
                        for filename, count in unzipped:
                            logger.notify('%s (%i files)' % (filename, count))
                    finally:
                        logger.indent -= 2
                else:
                    logger.notify('No unzipped packages.')
            finally:
                logger.indent -= 2

    def count_package(self, path):
        """Return the number of non-.pyc files under *path* (recursive)."""
        total = 0
        for dirpath, dirnames, filenames in os.walk(path):
            filenames = [f for f in filenames
                         if not f.lower().endswith('.pyc')]
            total += len(filenames)
        return total
|
mit
|
pamoakoy/invenio
|
modules/bibknowledge/lib/bibknowledge.py
|
2
|
18790
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Provide API-callable functions for knowledge base management (using kb's).
"""
from invenio import bibknowledge_dblayer
from invenio.bibformat_config import CFG_BIBFORMAT_ELEMENTS_PATH
from invenio.config import CFG_WEBDIR
import os
import sys
import re
if sys.hexversion < 0x2060000:
try:
import simplejson as json
except ImportError:
# Okay, no Ajax app will be possible, but continue anyway,
# since this package is only recommended, not mandatory.
pass
else:
import json
def get_kb_mappings(kb_name="", key="", value="", match_type="s"):
    """Get leftside/rightside mappings from kb kb_name.

    Thin delegation to the database layer.
    If key given, give only those with left side (mapFrom) = key.
    If value given, give only those with right side (mapTo) = value.
    @param kb_name: the name of the kb
    @param key: include only lines matching this on left side in the results
    @param value: include only lines matching this on right side in the results
    @param match_type: s = substring match, e = exact match
    @return a list of mappings
    """
    return bibknowledge_dblayer.get_kb_mappings(kb_name,
                                                keylike=key, valuelike=value,
                                                match_type=match_type)
def get_kb_mapping(kb_name="", key="", value="", match_type="e", default=""):
    """Get one unique mapping. If not found, return default.

    @param kb_name: the name of the kb
    @param key: include only lines matching this on left side in the results
    @param value: include only lines matching this on right side in the results
    @param match_type: s = substring match, e = exact match
    @param default: value returned when no mapping matches
    @return the first matching mapping, or C{default}
    """
    mappings = bibknowledge_dblayer.get_kb_mappings(kb_name,
                                                    keylike=key, valuelike=value,
                                                    match_type=match_type)
    # Pythonic emptiness test instead of ``len(mappings) == 0``.
    if not mappings:
        return default
    return mappings[0]
def add_kb_mapping(kb_name, key, value=""):
    """
    Adds a new mapping to given kb.

    Thin delegation to the database layer.
    @param kb_name: the name of the kb where to insert the new value
    @param key: the key of the mapping
    @param value: the value of the mapping
    """
    bibknowledge_dblayer.add_kb_mapping(kb_name, key, value)
def remove_kb_mapping(kb_name, key):
    """
    Delete an existing kb mapping in kb.

    Thin delegation to the database layer.
    @param kb_name: the name of the kb where to delete the mapping
    @param key: the key of the mapping
    """
    bibknowledge_dblayer.remove_kb_mapping(kb_name, key)
def update_kb_mapping(kb_name, old_key, key, value):
    """
    Update an existing kb mapping with key old_key with a new key and value.

    A pure value change (key unchanged) is always applied; a key rename is
    silently skipped when the target key already exists in the kb.
    @param kb_name: the name of the kb holding the mapping
    @param old_key: the current key of the mapping in the kb
    @param key: the new key of the mapping
    @param value: the new value of the mapping
    """
    if old_key == key or not kb_mapping_exists(kb_name, key):
        bibknowledge_dblayer.update_kb_mapping(kb_name, old_key, key, value)
def get_kb_mappings_json(kb_name="", key="", value="", match_type="s", limit=None):
    """Get leftside/rightside mappings from kb kb_name formatted as json dict.

    If key given, give only those with left side (mapFrom) = key.
    If value given, give only those with right side (mapTo) = value.
    @param kb_name: the name of the kb
    @param key: include only lines matching this on left side in the results
    @param value: include only lines matching this on right side in the results
    @param match_type: s = substring match, e = exact match
    @param limit: maximum number of results to return (are ALL if set to None)
    @return a JSON-serialized list of {'label': ..., 'value': ...} dicts
    """
    mappings = get_kb_mappings(kb_name, key, value, match_type)
    if limit is not None:
        # Truncate before formatting: building dicts for entries that are
        # about to be dropped is wasted work.  Output is unchanged.
        mappings = mappings[:limit]
    ret = []
    for m in mappings:
        # Fall back to the other side when one side of a mapping is empty.
        label = m['value'] or m['key']
        value = m['key'] or m['value']
        ret.append({'label': label, 'value': value})
    return json.dumps(ret)
def kb_exists(kb_name):
    """Returns True if a kb with the given name exists.

    Thin delegation to the database layer.
    @param kb_name: the name of the knowledge base
    @return: True if the kb exists, False otherwise
    """
    return bibknowledge_dblayer.kb_exists(kb_name)
def get_kb_name(kb_id):
    """
    Returns the name of the kb given by id.

    Thin delegation to the database layer.
    @param kb_id: the id of the knowledge base
    """
    return bibknowledge_dblayer.get_kb_name(kb_id)
def update_kb_attributes(kb_name, new_name, new_description=''):
    """Update kb kb_name with a new name and (optionally) description.

    Thin delegation to the database layer.
    @param kb_name: the name of the kb to update
    @param new_name: the new name for the kb
    @param new_description: the new description for the kb
    """
    bibknowledge_dblayer.update_kb(kb_name, new_name, new_description)
def add_kb(kb_name="Untitled", kb_type=None):
    """Create a new knowledge base in the database and return its id.

    If a kb named kb_name already exists, a numeric suffix is appended
    ("<kb_name> 1", "<kb_name> 2", ...) until an unused name is found.

    @param kb_name: the desired name of the kb
    @param kb_type: the type of the kb, incl 'taxonomy' and 'dynamic'.
                    None for typical (leftside-rightside).
    @return the id of the newly created kb
    """
    candidate = kb_name
    suffix = 0
    while bibknowledge_dblayer.kb_exists(candidate):
        suffix += 1
        candidate = kb_name + " " + str(suffix)
    return bibknowledge_dblayer.add_kb(candidate, "", kb_type)
def add_dynamic_kb(kbname, tag, collection="", searchwith=""):
    """Create a dynamic kb and store its search configuration.

    @param kbname: the name of the new dynamic kb
    @param tag: the field tag the kb draws values from
    @param collection: restrict searches to this collection
    @param searchwith: the search expression for the dynamic kb
    @return the id of the newly created kb
    """
    new_id = add_kb(kb_name=kbname, kb_type='dynamic')
    bibknowledge_dblayer.save_kb_dyn_config(new_id, tag, searchwith, collection)
    return new_id
def kb_mapping_exists(kb_name, key):
    """Tell whether kb kb_name contains a mapping with the given left side.

    @param kb_name: knowledge base name
    @param key: left side (mapFrom)
    """
    return bibknowledge_dblayer.kb_mapping_exists(kb_name, key)
def delete_kb(kb_name):
    """Remove the given kb from the database.

    @param kb_name: knowledge base name
    """
    bibknowledge_dblayer.delete_kb(kb_name)
def get_kb_id(kb_name):
    """Look up the id of the kb with the given name.

    @param kb_name: knowledge base name
    """
    return bibknowledge_dblayer.get_kb_id(kb_name)
# Knowledge Bases Dependencies
##
def get_elements_that_use_kb(name):
    """
    This routine is obsolete.
    Return a list of format elements that call the given kb.

    Scans every *.py file in CFG_BIBFORMAT_ELEMENTS_PATH for a call of the
    form bfo.kb("<name>", ...

    [ {'filename':"filename_1.py"
       'name': "a name"
      },
      ...
    ]
    Returns elements sorted by name

    @param name: the kb name to look for
    """
    format_elements = {}
    # Compile the search pattern once: the kb name is constant across files.
    # (The previous version recompiled per file AND clobbered `name` with the
    # element name after the first match, so later files were searched with
    # the wrong pattern.)
    kb_pattern = re.compile(r'''
    (bfo.kb)\s*                #Function call
    \(\s*                      #Opening parenthesis
    [\'"]+                     #Single or double quote
    (?P<kb>%s)                 #kb
    [\'"]+\s*                  #Single or double quote
    ,                          #comma
    ''' % name, re.VERBOSE | re.MULTILINE | re.IGNORECASE)
    #Retrieve all elements in files
    for filename in os.listdir(CFG_BIBFORMAT_ELEMENTS_PATH):
        if not filename.endswith(".py"):
            continue
        path = CFG_BIBFORMAT_ELEMENTS_PATH + os.sep + filename
        formatf = open(path, 'r')
        try:
            code = formatf.read()
        finally:
            formatf.close()
        # Search for use of kb inside code
        if kb_pattern.search(code) is not None:
            # Use a dedicated variable for the element name so the kb name
            # above is left untouched.
            element_name = ("".join(filename.split(".")[:-1])).lower()
            if element_name.startswith("bfe_"):
                element_name = element_name[4:]
            format_elements[element_name] = {'filename': filename,
                                             'name': element_name}
    # sorted() works on both Python 2 and 3 (dict.keys().sort() does not
    # on Python 3 views); return a real list either way.
    return [format_elements[k] for k in sorted(format_elements)]
###kb functions for export
def get_kbs_info(kbtype="", searchkbname=""):
    """Convenience wrapper around the dblayer kb listing.

    @param kbtype: restrict to kb's of this type
    @param searchkbname: restrict to kb's whose name contains this string
    """
    return bibknowledge_dblayer.get_kbs_info(kbtype, searchkbname)
def get_kba_values(kb_name, searchname="", searchtype="s"):
    """Return the values of an "authority file" kb (values only, no keys).

    @param kb_name: name of kb
    @param searchname: restrict to these values, according to searchtype
    @param searchtype: s=substring, e=exact
    """
    return bibknowledge_dblayer.get_kba_values(kb_name, searchname, searchtype)
def get_kbr_keys(kb_name, searchkey="", searchvalue="", searchtype='s'):
    """Return an array of keys from a key-value mapping kb.

    @param kb_name: the name of the knowledge base
    @param searchkey: restrict by this key
    @param searchvalue: restrict by this value
    @param searchtype: s = substring, e = exact
    """
    return bibknowledge_dblayer.get_kbr_keys(kb_name, searchkey,
                                             searchvalue, searchtype)
def get_kbr_values(kb_name, searchkey="", searchvalue="", searchtype='s'):
    """Return a tuple of values from a key-value mapping kb.

    @param kb_name: the name of the knowledge base
    @param searchkey: restrict by this key
    @param searchvalue: restrict by this value
    @param searchtype: s = substring, e = exact
    """
    return bibknowledge_dblayer.get_kbr_values(kb_name, searchkey,
                                               searchvalue, searchtype)
def get_kbr_items(kb_name, searchkey="", searchvalue="", searchtype='s'):
    """Return the key-value pairs of a mapping kb that match the search.

    @param kb_name: the name of the knowledge base
    @param searchkey: restrict by this key
    @param searchvalue: restrict by this value
    @param searchtype: s = substring, e = exact
    @return a list of dictionaries [{'key'=>x, 'value'=>y},..]
    """
    return bibknowledge_dblayer.get_kbr_items(kb_name, searchkey,
                                              searchvalue, searchtype)
def get_kbd_values(kbname, searchwith=""):
    """Return a list of values by searching a dynamic kb.

    @param kbname: name of the knowledge base
    @param searchwith: a term to search with
    @return list of field values; [] if the kb is missing, not dynamic,
            misconfigured, or the search finds nothing
    """
    import search_engine

    # First check that the kb in question exists and is dynamic.
    kbid = bibknowledge_dblayer.get_kb_id(kbname)
    if not kbid:
        return []
    kbtype = bibknowledge_dblayer.get_kb_type(kbid)
    if kbtype != 'd':
        return []
    # Get the configuration so that we see what the field is.
    confdict = bibknowledge_dblayer.get_kb_dyn_config(kbid)
    # 'in' instead of has_key: works on both Python 2 and 3.
    if not confdict or 'field' not in confdict:
        return []
    field = confdict['field']
    expression = confdict['expression']
    collection = confdict.get('collection', "")

    if searchwith and expression:
        # Substitute '%' with the search term, or AND it onto the expression.
        if expression.count('%') > 0:
            expression = expression.replace("%", searchwith)
        else:
            expression = expression + " and " + searchwith
        reclist = search_engine.perform_request_search(p=expression,
                                                       cc=collection)
    elif expression:
        # No searchwith: run the configured expression as-is.
        reclist = search_engine.perform_request_search(p=expression,
                                                       cc=collection)
    else:
        # No expression: search the field directly; a match-anything regexp
        # restricts the result to records that have this field at all.
        fake_exp = "/.*/"
        if searchwith:
            fake_exp = searchwith
        reclist = search_engine.perform_request_search(f=field, p=fake_exp,
                                                      cc=collection)
    if reclist:
        return [val for (val, dummy) in
                search_engine.get_most_popular_field_values(reclist, field)]
    return []  # in case nothing worked
def get_kbd_values_json(kbname, searchwith=""):
    """Return values from searching a dynamic kb as a json-formatted string.

    This IS probably the method you want.
    @param kbname: name of the knowledge base
    @param searchwith: a term to search with
    """
    return json.dumps(get_kbd_values(kbname, searchwith))
def get_kbd_values_for_bibedit(tag, collection="", searchwith="", expression=""):
    """
    Dynamically create a dynamic KB for a specific search, query it, then destroy it.

    This probably isn't the method you want.

    Example1: tag=100__a : return values of 100__a
    Example2: tag=100__a, searchwith=Jill: return values of 100__a that match with Jill
    Example3: tag=100__a, searchwith=Ellis, expression="700__a:*%*: return values of
              100__a for which Ellis matches some 700__a

    Note: the performance of this function is ok compared to a plain
    perform_request_search / get most popular fields -pair.  The overhead
    is about 5% with large record sets; the lookups are the expensive part.

    @param tag: the tag like 100__a
    @param collection: collection id
    @param searchwith: the string to search.  If empty, match all.
    @param expression: the search expression for perform_request_search; if
                       present, '%' is substituted with /searchwith/.  If absent,
                       /searchwith/ is searched for in /tag/.
    """
    temp_kb_id = add_kb(kb_name="tmp_dynamic_" + tag + '_' + expression,
                        kb_type='dynamic')
    # Fetch the actual name: it may have been suffixed with a number in case
    # there are concurrent calls.
    temp_kb_name = get_kb_name(temp_kb_id)
    bibknowledge_dblayer.save_kb_dyn_config(temp_kb_id, tag, expression, collection)
    # Now get the values, then throw the temporary kb away.
    values = get_kbd_values(temp_kb_name, searchwith)
    delete_kb(temp_kb_name)
    return values
def get_kbt_items(taxonomyfilename, templatefilename, searchwith=""):
    """
    Get items from a taxonomy file using a template file.  If searchwith is
    defined, return only items that match it.

    @param taxonomyfilename: full path+name of the RDF file
    @param templatefilename: full path+name of the XSLT file
    @param searchwith: a term to search with
    """
    import libxml2
    import libxslt

    # Apply the XSLT template to the RDF taxonomy and collect the text output.
    styledoc = libxml2.parseFile(templatefilename)
    style = libxslt.parseStylesheetDoc(styledoc)
    doc = libxml2.parseFile(taxonomyfilename)
    result = style.applyStylesheet(doc, None)
    strres = style.saveResultToString(result)
    # Free the libxml2/libxslt structures explicitly (no GC for them).
    style.freeStylesheet()
    doc.freeDoc()
    result.freeDoc()

    if len(strres) == 0:
        return []
    lines = strres.split("\n")
    if searchwith:
        return [line for line in lines if line.count(searchwith) > 0]
    return [line for line in lines if len(line) > 0]
def get_kbt_items_for_bibedit(kbtname, tag="", searchwith=""):
    """
    A simplified, customized version of the function get_kbt_items.
    Traverses an RDF document.  By default returns all leaves.  If
    tag defined returns the content of that tag.
    If searchwith defined, returns leaves that match it.
    Warning! In order to make this faster, the matching field values
    cannot be multi-line!
    @param kbtname: name of the taxonomy kb
    @param tag: name of tag whose content
    @param searchwith: a term to search with
    """
    import libxml2
    import libxslt
    # Resolve the taxonomy kb name to its id; the RDF file is stored by id.
    kb_id = get_kb_id(kbtname)
    if not kb_id:
        return []
    #get the rdf file..
    rdfname = CFG_WEBDIR+"/kbfiles/"+str(kb_id)+".rdf"
    if not os.path.exists(rdfname):
        return []
    # Parse the doc with a static XSLT: emits one <myout> element per node
    # whose local name equals `tag` (tag is spliced into the stylesheet text).
    styledoc = libxml2.parseDoc("""
    <xsl:stylesheet version="1.0"
    xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
    xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
    <xsl:output method="xml" standalone="yes" omit-xml-declaration="yes" indent="no"/>
    <xsl:template match="rdf:RDF">
    <foo><!--just having some tag here speeds up output by 10x-->
    <xsl:apply-templates />
    </foo>
    </xsl:template>
    <xsl:template match="*">
    <!--hi><xsl:value-of select="local-name()"/></hi-->
    <xsl:if test="local-name()='"""+tag+"""'">
    <myout><xsl:value-of select="normalize-space(.)"/></myout>
    </xsl:if>
    <!--traverse down in tree!-->
    <xsl:text>
    </xsl:text>
    <xsl:apply-templates />
    </xsl:template>
    </xsl:stylesheet>
    """)
    style = libxslt.parseStylesheetDoc(styledoc)
    doc = libxml2.parseFile(rdfname)
    result = style.applyStylesheet(doc, None)
    strres = style.saveResultToString(result)
    # Free libxml2/libxslt structures explicitly (not garbage-collected).
    style.freeStylesheet()
    doc.freeDoc()
    result.freeDoc()
    ritems = []
    if len(strres) == 0:
        return []
    else:
        lines = strres.split("\n")
        for line in lines:
            #take only those with myout..
            if line.count("<myout>") > 0:
                # Strip the surrounding <myout>...</myout> markup.
                # NOTE(review): slicing 9 from the front but "<myout>" is only
                # 7 chars — this assumes 2 chars of leading whitespace emitted
                # by the stylesheet; TODO confirm against actual XSLT output.
                line = line[9:]
                line = line[:-8]
                if searchwith:
                    if line.count(searchwith) > 0:
                        ritems.append(line)
                else:
                    ritems.append(line)
    return ritems
if __name__ == "__main__":
    # This module is a library; nothing to do when executed directly.
    pass
|
gpl-2.0
|
tdtrask/ansible
|
lib/ansible/plugins/lookup/password.py
|
18
|
12098
|
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2013, Javier Candeira <javier@candeira.com>
# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: password
version_added: "1.1"
author:
- Daniel Hokka Zakrisson <daniel@hozac.com>
- Javier Candeira <javier@candeira.com>
- Maykel Moya <mmoya@speedyrails.com>
short_description: retrieve or generate a random password, stored in a file
description:
- generates a random plaintext password and stores it in a file at a given filepath.
- If the file exists previously, it will retrieve its contents, behaving just like with_file.
- 'Usage of variables like C("{{ inventory_hostname }}") in the filepath can be used to set up random passwords per host,
which simplifies password management in C("host_vars") variables.'
- A special case is using /dev/null as a path. The password lookup will generate a new random password each time,
but will not write it to /dev/null. This can be used when you need a password without storing it on the controller.
options:
_terms:
description:
- path to the file that stores/will store the passwords
required: True
encrypt:
description:
- Whether the user requests that this password is returned encrypted or in plain text.
- Note that the password is always stored as plain text.
- Encrypt also forces saving the salt value for idempotence.
type: boolean
default: True
chars:
version_added: "1.4"
description:
- Define a comma separated list of names that compose a custom character set in the generated passwords.
- 'By default generated passwords contain a random mix of upper and lowercase ASCII letters, the numbers 0-9 and punctuation (". , : - _").'
- "They can be either parts of Python's string module attributes (ascii_letters,digits, etc) or are used literally ( :, -)."
- "To enter comma use two commas ',,' somewhere - preferably at the end. Quotes and double quotes are not supported."
type: string
length:
description: The length of the generated password.
default: 20
type: integer
notes:
- A great alternative to the password lookup plugin,
if you don't need to generate random passwords on a per-host basis,
would be to use Vault in playbooks.
Read the documentation there and consider using it first,
it will be more desirable for most applications.
- If the file already exists, no data will be written to it.
If the file has contents, those contents will be read in as the password.
Empty files cause the password to return as an empty string.
- 'As all lookups, this runs on the Ansible host as the user running the playbook, and "become" does not apply,
the target file must be readable by the playbook user, or, if it does not exist,
the playbook user must have sufficient privileges to create it.
(So, for example, attempts to write into areas such as /etc will fail unless the entire playbook is being run as root).'
"""
EXAMPLES = """
- name: create a mysql user with a random password
mysql_user:
name: "{{ client }}"
password: "{{ lookup('password', 'credentials/' + client + '/' + tier + '/' + role + '/mysqlpassword length=15') }}"
priv: "{{ client }}_{{ tier }}_{{ role }}.*:ALL"
- name: create a mysql user with a random password using only ascii letters
mysql_user: name={{ client }} password="{{ lookup('password', '/tmp/passwordfile chars=ascii_letters') }}" priv='{{ client }}_{{ tier }}_{{ role }}.*:ALL'
- name: create a mysql user with a random password using only digits
mysql_user:
name: "{{ client }}"
password: "{{ lookup('password', '/tmp/passwordfile chars=digits') }}"
priv: "{{ client }}_{{ tier }}_{{ role }}.*:ALL"
- name: create a mysql user with a random password using many different char sets
mysql_user:
name: "{{ client }}"
password: "{{ lookup('password', '/tmp/passwordfile chars=ascii_letters,digits,hexdigits,punctuation') }}"
priv: "{{ client }}_{{ tier }}_{{ role }}.*:ALL"
"""
RETURN = """
_raw:
description:
- a password
"""
import os
import string
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.parsing.splitter import parse_kv
from ansible.plugins.lookup import LookupBase
from ansible.utils.encrypt import do_encrypt, random_password
from ansible.utils.path import makedirs_safe
DEFAULT_LENGTH = 20
VALID_PARAMS = frozenset(('length', 'encrypt', 'chars'))
def _parse_parameters(term):
    """Hacky parsing of params

    See https://github.com/ansible/ansible-modules-core/issues/1968#issuecomment-136842156
    and the first_found lookup for how we want to fix this later
    """
    parts = term.split(' ', 1)
    if len(parts) == 1:
        # Only a single argument given, therefore it's a path
        relpath = term
        params = dict()
    else:
        relpath = parts[0]
        params = parse_kv(parts[1])
        if '_raw_params' in params:
            # Spaces in the path?
            relpath = u' '.join((relpath, params.pop('_raw_params')))

            # Check that we parsed the params correctly
            if not term.startswith(relpath):
                # Likely, the user had a non parameter following a parameter.
                # Reject this as a user typo
                raise AnsibleError('Unrecognized value after key=value parameters given to password lookup')
        # No _raw_params means we already found the complete path when
        # we split it initially

    # Check for invalid parameters.  Probably a user typo
    invalid_params = frozenset(params).difference(VALID_PARAMS)
    if invalid_params:
        raise AnsibleError('Unrecognized parameter(s) given to password lookup: %s' % ', '.join(invalid_params))

    # Set defaults
    params['length'] = int(params.get('length', DEFAULT_LENGTH))
    params['encrypt'] = params.get('encrypt', None)

    chars_spec = params.get('chars', None)
    if chars_spec:
        # ',,' escapes a literal comma inside the spec
        char_names = []
        if u',,' in chars_spec:
            char_names.append(u',')
        char_names.extend(c for c in chars_spec.replace(u',,', u',').split(u',') if c)
        params['chars'] = char_names
    else:
        # Default chars for password
        params['chars'] = [u'ascii_letters', u'digits', u".,:-_"]

    return relpath, params
def _read_password_file(b_path):
    """Read the contents of a password file and return it

    :arg b_path: A byte string containing the path to the password file
    :returns: a text string containing the contents of the password file or
        None if no password file was present.
    """
    if not os.path.exists(b_path):
        return None
    with open(b_path, 'rb') as f:
        # strip trailing whitespace/newline left by _write_password_file
        return to_text(f.read().rstrip(), errors='surrogate_or_strict')
def _gen_candidate_chars(characters):
    '''Generate a string containing all valid chars as defined by ``characters``

    :arg characters: A list of character specs. The character specs are
        shorthand names for sets of characters like 'digits', 'ascii_letters',
        or 'punctuation' or a string to be included verbatim.

    The values of each char spec can be:

    * a name of an attribute in the 'strings' module ('digits' for example).
      The value of the attribute will be added to the candidate chars.
    * a string of characters. If the string isn't an attribute in 'string'
      module, the string will be directly added to the candidate chars.

    For example::

        characters=['digits', '?|']``

    will match ``string.digits`` and add all ascii digits.  ``'?|'`` will add
    the question mark and pipe characters directly. Return will be the string::

        u'0123456789?|'
    '''
    candidates = u''
    for spec in characters:
        # getattr from string expands things like "ascii_letters" and "digits"
        # into a set of characters; unknown names are used verbatim.
        candidates += to_text(getattr(string, to_native(spec), spec),
                              errors='strict')
    # quotes are never allowed in generated passwords
    return candidates.replace(u'"', u'').replace(u"'", u'')
def _random_salt():
    """Return a text string suitable for use as a salt for the hash functions we use to encrypt passwords.
    """
    # Note passlib salt values must be pure ascii so we can't let the user
    # configure this
    return random_password(length=8,
                           chars=_gen_candidate_chars(['ascii_letters', 'digits', './']))
def _parse_content(content):
'''parse our password data format into password and salt
:arg content: The data read from the file
:returns: password and salt
'''
password = content
salt = None
salt_slug = u' salt='
try:
sep = content.rindex(salt_slug)
except ValueError:
# No salt
pass
else:
salt = password[sep + len(salt_slug):]
password = content[:sep]
return password, salt
def _format_content(password, salt, encrypt=True):
"""Format the password and salt for saving
:arg password: the plaintext password to save
:arg salt: the salt to use when encrypting a password
:arg encrypt: Whether the user requests that this password is encrypted.
Note that the password is saved in clear. Encrypt just tells us if we
must save the salt value for idempotence. Defaults to True.
:returns: a text string containing the formatted information
.. warning:: Passwords are saved in clear. This is because the playbooks
expect to get cleartext passwords from this lookup.
"""
if not encrypt and not salt:
return password
# At this point, the calling code should have assured us that there is a salt value.
if not salt:
raise AnsibleAssertionError('_format_content was called with encryption requested but no salt value')
return u'%s salt=%s' % (password, salt)
def _write_password_file(b_path, content):
    """Persist the formatted password content at b_path with 0600 permissions."""
    # Ensure the containing directory exists, owner-only.
    makedirs_safe(os.path.dirname(b_path), mode=0o700)

    with open(b_path, 'wb') as f:
        # Restrict the file itself before any data is written.
        os.chmod(b_path, 0o600)
        f.write(to_bytes(content, errors='surrogate_or_strict') + b'\n')
class LookupModule(LookupBase):

    def run(self, terms, variables, **kwargs):
        """For each term: parse 'path key=value ...' parameters, read the
        stored password or generate a new one, persist it when needed (never
        for /dev/null), and return it plaintext or encrypted as requested."""
        ret = []

        for term in terms:
            relpath, params = _parse_parameters(term)
            path = self._loader.path_dwim(relpath)
            b_path = to_bytes(path, errors='surrogate_or_strict')
            chars = _gen_candidate_chars(params['chars'])

            changed = False
            content = _read_password_file(b_path)
            # /dev/null reads back empty, so treat it like a missing file:
            # generate a fresh password on every call.
            if content is None or b_path == to_bytes('/dev/null'):
                plaintext_password = random_password(params['length'], chars)
                salt = None
                changed = True
            else:
                plaintext_password, salt = _parse_content(content)

            # Encryption needs a salt; generate and store one so repeated
            # runs hash to the same value (idempotence).
            if params['encrypt'] and not salt:
                changed = True
                salt = _random_salt()

            # Persist only to real paths, and only when something changed.
            if changed and b_path != to_bytes('/dev/null'):
                content = _format_content(plaintext_password, salt, encrypt=params['encrypt'])
                _write_password_file(b_path, content)

            if params['encrypt']:
                password = do_encrypt(plaintext_password, params['encrypt'], salt=salt)
                ret.append(password)
            else:
                ret.append(plaintext_password)

        return ret
|
gpl-3.0
|
bigpete/pyNES
|
pynes/examples/movingsprite_translated.py
|
28
|
1399
|
# Example game written against the pynes builder API; a pythonic version of
# the same program is sketched in the string literal at the end of the file.
import pynes
from pynes.game import Game
from pynes.bitbag import *
from pynes.nes_types import *

game = Game()

# 32-entry palette (two 16-entry halves).
palette = game.assign('palette',
    NesArray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
        0x0F, 48, 49, 50, 51, 53, 54, 55, 56, 57, 58, 59, 60, 61,
        62, 63])
    )

# Sprite defined with args [128, 128, 0, 3] — presumably (x, y, tile,
# attribute); confirm against the pynes define_sprite docs.
sprite = game.assign('sprite', game.call('define_sprite', [128, 128, 0, 3]))

# Character (tile) data loaded from a CHR file.
game.assign('chr_asset', NesChrFile('player.chr'))

# "reset" handler: wait for vblank, clear memory, then load palette + sprite.
game.asmFunction("reset")
game.call('wait_vblank')
game.call('clearmem')
game.call('wait_vblank')
game.call('load_palette', [palette])
game.call('load_sprite', [sprite, 0])

# "joypad1_up" handler: decrement the sprite's y coordinate by one.
game.asmFunction("joypad1_up")
game.minusAssign(game.call('get_sprite', [0]).y, 1)

#game.asmFunction("joypad1_up")
#game.call(load_sprite(sprite, 0))
#game.asmFunction("reset")
#game.call(wait_vblank())
#game.call(clearmem())
#game.call(wait_vblank())
#game.call(load_palette(palette))
game.press_start()
'''
def waitvblank()
asm.bit(0x2002)
asm.bpl(waitvblank)
sprite = define_sprite(128, 128, 0, 3)
def reset():
global palette, sprite
wait_vblank()
clearmem()
wait_vblank()
load_palette(palette)
load_sprite(sprite, 0)
def joypad1_up():
get_sprite(0).y -= 1
def joypad1_down():
get_sprite(0).y += 1
def joypad1_left():
get_sprite(0).x -=1
def joypad1_right():
get_sprite(0).x +=1
'''
|
bsd-3-clause
|
vrutkovs/atomic-reactor
|
tests/test_buildimage.py
|
3
|
1998
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
from glob import glob
import os
from atomic_reactor.buildimage import BuildImageBuilder
from atomic_reactor.core import DockerTasker
from tests.constants import MOCK
from tests.util import requires_internet
if MOCK:
from tests.docker_mock import mock_docker
PARENT_DIR = os.path.dirname(os.path.dirname(__file__))
TEST_BUILD_IMAGE = "test-build-image"
def test_tarball_generation_local_repo(tmpdir):
    """The tarball built from the local checkout must land in tmpdir, exactly once."""
    if MOCK:
        mock_docker()
    builder = BuildImageBuilder(reactor_local_path=PARENT_DIR)
    tarball = builder.get_reactor_tarball_path(str(tmpdir))
    assert os.path.exists(tarball)
    matches = glob(os.path.join(str(tmpdir), 'atomic-reactor-*.tar.gz'))
    assert len(matches) == 1
@requires_internet
def test_tarball_generation_upstream_repo(tmpdir):
    """Same as the local-repo case, but cloning the official reactor git repo."""
    if MOCK:
        mock_docker()
    builder = BuildImageBuilder(use_official_reactor_git=True)
    tarball = builder.get_reactor_tarball_path(str(tmpdir))
    assert os.path.exists(tarball)
    matches = glob(os.path.join(str(tmpdir), 'atomic-reactor-*.tar.gz'))
    assert len(matches) == 1
@requires_internet
def test_image_creation_upstream_repo():
    """Build the privileged-builder image from the upstream repo, verify, clean up."""
    if MOCK:
        mock_docker()
    builder = BuildImageBuilder(use_official_reactor_git=True)
    df_dir = os.path.join(PARENT_DIR, 'images', 'privileged-builder')
    builder.create_image(df_dir, TEST_BUILD_IMAGE)
    tasker = DockerTasker()
    assert tasker.image_exists(TEST_BUILD_IMAGE)
    tasker.remove_image(TEST_BUILD_IMAGE)
def test_image_creation_local_repo():
    """Build the privileged-builder image from the local checkout, verify, clean up."""
    if MOCK:
        mock_docker()
    builder = BuildImageBuilder(reactor_local_path=PARENT_DIR)
    df_dir = os.path.join(PARENT_DIR, 'images', 'privileged-builder')
    builder.create_image(df_dir, TEST_BUILD_IMAGE)
    tasker = DockerTasker()
    assert tasker.image_exists(TEST_BUILD_IMAGE)
    tasker.remove_image(TEST_BUILD_IMAGE)
|
bsd-3-clause
|
lumig242/Hue-Integration-with-CDAP
|
desktop/core/ext-py/boto-2.38.0/boto/pyami/startup.py
|
153
|
2475
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import sys
import boto
from boto.utils import find_class
from boto import config
from boto.pyami.scriptbase import ScriptBase
class Startup(ScriptBase):
    """Pyami startup driver: runs the classes listed in the [Pyami] 'scripts' config."""

    def run_scripts(self):
        """Instantiate and run each class named in the 'Pyami/scripts' value.

        Each comma-separated entry must be a dotted path 'module.ClassName';
        entries without a dot are logged as warnings and skipped.  Any
        exception raised by a script is logged and re-raised, halting startup.
        """
        scripts = config.get('Pyami', 'scripts')
        if not scripts:
            return
        for script in scripts.split(','):
            script = script.strip(" ")
            try:
                pos = script.rfind('.')
                if pos > 0:
                    mod_name = script[0:pos]
                    cls_name = script[pos + 1:]
                    cls = find_class(mod_name, cls_name)
                    boto.log.info('Running Script: %s' % script)
                    s = cls()
                    s.main()
                else:
                    boto.log.warning('Trouble parsing script: %s' % script)
            except Exception:
                boto.log.exception('Problem Running Script: %s. Startup process halting.' % script)
                # Bare raise preserves the original traceback ('raise e'
                # truncated it on Python 2).
                raise

    def main(self):
        self.run_scripts()
        self.notify('Startup Completed for %s' % config.get('Instance', 'instance-id'))
if __name__ == "__main__":
    # Fall back to a file logger when no logging configuration is present.
    if not config.has_section('loggers'):
        boto.set_file_logger('startup', '/var/log/boto.log')
    # Make the Pyami working directory importable so configured scripts resolve.
    sys.path.append(config.get('Pyami', 'working_dir'))
    su = Startup()
    su.main()
|
apache-2.0
|
hortonworks/hortonworks-sandbox
|
desktop/core/ext-py/Mako-0.7.2/test/test_lexer.py
|
2
|
28620
|
import unittest
from mako.lexer import Lexer
from mako import exceptions, util
from util import flatten_result, result_lines
from mako.template import Template
import re
from test import TemplateTest, template_base, skip_if, eq_, assert_raises_message
# create fake parsetree classes which are constructed
# exactly as the repr() of a real parsetree object.
# this allows us to use a Python construct as the source
# of a comparable repr(), which is also hit by the 2to3 tool.
def repr_arg(x):
    """repr() helper: dicts use mako's key-sorted repr so comparisons are stable."""
    if isinstance(x, dict):
        return util.sorted_dict_repr(x)
    return repr(x)
from mako import parsetree
# For every parsetree Node subclass, generate a same-named fake class whose
# repr() reproduces the real node's repr() exactly.  They are built with exec
# so that, like the rest of this file, they get rewritten by the 2to3 tool
# (see the comment at the top of the file).
for cls in parsetree.__dict__.values():
    if isinstance(cls, type) and \
            issubclass(cls, parsetree.Node):
        clsname = cls.__name__
        exec ("""
class %s(object):
    def __init__(self, *args):
        self.args = args
    def __repr__(self):
        return "%%s(%%s)" %% (
            self.__class__.__name__,
            ", ".join(repr_arg(x) for x in self.args)
        )
""" % clsname) in locals()
# NOTE: most assertion expressions were generated, then formatted
# by PyTidy, hence the dense formatting.
class LexerTest(TemplateTest):
def _compare(self, actual_node, expected_node):
    """Assert two parse trees render the same repr() (see fake classes above)."""
    eq_(repr(actual_node), repr(expected_node))
def test_text_and_tag(self):
    # Plain text mixed with a <%def> block parses into Text and DefTag nodes
    # carrying (line, column) positions.
    template = """
<b>Hello world</b>
<%def name="foo()">
this is a def.
</%def>
and some more text.
"""
    node = Lexer(template).parse()
    self._compare(node, TemplateNode({},
                  [Text(u'''\n<b>Hello world</b>\n ''', (1,
                  1)), DefTag(u'def', {u'name': u'foo()'}, (3, 9),
                  [Text(u'''\n this is a def.\n ''',
                  (3, 28))]),
                  Text(u'''\n \n and some more text.\n''',
                  (5, 16))]))
def test_unclosed_tag(self):
    # An unclosed <%def> must raise SyntaxException naming the tag and its
    # position.
    template = """
<%def name="foo()">
other text
"""
    try:
        nodes = Lexer(template).parse()
        assert False
    # 'except X as e' (not the Python-2-only 'except X, e') works on
    # Python 2.6+ and Python 3 alike.
    except exceptions.SyntaxException as e:
        assert str(e) == "Unclosed tag: <%def> at line: 5 char: 9"
def test_onlyclosed_tag(self):
    # A closing tag with no matching opener is a syntax error.
    template = \
        """
<%def name="foo()">
foo
</%def>
</%namespace>
hi.
"""
    self.assertRaises(exceptions.SyntaxException,
                      Lexer(template).parse)

def test_noexpr_allowed(self):
    # Tag attribute values may not contain ${...} expressions.
    template = \
        """
<%namespace name="${foo}"/>
"""
    self.assertRaises(exceptions.CompileException,
                      Lexer(template).parse)

def test_unmatched_tag(self):
    # Interleaved (improperly nested) open/close tags are a syntax error.
    template = \
        """
<%namespace name="bar">
<%def name="foo()">
foo
</%namespace>
</%def>
hi.
"""
    self.assertRaises(exceptions.SyntaxException,
                      Lexer(template).parse)

def test_nonexistent_tag(self):
    # Unknown tag names raise a compile error.
    template = """
<%lala x="5"/>
"""
    self.assertRaises(exceptions.CompileException,
                      Lexer(template).parse)

def test_wrongcase_tag(self):
    # Tag names are case-sensitive: <%DEF> is not <%def>.
    template = \
        """
<%DEF name="foo()">
</%def>
"""
    self.assertRaises(exceptions.CompileException,
                      Lexer(template).parse)
def test_percent_escape(self):
    # '%%' at line start escapes to a literal '%'; plain '%' lines are
    # control lines.
    template = \
        """
%% some whatever.
%% more some whatever
% if foo:
% endif
"""
    node = Lexer(template).parse()
    self._compare(node, TemplateNode({}, [Text(u'''\n \n''',
                  (1, 1)), Text(u'''% some whatever.\n\n''', (3, 2)),
                  Text(u' %% more some whatever\n', (5, 2)),
                  ControlLine(u'if', u'if foo:', False, (6, 1)),
                  ControlLine(u'if', u'endif', True, (7, 1)),
                  Text(u' ', (8, 1))]))

def test_text_tag(self):
    # Everything inside <%text> is passed through verbatim — control lines,
    # illegal tags and nested <%def>s included.
    template = \
        """
## comment
% if foo:
hi
% endif
<%text>
# more code
% more code
<%illegal compionent>/></>
<%def name="laal()">def</%def>
</%text>
<%def name="foo()">this is foo</%def>
% if bar:
code
% endif
"""
    node = Lexer(template).parse()
    self._compare(node,
                  TemplateNode({}, [Text(u'\n', (1, 1)),
                  Comment(u'comment', (2, 1)),
                  ControlLine(u'if', u'if foo:', False, (3, 1)),
                  Text(u' hi\n', (4, 1)),
                  ControlLine(u'if', u'endif', True, (5, 1)),
                  Text(u' ', (6, 1)), TextTag(u'text', {},
                  (6, 9),
                  [Text(u'''\n # more code\n '''
                  '''\n % more code\n '''
                  '''<%illegal compionent>/></>\n '''
                  '''<%def name="laal()">def</%def>\n '''
                  ''' \n \n ''',
                  (6, 16))]), Text(u'''
''', (14, 17)),
                  DefTag(u'def', {u'name': u'foo()'}, (16, 9),
                  [Text(u'this is foo', (16, 28))]),
                  Text(u'''\n \n''', (16, 46)),
                  ControlLine(u'if', u'if bar:', False, (18, 1)),
                  Text(u' code\n', (19, 1)),
                  ControlLine(u'if', u'endif', True, (20, 1)),
                  Text(u' ', (21, 1))]))
def test_def_syntax(self):
    # <%def> without a name="..." attribute is a compile error.
    template = \
        """
<%def lala>
hi
</%def>
"""
    self.assertRaises(exceptions.CompileException,
                      Lexer(template).parse)

def test_def_syntax_2(self):
    # The def name must include a call signature: name="lala" is invalid,
    # name="lala()" is required.
    template = \
        """
<%def name="lala">
hi
</%def>
"""
    self.assertRaises(exceptions.CompileException,
                      Lexer(template).parse)
def test_whitespace_equals(self):
template = \
"""
<%def name = "adef()" >
adef
</%def>
"""
node = Lexer(template).parse()
self._compare(node, TemplateNode({}, [Text(u'\n ',
(1, 1)), DefTag(u'def', {u'name': u'adef()'}, (2,
13),
[Text(u'''\n adef\n ''',
(2, 36))]), Text(u'\n ', (4, 20))]))
def test_ns_tag_closed(self):
template = \
"""
<%self:go x="1" y="2" z="${'hi' + ' ' + 'there'}"/>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Text(u'''
''', (1, 1)),
CallNamespaceTag(u'self:go', {u'x': u'1', u'y'
: u'2', u'z': u"${'hi' + ' ' + 'there'}"}, (3,
13), []), Text(u'\n ', (3, 64))]))
def test_ns_tag_empty(self):
template = \
"""
<%form:option value=""></%form:option>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [Text(u'\n ',
(1, 1)), CallNamespaceTag(u'form:option',
{u'value': u''}, (2, 13), []), Text(u'\n '
, (2, 51))]))
    def test_ns_tag_open(self):
        """An open namespace call tag collects its body text as child nodes."""
        template = \
"""
            <%self:go x="1" y="${process()}">
                this is the body
            </%self:go>
        """
        nodes = Lexer(template).parse()
        self._compare(nodes, TemplateNode({},
                      [Text(u'''
            ''', (1, 1)),
                      CallNamespaceTag(u'self:go', {u'x': u'1', u'y'
                      : u'${process()}'}, (3, 13),
                      [Text(u'''
                this is the body
            ''',
                      (3, 46))]), Text(u'\n        ', (5, 24))]))
    def test_expr_in_attribute(self):
        """test some slightly trickier expressions.

        you can still trip up the expression parsing, though, unless we
        integrated really deeply somehow with AST.  The '>' and '<' inside
        the expr attributes must not be mistaken for tag delimiters."""
        template = \
"""
           <%call expr="foo>bar and 'lala' or 'hoho'"/>
           <%call expr='foo<bar and hoho>lala and "x" + "y"'/>
        """
        nodes = Lexer(template).parse()
        self._compare(nodes, TemplateNode({}, [Text(u'\n           ',
                      (1, 1)), CallTag(u'call', {u'expr'
                      : u"foo>bar and 'lala' or 'hoho'"}, (2, 13), []),
                      Text(u'\n           ', (2, 57)), CallTag(u'call'
                      , {u'expr': u'foo<bar and hoho>lala and "x" + "y"'
                      }, (3, 13), []), Text(u'\n        ', (3, 64))]))
    def test_pagetag(self):
        """<%page> attributes (cached, args) parse into a single PageTag."""
        template = \
"""
            <%page cached="True", args="a, b"/>
            some template
        """
        nodes = Lexer(template).parse()
        self._compare(nodes, TemplateNode({}, [Text(u'\n            ',
                      (1, 1)), PageTag(u'page', {u'args': u'a, b',
                      u'cached': u'True'}, (2, 13), []),
                      Text(u'''
            some template
        ''',
                      (2, 48))]))
    def test_nesting(self):
        """Tags nest: a <%call> inside a <%def> inside a <%namespace> builds
        the corresponding nested node tree."""
        template = \
"""
        <%namespace name="ns">
        <%def name="lala(hi, there)">
            <%call expr="something()"/>
        </%def>
        </%namespace>
"""
        nodes = Lexer(template).parse()
        self._compare(nodes, TemplateNode({},
                      [Text(u'''
''', (1, 1)),
                      NamespaceTag(u'namespace', {u'name': u'ns'}, (3,
                      9), [Text(u'\n        ', (3, 31)),
                      DefTag(u'def', {u'name': u'lala(hi, there)'}, (4,
                      13), [Text(u'\n            ', (4, 42)),
                      CallTag(u'call', {u'expr': u'something()'}, (5,
                      17), []), Text(u'\n        ', (5, 44))]),
                      Text(u'\n        ', (6, 20))]),
                      Text(u'''
''', (7, 22))]))
    # <% %> blocks contain raw Python, so the fixture needs version-specific
    # print syntax; define the py3 or py2 variant of test_code accordingly.
    if util.py3k:
        def test_code(self):
            """<% %> parses to a Code node (module_level=False) and <%! %>
            to a Code node with module_level=True."""
            template = \
"""text
    <%
    print("hi")
    for x in range(1,5):
        print(x)
    %>
more text
    <%!
    import foo
    %>
"""
            nodes = Lexer(template).parse()
            self._compare(nodes,
                TemplateNode({}, [
                    Text(u'text\n    ', (1, 1)),
                    Code(u'\nprint("hi")\nfor x in range(1,5):\n    '
                            'print(x)\n    \n', False, (2, 5)),
                    Text(u'\nmore text\n    ', (6, 7)),
                    Code(u'\nimport foo\n    \n', True, (8, 5)),
                    Text(u'\n', (10, 7))])
            )
    else:
        def test_code(self):
            """Python 2 variant of the <% %> / <%! %> Code-node test."""
            template = \
"""text
    <%
    print "hi"
    for x in range(1,5):
        print x
    %>
more text
    <%!
    import foo
    %>
"""
            nodes = Lexer(template).parse()
            self._compare(nodes,
                TemplateNode({}, [
                    Text(u'text\n    ', (1, 1)),
                    Code(u'\nprint "hi"\nfor x in range(1,5):\n    '
                            'print x\n    \n', False, (2, 5)),
                    Text(u'\nmore text\n    ', (6, 7)),
                    Code(u'\nimport foo\n    \n', True, (8, 5)),
                    Text(u'\n', (10, 7))])
            )
    def test_code_and_tags(self):
        """Code blocks, tags and expressions mix in a single template and
        keep correct source positions."""
        template = \
"""
<%namespace name="foo">
    <%def name="x()">
        this is x
    </%def>
    <%def name="y()">
        this is y
    </%def>
</%namespace>
<%
    result = []
    data = get_data()
    for x in data:
        result.append(x+7)
%>
    result: <%call expr="foo.x(result)"/>
"""
        nodes = Lexer(template).parse()
        self._compare(nodes, TemplateNode({}, [Text(u'\n', (1, 1)),
                      NamespaceTag(u'namespace', {u'name': u'foo'}, (2,
                      1), [Text(u'\n    ', (2, 24)), DefTag(u'def',
                      {u'name': u'x()'}, (3, 5),
                      [Text(u'''\n        this is x\n    ''', (3, 22))]),
                      Text(u'\n    ', (5, 12)), DefTag(u'def', {u'name'
                      : u'y()'}, (6, 5),
                      [Text(u'''\n        this is y\n    ''', (6, 22))]),
                      Text(u'\n', (8, 12))]), Text(u'''\n\n''', (9, 14)),
                      Code(u'''\nresult = []\ndata = get_data()\n'''
                      '''for x in data:\n    result.append(x+7)\n\n''',
                      False, (11, 1)), Text(u'''\n\n    result: ''', (16,
                      3)), CallTag(u'call', {u'expr': u'foo.x(result)'
                      }, (18, 13), []), Text(u'\n', (18, 42))]))
    def test_expression(self):
        """${...} parses to Expression nodes; '|'-separated escape filters
        land in the node's filter list."""
        template = \
"""
        this is some ${text} and this is ${textwith | escapes, moreescapes}
        <%def name="hi()">
            give me ${foo()} and ${bar()}
        </%def>
        ${hi()}
"""
        nodes = Lexer(template).parse()
        self._compare(nodes, TemplateNode({},
                      [Text(u'\n        this is some ', (1, 1)),
                      Expression(u'text', [], (2, 22)),
                      Text(u' and this is ', (2, 29)),
                      Expression(u'textwith ', ['escapes', 'moreescapes'
                      ], (2, 42)), Text(u'\n        ', (2, 76)),
                      DefTag(u'def', {u'name': u'hi()'}, (3, 9),
                      [Text(u'\n            give me ', (3, 27)),
                      Expression(u'foo()', [], (4, 21)), Text(u' and ',
                      (4, 29)), Expression(u'bar()', [], (4, 34)),
                      Text(u'\n        ', (4, 42))]), Text(u'\n        '
                      , (5, 16)), Expression(u'hi()', [], (6, 9)),
                      Text(u'\n', (6, 16))]))
    def test_tricky_expression(self):
        """'|' characters inside string literals of a ${...} expression must
        not be parsed as the filter separator."""
        template = """
        
            ${x and "|" or "hi"}
        """
        nodes = Lexer(template).parse()
        self._compare(
            nodes,
            TemplateNode({}, [
                Text(u'\n        \n            ', (1, 1)),
                Expression(u'x and "|" or "hi"', [], (3, 13)),
                Text(u'\n        ', (3, 33))
            ])
        )
        template = """
        
            ${hello + '''heres '{|}' text | | }''' | escape1}
        """
        nodes = Lexer(template).parse()
        self._compare(
            nodes,
            TemplateNode({}, [
                Text(u'\n        \n            ', (1, 1)),
                Expression(u"hello + '''heres '{|}' text | | }''' ",
                           ['escape1'], (3, 13)),
                Text(u'\n        ', (3, 62))
            ])
        )
    def test_tricky_code(self):
        """A '%>' inside a Python string literal must not terminate the
        <% %> block early."""
        if util.py3k:
            template = """<% print('hi %>') %>"""
            nodes = Lexer(template).parse()
            self._compare(nodes, TemplateNode({},
                          [Code(u"print('hi %>') \n", False, (1, 1))]))
        else:
            template = """<% print 'hi %>' %>"""
            nodes = Lexer(template).parse()
            self._compare(nodes, TemplateNode({},
                          [Code(u"print 'hi %>' \n", False, (1, 1))]))
    def test_tricky_code_2(self):
        """A <% %> block containing only a Python comment still parses to a
        Code node with the comment preserved."""
        template = \
"""<%
# someone's comment
%>
        """
        nodes = Lexer(template).parse()
        self._compare(nodes, TemplateNode({},
                      [Code(u"""
# someone's comment
""",
                      False, (1, 1)), Text(u'\n        ', (3, 11))]))
    # Version-specific fixture again: the block mixes comments, triple-quoted
    # strings and apostrophes that must not confuse the <% %> scanner.
    if util.py3k:
        def test_tricky_code_3(self):
            """Comments and ''' strings inside <% %> survive intact and the
            text after the closing %> is ordinary Text."""
            template = \
"""<%
print('hi')
# this is a comment
# another comment
x = 7 # someone's '''comment
print('''
  there
  ''')
# someone else's comment
%> '''and now some text '''"""
            nodes = Lexer(template).parse()
            self._compare(nodes, TemplateNode({},
                          [Code(u"""
print('hi')
# this is a comment
# another comment
x = 7 # someone's '''comment
print('''
  there
  ''')
# someone else's comment
""",
                          False, (1, 1)),
                          Text(u" '''and now some text '''", (10,
                          11))]))
    else:
        def test_tricky_code_3(self):
            """Python 2 variant of the comment/triple-quote Code test."""
            template = \
"""<%
print 'hi'
# this is a comment
# another comment
x = 7 # someone's '''comment
print '''
  there
  '''
# someone else's comment
%> '''and now some text '''"""
            nodes = Lexer(template).parse()
            self._compare(nodes, TemplateNode({},
                          [Code(u"""\nprint 'hi'\n# this is a comment\n"""
                            """# another comment\nx = 7 """
                            """# someone's '''comment\nprint '''\n  """
                            """there\n  '''\n# someone else's """
                            """comment\n \n""",
                            False, (1, 1)),
                          Text(u" '''and now some text '''", (10,11))]))
    def test_tricky_code_4(self):
        """Backslash escapes inside <% %> code are passed through verbatim."""
        template = \
"""<% foo = "\\"\\\\" %>"""
        nodes = Lexer(template).parse()
        self._compare(nodes, TemplateNode({},
                      [Code(u"""foo = "\\"\\\\" \n""",
                      False, (1, 1))]))
    def test_tricky_code_5(self):
        """A dict literal's braces inside ${ } must not end the expression."""
        template = \
"""before ${ {'key': 'value'} } after"""
        nodes = Lexer(template).parse()
        self._compare(nodes, TemplateNode({},
                      [Text(u'before ', (1, 1)),
                      Expression(u" {'key': 'value'} ", [], (1, 8)),
                      Text(u' after', (1, 29))]))
    def test_control_lines(self):
        """'% if/endif' and '% for/endfor' lines parse into ControlLine
        nodes; the end-keyword nodes carry isend=True."""
        template = \
"""
text text la la
% if foo():
 mroe text la la blah blah
% endif

        and osme more stuff
        % for l in range(1,5):
    tex tesl asdl l is ${l} kfmas d
        % endfor
        tetx text
        
"""
        nodes = Lexer(template).parse()
        self._compare(nodes, TemplateNode({},
                      [Text(u'''\ntext text la la\n''', (1, 1)),
                      ControlLine(u'if', u'if foo():', False, (3, 1)),
                      Text(u' mroe text la la blah blah\n', (4, 1)),
                      ControlLine(u'if', u'endif', True, (5, 1)),
                      Text(u'''\n        and osme more stuff\n''', (6,
                      1)), ControlLine(u'for', u'for l in range(1,5):',
                      False, (8, 1)), Text(u'    tex tesl asdl l is ',
                      (9, 1)), Expression(u'l', [], (9, 24)),
                      Text(u' kfmas d\n', (9, 28)), ControlLine(u'for',
                      u'endfor', True, (10, 1)),
                      Text(u'''        tetx text\n        \n''', (11, 1))]))
    def test_control_lines_2(self):
        """A control line on the very first line of the template works."""
        template = \
"""% for file in requestattr['toc'].filenames:
    x
% endfor
"""
        nodes = Lexer(template).parse()
        self._compare(nodes, TemplateNode({}, [ControlLine(u'for',
                      u"for file in requestattr['toc'].filenames:",
                      False, (1, 1)), Text(u'    x\n', (2, 1)),
                      ControlLine(u'for', u'endfor', True, (3, 1))]))
    def test_long_control_lines(self):
        """A control line continued with a trailing backslash is captured as
        one ControlLine including the continuation."""
        template = \
"""
    % for file in \\
        requestattr['toc'].filenames:
        x
    % endfor
        """
        nodes = Lexer(template).parse()
        self._compare(
            nodes,
            TemplateNode({}, [
                Text(u'\n', (1, 1)),
                ControlLine(u'for', u"for file in \\\n        "
                            "requestattr['toc'].filenames:",
                            False, (2, 1)),
                Text(u'        x\n', (4, 1)),
                ControlLine(u'for', u'endfor', True, (5, 1)),
                Text(u'        ', (6, 1))
            ])
        )
    def test_unmatched_control(self):
        """'% endif' closing a '% for' raises SyntaxException naming both
        keywords and the location."""
        template = """

        % if foo:
            % for x in range(1,5):
        % endif
"""
        assert_raises_message(
            exceptions.SyntaxException,
            "Keyword 'endif' doesn't match keyword 'for' at line: 5 char: 1",
            Lexer(template).parse
        )
    def test_unmatched_control_2(self):
        """A control keyword left open at end of template raises
        SyntaxException pointing at the unterminated keyword."""
        template = """

        % if foo:
            % for x in range(1,5):
            % endfor
"""
        assert_raises_message(
            exceptions.SyntaxException,
            "Unterminated control keyword: 'if' at line: 3 char: 1",
            Lexer(template).parse
        )
    def test_unmatched_control_3(self):
        """An unknown end-keyword ('endlala') is reported against the
        innermost open keyword."""
        template = """

        % if foo:
            % for x in range(1,5):
            % endlala
        % endif
"""
        assert_raises_message(
            exceptions.SyntaxException,
            "Keyword 'endlala' doesn't match keyword 'for' at line: 5 char: 1",
            Lexer(template).parse
        )
    def test_ternary_control(self):
        """elif/else lines parse as ControlLine nodes with isend=False; only
        the final endif is the end node."""
        template = \
"""
        % if x:
            hi
        % elif y+7==10:
            there
        % elif lala:
            lala
        % else:
            hi
        % endif
"""
        nodes = Lexer(template).parse()
        self._compare(nodes, TemplateNode({}, [Text(u'\n', (1, 1)),
                      ControlLine(u'if', u'if x:', False, (2, 1)),
                      Text(u'            hi\n', (3, 1)),
                      ControlLine(u'elif', u'elif y+7==10:', False, (4,
                      1)), Text(u'            there\n', (5, 1)),
                      ControlLine(u'elif', u'elif lala:', False, (6,
                      1)), Text(u'            lala\n', (7, 1)),
                      ControlLine(u'else', u'else:', False, (8, 1)),
                      Text(u'            hi\n', (9, 1)),
                      ControlLine(u'if', u'endif', True, (10, 1))]))
    def test_integration(self):
        """End-to-end parse of a realistic page: namespace import, comment,
        inherit, defs, and nested for loops with a filtered expression."""
        template = \
"""<%namespace name="foo" file="somefile.html"/>
## inherit from foobar.html
<%inherit file="foobar.html"/>

<%def name="header()">
    <div>header</div>
</%def>
<%def name="footer()">
    <div> footer</div>
</%def>

<table>
    % for j in data():
    <tr>
        % for x in j:
            <td>Hello ${x| h}</td>
        % endfor
    </tr>
    % endfor
</table>
"""
        nodes = Lexer(template).parse()
        self._compare(nodes, TemplateNode({}, [NamespaceTag(u'namespace'
                      , {u'file': u'somefile.html', u'name': u'foo'},
                      (1, 1), []), Text(u'\n', (1, 46)),
                      Comment(u'inherit from foobar.html', (2, 1)),
                      InheritTag(u'inherit', {u'file': u'foobar.html'},
                      (3, 1), []), Text(u'''\n\n''', (3, 31)),
                      DefTag(u'def', {u'name': u'header()'}, (5, 1),
                      [Text(u'''\n    <div>header</div>\n''', (5,
                      23))]), Text(u'\n', (7, 8)), DefTag(u'def',
                      {u'name': u'footer()'}, (8, 1),
                      [Text(u'''\n    <div> footer</div>\n''', (8,
                      23))]), Text(u'''\n\n<table>\n''', (10, 8)),
                      ControlLine(u'for', u'for j in data():', False,
                      (13, 1)), Text(u'    <tr>\n', (14, 1)),
                      ControlLine(u'for', u'for x in j:', False, (15,
                      1)), Text(u'            <td>Hello ', (16, 1)),
                      Expression(u'x', ['h'], (16, 23)), Text(u'</td>\n'
                      , (16, 30)), ControlLine(u'for', u'endfor', True,
                      (17, 1)), Text(u'    </tr>\n', (18, 1)),
                      ControlLine(u'for', u'endfor', True, (19, 1)),
                      Text(u'</table>\n', (20, 1))]))
    def test_comment_after_statement(self):
        """A trailing '#...' comment on a control line is kept as part of the
        ControlLine's text."""
        template = \
"""
        % if x: #comment
            hi
        % else: #next
            hi
        % endif #end
"""
        nodes = Lexer(template).parse()
        self._compare(nodes, TemplateNode({}, [Text(u'\n', (1, 1)),
                      ControlLine(u'if', u'if x: #comment', False, (2,
                      1)), Text(u'            hi\n', (3, 1)),
                      ControlLine(u'else', u'else: #next', False, (4,
                      1)), Text(u'            hi\n', (5, 1)),
                      ControlLine(u'if', u'endif #end', True, (6, 1))]))
def test_crlf(self):
template = open(self._file_path("crlf.html"), 'rb').read()
nodes = Lexer(template).parse()
self._compare(
nodes,
TemplateNode({}, [
Text(u'<html>\r\n\r\n', (1, 1)),
PageTag(u'page', {
u'args': u"a=['foo',\n 'bar']"
}, (3, 1), []),
Text(u'\r\n\r\nlike the name says.\r\n\r\n', (4, 26)),
ControlLine(u'for', u'for x in [1,2,3]:', False, (8, 1)),
Text(u' ', (9, 1)),
Expression(u'x', [], (9, 9)),
ControlLine(u'for', u'endfor', True, (10, 1)),
Text(u'\r\n', (11, 1)),
Expression(u"trumpeter == 'Miles' and "
"trumpeter or \\\n 'Dizzy'",
[], (12, 1)),
Text(u'\r\n\r\n', (13, 15)),
DefTag(u'def', {u'name': u'hi()'}, (15, 1), [
Text(u'\r\n hi!\r\n', (15, 19))]),
Text(u'\r\n\r\n</html>\r\n', (17, 8))
])
)
assert flatten_result(Template(template).render()) \
== """<html> like the name says. 1 2 3 Dizzy </html>"""
    def test_comments(self):
        """Only line-leading '##' starts a comment and <%doc> a doc block;
        '#' elsewhere (CSS ids, mid-line) is plain text."""
        template = \
"""
<style>
    #someselector
    # other non comment stuff
</style>
## a comment

# also not a comment

## this is a comment
        
this is ## not a comment

<%doc> multiline
comment
</%doc>

        hi
"""
        nodes = Lexer(template).parse()
        self._compare(nodes, TemplateNode({},
                      [Text(u'''\n<style>\n    #someselector\n    # '''
                      '''other non comment stuff\n</style>\n''',
                      (1, 1)), Comment(u'a comment', (6, 1)),
                      Text(u'''\n# also not a comment\n\n''', (7, 1)),
                      Comment(u'this is a comment', (10, 1)),
                      Text(u'''        \nthis is ## not a comment\n\n''', (11,
                      1)), Comment(u''' multiline\ncomment\n''', (14,
                      1)), Text(u'''
        hi
''', (16, 8))]))
    def test_docs(self):
        """<%doc> blocks parse to Comment nodes, both at top level and
        nested inside a <%def>."""
        template = \
"""
        <%doc>
            this is a comment
        </%doc>
        <%def name="foo()">
            <%doc>
                this is the foo func
            </%doc>
        </%def>
"""
        nodes = Lexer(template).parse()
        self._compare(nodes,
                      TemplateNode({}, [Text(u'\n        ', (1,
                      1)),
                      Comment(u'''\n            this is a comment\n        ''',
                      (2, 9)), Text(u'\n        ', (4, 16)),
                      DefTag(u'def', {u'name': u'foo()'}, (5, 9),
                      [Text(u'\n            ', (5, 28)),
                      Comment(u'''\n                this is the foo func\n'''
                      '''            ''',
                      (6, 13)), Text(u'\n        ', (8, 20))]),
                      Text(u'\n', (9, 16))]))
    def test_preprocess(self):
        """A preprocessor callable can rewrite the template text before
        lexing; here '#'-style comments are promoted to '##' Mako comments."""
        def preproc(text):
            # Turn a line-leading '#' (not already '##') into '##'.
            return re.sub(r'(?<=\n)\s*#[^#]', '##', text)
        template = \
"""
    hi
    # old style comment
    # another comment
"""
        nodes = Lexer(template, preprocessor=preproc).parse()
        self._compare(nodes, TemplateNode({}, [Text(u'''\n    hi\n''',
                      (1, 1)), Comment(u'old style comment', (3, 1)),
                      Comment(u'another comment', (4, 1))]))
|
apache-2.0
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/ThirdParty/Twisted/twisted/python/systemd.py
|
42
|
2784
|
# -*- test-case-name: twisted.python.test.test_systemd -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Integration with systemd.
Currently only the minimum APIs necessary for using systemd's socket activation
feature are supported.
"""
__all__ = ['ListenFDs']
from os import getpid
class ListenFDs(object):
    """
    L{ListenFDs} provides access to file descriptors inherited from systemd.

    Typically L{ListenFDs.fromEnvironment} should be used to construct a new
    instance of L{ListenFDs}.

    @cvar _START: File descriptors inherited from systemd are always
        consecutively numbered, with a fixed lowest "starting" descriptor.
        This gives the default starting descriptor.  Since this must agree
        with the value systemd is using, it typically should not be
        overridden.
    @type _START: C{int}

    @ivar _descriptors: A C{list} of C{int} giving the descriptors which
        were inherited.
    """
    _START = 3

    def __init__(self, descriptors):
        """
        @param descriptors: The descriptors which will be returned from
            calls to C{inheritedDescriptors}.
        """
        self._descriptors = descriptors

    @classmethod
    def fromEnvironment(cls, environ=None, start=None):
        """
        @param environ: A dictionary-like object to inspect to discover
            inherited descriptors.  By default, C{None}, indicating that the
            real process environment should be inspected.  The default is
            suitable for typical usage.

        @param start: An integer giving the lowest value of an inherited
            descriptor systemd will give us.  By default, C{None}, indicating
            the known correct (that is, in agreement with systemd) value will
            be used.  The default is suitable for typical usage.

        @return: A new instance of C{cls} which can be used to look up the
            descriptors which have been inherited.
        """
        if environ is None:
            from os import environ
        if start is None:
            start = cls._START

        # Guard-clause style: bail out with no descriptors on any missing or
        # malformed variable, or if the PID belongs to another process.
        empty = []
        try:
            pid = int(environ['LISTEN_PID'])
        except (KeyError, ValueError):
            return cls(empty)
        if pid != getpid():
            return cls(empty)
        try:
            count = int(environ['LISTEN_FDS'])
        except (KeyError, ValueError):
            return cls(empty)

        # Consume the variables so children do not mistakenly inherit them;
        # this only happens when both variables parsed successfully.
        descriptors = range(start, start + count)
        del environ['LISTEN_PID'], environ['LISTEN_FDS']
        return cls(descriptors)

    def inheritedDescriptors(self):
        """
        @return: A fresh copy of the configured list of descriptors.
        """
        return list(self._descriptors)
|
gpl-3.0
|
MissingNoIOI/reddit2ebook
|
reddit2ebook/ebooklib_patched/__init__.py
|
4
|
1381
|
# This file is part of EbookLib.
# Copyright (c) 2013 Aleksandar Erkalovic <aerkalov@gmail.com>
#
# EbookLib is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EbookLib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with EbookLib. If not, see <http://www.gnu.org/licenses/>.
# Version of ebook library
VERSION = (0, 15, 0)
# LIST OF POSSIBLE ITEMS
# Integer kind codes used to classify items packaged into an ebook.
ITEM_UNKNOWN = 0
ITEM_IMAGE = 1
ITEM_STYLE = 2
ITEM_SCRIPT = 3
ITEM_NAVIGATION = 4
ITEM_VECTOR = 5
ITEM_FONT = 6
ITEM_VIDEO = 7
ITEM_AUDIO = 8
ITEM_DOCUMENT = 9
# EXTENSION MAPPER
# Maps an item kind code to the lowercase file extensions recognised as
# that kind.  Extensions absent from every list presumably fall back to
# ITEM_UNKNOWN -- the classification logic lives elsewhere in the package.
EXTENSIONS = {ITEM_IMAGE: ['.jpg', '.jpeg', '.gif', '.tiff', '.tif', '.png'],
              ITEM_STYLE: ['.css'],
              ITEM_VECTOR: ['.svg'],
              ITEM_FONT: ['.otf', '.woff'],
              ITEM_SCRIPT: ['.js'],
              ITEM_NAVIGATION: ['.ncx'],
              ITEM_VIDEO: ['.mov', '.mp4', '.avi'],
              ITEM_AUDIO: ['.mp3', '.ogg']
              }
|
gpl-3.0
|
miconof/CouchPotatoServer
|
libs/pyutil/assertutil.py
|
106
|
2753
|
# Copyright (c) 2003-2009 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
"""
Tests useful in assertion checking, prints out nicely formated messages too.
"""
from humanreadable import hr
def _assert(___cond=False, *___args, **___kwargs):
    """Return True if ___cond is truthy; otherwise raise AssertionError with
    a message listing each extra positional/keyword value and its type
    (formatted via hr()).  Python 2 only (py2 raise syntax, list-returning
    dict.items()).
    """
    if ___cond:
        return True
    msgbuf=[]
    if ___args:
        msgbuf.append("%s %s" % tuple(map(hr, (___args[0], type(___args[0]),))))
        msgbuf.extend([", %s %s" % tuple(map(hr, (arg, type(arg),))) for arg in ___args[1:]])
        # NOTE(review): when positional args are present, only the FIRST
        # keyword arg is appended here -- remaining kwargs are silently
        # dropped (the else-branch below does extend over items()[1:]).
        # Looks like a bug; confirm before relying on the message content.
        if ___kwargs:
            msgbuf.append(", %s: %s %s" % ((___kwargs.items()[0][0],) + tuple(map(hr, (___kwargs.items()[0][1], type(___kwargs.items()[0][1]),)))))
    else:
        if ___kwargs:
            msgbuf.append("%s: %s %s" % ((___kwargs.items()[0][0],) + tuple(map(hr, (___kwargs.items()[0][1], type(___kwargs.items()[0][1]),)))))
            msgbuf.extend([", %s: %s %s" % tuple(map(hr, (k, v, type(v),))) for k, v in ___kwargs.items()[1:]])
    raise AssertionError, "".join(msgbuf)
def precondition(___cond=False, *___args, **___kwargs):
    """Like _assert() but prefixes the failure message with "precondition".
    Returns True when ___cond holds.  Python 2 only.
    """
    if ___cond:
        return True
    msgbuf=["precondition", ]
    if ___args or ___kwargs:
        msgbuf.append(": ")
    if ___args:
        msgbuf.append("%s %s" % tuple(map(hr, (___args[0], type(___args[0]),))))
        msgbuf.extend([", %s %s" % tuple(map(hr, (arg, type(arg),))) for arg in ___args[1:]])
        # NOTE(review): same quirk as _assert -- with positional args present,
        # kwargs beyond the first are not included in the message.
        if ___kwargs:
            msgbuf.append(", %s: %s %s" % ((___kwargs.items()[0][0],) + tuple(map(hr, (___kwargs.items()[0][1], type(___kwargs.items()[0][1]),)))))
    else:
        if ___kwargs:
            msgbuf.append("%s: %s %s" % ((___kwargs.items()[0][0],) + tuple(map(hr, (___kwargs.items()[0][1], type(___kwargs.items()[0][1]),)))))
            msgbuf.extend([", %s: %s %s" % tuple(map(hr, (k, v, type(v),))) for k, v in ___kwargs.items()[1:]])
    raise AssertionError, "".join(msgbuf)
def postcondition(___cond=False, *___args, **___kwargs):
    """Like _assert() but prefixes the failure message with "postcondition".
    Returns True when ___cond holds.  Python 2 only.
    """
    if ___cond:
        return True
    msgbuf=["postcondition", ]
    if ___args or ___kwargs:
        msgbuf.append(": ")
    if ___args:
        msgbuf.append("%s %s" % tuple(map(hr, (___args[0], type(___args[0]),))))
        msgbuf.extend([", %s %s" % tuple(map(hr, (arg, type(arg),))) for arg in ___args[1:]])
        # NOTE(review): same quirk as _assert -- with positional args present,
        # kwargs beyond the first are not included in the message.
        if ___kwargs:
            msgbuf.append(", %s: %s %s" % ((___kwargs.items()[0][0],) + tuple(map(hr, (___kwargs.items()[0][1], type(___kwargs.items()[0][1]),)))))
    else:
        if ___kwargs:
            msgbuf.append("%s: %s %s" % ((___kwargs.items()[0][0],) + tuple(map(hr, (___kwargs.items()[0][1], type(___kwargs.items()[0][1]),)))))
            msgbuf.extend([", %s: %s %s" % tuple(map(hr, (k, v, type(v),))) for k, v in ___kwargs.items()[1:]])
    raise AssertionError, "".join(msgbuf)
|
gpl-3.0
|
xin3liang/platform_external_chromium_org
|
tools/safely-roll-deps.py
|
16
|
5639
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate a CL to roll a DEPS entry to the specified revision number and post
it to Rietveld so that the CL will land automatically if it passes the
commit-queue's checks.
"""
import logging
import optparse
import os
import re
import sys
import find_depot_tools
import scm
import subprocess2
def die_with_error(msg):
    """Print msg to stderr and terminate the process with exit status 1.
    (Python 2 print-to-stream syntax.)"""
    print >> sys.stderr, msg
    sys.exit(1)
def process_deps(path, project, new_rev, is_dry_run):
    """Update the "<project>_revision" entry in the DEPS file at |path| to
    |new_rev| and return the previous revision string.

    Dies via die_with_error() if the entry cannot be found or the
    substitution produces no change (e.g. rolling to the same revision).
    When |is_dry_run| is true the file is left untouched.
    """
    with open(path) as f:
        content = f.read()

    # Hack for Blink to get the AutoRollBot running again.
    if project == "blink":
        project = "webkit"

    old_line = r'(\s+)"%s_revision": "([0-9a-f]{2,40})",' % project
    new_line = r'\1"%s_revision": "%s",' % (project, new_rev)

    # Search BEFORE calling .group(): the original code dereferenced
    # re.search(...)'s result unconditionally and crashed with
    # AttributeError (instead of the intended friendly error) when the
    # project entry was absent.
    match = re.search(old_line, content)
    if match is None:
        die_with_error('Failed to update the DEPS file')
    old_rev = match.group(2)

    new_content = re.sub(old_line, new_line, content, 1)
    if new_content == content:
        die_with_error('Failed to update the DEPS file')

    if not is_dry_run:
        with open(path, 'w') as f:
            f.write(new_content)
    return old_rev
class PrintSubprocess(object):
    """Wrapper for subprocess2 which prints out every command."""
    def __getattr__(self, attr):
        # Proxy any subprocess2 callable (check_call, check_output, ...),
        # echoing the command line before delegating to the real function.
        def _run_subprocess2(cmd, *args, **kwargs):
            print cmd
            # Flush so the echoed command precedes the child's own output.
            sys.stdout.flush()
            return getattr(subprocess2, attr)(cmd, *args, **kwargs)
        return _run_subprocess2
# Module-level singleton used throughout in place of subprocess2.
prnt_subprocess = PrintSubprocess()
def main():
    """Parse <project> and <new rev> from the command line, roll the DEPS
    entry on a fresh branch, and upload the CL (optionally to the commit
    queue).  Returns 0 on success; error paths exit via parser.error() or
    die_with_error().
    """
    tool_dir = os.path.dirname(os.path.abspath(__file__))
    parser = optparse.OptionParser(usage='%prog [options] <project> <new rev>',
                                   description=sys.modules[__name__].__doc__)
    parser.add_option('-v', '--verbose', action='count', default=0)
    parser.add_option('--dry-run', action='store_true')
    parser.add_option('-f', '--force', action='store_true',
                      help='Make destructive changes to the local checkout if '
                           'necessary.')
    parser.add_option('--commit', action='store_true', default=True,
                      help='(default) Put change in commit queue on upload.')
    parser.add_option('--no-commit', action='store_false', dest='commit',
                      help='Don\'t put change in commit queue on upload.')
    parser.add_option('-r', '--reviewers', default='',
                      help='Add given users as either reviewers or TBR as'
                           ' appropriate.')
    parser.add_option('--upstream', default='origin/master',
                      help='(default "%default") Use given start point for change'
                           ' to upload. For instance, if you use the old git workflow,'
                           ' you might set it to "origin/trunk".')
    parser.add_option('--cc', help='CC email addresses for issue.')
    parser.add_option('-m', '--message', help='Custom commit message.')
    options, args = parser.parse_args()
    # Map -v / -vv to INFO / DEBUG (default WARNING).
    logging.basicConfig(
        level=
            [logging.WARNING, logging.INFO, logging.DEBUG][
                min(2, options.verbose)])
    if len(args) != 2:
        parser.print_help()
        exit(0)
    root_dir = os.path.dirname(tool_dir)
    os.chdir(root_dir)
    project = args[0]
    new_rev = args[1]
    # Silence the editor.
    os.environ['EDITOR'] = 'true'
    if options.force and not options.dry_run:
        # Wipe local modifications so the roll branch starts clean.
        prnt_subprocess.check_call(['git', 'clean', '-d', '-f'])
        prnt_subprocess.call(['git', 'rebase', '--abort'])
    old_branch = scm.GIT.GetBranch(root_dir)
    new_branch = '%s_roll' % project
    if options.upstream == new_branch:
        parser.error('Cannot set %s as its own upstream.' % new_branch)
    if old_branch == new_branch:
        if options.force:
            if not options.dry_run:
                prnt_subprocess.check_call(['git', 'checkout', options.upstream, '-f'])
                prnt_subprocess.call(['git', 'branch', '-D', old_branch])
        else:
            parser.error('Please delete the branch %s and move to a different branch'
                         % new_branch)
    if not options.dry_run:
        prnt_subprocess.check_call(['git', 'fetch', 'origin'])
        prnt_subprocess.call(['git', 'svn', 'fetch'])
        branch_cmd = ['git', 'checkout', '-b', new_branch, options.upstream]
        if options.force:
            branch_cmd.append('-f')
        prnt_subprocess.check_output(branch_cmd)
    try:
        old_rev = process_deps(os.path.join(root_dir, 'DEPS'), project, new_rev,
                               options.dry_run)
        print '%s roll %s:%s' % (project.title(), old_rev, new_rev)
        # Committed CLs are TBR'd; plain uploads request review.
        review_field = 'TBR' if options.commit else 'R'
        commit_msg = options.message or '%s roll %s:%s\n' % (project.title(),
                                                             old_rev, new_rev)
        commit_msg += '\n%s=%s\n' % (review_field, options.reviewers)
        if options.dry_run:
            print 'Commit message: ' + commit_msg
            return 0
        prnt_subprocess.check_output(['git', 'commit', '-m', commit_msg, 'DEPS'])
        prnt_subprocess.check_call(['git', 'diff', '--no-ext-diff',
                                    options.upstream])
        upload_cmd = ['git', 'cl', 'upload', '--bypass-hooks']
        if options.commit:
            upload_cmd.append('--use-commit-queue')
        if options.reviewers:
            upload_cmd.append('--send-mail')
        if options.cc:
            upload_cmd.extend(['--cc', options.cc])
        prnt_subprocess.check_call(upload_cmd)
    finally:
        # Always restore the previous branch and drop the temporary one.
        if not options.dry_run:
            prnt_subprocess.check_output(['git', 'checkout', old_branch])
            prnt_subprocess.check_output(['git', 'branch', '-D', new_branch])
    return 0
# Script entry point: propagate main()'s return code as the exit status.
if __name__ == '__main__':
    sys.exit(main())
|
bsd-3-clause
|
mjn19172/Savu
|
savu/data/experiment_collection.py
|
1
|
3664
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: experiment_collection
:platform: Unix
:synopsis: Contains the Experiment class and all possible experiment
collections from which Experiment can inherit at run time.
.. moduleauthor:: Nicola Wadeson <scientificsoftware@diamond.ac.uk>
"""
import os
import time
import logging
from mpi4py import MPI
from savu.data.plugin_list import PluginList
from savu.data.data_structures import Data
from savu.data.meta_data import MetaData
class Experiment(object):
    """
    One instance of this class is created at the beginning of the
    processing chain and remains until the end.  It holds the current data
    objects and a dictionary containing all metadata.
    """

    def __init__(self, options):
        self.meta_data = MetaData(options)
        self.meta_data_setup(options["process_file"])
        # Registry of Data objects, keyed first by direction then by name.
        self.index = {"in_data": {}, "out_data": {}}

    def meta_data_setup(self, process_file):
        """Load the experiment collection and populate the plugin list
        from the given process file."""
        self.meta_data.load_experiment_collection()
        self.meta_data.plugin_list = PluginList()
        self.meta_data.plugin_list.populate_plugin_list(process_file)

    def create_data_object(self, dtype, name, bases=None):
        """Return the Data object registered under (dtype, name), creating
        it and mixing in its transport base classes if it does not exist.

        :param dtype: "in_data" or "out_data".
        :param name: key identifying the data object.
        :param bases: optional list of extra base classes to mix in.
        """
        # Fixed: 'bases' previously defaulted to a mutable [] that was
        # shared (and appended to) across every call; default to None and
        # copy so neither the default nor a caller's list is mutated.
        bases = [] if bases is None else list(bases)
        try:
            self.index[dtype][name]
        except KeyError:
            self.index[dtype][name] = Data(name)
        data_obj = self.index[dtype][name]
        bases.append(data_obj.get_transport_data(self.meta_data.get_meta_data("transport")))
        data_obj.add_base_classes(bases)
        return self.index[dtype][name]

    def set_nxs_filename(self):
        """Derive the output .nxs filename from the first input data
        object's backing file plus a timestamp, and store it in meta_data."""
        name = self.index["in_data"].keys()[0]
        filename = os.path.basename(self.index["in_data"][name].backing_file.filename)
        filename = os.path.splitext(filename)[0]
        filename = os.path.join(self.meta_data.get_meta_data("out_path"),
                                "%s_processed_%s.nxs" % (filename,
                                time.strftime("%Y%m%d%H%M%S")))
        self.meta_data.set_meta_data("nxs_filename", filename)

    def clear_data_objects(self):
        """Drop every registered input and output data object."""
        self.index["out_data"] = {}
        self.index["in_data"] = {}

    def clear_out_data_objects(self):
        """Drop only the output data objects."""
        self.index["out_data"] = {}

    def set_out_data_to_in(self):
        """Promote the current outputs to inputs for the next plugin."""
        self.index["in_data"] = self.index["out_data"]
        self.index["out_data"] = {}

    def barrier(self):
        """Synchronise all MPI processes (no-op when not running under MPI)."""
        if self.meta_data.get_meta_data('mpi') is True:
            logging.debug("About to hit a barrier")
            MPI.COMM_WORLD.Barrier()
            logging.debug("Past the barrier")

    def log(self, log_tag, log_level=logging.DEBUG):
        """
        Log the contents of the experiment at the specified level
        """
        logging.log(log_level, "Experimental Parameters for %s", log_tag)
        for key, value in self.index["in_data"].iteritems():
            logging.log(log_level, "in data (%s) shape = %s", key,
                        value.get_shape())
        # Fixed: this loop previously iterated "in_data" a second time
        # while labelling the entries as "out data".
        for key, value in self.index["out_data"].iteritems():
            logging.log(log_level, "out data (%s) shape = %s", key,
                        value.get_shape())
|
apache-2.0
|
viur-framework/server
|
render/vi/__init__.py
|
1
|
4264
|
# -*- coding: utf-8 -*-
from server.render.vi.default import DefaultRender as default
from server.render.vi.user import UserRender as user
from server.render.json.file import FileRender as file
from server.skeleton import Skeleton
from google.appengine.api import app_identity
from server import conf
from server import securitykey
from server import utils
from server import request
from server import session
from server import errors
import datetime, json
# __all__ entries must be strings naming module attributes; the imported
# class object itself breaks "from ... import *" consumers.
__all__ = ["default"]
def genSkey(*args, **kwargs):
    """Create a fresh one-time security key and return it JSON-encoded."""
    freshKey = securitykey.create()
    return json.dumps(freshKey)
genSkey.exposed = True
def timestamp(*args, **kwargs):
    """Return the current server time as a JSON-encoded
    "YYYY-MM-DDTHH-MM-SS" string (note: dashes, not colons, in the
    time part)."""
    now = datetime.datetime.now()
    return json.dumps(now.strftime("%Y-%m-%dT%H-%M-%S"))
timestamp.exposed = True
def getStructure(adminTree, module):
    """Return a JSON string mapping skeleton-type names to their rendered
    structures for *module* of *adminTree*.

    Returns JSON "null" if the module is unknown, has no adminInfo, or
    exposes no usable skeletons.  (Preserved quirk: if the attribute lookup
    itself raises, plain None - not JSON - is returned.)
    """
    if (module not in dir(adminTree)
            or "adminInfo" not in dir(getattr(adminTree, module))
            or not getattr(adminTree, module).adminInfo):
        # Module not known or no adminInfo for that module
        return json.dumps(None)
    try:
        moduleObj = getattr(adminTree, module)
    except:
        return None
    res = {}
    skelTypes = ["viewSkel", "editSkel", "addSkel", "viewLeafSkel",
                 "viewNodeSkel", "editNodeSkel", "editLeafSkel",
                 "addNodeSkel", "addLeafSkel"]
    for stype in skelTypes:
        if stype not in dir(moduleObj):
            continue
        try:
            skel = getattr(moduleObj, stype)()
        except:
            # Best-effort: skip skeletons whose factory raises.
            continue
        if isinstance(skel, Skeleton):
            res[stype] = default().renderSkelStructure(skel)
    if res:
        return json.dumps(res)
    return json.dumps(None)
def setLanguage(lang, skey):
    """Switch the current session to *lang*, provided the security key is
    valid and the language is configured as available."""
    if not securitykey.validate(skey):
        return
    if lang not in conf["viur.availableLanguages"]:
        return
    session.current.setLanguage(lang)
setLanguage.exposed = True
def dumpConfig(adminTree):
    """Build the JSON configuration blob consumed by the Vi admin client:
    per-module adminInfo (with translated names and views), the server's
    capabilities, and every "admin.*" configuration value."""
    adminConfig = {}
    for key in dir(adminTree):
        app = getattr(adminTree, key)
        if "adminInfo" not in dir(app) or not app.adminInfo:
            continue
        if callable(app.adminInfo):
            # Dynamic adminInfo: include it only if the callable yields one.
            info = app.adminInfo()
            if info is not None:
                adminConfig[key] = info
        else:
            adminConfig[key] = app.adminInfo.copy()
            adminConfig[key]["name"] = _(adminConfig[key]["name"])
            adminConfig[key]["views"] = []
            if "views" in app.adminInfo:
                for v in app.adminInfo["views"]:
                    tmp = v.copy()
                    tmp["name"] = _(tmp["name"])
                    adminConfig[key]["views"].append(tmp)
    res = {"capabilities": conf["viur.capabilities"],
           "modules": adminConfig,
           "configuration": {}}
    for k, v in conf.items():
        if k.lower().startswith("admin."):
            # Strip the "admin." prefix for the client.
            res["configuration"][k[6:]] = v
    return json.dumps(res)
def getVersion(*args, **kwargs):
    """Return the server version as a JSON [major, minor, 0] triple."""
    # We force the patch-level of our version to be always zero for
    # security reasons.
    major, minor = conf["viur.version"][0], conf["viur.version"][1]
    return json.dumps((major, minor, 0))
getVersion.exposed = True
def canAccess(*args, **kwargs):
    """Decide whether the current request may reach the Vi render at all.

    Admin/root users get full access; everyone else is limited to the
    endpoints needed to log in (skey, getVersion, the user-module auth
    flows) and to viewing their own user entry.
    """
    user = utils.getCurrentUser()
    if user and ("root" in user["access"] or "admin" in user["access"]):
        return True
    pathList = request.current.get().pathlist
    if len(pathList) >= 2 and pathList[1] in ["skey", "getVersion"]:
        # Give the user the chance to login :)
        return True
    if len(pathList) >= 3 and pathList[1] == "user":
        action = pathList[2]
        if (action.startswith("auth_")
                or action.startswith("f2_")
                or action == "getAuthMethods"
                or action == "logout"):
            # Authentication-flow endpoints stay reachable pre-login.
            return True
    if (len(pathList) >= 4
            and pathList[1] == "user"
            and pathList[2] == "view"
            and pathList[3] == "self"):
        # Give the user the chance to view himself.
        return True
    return False
def index(*args, **kwargs):
    """Redirect to the Vi client's main page, forcing HTTPS except on the
    development server."""
    req = request.current.get()
    if req.isDevServer or req.isSSLConnection:
        raise errors.Redirect("/vi/s/main.html")
    appVersion = app_identity.get_default_version_hostname()
    raise errors.Redirect("https://%s/vi/s/main.html" % appVersion)
index.exposed = True
def _postProcessAppObj( obj ):
    """Attach the vi-specific exposed endpoints to the application object.

    Returns *obj* with skey/timestamp/config/getStructure/canAccess/
    setLanguage/getVersion/index wired up.
    """
    obj.skey = genSkey
    obj.timestamp = timestamp
    # config and getStructure need access to *obj* itself, hence the lambdas.
    obj.config = lambda *args, **kwargs: dumpConfig(obj)
    obj.config.exposed = True
    obj.getStructure = lambda *args, **kwargs: getStructure(obj, *args, **kwargs)
    obj.getStructure.exposed = True
    obj.canAccess = canAccess
    obj.setLanguage = setLanguage
    obj.getVersion = getVersion
    obj.index = index
    return obj
|
lgpl-3.0
|
mrinalabrol/magik
|
env/lib/python3.4/site-packages/pip/_vendor/lockfile/pidlockfile.py
|
488
|
6221
|
# -*- coding: utf-8 -*-
# pidlockfile.py
#
# Copyright © 2008–2009 Ben Finney <ben+python@benfinney.id.au>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Python Software Foundation License, version 2 or
# later as published by the Python Software Foundation.
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
""" Lockfile behaviour implemented via Unix PID files.
"""
from __future__ import absolute_import
import os
import sys
import errno
import time
from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock,
LockTimeout)
class PIDLockFile(LockBase):
    """ Lockfile implemented as a Unix PID file.

    The lock file is a normal file named by the attribute `path`.
    A lock's PID file contains a single line of text, containing
    the process ID (PID) of the process that acquired the lock.

    >>> lock = PIDLockFile('somefile')
    >>> lock = PIDLockFile('somefile')
    """

    def __init__(self, path, threaded=False, timeout=None):
        # pid lockfiles don't support threaded operation, so always force
        # False as the threaded arg.
        LockBase.__init__(self, path, False, timeout)
        self.unique_name = self.path

    def read_pid(self):
        """ Get the PID from the lock file.
        """
        return read_pid_from_pidfile(self.path)

    def is_locked(self):
        """ Test if the lock is currently held.

        The lock is held if the PID file for this lock exists.
        """
        return os.path.exists(self.path)

    def i_am_locking(self):
        """ Test if the lock is held by the current process.

        Returns ``True`` if the current process ID matches the
        number stored in the PID file.
        """
        return self.is_locked() and os.getpid() == self.read_pid()

    def acquire(self, timeout=None):
        """ Acquire the lock.

        Creates the PID file for this lock, or raises an error if
        the lock could not be acquired.

        A *timeout* of ``None`` falls back to ``self.timeout``.  A
        timeout of ``0`` (or negative) means "fail immediately with
        AlreadyLocked if the lock is held".
        """
        # Bug fix: the old ``x and y or z`` idiom treated an explicit
        # ``timeout=0`` as falsy and silently substituted self.timeout,
        # defeating the fail-immediately case handled below.
        timeout = timeout if timeout is not None else self.timeout
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout

        while True:
            try:
                write_pid_to_pidfile(self.path)
            except OSError as exc:
                if exc.errno == errno.EEXIST:
                    # The lock creation failed. Maybe sleep a bit.
                    if timeout is not None and time.time() > end_time:
                        if timeout > 0:
                            raise LockTimeout("Timeout waiting to acquire"
                                              " lock for %s" %
                                              self.path)
                        else:
                            raise AlreadyLocked("%s is already locked" %
                                                self.path)
                    # Poll at a tenth of the timeout, or 100ms by default.
                    time.sleep(timeout / 10
                               if timeout is not None and timeout > 0
                               else 0.1)
                else:
                    raise LockFailed("failed to create %s" % self.path)
            else:
                return

    def release(self):
        """ Release the lock.

        Removes the PID file to release the lock, or raises an
        error if the current process does not hold the lock.
        """
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        if not self.i_am_locking():
            raise NotMyLock("%s is locked, but not by me" % self.path)
        remove_existing_pidfile(self.path)

    def break_lock(self):
        """ Break an existing lock.

        Removes the PID file if it already exists, otherwise does
        nothing.
        """
        remove_existing_pidfile(self.path)
def read_pid_from_pidfile(pidfile_path):
    """ Read the PID recorded in the named PID file.

    Read and return the numeric PID recorded as text in the named
    PID file. If the PID file cannot be read, or if the content is
    not a valid PID, return ``None``.
    """
    try:
        pidfile = open(pidfile_path, 'r')
    except IOError:
        return None
    # ``with`` guarantees the handle is closed even if readline() fails;
    # the original closed it only on the happy path.
    with pidfile:
        # According to the FHS 2.3 section on PID files in /var/run:
        #
        #   The file must consist of the process identifier in
        #   ASCII-encoded decimal, followed by a newline character.
        #
        # Programs that read PID files should be somewhat flexible
        # in what they accept; i.e., they should ignore extra
        # whitespace, leading zeroes, absence of the trailing
        # newline, or additional lines in the PID file.
        line = pidfile.readline().strip()
    try:
        return int(line)
    except ValueError:
        return None
def write_pid_to_pidfile(pidfile_path):
    """ Write the PID in the named PID file.

    Get the numeric process ID ("PID") of the current process
    and write it to the named file as a line of text.

    Raises OSError (errno EEXIST) if the file already exists — this is
    what gives PIDLockFile its atomic "create or fail" semantics.
    """
    open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    open_mode = 0o644
    pidfile_fd = os.open(pidfile_path, open_flags, open_mode)
    # fdopen takes ownership of the descriptor; ``with`` guarantees it is
    # closed (and the line flushed) even if the write itself fails.  The
    # original leaked the fd on a failed write.
    with os.fdopen(pidfile_fd, 'w') as pidfile:
        # According to the FHS 2.3 section on PID files in /var/run:
        #
        #   The file must consist of the process identifier in
        #   ASCII-encoded decimal, followed by a newline character. For
        #   example, if crond was process number 25, /var/run/crond.pid
        #   would contain three characters: two, five, and newline.
        pidfile.write("%d\n" % os.getpid())
def remove_existing_pidfile(pidfile_path):
    """ Remove the named PID file if it exists.

    Removing a PID file that doesn't already exist puts us in the
    desired state, so we ignore the condition if the file does not
    exist.
    """
    try:
        os.remove(pidfile_path)
    except OSError as exc:
        # A missing file already satisfies "removed"; anything else
        # (e.g. permission denied) is a real error and propagates.
        if exc.errno != errno.ENOENT:
            raise
|
gpl-3.0
|
tvibliani/odoo
|
openerp/tools/__init__.py
|
337
|
1447
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import copy
import win32
import appdirs
from config import config
from misc import *
from convert import *
from translate import *
from graph import graph
from image import *
from amount_to_text import *
from amount_to_text_en import *
from pdf_utils import *
from yaml_import import *
from sql import *
from float_utils import *
from mail import *
from func import *
from debugger import *
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
WendellDuncan/or-tools
|
examples/python/fill_a_pix.py
|
5
|
5022
|
# Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fill-a-Pix problem in Google CP Solver.
From
http://www.conceptispuzzles.com/index.aspx?uri=puzzle/fill-a-pix/basiclogic
'''
Each puzzle consists of a grid containing clues in various places. The
object is to reveal a hidden picture by painting the squares around each
clue so that the number of painted squares, including the square with
the clue, matches the value of the clue.
'''
http://www.conceptispuzzles.com/index.aspx?uri=puzzle/fill-a-pix/rules
'''
Fill-a-Pix is a Minesweeper-like puzzle based on a grid with a pixilated
picture hidden inside. Using logic alone, the solver determines which
squares are painted and which should remain empty until the hidden picture
is completely exposed.
'''
Fill-a-pix History:
http://www.conceptispuzzles.com/index.aspx?uri=puzzle/fill-a-pix/history
Compare with the following models:
* MiniZinc: http://www.hakank.org/minizinc/fill_a_pix.mzn
* SICStus Prolog: http://www.hakank.org/sicstus/fill_a_pix.pl
* ECLiPSe: http://hakank.org/eclipse/fill_a_pix.ecl
* Gecode: http://hakank.org/gecode/fill_a_pix.cpp
And see the Minesweeper model:
* http://www.hakank.org/google_or_tools/minesweeper.py
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function
import sys
from ortools.constraint_solver import pywrapcp
# Puzzle 1 from
# http://www.conceptispuzzles.com/index.aspx?uri=puzzle/fill-a-pix/rules
# Grid dimension of the built-in instance (grid is n x n).
default_n = 10
# Marker for a cell that carries no clue.
X = -1
# Built-in 10x10 instance; each non-X entry is a clue giving how many of
# the (up to 9) cells in its 3x3 neighbourhood are painted.
default_puzzle = [
    [X, X, X, X, X, X, X, X, 0, X],
    [X, 8, 8, X, 2, X, 0, X, X, X],
    [5, X, 8, X, X, X, X, X, X, X],
    [X, X, X, X, X, 2, X, X, X, 2],
    [1, X, X, X, 4, 5, 6, X, X, X],
    [X, 0, X, X, X, 7, 9, X, X, 6],
    [X, X, X, 6, X, X, 9, X, X, 6],
    [X, X, 6, 6, 8, 7, 8, 7, X, 5],
    [X, 4, X, 6, 6, 6, X, 6, X, 4],
    [X, X, X, X, X, X, 3, X, X, X]
]
def main(puzzle='', n=''):
  """Solve a Fill-a-Pix instance with the CP solver and print all solutions.

  Args:
    puzzle: n x n list of ints, with -1 (the module-level X) marking a
        cell without a clue.  Defaults to ``default_puzzle``.
    n: the grid dimension matching *puzzle* (ignored when the default
        puzzle is used).
  """
  # Create the solver.
  solver = pywrapcp.Solver('Fill-a-Pix')

  #
  # data
  #

  # Set default problem
  if puzzle == '':
    puzzle = default_puzzle
    n = default_n
  else:
    print('n:', n)

  # for the neighbors of 'this' cell
  S = [-1, 0, 1]

  # print problem instance
  print('Problem:')
  for i in range(n):
    for j in range(n):
      if puzzle[i][j] == X:
        sys.stdout.write('.')
      else:
        sys.stdout.write(str(puzzle[i][j]))
    print()
  print()

  #
  # declare variables
  #
  # pict[(i, j)] == 1 iff cell (i, j) is painted.
  pict = {}
  for i in range(n):
    for j in range(n):
      pict[(i, j)] = solver.IntVar(0, 1, 'pict %i %i' % (i, j))

  pict_flat = [pict[i, j] for i in range(n) for j in range(n)]

  #
  # constraints
  #
  for i in range(n):
    for j in range(n):
      if puzzle[i][j] > X:
        # this cell is the sum of all the surrounding cells
        # (the guards clip the 3x3 neighbourhood at the grid borders)
        solver.Add(
            puzzle[i][j] == solver.Sum([pict[i + a, j + b]
                                        for a in S for b in S
                                        if i + a >= 0 and
                                        j + b >= 0 and
                                        i + a < n and
                                        j + b < n])
        )

  #
  # solution and search
  #
  db = solver.Phase(pict_flat,
                    solver.INT_VAR_DEFAULT,
                    solver.INT_VALUE_DEFAULT)

  solver.NewSearch(db)

  # Enumerate every solution, rendering painted cells as '#'.
  num_solutions = 0
  print('Solution:')
  while solver.NextSolution():
    num_solutions += 1
    for i in range(n):
      row = [str(pict[i, j].Value()) for j in range(n)]
      for j in range(n):
        if row[j] == '0':
          row[j] = ' '
        else:
          row[j] = '#'
      print(''.join(row))
    print()

  print('num_solutions:', num_solutions)
  print('failures:', solver.Failures())
  print('branches:', solver.Branches())
  print('WallTime:', solver.WallTime(), 'ms')
#
# Read a problem instance from a file
#
def read_problem(file):
  """Read a Fill-a-Pix instance from *file*.

  Format: the first line holds n; each of the next n lines holds n
  characters, '.' for a cell without a clue, otherwise a single digit.

  Returns [puzzle, n] where puzzle is an n x n list of ints with -1
  marking clue-less cells.

  (The parameter keeps its historical name ``file`` — it shadows the
  builtin, but renaming it would break keyword callers.)
  """
  # Bug fix: the original opened the file without ever closing it;
  # ``with`` guarantees the handle is released.
  with open(file, 'r') as f:
    n = int(f.readline())
    puzzle = []
    for _ in range(n):
      line = f.readline()
      puzzle.append([-1 if line[j] == '.' else int(line[j])
                     for j in range(n)])
  return [puzzle, n]
if __name__ == '__main__':
  # Optional command-line argument: path to a problem instance file
  # (first line n, then n rows of '.'/digit characters).
  if len(sys.argv) > 1:
    file = sys.argv[1]
    print('Problem instance from', file)
    [puzzle, n] = read_problem(file)
    main(puzzle, n)
  else:
    main()
|
apache-2.0
|
AMOboxTV/AMOBox.LegoBuild
|
script.extendedinfo/resources/lib/LocalDB.py
|
1
|
22918
|
# -*- coding: utf8 -*-
# Copyright (C) 2015 - Philipp Temminghoff <phil65@kodi.tv>
# This program is Free Software see LICENSE file for details
import simplejson as json
import itertools
import KodiJson
from Utils import *
class LocalDB(object):
    """Helpers around the Kodi JSON-RPC video/audio library.

    Wraps ``KodiJson`` / ``get_kodi_json`` calls, flattens the returned
    movie/tvshow/artist records into the dict format the skin consumes,
    and merges online search results with items already in the local
    library.

    NOTE(review): Python 2 code (uses ``dict.iteritems``) that relies on
    names pulled in by ``from Utils import *`` (``get_kodi_json``,
    ``HOME``, ``SETTING``, ``log``, ``media_streamdetails``, ``time``) —
    verify against Utils before reuse.
    """

    def __init__(self, *args, **kwargs):
        # Lazily-filled caches used by the merge_with_local_*_info and
        # compare/similarity helpers below.
        self.movie_imdbs = []
        self.movie_ids = []
        self.movie_titles = []
        self.movie_otitles = []
        self.tvshow_ids = []
        self.tvshow_originaltitles = []
        self.tvshow_titles = []
        self.tvshow_imdbs = []
        self.artists = []
        self.albums = []

    def get_artists(self):
        """Fetch all artists from the Kodi music library (cached on self)."""
        self.artists = KodiJson.get_artists(properties=["musicbrainzartistid", "thumbnail"])
        return self.artists

    def get_similar_artists(self, artist_id):
        """Return local-library artists that Last.fm considers similar.

        Matches Last.fm suggestions against the library by MusicBrainz id
        first, then by exact artist name.  Returns ``None`` if Last.fm
        does not answer.
        """
        import LastFM
        simi_artists = LastFM.get_similar_artists(artist_id)
        if simi_artists is None:
            log('Last.fm didn\'t return proper response')
            return None
        if not self.artists:
            self.artists = self.get_artists()
        artists = []
        for simi_artist, kodi_artist in itertools.product(simi_artists, self.artists):
            if kodi_artist['musicbrainzartistid'] and kodi_artist['musicbrainzartistid'] == simi_artist['mbid']:
                artists.append(kodi_artist)
            elif kodi_artist['artist'] == simi_artist['name']:
                # Name match: fetch full details and build a flattened dict.
                data = get_kodi_json(method="AudioLibrary.GetArtistDetails",
                                     params='{"properties": ["genre", "description", "mood", "style", "born", "died", "formed", "disbanded", "yearsactive", "instrument", "fanart", "thumbnail"], "artistid": %s}' % str(kodi_artist['artistid']))
                item = data["result"]["artistdetails"]
                artists.append({"title": item['label'],
                                "Genre": " / ".join(item['genre']),
                                "thumb": item['thumbnail'],
                                "Fanart": item['fanart'],
                                "Description": item['description'],
                                "Born": item['born'],
                                "Died": item['died'],
                                "Formed": item['formed'],
                                "Disbanded": item['disbanded'],
                                "YearsActive": " / ".join(item['yearsactive']),
                                "Style": " / ".join(item['style']),
                                "Mood": " / ".join(item['mood']),
                                "Instrument": " / ".join(item['instrument']),
                                "LibraryPath": 'musicdb://artists/' + str(item['artistid']) + '/'})
        log('%i of %i artists found in last.FM are in Kodi database' % (len(artists), len(simi_artists)))
        return artists

    def get_similar_movies(self, dbid):
        """Return up to ~20 library movies similar to the movie *dbid*.

        Similarity is a heuristic score built from shared genres, year
        proximity, country, mpaa rating and director.
        """
        movie = get_kodi_json(method="VideoLibrary.GetMovieDetails",
                              params='{"properties": ["genre","director","country","year","mpaa"], "movieid":%s }' % dbid)
        if "moviedetails" not in movie['result']:
            return []
        comp_movie = movie['result']['moviedetails']
        genres = comp_movie['genre']
        data = get_kodi_json(method="VideoLibrary.GetMovies",
                             params='{"properties": ["genre","director","mpaa","country","year"], "sort": { "method": "random" } }')
        if "movies" not in data['result']:
            return []
        quotalist = []
        for item in data['result']['movies']:
            item["mediatype"] = "movie"
            diff = abs(int(item['year']) - int(comp_movie['year']))
            hit = 0.0
            # Tiny epsilon keeps hit/(hit+miss) well-defined when every
            # genre matches.
            miss = 0.00001
            quota = 0.0
            for genre in genres:
                if genre in item['genre']:
                    hit += 1.0
                else:
                    miss += 1.0
            if hit > 0.0:
                quota = float(hit) / float(hit + miss)
            # Bonus weights for primary genre, year proximity, country,
            # rating and director.
            if genres and item['genre'] and genres[0] == item['genre'][0]:
                quota += 0.3
            if diff < 3:
                quota += 0.3
            elif diff < 6:
                quota += 0.15
            if comp_movie['country'] and item['country'] and comp_movie['country'][0] == item['country'][0]:
                quota += 0.4
            if comp_movie['mpaa'] and item['mpaa'] and comp_movie['mpaa'] == item['mpaa']:
                quota += 0.4
            if comp_movie['director'] and item['director'] and comp_movie['director'][0] == item['director'][0]:
                quota += 0.6
            quotalist.append((quota, item["movieid"]))
        quotalist = sorted(quotalist,
                           key=lambda quota: quota[0],
                           reverse=True)
        movies = []
        for i, list_movie in enumerate(quotalist):
            # NOTE(review): ``is not`` compares identity, not equality —
            # for ints this only works via CPython small-int caching and
            # should probably be ``!=``.
            if comp_movie['movieid'] is not list_movie[1]:
                newmovie = self.get_movie(list_movie[1])
                movies.append(newmovie)
            if i == 20:
                break
        return movies

    def get_movies(self, filter_str="", limit=10):
        """Fetch up to *limit* movies; *filter_str* is a raw JSON fragment
        spliced into the request parameters."""
        props = '"properties": ["title", "originaltitle", "votes", "playcount", "year", "genre", "studio", "country", "tagline", "plot", "runtime", "file", "plotoutline", "lastplayed", "trailer", "rating", "resume", "art", "streamdetails", "mpaa", "director", "writer", "cast", "dateadded", "imdbnumber"]'
        data = get_kodi_json(method="VideoLibrary.GetMovies",
                             params='{%s, %s, "limits": {"end": %d}}' % (props, filter_str, limit))
        if "result" in data and "movies" in data["result"]:
            return [self.handle_movies(item) for item in data["result"]["movies"]]
        else:
            return []

    def get_tvshows(self, filter_str="", limit=10):
        """Fetch up to *limit* tv shows; *filter_str* is a raw JSON fragment
        spliced into the request parameters."""
        props = '"properties": ["title", "genre", "year", "rating", "plot", "studio", "mpaa", "cast", "playcount", "episode", "imdbnumber", "premiered", "votes", "lastplayed", "fanart", "thumbnail", "file", "originaltitle", "sorttitle", "episodeguide", "season", "watchedepisodes", "dateadded", "tag", "art"]'
        data = get_kodi_json(method="VideoLibrary.GetTVShows",
                             params='{%s, %s, "limits": {"end": %d}}' % (props, filter_str, limit))
        if "result" in data and "tvshows" in data["result"]:
            return [self.handle_tvshows(item) for item in data["result"]["tvshows"]]
        else:
            return []

    def handle_movies(self, movie):
        """Flatten a raw Kodi movie record into the skin's dict format,
        dropping empty values."""
        trailer = "plugin://script.extendedinfo/?info=playtrailer&&dbid=%s" % str(movie['movieid'])
        # Click target depends on the addon setting: info dialog vs. play.
        if SETTING("infodialog_onclick") != "false":
            path = 'plugin://script.extendedinfo/?info=extendedinfo&&dbid=%s' % str(movie['movieid'])
        else:
            path = 'plugin://script.extendedinfo/?info=playmovie&&dbid=%i' % movie['movieid']
        # NOTE(review): the parentheses make this ``(position and total) > 0``
        # — presumably ``position > 0 and total > 0`` was intended; confirm.
        if (movie['resume']['position'] and movie['resume']['total']) > 0:
            resume = "true"
            played = '%s' % int((float(movie['resume']['position']) / float(movie['resume']['total'])) * 100)
        else:
            resume = "false"
            played = '0'
        stream_info = media_streamdetails(movie['file'].encode('utf-8').lower(), movie['streamdetails'])
        # NOTE(review): 'writer' appears twice in this literal; the later
        # entry silently wins.
        db_movie = {'fanart': movie["art"].get('fanart', ""),
                    'poster': movie["art"].get('poster', ""),
                    'Banner': movie["art"].get('banner', ""),
                    'clearart': movie["art"].get('clearart', ""),
                    'DiscArt': movie["art"].get('discart', ""),
                    'title': movie.get('label', ""),
                    'File': movie.get('file', ""),
                    'year': str(movie.get('year', "")),
                    'writer': " / ".join(movie['writer']),
                    'Logo': movie['art'].get("clearlogo", ""),
                    'OriginalTitle': movie.get('originaltitle', ""),
                    'imdb_id': movie.get('imdbnumber', ""),
                    'path': path,
                    'plot': movie.get('plot', ""),
                    'director': " / ".join(movie.get('director')),
                    'writer': " / ".join(movie.get('writer')),
                    'PercentPlayed': played,
                    'Resume': resume,
                    # 'SubtitleLanguage': " / ".join(subs),
                    # 'AudioLanguage': " / ".join(streams),
                    'Play': "",
                    'trailer': trailer,
                    'dbid': str(movie['movieid']),
                    'Rating': str(round(float(movie['rating']), 1))}
        # Per-stream audio/subtitle properties (1-based suffix).
        streams = []
        for i, item in enumerate(movie['streamdetails']['audio']):
            language = item['language']
            if language not in streams and language != "und":
                streams.append(language)
            db_movie['AudioLanguage.%d' % (i + 1)] = language
            db_movie['AudioCodec.%d' % (i + 1)] = item['codec']
            db_movie['AudioChannels.%d' % (i + 1)] = str(item['channels'])
        subs = []
        for i, item in enumerate(movie['streamdetails']['subtitle']):
            language = item['language']
            if language not in subs and language != "und":
                subs.append(language)
            db_movie['SubtitleLanguage.%d' % (i + 1)] = language
        db_movie.update(stream_info)
        # Drop falsy values (Python 2 ``iteritems``).
        return dict((k, v) for k, v in db_movie.iteritems() if v)

    def handle_tvshows(self, tvshow):
        """Flatten a raw Kodi tvshow record into the skin's dict format,
        dropping empty values."""
        # Click target depends on the addon setting: info dialog vs. browse.
        if SETTING("infodialog_onclick") != "false":
            path = 'plugin://script.extendedinfo/?info=extendedtvinfo&&dbid=%s' % tvshow['tvshowid']
        else:
            path = 'plugin://script.extendedinfo/?info=action&&id=ActivateWindow(videos,videodb://tvshows/titles/%s/,return)' % tvshow['tvshowid']
        db_tvshow = {'fanart': tvshow["art"].get('fanart', ""),
                     'poster': tvshow["art"].get('poster', ""),
                     'Banner': tvshow["art"].get('banner', ""),
                     'DiscArt': tvshow["art"].get('discart', ""),
                     'title': tvshow.get('label', ""),
                     'genre': " / ".join(tvshow.get('genre', "")),
                     'File': tvshow.get('file', ""),
                     'year': str(tvshow.get('year', "")),
                     'Logo': tvshow['art'].get("clearlogo", ""),
                     'OriginalTitle': tvshow.get('originaltitle', ""),
                     'imdb_id': tvshow.get('imdbnumber', ""),
                     'path': path,
                     'Play': "",
                     'dbid': str(tvshow['tvshowid']),
                     'Rating': str(round(float(tvshow['rating']), 1))}
        # Drop falsy values (Python 2 ``iteritems``).
        return dict((k, v) for k, v in db_tvshow.iteritems() if v)

    def get_movie(self, movie_id):
        """Fetch a single movie by dbid, flattened; {} when not found."""
        response = get_kodi_json(method="VideoLibrary.GetMovieDetails",
                                 params='{"properties": ["title", "originaltitle", "votes", "playcount", "year", "genre", "studio", "country", "tagline", "plot", "runtime", "file", "plotoutline", "lastplayed", "trailer", "rating", "resume", "art", "streamdetails", "mpaa", "director", "writer", "cast", "dateadded", "imdbnumber"], "movieid":%s }' % str(movie_id))
        if "result" in response and "moviedetails" in response["result"]:
            return self.handle_movies(response["result"]["moviedetails"])
        return {}

    def get_tvshow(self, tvshow_id):
        """Fetch a single tvshow by dbid, flattened; {} when not found."""
        response = get_kodi_json(method="VideoLibrary.GetTVShowDetails",
                                 params='{"properties": ["title", "genre", "year", "rating", "plot", "studio", "mpaa", "cast", "playcount", "episode", "imdbnumber", "premiered", "votes", "lastplayed", "fanart", "thumbnail", "file", "originaltitle", "sorttitle", "episodeguide", "season", "watchedepisodes", "dateadded", "tag", "art"], "tvshowid":%s }' % str(tvshow_id))
        if "result" in response and "tvshowdetails" in response["result"]:
            return self.handle_tvshows(response["result"]["tvshowdetails"])
        return {}

    def get_albums(self):
        """Fetch all albums (title only) from the music library."""
        data = get_kodi_json(method="AudioLibrary.GetAlbums",
                             params='{"properties": ["title"]}')
        if "result" in data and "albums" in data['result']:
            return data['result']['albums']
        else:
            return []

    def create_channel_list(self):
        """Fetch the "alltv" PVR channel group; False on failure.

        NOTE(review): the success check looks for a ``"movies"`` key in a
        PVR.GetChannels response — looks like a copy-paste from the movie
        helpers (expected ``"channels"``); confirm against callers.
        """
        data = get_kodi_json(method="PVR.GetChannels",
                             params='{"channelgroupid":"alltv", "properties": [ "thumbnail", "locked", "hidden", "channel", "lastplayed" ]}')
        if 'result' in data and "movies" in data["result"]:
            return data
        else:
            return False

    def merge_with_local_movie_info(self, online_list=[], library_first=True, sortkey=False):
        """Enrich *online_list* entries with local-library movie info.

        Matches by imdb id, then title, then original title; entries with
        a library match get the local fields merged in.  Returns local
        matches first when *library_first*, optionally sorted by *sortkey*.

        NOTE(review): the mutable default ``online_list=[]`` is shared
        across calls — safe only while callers never mutate it.
        """
        if not self.movie_titles:
            now = time.time()
            # Try the window-property cache before hitting JSON-RPC.
            self.movie_ids = HOME.getProperty("movie_ids.JSON")
            if self.movie_ids and self.movie_ids != "[]":
                self.movie_ids = json.loads(self.movie_ids)
                self.movie_otitles = json.loads(HOME.getProperty("movie_otitles.JSON"))
                self.movie_titles = json.loads(HOME.getProperty("movie_titles.JSON"))
                self.movie_imdbs = json.loads(HOME.getProperty("movie_imdbs.JSON"))
            else:
                data = get_kodi_json(method="VideoLibrary.GetMovies",
                                     params='{"properties": ["originaltitle", "imdbnumber"], "sort": {"method": "none"}}')
                self.movie_ids = []
                self.movie_imdbs = []
                self.movie_otitles = []
                self.movie_titles = []
                if "result" in data and "movies" in data["result"]:
                    for item in data["result"]["movies"]:
                        self.movie_ids.append(item["movieid"])
                        self.movie_imdbs.append(item["imdbnumber"])
                        self.movie_otitles.append(item["originaltitle"].lower())
                        self.movie_titles.append(item["label"].lower())
                HOME.setProperty("movie_ids.JSON", json.dumps(self.movie_ids))
                HOME.setProperty("movie_otitles.JSON", json.dumps(self.movie_otitles))
                HOME.setProperty("movie_titles.JSON", json.dumps(self.movie_titles))
                HOME.setProperty("movie_imdbs.JSON", json.dumps(self.movie_imdbs))
            # NOTE(review): ``now - time.time()`` logs a negative elapsed time.
            log("create_light_movielist: " + str(now - time.time()))
        now = time.time()
        local_items = []
        remote_items = []
        for online_item in online_list:
            # NOTE(review): ``index = False`` conflates "no match" with a
            # match at list position 0 (``if index:`` is False for 0); the
            # tvshow variant below uses a separate ``found`` flag instead.
            index = False
            if "imdb_id" in online_item and online_item["imdb_id"] in self.movie_imdbs:
                index = self.movie_imdbs.index(online_item["imdb_id"])
            elif online_item['title'].lower() in self.movie_titles:
                index = self.movie_titles.index(online_item['title'].lower())
            elif "OriginalTitle" in online_item and online_item["OriginalTitle"].lower() in self.movie_otitles:
                index = self.movie_otitles.index(online_item["OriginalTitle"].lower())
            if index:
                local_item = self.get_movie(self.movie_ids[index])
                if local_item:
                    try:
                        # Reject title matches whose release years differ
                        # by more than one year.
                        diff = abs(int(local_item["year"]) - int(online_item["year"]))
                        if diff > 1:
                            remote_items.append(online_item)
                            continue
                    except:
                        pass
                    online_item.update(local_item)
                    if library_first:
                        local_items.append(online_item)
                    else:
                        remote_items.append(online_item)
                else:
                    remote_items.append(online_item)
            else:
                remote_items.append(online_item)
        log("compare time: " + str(now - time.time()))
        if sortkey:
            local_items = sorted(local_items, key=lambda k: k[sortkey], reverse=True)
            remote_items = sorted(remote_items, key=lambda k: k[sortkey], reverse=True)
        return local_items + remote_items

    def merge_with_local_tvshow_info(self, online_list=[], library_first=True, sortkey=False):
        """Enrich *online_list* entries with local-library tvshow info.

        Same matching strategy as merge_with_local_movie_info (imdb id,
        then title, then original title), with a correct ``found`` flag.

        NOTE(review): mutable default ``online_list=[]`` as above.
        """
        if not self.tvshow_titles:
            now = time.time()
            # Try the window-property cache before hitting JSON-RPC.
            self.tvshow_ids = HOME.getProperty("tvshow_ids.JSON")
            if self.tvshow_ids and self.tvshow_ids != "[]":
                self.tvshow_ids = json.loads(self.tvshow_ids)
                self.tvshow_originaltitles = json.loads(HOME.getProperty("tvshow_originaltitles.JSON"))
                self.tvshow_titles = json.loads(HOME.getProperty("tvshow_titles.JSON"))
                self.tvshow_imdbs = json.loads(HOME.getProperty("tvshow_imdbs.JSON"))
            else:
                data = get_kodi_json(method="VideoLibrary.GetTVShows",
                                     params='{"properties": ["originaltitle", "imdbnumber"], "sort": { "method": "none" } }')
                self.tvshow_ids = []
                self.tvshow_imdbs = []
                self.tvshow_originaltitles = []
                self.tvshow_titles = []
                if "result" in data and "tvshows" in data["result"]:
                    for item in data["result"]["tvshows"]:
                        self.tvshow_ids.append(item["tvshowid"])
                        self.tvshow_imdbs.append(item["imdbnumber"])
                        self.tvshow_originaltitles.append(item["originaltitle"].lower())
                        self.tvshow_titles.append(item["label"].lower())
                HOME.setProperty("tvshow_ids.JSON", json.dumps(self.tvshow_ids))
                HOME.setProperty("tvshow_originaltitles.JSON", json.dumps(self.tvshow_originaltitles))
                HOME.setProperty("tvshow_titles.JSON", json.dumps(self.tvshow_titles))
                HOME.setProperty("tvshow_imdbs.JSON", json.dumps(self.tvshow_imdbs))
            # NOTE(review): ``now - time.time()`` logs a negative elapsed time.
            log("create_light_tvshowlist: " + str(now - time.time()))
        now = time.time()
        local_items = []
        remote_items = []
        for online_item in online_list:
            found = False
            if "imdb_id" in online_item and online_item["imdb_id"] in self.tvshow_imdbs:
                index = self.tvshow_imdbs.index(online_item["imdb_id"])
                found = True
            elif online_item['title'].lower() in self.tvshow_titles:
                index = self.tvshow_titles.index(online_item['title'].lower())
                found = True
            elif "OriginalTitle" in online_item and online_item["OriginalTitle"].lower() in self.tvshow_originaltitles:
                index = self.tvshow_originaltitles.index(online_item["OriginalTitle"].lower())
                found = True
            if found:
                local_item = self.get_tvshow(self.tvshow_ids[index])
                if local_item:
                    try:
                        # Reject title matches whose years differ by more
                        # than one year.
                        diff = abs(int(local_item["year"]) - int(online_item["year"]))
                        if diff > 1:
                            remote_items.append(online_item)
                            continue
                    except:
                        pass
                    online_item.update(local_item)
                    if library_first:
                        local_items.append(online_item)
                    else:
                        remote_items.append(online_item)
                else:
                    remote_items.append(online_item)
            else:
                remote_items.append(online_item)
        log("compare time: " + str(now - time.time()))
        if sortkey:
            local_items = sorted(local_items,
                                 key=lambda k: k[sortkey],
                                 reverse=True)
            remote_items = sorted(remote_items,
                                  key=lambda k: k[sortkey],
                                  reverse=True)
        return local_items + remote_items

    def compare_album_with_library(self, online_list):
        """Annotate online album dicts that match a local album by title
        with dbid, plugin path and thumbnail."""
        if not self.albums:
            self.albums = self.get_albums()
        for online_item in online_list:
            for local_item in self.albums:
                if not online_item["name"] == local_item["title"]:
                    continue
                data = get_kodi_json(method="AudioLibrary.getAlbumDetails",
                                     params='{"properties": ["thumbnail"], "albumid":%s }' % str(local_item["albumid"]))
                album = data["result"]["albumdetails"]
                online_item["dbid"] = album["albumid"]
                online_item["path"] = 'plugin://script.extendedinfo/?info=playalbum&&dbid=%i' % album['albumid']
                if album["thumbnail"]:
                    online_item.update({"thumb": album["thumbnail"]})
                    online_item.update({"Icon": album["thumbnail"]})
                break
        return online_list

    def get_set_name(self, dbid):
        """Return the movie-set label for the movie *dbid*, or ""."""
        data = get_kodi_json(method="VideoLibrary.GetMovieDetails",
                             params='{"properties": ["setid"], "movieid":%s }' % dbid)
        if "result" in data and "moviedetails" in data["result"]:
            set_dbid = data['result']['moviedetails'].get('setid', "")
            if set_dbid:
                data = get_kodi_json(method="VideoLibrary.GetMovieSetDetails",
                                     params='{"setid":%s }' % set_dbid)
                return data['result']['setdetails'].get('label', "")
        return ""

    def get_imdb_id(self, media_type, dbid):
        """Return the imdb number of the movie/tvshow *dbid*, or None."""
        if not dbid:
            return None
        if media_type == "movie":
            data = get_kodi_json(method="VideoLibrary.GetMovieDetails",
                                 params='{"properties": ["imdbnumber","title", "year"], "movieid":%s }' % dbid)
            if "result" in data and "moviedetails" in data["result"]:
                return data['result']['moviedetails']['imdbnumber']
        elif media_type == "tvshow":
            data = get_kodi_json(method="VideoLibrary.GetTVShowDetails",
                                 params='{"properties": ["imdbnumber","title", "year"], "tvshowid":%s }' % dbid)
            if "result" in data and "tvshowdetails" in data["result"]:
                return data['result']['tvshowdetails']['imdbnumber']
        return None

    def get_tvshow_id_by_episode(self, dbid):
        """Return the imdb number of the show owning episode *dbid*, or None."""
        if not dbid:
            return None
        data = get_kodi_json(method="VideoLibrary.GetEpisodeDetails",
                             params='{"properties": ["tvshowid"], "episodeid":%s }' % dbid)
        if "episodedetails" in data["result"]:
            return self.get_imdb_id(media_type="tvshow",
                                    dbid=str(data['result']['episodedetails']['tvshowid']))
        else:
            return None

# Module-level singleton used by the rest of the addon.
local_db = LocalDB()
|
gpl-2.0
|
jostep/tensorflow
|
tensorflow/python/kernel_tests/softplus_op_test.py
|
82
|
4807
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Softplus and SoftplusGrad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SoftplusTest(test.TestCase):
def _npSoftplus(self, np_features):
np_features = np.asarray(np_features)
zero = np.asarray(0).astype(np_features.dtype)
return np.logaddexp(zero, np_features)
def _testSoftplus(self, np_features, use_gpu=False):
np_softplus = self._npSoftplus(np_features)
with self.test_session(use_gpu=use_gpu):
softplus = nn_ops.softplus(np_features)
tf_softplus = softplus.eval()
self.assertAllCloseAccordingToType(np_softplus, tf_softplus)
self.assertTrue(np.all(tf_softplus > 0))
self.assertShapeEqual(np_softplus, softplus)
def testNumbers(self):
for t in [np.float16, np.float32, np.float64]:
self._testSoftplus(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=False)
self._testSoftplus(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=True)
log_eps = np.log(np.finfo(t).eps)
one = t(1)
ten = t(10)
self._testSoftplus(
[
log_eps, log_eps - one, log_eps + one, log_eps - ten,
log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
-log_eps - ten, -log_eps + ten
],
use_gpu=False)
self._testSoftplus(
[
log_eps, log_eps - one, log_eps + one, log_eps - ten,
log_eps + ten - log_eps, -log_eps - one, -log_eps + one,
-log_eps - ten, -log_eps + ten
],
use_gpu=True)
def testGradient(self):
with self.test_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softplus(x, name="softplus")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], y, [2, 5], x_init_value=x_init)
print("softplus (float) gradient err = ", err)
self.assertLess(err, 1e-4)
def testGradGrad(self):
with self.test_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softplus(x, name="softplus")
(grad,) = gradients_impl.gradients(y, x)
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], grad, [2, 5], x_init_value=x_init)
print("softplus (float) gradient of gradient err = ", err)
self.assertLess(err, 5e-5)
def testGradGradGrad(self):
with self.test_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softplus(x, name="softplus")
(grad,) = gradients_impl.gradients(y, x)
(grad_grad,) = gradients_impl.gradients(grad, x)
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], grad_grad, [2, 5], x_init_value=x_init)
print("softplus (float) third-order gradient err = ", err)
self.assertLess(err, 5e-5)
  def testWarnInts(self):
    # Running the op triggers address sanitizer errors, so we just make it
    # NOTE(review): the comment above appears truncated — presumably
    # "...make sure the op still constructs"; only graph construction of
    # softplus on an int input is exercised here, never execution.
    nn_ops.softplus(constant_op.constant(7))
# Run the full test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
|
apache-2.0
|
mark-ignacio/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/profiler_unittest.py
|
124
|
5111
|
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.platforminfo_mock import MockPlatformInfo
from webkitpy.common.system.systemhost_mock import MockSystemHost
from .profiler import ProfilerFactory, GooglePProf
class ProfilerFactoryTest(unittest.TestCase):
    """Tests for ProfilerFactory's platform-based profiler selection."""

    def _assert_default_profiler_name(self, os_name, expected_profiler_name):
        actual = ProfilerFactory.default_profiler_name(MockPlatformInfo(os_name))
        self.assertEqual(actual, expected_profiler_name)

    def test_default_profilers(self):
        # Each supported OS maps to its native profiler; others map to None.
        expectations = [
            ('mac', 'iprofiler'),
            ('linux', 'perf'),
            ('win32', None),
            ('freebsd', None),
        ]
        for os_name, profiler_name in expectations:
            self._assert_default_profiler_name(os_name, profiler_name)

    def test_default_profiler_output(self):
        host = MockSystemHost()
        self.assertFalse(host.filesystem.exists("/tmp/output"))
        # Default mocks are Mac, so iprofile should be default.
        profiler = ProfilerFactory.create_profiler(host, '/bin/executable', '/tmp/output')
        self.assertTrue(host.filesystem.exists("/tmp/output"))
        self.assertEqual(profiler._output_path, "/tmp/output/test.dtps")
        # Linux defaults to perf.
        host.platform.os_name = 'linux'
        profiler = ProfilerFactory.create_profiler(host, '/bin/executable', '/tmp/output')
        self.assertEqual(profiler._output_path, "/tmp/output/test.data")
class GooglePProfTest(unittest.TestCase):
    """Tests for GooglePProf's parsing of `pprof --text` output."""

    def test_pprof_output_regexp(self):
        # Representative pprof text output including preamble noise before
        # the "Total:" line, which the parser is expected to skip.
        pprof_output = """
sometimes
there
is
junk before the total line
Total: 3770 samples
76 2.0% 2.0% 104 2.8% lookup (inline)
60 1.6% 3.6% 60 1.6% FL_SetPrevious (inline)
56 1.5% 5.1% 56 1.5% MaskPtr (inline)
51 1.4% 6.4% 222 5.9% WebCore::HTMLTokenizer::nextToken
42 1.1% 7.6% 47 1.2% WTF::Vector::shrinkCapacity
35 0.9% 8.5% 35 0.9% WTF::RefPtr::get (inline)
33 0.9% 9.4% 43 1.1% append (inline)
29 0.8% 10.1% 67 1.8% WTF::StringImpl::deref (inline)
29 0.8% 10.9% 100 2.7% add (inline)
28 0.7% 11.6% 28 0.7% WebCore::QualifiedName::localName (inline)
25 0.7% 12.3% 27 0.7% WebCore::Private::addChildNodesToDeletionQueue
24 0.6% 12.9% 24 0.6% __memcpy_ssse3_back
23 0.6% 13.6% 23 0.6% intHash (inline)
23 0.6% 14.2% 76 2.0% tcmalloc::FL_Next
23 0.6% 14.8% 95 2.5% tcmalloc::FL_Push
22 0.6% 15.4% 22 0.6% WebCore::MarkupTokenizerBase::InputStreamPreprocessor::peek (inline)
"""
        # Exactly the first ten sample lines following the "Total:" header.
        expected_first_ten_lines = """ 76 2.0% 2.0% 104 2.8% lookup (inline)
60 1.6% 3.6% 60 1.6% FL_SetPrevious (inline)
56 1.5% 5.1% 56 1.5% MaskPtr (inline)
51 1.4% 6.4% 222 5.9% WebCore::HTMLTokenizer::nextToken
42 1.1% 7.6% 47 1.2% WTF::Vector::shrinkCapacity
35 0.9% 8.5% 35 0.9% WTF::RefPtr::get (inline)
33 0.9% 9.4% 43 1.1% append (inline)
29 0.8% 10.1% 67 1.8% WTF::StringImpl::deref (inline)
29 0.8% 10.9% 100 2.7% add (inline)
28 0.7% 11.6% 28 0.7% WebCore::QualifiedName::localName (inline)
"""
        host = MockSystemHost()
        profiler = GooglePProf(host, '/bin/executable', '/tmp/output')
        self.assertEqual(profiler._first_ten_lines_of_profile(pprof_output), expected_first_ten_lines)
|
bsd-3-clause
|
aboood40091/BFLIM-Extractor
|
build.py
|
1
|
1833
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# BFLIM Extractor
# Version v2.3
# Copyright © 2016-2019 AboodXD
# This file is part of BFLIM Extractor.
# BFLIM Extractor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# BFLIM Extractor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""build.py: Build an executable for BFLIM Extractor."""
import os
import shutil
import sys
from cx_Freeze import setup, Executable
# Version string baked into the output directory name and setup() metadata.
version = '2.3'

# Pick a build directory
dir_ = 'bflim_extract v' + version

# Add the "build" parameter to the system argument list
# (cx_Freeze's setup() dispatches on sys.argv, so force its "build" command
# even when the script is run without arguments).
if 'build' not in sys.argv:
    sys.argv.append('build')

# Clear the directory
print('>> Clearing/creating directory...')
if os.path.isdir(dir_):
    shutil.rmtree(dir_)
os.makedirs(dir_)
print('>> Directory ready!')

setup(
    name='BFLIM Extractor',
    version=version,
    description='Wii U BFLIM Extractor',
    author="AboodXD",
    options={
        'build_exe': {
            'build_exe': dir_,  # emit the frozen build directly into dir_
            'optimize': 2,
            'silent': True,
        },
    },
    executables=[
        Executable(
            'bflim_extract.py',
        ),
    ],
)

# Ship the license and readme alongside the frozen executable.
print('>> Attempting to copy required files...')
shutil.copy('COPYING', dir_)
shutil.copy('README.md', dir_)
print('>> Files copied!')
print('>> BFLIM Extractor has been frozen to "%s"!' % dir_)
|
gpl-3.0
|
Lyrositor/moul-scripts
|
Python/system/encodings/mac_cyrillic.py
|
593
|
13710
|
""" Python Character Mapping Codec mac_cyrillic generated from 'MAPPINGS/VENDORS/APPLE/CYRILLIC.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless mac-cyrillic codec backed by the module's charmap tables."""

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding is stateless, so `final` needs no special handling;
        # [0] drops the consumed-length half of the (output, length) tuple.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Charmap decoding is stateless; [0] drops the consumed-length half
        # of the (output, length) tuple.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits encode() from Codec; no stream-specific state is required.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits decode() from Codec; no stream-specific state is required.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry consumed by the `encodings` registry."""
    return codecs.CodecInfo(
        name='mac-cyrillic',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\u0410' # 0x80 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0x81 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0x82 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0x83 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0x84 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0x85 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0x86 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0x87 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0x88 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0x89 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0x8A -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0x8B -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0x8C -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0x8D -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0x8E -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0x8F -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0x90 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0x91 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0x92 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0x93 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0x94 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0x95 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0x96 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0x97 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0x98 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0x99 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0x9A -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0x9B -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0x9C -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0x9D -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0x9E -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0x9F -> CYRILLIC CAPITAL LETTER YA
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\u0490' # 0xA2 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\u0406' # 0xA7 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\u0402' # 0xAB -> CYRILLIC CAPITAL LETTER DJE
u'\u0452' # 0xAC -> CYRILLIC SMALL LETTER DJE
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u0403' # 0xAE -> CYRILLIC CAPITAL LETTER GJE
u'\u0453' # 0xAF -> CYRILLIC SMALL LETTER GJE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u0456' # 0xB4 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u0491' # 0xB6 -> CYRILLIC SMALL LETTER GHE WITH UPTURN
u'\u0408' # 0xB7 -> CYRILLIC CAPITAL LETTER JE
u'\u0404' # 0xB8 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0454' # 0xB9 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0407' # 0xBA -> CYRILLIC CAPITAL LETTER YI
u'\u0457' # 0xBB -> CYRILLIC SMALL LETTER YI
u'\u0409' # 0xBC -> CYRILLIC CAPITAL LETTER LJE
u'\u0459' # 0xBD -> CYRILLIC SMALL LETTER LJE
u'\u040a' # 0xBE -> CYRILLIC CAPITAL LETTER NJE
u'\u045a' # 0xBF -> CYRILLIC SMALL LETTER NJE
u'\u0458' # 0xC0 -> CYRILLIC SMALL LETTER JE
u'\u0405' # 0xC1 -> CYRILLIC CAPITAL LETTER DZE
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\u040b' # 0xCB -> CYRILLIC CAPITAL LETTER TSHE
u'\u045b' # 0xCC -> CYRILLIC SMALL LETTER TSHE
u'\u040c' # 0xCD -> CYRILLIC CAPITAL LETTER KJE
u'\u045c' # 0xCE -> CYRILLIC SMALL LETTER KJE
u'\u0455' # 0xCF -> CYRILLIC SMALL LETTER DZE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u201e' # 0xD7 -> DOUBLE LOW-9 QUOTATION MARK
u'\u040e' # 0xD8 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045e' # 0xD9 -> CYRILLIC SMALL LETTER SHORT U
u'\u040f' # 0xDA -> CYRILLIC CAPITAL LETTER DZHE
u'\u045f' # 0xDB -> CYRILLIC SMALL LETTER DZHE
u'\u2116' # 0xDC -> NUMERO SIGN
u'\u0401' # 0xDD -> CYRILLIC CAPITAL LETTER IO
u'\u0451' # 0xDE -> CYRILLIC SMALL LETTER IO
u'\u044f' # 0xDF -> CYRILLIC SMALL LETTER YA
u'\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xED -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xEE -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE
u'\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0xFD -> CYRILLIC SMALL LETTER E
u'\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU
u'\u20ac' # 0xFF -> EURO SIGN
)
### Encoding table
# Inverse mapping (unicode -> byte) derived from the decoding table above.
encoding_table=codecs.charmap_build(decoding_table)
|
gpl-3.0
|
aaronsw/watchdog
|
vendor/rdflib-2.4.0/test/store_performace.py
|
3
|
3343
|
import unittest
from rdflib.Graph import Graph
from rdflib import URIRef
import gc
import itertools
from time import time
from random import random
from tempfile import mkdtemp
def random_uri():
    """Return a URIRef whose text is a freshly drawn random float."""
    value = random()
    return URIRef("%s" % value)
class StoreTestCase(unittest.TestCase):
    """
    Test case for testing store performance... probably should be
    something other than a unit test... but for now we'll add it as a
    unit test.
    """
    # Backend name for Graph(store=...); subclasses override (e.g. "Memory").
    store = 'default'

    def setUp(self):
        # Disable the garbage collector so its pauses don't skew timings;
        # restored in tearDown if it was enabled before.
        self.gcold = gc.isenabled()
        gc.collect()
        gc.disable()
        self.graph = Graph(store=self.store)
        if self.store == "MySQL":
            # Destroy any leftover tables so each run starts from scratch.
            from test.mysql import configString
            from rdflib.store.MySQL import MySQL
            path=configString
            MySQL().destroy(path)
        else:
            path = a_tmp_dir = mkdtemp()
        self.graph.open(path, create=True)
        # Reference data: triples parsed from a live web page.
        # NOTE(review): requires network access at test time.
        self.input = input = Graph()
        input.parse("http://eikeon.com")

    def tearDown(self):
        self.graph.close()
        if self.gcold:
            gc.enable()
        # TODO: delete a_tmp_dir
        del self.graph

    def testTime(self):
        """Print add timings for the input-copy and random-triple benchmarks."""
        # Repetitions per benchmark.
        number = 1
        print self.store
        print "input:",
        for i in itertools.repeat(None, number):
            self._testInput()
        print "random:",
        for i in itertools.repeat(None, number):
            self._testRandom()
        print "."

    def _testRandom(self):
        # Time adding as many random triples as the input graph holds.
        number = len(self.input)
        store = self.graph

        def add_random():
            s = random_uri()
            p = random_uri()
            o = random_uri()
            store.add((s, p, o))

        it = itertools.repeat(None, number)
        t0 = time()
        for _i in it:
            add_random()
        t1 = time()
        print "%.3g" % (t1 - t0),

    def _testInput(self):
        # Time copying every triple from the parsed input graph.
        number = 1
        store = self.graph

        def add_from_input():
            for t in self.input:
                store.add(t)

        it = itertools.repeat(None, number)
        t0 = time()
        for _i in it:
            add_from_input()
        t1 = time()
        print "%.3g" % (t1 - t0),
class MemoryStoreTestCase(StoreTestCase):
    # Benchmark the always-available in-memory store.
    store = "Memory"
# Conditionally register benchmark cases for optional store backends: each
# try-block defines a TestCase subclass only when that backend's Python
# bindings are importable; otherwise it prints why the case is skipped.
try:
    from rdflib.store.Sleepycat import Sleepycat

    class SleepycatStoreTestCase(StoreTestCase):
        store = "Sleepycat"
except ImportError, e:
    print "Can not test Sleepycat store:", e

try:
    import persistent
    # If we can import persistent then test ZODB store

    class ZODBStoreTestCase(StoreTestCase):
        non_standard_dep = True
        store = "ZODB"
except ImportError, e:
    print "Can not test ZODB store:", e

try:
    import RDF
    # If we can import RDF then test Redland store

    class RedLandTestCase(StoreTestCase):
        non_standard_dep = True
        store = "Redland"
except ImportError, e:
    print "Can not test Redland store:", e

# TODO: add test case for 4Suite backends? from Ft import Rdf
try:
    # import todo # what kind of configuration string does open need?
    import MySQLdb,sha,sys
    # If we can import RDF then test Redland store

    class MySQLTestCase(StoreTestCase):
        non_standard_dep = True
        store = "MySQL"
except ImportError, e:
    print "Can not test MySQL store:", e

if __name__ == '__main__':
    unittest.main()
|
agpl-3.0
|
florian-dacosta/OpenUpgrade
|
addons/hr_timesheet/hr_timesheet.py
|
39
|
10037
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields
from openerp.osv import osv
from openerp.tools.translate import _
class hr_employee(osv.osv):
    """Extend hr.employee with the product and analytic journal used to
    value the employee's timesheet lines."""
    _name = "hr.employee"
    _inherit = "hr.employee"
    _columns = {
        'product_id': fields.many2one('product.product', 'Product', help="If you want to reinvoice working time of employees, link this employee to a service to determinate the cost price of the job."),
        'journal_id': fields.many2one('account.analytic.journal', 'Analytic Journal'),
        # Mirrors the product's UoM; stored so it is searchable/groupable.
        'uom_id': fields.related('product_id', 'uom_id', type='many2one', relation='product.uom', string='Unit of Measure', store=True, readonly=True)
    }

    def _getAnalyticJournal(self, cr, uid, context=None):
        """Default analytic journal: the hr_timesheet module's journal, if
        the current user has read access to it; False otherwise."""
        md = self.pool.get('ir.model.data')
        try:
            dummy, res_id = md.get_object_reference(cr, uid, 'hr_timesheet', 'analytic_journal')
            #search on id found in result to check if current user has read access right
            check_right = self.pool.get('account.analytic.journal').search(cr, uid, [('id', '=', res_id)], context=context)
            if check_right:
                return res_id
        except ValueError:
            # XML id not found (module data not installed): no default.
            pass
        return False

    def _getEmployeeProduct(self, cr, uid, context=None):
        """Default product: the stock "consultant" service product, if the
        current user has read access to it; False otherwise."""
        md = self.pool.get('ir.model.data')
        try:
            dummy, res_id = md.get_object_reference(cr, uid, 'product', 'product_product_consultant')
            #search on id found in result to check if current user has read access right
            check_right = self.pool.get('product.template').search(cr, uid, [('id', '=', res_id)], context=context)
            if check_right:
                return res_id
        except ValueError:
            # XML id not found (product demo data not installed): no default.
            pass
        return False

    _defaults = {
        'journal_id': _getAnalyticJournal,
        'product_id': _getEmployeeProduct
    }
class hr_analytic_timesheet(osv.osv):
    """Timesheet line: a thin wrapper that delegates its financial data to
    an underlying account.analytic.line via _inherits."""
    _name = "hr.analytic.timesheet"
    _table = 'hr_analytic_timesheet'
    _description = "Timesheet Line"
    _inherits = {'account.analytic.line': 'line_id'}
    _order = "id desc"
    _columns = {
        'line_id': fields.many2one('account.analytic.line', 'Analytic Line', ondelete='cascade', required=True),
        'partner_id': fields.related('account_id', 'partner_id', type='many2one', string='Partner', relation='res.partner', store=True),
    }

    def unlink(self, cr, uid, ids, context=None):
        """Delete timesheet lines and their underlying analytic lines."""
        # Collect delegate ids first: they are unreachable after unlink.
        toremove = {}
        for obj in self.browse(cr, uid, ids, context=context):
            toremove[obj.line_id.id] = True
        super(hr_analytic_timesheet, self).unlink(cr, uid, ids, context=context)
        self.pool.get('account.analytic.line').unlink(cr, uid, toremove.keys(), context=context)
        return True

    def on_change_unit_amount(self, cr, uid, id, prod_id, unit_amount, company_id, unit=False, journal_id=False, context=None):
        """Recompute amount/UoM when the quantity or product changes.

        Delegates the amount computation to account.analytic.line and then
        forces the UoM to the product's UoM.
        """
        res = {'value':{}}
        if prod_id and unit_amount:
            # find company
            company_id = self.pool.get('res.company')._company_default_get(cr, uid, 'account.analytic.line', context=context)
            r = self.pool.get('account.analytic.line').on_change_unit_amount(cr, uid, id, prod_id, unit_amount, company_id, unit, journal_id, context=context)
            if r:
                res.update(r)
        # update unit of measurement
        if prod_id:
            uom = self.pool.get('product.product').browse(cr, uid, prod_id, context=context)
            if uom.uom_id:
                res['value'].update({'product_uom_id': uom.uom_id.id})
        else:
            res['value'].update({'product_uom_id': False})
        return res

    def _getEmployeeProduct(self, cr, uid, context=None):
        """Default product: the one configured on the employee linked to
        context['user_id'] (or the current user); False if none."""
        if context is None:
            context = {}
        emp_obj = self.pool.get('hr.employee')
        emp_id = emp_obj.search(cr, uid, [('user_id', '=', context.get('user_id') or uid)], context=context)
        if emp_id:
            emp = emp_obj.browse(cr, uid, emp_id[0], context=context)
            if emp.product_id:
                return emp.product_id.id
        return False

    def _getEmployeeUnit(self, cr, uid, context=None):
        """Default UoM: the UoM of the employee's configured product."""
        emp_obj = self.pool.get('hr.employee')
        if context is None:
            context = {}
        emp_id = emp_obj.search(cr, uid, [('user_id', '=', context.get('user_id') or uid)], context=context)
        if emp_id:
            emp = emp_obj.browse(cr, uid, emp_id[0], context=context)
            if emp.product_id:
                return emp.product_id.uom_id.id
        return False

    def _getGeneralAccount(self, cr, uid, context=None):
        """Default expense account: the product's expense account, falling
        back to its category's expense account."""
        emp_obj = self.pool.get('hr.employee')
        if context is None:
            context = {}
        emp_id = emp_obj.search(cr, uid, [('user_id', '=', context.get('user_id') or uid)], context=context)
        if emp_id:
            emp = emp_obj.browse(cr, uid, emp_id[0], context=context)
            if bool(emp.product_id):
                a = emp.product_id.property_account_expense.id
                if not a:
                    a = emp.product_id.categ_id.property_account_expense_categ.id
                if a:
                    return a
        return False

    def _getAnalyticJournal(self, cr, uid, context=None):
        """Default analytic journal from the employee record.

        Raises a user-facing warning when no employee or no journal is
        configured, since a timesheet line cannot be created without one.
        """
        emp_obj = self.pool.get('hr.employee')
        if context is None:
            context = {}
        if context.get('employee_id'):
            emp_id = [context.get('employee_id')]
        else:
            emp_id = emp_obj.search(cr, uid, [('user_id','=',context.get('user_id') or uid)], limit=1, context=context)
        if not emp_id:
            raise osv.except_osv(_('Warning!'), _('Please create an employee for this user, using the menu: Human Resources > Employees.'))
        emp = emp_obj.browse(cr, uid, emp_id[0], context=context)
        if emp.journal_id:
            return emp.journal_id.id
        else :
            raise osv.except_osv(_('Warning!'), _('No analytic journal defined for \'%s\'.\nYou should assign an analytic journal on the employee form.')%(emp.name))

    _defaults = {
        'product_uom_id': _getEmployeeUnit,
        'product_id': _getEmployeeProduct,
        'general_account_id': _getGeneralAccount,
        'journal_id': _getAnalyticJournal,
        'date': lambda self, cr, uid, ctx: ctx.get('date', fields.date.context_today(self,cr,uid,context=ctx)),
        'user_id': lambda obj, cr, uid, ctx: ctx.get('user_id') or uid,
    }

    def on_change_account_id(self, cr, uid, ids, account_id, context=None):
        # No client-side reaction on account change.
        return {'value':{}}

    def on_change_date(self, cr, uid, ids, date):
        """Warn the user that moving the date moves the line to another
        timesheet; never changes any value itself."""
        if ids:
            new_date = self.read(cr, uid, ids[0], ['date'])['date']
            if date != new_date:
                warning = {'title':'User Alert!','message':'Changing the date will let this entry appear in the timesheet of the new date.'}
                return {'value':{},'warning':warning}
        return {'value':{}}

    def create(self, cr, uid, vals, context=None):
        """Create a line, validating journal and analytic account first so
        the error can name the offending employee."""
        if context is None:
            context = {}
        emp_obj = self.pool.get('hr.employee')
        emp_id = emp_obj.search(cr, uid, [('user_id', '=', context.get('user_id') or uid)], context=context)
        ename = ''
        if emp_id:
            ename = emp_obj.browse(cr, uid, emp_id[0], context=context).name
        if not vals.get('journal_id',False):
            raise osv.except_osv(_('Warning!'), _('No \'Analytic Journal\' is defined for employee %s \nDefine an employee for the selected user and assign an \'Analytic Journal\'!')%(ename,))
        if not vals.get('account_id',False):
            raise osv.except_osv(_('Warning!'), _('No analytic account is defined on the project.\nPlease set one or we cannot automatically fill the timesheet.'))
        return super(hr_analytic_timesheet, self).create(cr, uid, vals, context=context)

    def on_change_user_id(self, cr, uid, ids, user_id):
        """Recompute all employee-derived defaults when the user changes."""
        if not user_id:
            return {}
        context = {'user_id': user_id}
        return {'value': {
            'product_id': self. _getEmployeeProduct(cr, uid, context),
            'product_uom_id': self._getEmployeeUnit(cr, uid, context),
            'general_account_id': self._getGeneralAccount(cr, uid, context),
            'journal_id': self._getAnalyticJournal(cr, uid, context),
        }}
class account_analytic_account(osv.osv):
    """Extend analytic accounts with a flag marking timesheet-managed projects."""
    _inherit = 'account.analytic.account'
    _description = 'Analytic Account'
    _columns = {
        'use_timesheets': fields.boolean('Timesheets', help="Check this field if this project manages timesheets"),
    }

    def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
        """Propagate the template's use_timesheets flag onto new contracts."""
        res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
        if template_id and 'value' in res:
            template = self.browse(cr, uid, template_id, context=context)
            res['value']['use_timesheets'] = template.use_timesheets
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
rasathus/pigredients
|
pigredients/ics/hmc5883l.py
|
1
|
4135
|
import sys
import smbus
import math
from Adafruit_I2C import Adafruit_I2C
_hmc5883l_address = 0x1e
_mode_register = 0x02
_mode_map = { 'continuous' : 0x00,
'single' : 0x01,
'idle' : 0x03}
_configuration_reg_a = 0x00
_configuration_reg_b = 0x01
_read_register = 0x03
class HMC5883L(object):
def __init__(self, i2c_bus=0, i2c_address=_hmc5883l_address, debug=False, declination_angle=0.0457):
self.debug = debug
self.declination_angle = declination_angle
self.gauss = 0.88
self.scale = 0x00
self._multiplication_factor = 0.73
# Create our i2c connection
self._bus = smbus.SMBus(i2c_bus)
self.i2c = Adafruit_I2C(i2c_address, bus=self._bus, debug=self.debug)
# set our compass to continuous mode.
self.set_mode('continuous')
def set_mode(self, mode):
if mode.lower() in _mode_map:
self.i2c.write8(_mode_register, _mode_map[mode.lower()])
else:
raise Exception('Invalid mode requested, valid modes are continuous, single and idle.')
def get_raw(self):
result = { 'x' : None, 'y' : None, 'z' : None }
# clear the buffer
read_buffer = self.i2c.readList(_read_register, 6)
result['x'] = (read_buffer[0] << 8) | read_buffer[1]
result['z'] = (read_buffer[2] << 8) | read_buffer[3]
result['y'] = (read_buffer[4] << 8) | read_buffer[5]
self.last_result = result
return result
def get_value(self):
result = self.get_raw()
result['x'] = result['x'] * self._multiplication_factor
result['z'] = result['z'] * self._multiplication_factor
result['y'] = result['y'] * self._multiplication_factor
return result
def set_scale(self, gauss):
if gauss == 0.88:
self.gauss = gauss
self.scale = 0x00
self._multiplication_factor = 0.73
elif gauss == 1.3:
self.gauss = gauss
self.scale = 0x01
self._multiplication_factor = 0.92
elif gauss == 1.9:
self.gauss = gauss
self.scale = 0x02
self._multiplication_factor = 1.22
elif gauss == 2.5:
self.gauss = gauss
self.scale = 0x03
self._multiplication_factor = 1.52
elif gauss == 4.0:
self.gauss = gauss
self.scale = 0x04
self._multiplication_factor = 2.27
elif gauss == 4.7:
self.gauss = gauss
self.scale = 0x05
self._multiplication_factor = 2.56
elif gauss == 5.6:
self.gauss = gauss
self.scale = 0x06
self._multiplication_factor = 3.03
elif gauss == 8.1:
self.gauss = gauss
self.scale = 0x07
self._multiplication_factor = 4.35
else:
raise Exception("Invalid gauss value, valid gauss values are 0.88, 1.3, 1.9, 2.5, 4.0, 4.7, 5.6 or 8.1")
# Setting is in the top 3 bits of the register.
self.scale = self.scale << 5
self.i2c.write8(_configuration_reg_b, self.scale)
def get_scale(self):
    # Return the raw Configuration Register B value last written by
    # set_scale() (gain bits already shifted into the top three bits) —
    # not the gauss range; read self.gauss for that.
    return self.scale
def get_heading(self):
    # Compute the compass heading in degrees from the scaled X/Y field
    # components. Python 2 print statements below are debug tracing.
    scaled_data = self.get_value()
    # Heading in the horizontal plane; atan2 handles all quadrants.
    heading = math.atan2(scaled_data['y'], scaled_data['x'])
    print "--Begining of Reading--"
    print "Heading pre correction : %f" % heading
    print "Heading Degrees pre correction : %f" % math.degrees(heading)
    # Compensate for local magnetic declination (instance attribute set
    # elsewhere; presumably in radians — TODO confirm against caller).
    heading += self.declination_angle;
    print "Pre-corrected heading %s" % heading
    # Normalize into [0, 2*pi).
    if heading < 0 :
        heading = heading + 2 * math.pi
    elif heading > 2 * math.pi:
        heading = heading - 2 * math.pi
    print "X Value : %f" % scaled_data['x']
    print "Y Value : %f" % scaled_data['y']
    print "Radians : %f" % heading
    print "Degrees : %f" % math.degrees(heading)
    print "--End of Reading--"
    return math.degrees(heading)
|
mit
|
AOSP-S4-KK/platform_external_skia
|
bench/tile_analyze.py
|
198
|
12134
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file.
""" Analyze per-tile and viewport bench data, and output visualized results.
"""
__author__ = 'bensong@google.com (Ben Chen)'
import bench_util
import boto
import math
import optparse
import os
import re
import shutil
from oauth2_plugin import oauth2_plugin
# The default platform to analyze. Used when OPTION_PLATFORM flag is not set.
DEFAULT_PLATFORM = 'Nexus10_4-1_Float_Bench_32'
# Template for gsutil uri.
GOOGLE_STORAGE_URI_SCHEME = 'gs'
URI_BUCKET = 'chromium-skia-gm'
# Maximum number of rows of tiles to track for viewport covering.
MAX_TILE_ROWS = 8
# Constants for optparse.
USAGE_STRING = 'USAGE: %s [options]'
HOWTO_STRING = """
Note: to read bench data stored in Google Storage, you will need to set up the
corresponding Python library.
See http://developers.google.com/storage/docs/gspythonlibrary for details.
"""
HELP_STRING = """
For the given platform and revision number, find corresponding viewport and
tile benchmarks for each available picture bench, and output visualization and
analysis in HTML. By default it reads from Skia's Google Storage location where
bot data are stored, but if --dir is given, will read from local directory
instead.
""" + HOWTO_STRING
OPTION_DIR = '--dir'
OPTION_DIR_SHORT = '-d'
OPTION_REVISION = '--rev'
OPTION_REVISION_SHORT = '-r'
OPTION_PLATFORM = '--platform'
OPTION_PLATFORM_SHORT = '-p'
# Bench representation algorithm flag.
OPTION_REPRESENTATION_ALG = '--algorithm'
OPTION_REPRESENTATION_ALG_SHORT = '-a'
# Bench representation algorithm. See trunk/bench/bench_util.py.
REPRESENTATION_ALG = bench_util.ALGORITHM_25TH_PERCENTILE
# Constants for bench file matching.
GOOGLE_STORAGE_OBJECT_NAME_PREFIX = 'perfdata/Skia_'
BENCH_FILE_PREFIX_TEMPLATE = 'bench_r%s_'
TILING_FILE_NAME_INDICATOR = '_tile_'
VIEWPORT_FILE_NAME_INDICATOR = '_viewport_'
# Regular expression for matching format '<integer>x<integer>'.
# Raw string so '\d' is a regex escape, not a (Python-3-invalid) string escape.
DIMENSIONS_RE = r'(\d+)x(\d+)'
# HTML and JS output templates.
HTML_PREFIX = """
<html><head><script type="text/javascript" src="https://www.google.com/jsapi">
</script><script type="text/javascript">google.load("visualization", "1.1",
{packages:["table"]});google.load("prototype", "1.6");</script>
<script type="text/javascript" src="https://systemsbiology-visualizations.googlecode.com/svn/trunk/src/main/js/load.js"></script><script
type="text/javascript"> systemsbiology.load("visualization", "1.0",
{packages:["bioheatmap"]});</script><script type="text/javascript">
google.setOnLoadCallback(drawVisualization); function drawVisualization() {
"""
HTML_SUFFIX = '</body></html>'
BAR_CHART_TEMPLATE = ('<img src="https://chart.googleapis.com/chart?chxr=0,0,'
'300&chxt=x&chbh=15,0&chs=600x150&cht=bhg&chco=80C65A,224499,FF0000,0A8C8A,'
'EBB671,DE091A,000000,00ffff&chds=a&chdl=%s&chd=t:%s" /><br>\n')
DRAW_OPTIONS = ('{passThroughBlack:false,useRowLabels:false,cellWidth:30,'
'cellHeight:30}')
TABLE_OPTIONS = '{showRowNumber:true,firstRowNumber:" ",sort:"disable"}'
def GetFiles(rev, bench_dir, platform):
    """Reads in bench files of interest into a dictionary.

    If bench_dir is not empty, tries to read in local bench files; otherwise
    check Google Storage. Filters files by revision (rev) and platform, and
    ignores non-tile, non-viewport bench files.

    Outputs dictionary [filename] -> [file content].

    Raises Exception if no matching bench file is found.
    """
    file_dic = {}
    if not bench_dir:
        uri = boto.storage_uri(URI_BUCKET, GOOGLE_STORAGE_URI_SCHEME)
        # The boto API does not allow prefix/wildcard matching of Google Storage
        # objects. And Google Storage has a flat structure instead of being
        # organized in directories. Therefore, we have to scan all objects in the
        # Google Storage bucket to find the files we need, which is slow.
        # The option of implementing prefix matching as in gsutil seems to be
        # overkill, but gsutil does not provide an API ready for use. If speed is a
        # big concern, we suggest copying bot bench data from Google Storage using
        # gsutil and use --log_dir for fast local data reading.
        for obj in uri.get_bucket():
            # Filters out files of no interest.
            if (not obj.name.startswith(GOOGLE_STORAGE_OBJECT_NAME_PREFIX) or
                (obj.name.find(TILING_FILE_NAME_INDICATOR) < 0 and
                 obj.name.find(VIEWPORT_FILE_NAME_INDICATOR) < 0) or
                obj.name.find(platform) < 0 or
                obj.name.find(BENCH_FILE_PREFIX_TEMPLATE % rev) < 0):
                continue
            # Key on the bare object name (strip any pseudo-directory prefix).
            file_dic[
                obj.name[obj.name.rfind('/') + 1 : ]] = obj.get_contents_as_string()
    else:
        for f in os.listdir(bench_dir):
            if (not os.path.isfile(os.path.join(bench_dir, f)) or
                (f.find(TILING_FILE_NAME_INDICATOR) < 0 and
                 f.find(VIEWPORT_FILE_NAME_INDICATOR) < 0) or
                not f.startswith(BENCH_FILE_PREFIX_TEMPLATE % rev)):
                continue
            # Use a context manager so the handle is closed promptly;
            # the original open(...).read() leaked it until GC.
            with open(os.path.join(bench_dir, f)) as fh:
                file_dic[f] = fh.read()
    if not file_dic:
        raise Exception('No bench file found in "%s" or Google Storage.' %
                        bench_dir)
    return file_dic
def GetTileMatrix(layout, tile_size, values, viewport):
    """Build the tile-time matrix restricted to the given viewport.

    layout, tile_size and viewport are strings of format <w>x<h>: layout
    gives the tile grid as <columns>x<rows>, the other two give pixel
    sizes. Rows are truncated to MAX_TILE_ROWS to adjust for very long
    skp's. values holds per-tile benches ordered row-by-row from the
    top-left tile.

    Returns [sum, matrix]: sum is the total tile time covering the
    viewport; matrix has the per-tile times inside the viewport and
    zeros elsewhere, for visualization.
    """
    grid_cols, grid_rows = (int(part) for part in layout.split('x'))
    tile_w, tile_h = (int(part) for part in tile_size.split('x'))
    view_w, view_h = (int(part) for part in viewport.split('x'))
    # Number of tile columns/rows needed to cover the viewport.
    cols_needed = int(math.ceil(view_w * 1.0 / tile_w))
    rows_needed = int(math.ceil(view_h * 1.0 / tile_h))
    shown_rows = min(grid_rows, MAX_TILE_ROWS)
    covered_total = 0
    matrix = [[0] * grid_cols for _ in range(shown_rows)]
    for col in range(min(cols_needed, grid_cols)):
        for row in range(min(shown_rows, rows_needed)):
            cell = values[row * grid_cols + col]
            matrix[row][col] = cell
            covered_total += cell
    return [covered_total, matrix]
def GetTileVisCodes(suffix, matrix):
    """Build the JS and HTML fragments that visualize one tile config.

    Returns [js_codes, row1, row2]: row1 is a table cell hosting the
    heatmap, row2 the cell hosting the data table. suffix is only used
    to keep element/variable names unique across the whole HTML page.
    """
    js_parts = ['var data_%s=new google.visualization.DataTable();' % suffix]
    for column in range(len(matrix[0])):
        js_parts.append('data_%s.addColumn("number","%s");' % (suffix, column))
    js_parts.append('data_%s.addRows(%s);' % (suffix, str(matrix)))
    # Heatmap chart bound to the <div id="suffix"> cell.
    js_parts.append('var heat_%s=new org.systemsbiology.visualization' % suffix +
                    '.BioHeatMap(document.getElementById("%s"));' % suffix +
                    'heat_%s.draw(data_%s,%s);' % (suffix, suffix, DRAW_OPTIONS))
    # Data table chart bound to the <div id="tsuffix"> cell.
    js_parts.append('var table_%s=new google.visualization.Table(document.' % suffix +
                    'getElementById("t%s"));table_%s.draw(data_%s,%s);\n' % (
                        suffix, suffix, suffix, TABLE_OPTIONS))
    heat_cell = '<td>%s<div id="%s"></div></td>' % (suffix, suffix)
    table_cell = '<td><div id="t%s"></div></td>' % suffix
    return [''.join(js_parts), heat_cell, table_cell]
def OutputTileAnalysis(rev, representation_alg, bench_dir, platform):
    """Reads skp bench data and outputs tile vs. viewport analysis for the given
    platform.

    Ignores data with revisions other than rev. If bench_dir is not empty, read
    from the local directory instead of Google Storage.
    Uses the provided representation_alg for calculating bench representations.

    Returns (js_codes, body_codes): strings of js/html codes for stats and
    visualization.
    """
    js_codes = ''
    body_codes = ('}</script></head><body>'
                  '<h3>PLATFORM: %s REVISION: %s</h3><br>' % (platform, rev))
    bench_dic = {}  # [bench][config] -> [layout, [values]]
    file_dic = GetFiles(rev, bench_dir, platform)
    for f in file_dic:
        for point in bench_util.parse('', file_dic[f].split('\n'),
                                      representation_alg):
            if point.time_type:  # Ignores non-walltime time_type.
                continue
            bench = point.bench.replace('.skp', '')
            config = point.config.replace('simple_', '')
            components = config.split('_')
            if components[0] == 'viewport':
                # Viewport config: store [viewport-size-string, [time]].
                bench_dic.setdefault(bench, {})[config] = [components[1], [point.time]]
            else:  # Stores per-tile benches.
                bench_dic.setdefault(bench, {})[config] = [
                    point.tile_layout, point.per_tile_values]
    # NOTE(review): dict.keys() returning a sortable list is Python 2
    # behavior; this module predates Python 3.
    benches = bench_dic.keys()
    benches.sort()
    for bench in benches:
        body_codes += '<h4>%s</h4><br><table><tr>' % bench
        heat_plots = ''  # For table row of heatmap plots.
        table_plots = ''  # For table row of data table plots.
        # For bar plot legends and values in URL string.
        legends = ''
        values = ''
        # Sorted so 'tile_*' configs come before 'viewport*' ones.
        keys = bench_dic[bench].keys()
        keys.sort()
        if not keys[-1].startswith('viewport'):  # No viewport to analyze; skip.
            continue
        else:
            # Extracts viewport size, which for all viewport configs is the same.
            viewport = bench_dic[bench][keys[-1]][0]
        for config in keys:
            [layout, value_li] = bench_dic[bench][config]
            if config.startswith('tile_'):  # For per-tile data, visualize tiles.
                tile_size = config.split('_')[1]
                if (not re.search(DIMENSIONS_RE, layout) or
                    not re.search(DIMENSIONS_RE, tile_size) or
                    not re.search(DIMENSIONS_RE, viewport)):
                    continue  # Skip unrecognized formats.
                [viewport_tile_sum, matrix] = GetTileMatrix(
                    layout, tile_size, value_li, viewport)
                values += '%s|' % viewport_tile_sum
                [this_js, row1, row2] = GetTileVisCodes(config + '_' + bench, matrix)
                heat_plots += row1
                table_plots += row2
                js_codes += this_js
            else:  # For viewport data, there is only one element in value_li.
                values += '%s|' % sum(value_li)
                legends += '%s:%s|' % (config, sum(value_li))
        # [:-1] strips the trailing '|' separator from legends/values.
        body_codes += (heat_plots + '</tr><tr>' + table_plots + '</tr></table>' +
                       '<br>' + BAR_CHART_TEMPLATE % (legends[:-1], values[:-1]))
    return (js_codes, body_codes)
def main():
    """Parses flags and outputs expected Skia picture bench results."""
    parser = optparse.OptionParser(USAGE_STRING % '%prog' + HELP_STRING)
    parser.add_option(OPTION_PLATFORM_SHORT, OPTION_PLATFORM,
        dest='plat', default=DEFAULT_PLATFORM,
        help='Platform to analyze. Set to DEFAULT_PLATFORM if not given.')
    parser.add_option(OPTION_REVISION_SHORT, OPTION_REVISION,
        dest='rev',
        help='(Mandatory) revision number to analyze.')
    parser.add_option(OPTION_DIR_SHORT, OPTION_DIR,
        dest='log_dir', default='',
        help=('(Optional) local directory where bench log files reside. If left '
              'empty (by default), will try to read from Google Storage.'))
    parser.add_option(OPTION_REPRESENTATION_ALG_SHORT, OPTION_REPRESENTATION_ALG,
        dest='alg', default=REPRESENTATION_ALG,
        help=('Bench representation algorithm. '
              'Default to "%s".' % REPRESENTATION_ALG))
    (options, args) = parser.parse_args()
    # --rev is mandatory and must be an all-digit string.
    if not (options.rev and options.rev.isdigit()):
        parser.error('Please provide correct mandatory flag %s' % OPTION_REVISION)
        return
    rev = int(options.rev)
    (js_codes, body_codes) = OutputTileAnalysis(
        rev, options.alg, options.log_dir, options.plat)
    # Python 2 print statement: emits the complete HTML report to stdout.
    print HTML_PREFIX + js_codes + body_codes + HTML_SUFFIX
if '__main__' == __name__:
main()
|
bsd-3-clause
|
LilithWittmann/froide
|
froide/publicbody/south_migrations/0017_auto__add_field_foilaw_email_only.py
|
6
|
10646
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from froide.helper.auth_migration_util import USER_DB_NAME
APP_MODEL, APP_MODEL_NAME = 'account.User', 'account.user'
class Migration(SchemaMigration):
    # South schema migration (auto-generated): adds/removes the
    # FoiLaw.email_only boolean column. The 'models' snapshot below is
    # generated code used by South's frozen ORM; edit with care.

    def forwards(self, orm):
        """Apply: add the 'email_only' column to publicbody_foilaw."""
        # Adding field 'FoiLaw.email_only'
        db.add_column('publicbody_foilaw', 'email_only', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)

    def backwards(self, orm):
        """Revert: drop the 'email_only' column from publicbody_foilaw."""
        # Deleting field 'FoiLaw.email_only'
        db.delete_column('publicbody_foilaw', 'email_only')

    # Frozen model definitions consumed by South's fake ORM at migration
    # time (not the live models).
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        # The user model key/table are indirected so the migration works
        # with the project's configurable user model.
        APP_MODEL_NAME: {
            'Meta': {'object_name': 'User', 'db_table': "'%s'" % USER_DB_NAME},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'publicbody.foilaw': {
            'Meta': {'object_name': 'FoiLaw'},
            'combined': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['publicbody.FoiLaw']", 'symmetrical': 'False', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'email_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['publicbody.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
            'letter_end': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'letter_start': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'long_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'max_response_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'max_response_time_unit': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'mediator': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'mediating_laws'", 'null': 'True', 'blank': 'True', 'to': "orm['publicbody.PublicBody']"}),
            'meta': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'priority': ('django.db.models.fields.SmallIntegerField', [], {'default': '3'}),
            'refusal_reasons': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['sites.Site']", 'null': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
            'updated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        },
        'publicbody.jurisdiction': {
            'Meta': {'object_name': 'Jurisdiction'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'rank': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'})
        },
        'publicbody.publicbody': {
            'Meta': {'ordering': "('name',)", 'object_name': 'PublicBody'},
            '_created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'public_body_creators'", 'null': 'True', 'to': "orm['%s']" % APP_MODEL}),
            '_updated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'public_body_updaters'", 'null': 'True', 'to': "orm['%s']" % APP_MODEL}),
            'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'classification': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'classification_slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
            'confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'contact': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'depth': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['publicbody.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
            'laws': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['publicbody.FoiLaw']", 'symmetrical': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'number_of_requests': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'children'", 'null': 'True', 'blank': 'True', 'to': "orm['publicbody.PublicBody']"}),
            'request_note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'root': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'descendants'", 'null': 'True', 'blank': 'True', 'to': "orm['publicbody.PublicBody']"}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['sites.Site']", 'null': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
            'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['publicbody.PublicBodyTopic']", 'null': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'website_dump': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'publicbody.publicbodytopic': {
            'Meta': {'object_name': 'PublicBodyTopic'},
            'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'})
        },
        'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    complete_apps = ['publicbody']
|
mit
|
treycausey/scikit-learn
|
sklearn/externals/joblib/test/test_parallel.py
|
3
|
9997
|
"""
Test the parallel module.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2010-2011 Gael Varoquaux
# License: BSD Style, 3 clauses.
import time
import sys
import io
import os
try:
import cPickle as pickle
PickleError = TypeError
except:
import pickle
PickleError = pickle.PicklingError
if sys.version_info[0] == 3:
PickleError = pickle.PicklingError
try:
# Python 2/Python 3 compat
unicode('str')
except NameError:
unicode = lambda s: s
from ..parallel import Parallel, delayed, SafeFunction, WorkerInterrupt, \
mp, cpu_count, VALID_BACKENDS
from ..my_exceptions import JoblibException
import nose
ALL_VALID_BACKENDS = [None] + VALID_BACKENDS
if hasattr(mp, 'get_context'):
# Custom multiprocessing context in Python 3.4+
ALL_VALID_BACKENDS.append(mp.get_context('spawn'))
###############################################################################
def division(x, y):
    """Return the quotient x / y (module-level so workers can pickle it)."""
    quotient = x / y
    return quotient
def square(x):
    """Return x squared."""
    return x * x
def exception_raiser(x):
    """Return x unchanged, except raise ValueError for the magic value 7."""
    if x != 7:
        return x
    raise ValueError
def interrupt_raiser(x):
    """Sleep briefly, then simulate a user interrupt (argument ignored)."""
    time.sleep(.05)
    raise KeyboardInterrupt
def f(x, y=0, z=0):
    """A module-level function so that it can be spawn with
    multiprocessing: returns x squared plus the two offsets.
    """
    squared = x * x
    return squared + y + z
###############################################################################
def test_cpu_count():
    # cpu_count() (re-exported from joblib's parallel module) must report
    # at least one CPU on any host.
    assert cpu_count() > 0
###############################################################################
# Test parallel
def check_simple_parallel(backend):
    """Check that Parallel matches a sequential list comprehension for
    several n_jobs values, then smoke-test the verbose reporting while
    capturing stdout/stderr so the noise does not leak into test output.
    """
    X = range(5)
    for n_jobs in (1, 2, -1, -2):
        nose.tools.assert_equal(
            [square(x) for x in X],
            Parallel(n_jobs=n_jobs)(delayed(square)(x) for x in X))
    try:
        # To smoke-test verbosity, we capture stdout and stderr.
        orig_stdout = sys.stdout
        # BUGFIX: was `orig_stderr = sys.stdout`, so the real stderr was
        # never saved and could not be restored below.
        orig_stderr = sys.stderr
        if sys.version_info[0] == 3:
            # BUGFIX: was assigning sys.stderr twice; stdout was never
            # captured on Python 3.
            sys.stdout = io.StringIO()
            sys.stderr = io.StringIO()
        else:
            sys.stdout = io.BytesIO()
            sys.stderr = io.BytesIO()
        for verbose in (2, 11, 100):
            Parallel(n_jobs=-1, verbose=verbose, backend=backend)(
                delayed(square)(x) for x in X)
            Parallel(n_jobs=1, verbose=verbose, backend=backend)(
                delayed(square)(x) for x in X)
            Parallel(n_jobs=2, verbose=verbose, pre_dispatch=2,
                     backend=backend)(
                delayed(square)(x) for x in X)
            Parallel(n_jobs=2, verbose=verbose, backend=backend)(
                delayed(square)(x) for x in X)
    except Exception as e:
        # On failure, dump the captured output (after restoring the real
        # streams) so the cause is visible, then re-raise.
        my_stdout = sys.stdout
        my_stderr = sys.stderr
        sys.stdout = orig_stdout
        sys.stderr = orig_stderr
        print(unicode(my_stdout.getvalue()))
        print(unicode(my_stderr.getvalue()))
        raise e
    finally:
        sys.stdout = orig_stdout
        sys.stderr = orig_stderr
def test_simple_parallel():
    # Nose test generator: run the simple-parallel check once per backend
    # (None means "use the default backend").
    for backend in ALL_VALID_BACKENDS:
        yield check_simple_parallel, backend
def nested_loop(backend):
    # Helper executed *inside* a Parallel worker to exercise nested
    # parallelism with the given backend.
    Parallel(n_jobs=2, backend=backend)(
        delayed(square)(.01) for _ in range(2))
def check_nested_loop(parent_backend, child_backend):
    # Outer Parallel spawns workers that each create their own Parallel
    # (see nested_loop), mixing the two backends.
    Parallel(n_jobs=2, backend=parent_backend)(
        delayed(nested_loop)(child_backend) for _ in range(2))
def test_nested_loop():
    # Try every (parent, child) backend combination.
    for parent_backend in VALID_BACKENDS:
        for child_backend in VALID_BACKENDS:
            yield check_nested_loop, parent_backend, child_backend
def increment_input(a):
    """Add one to the first element of a, mutating it in place."""
    a[0] = a[0] + 1
def test_increment_input_with_threads():
    """Input is mutable when using the threading backend"""
    # Threads share memory, so all five in-place increments land on the
    # single list object (a process backend would mutate pickled copies).
    a = [0]
    Parallel(n_jobs=2, backend="threading")(
        delayed(increment_input)(a) for _ in range(5))
    nose.tools.assert_equal(a, [5])
def test_parallel_kwargs():
    """ Check the keyword argument processing of pmap.
    """
    # Compare the sequential result of f(x, y=1) with the Parallel one
    # for both a serial (1) and a multi-worker (4) configuration.
    lst = range(10)
    for n_jobs in (1, 4):
        yield (nose.tools.assert_equal,
               [f(x, y=1) for x in lst],
               Parallel(n_jobs=n_jobs)(delayed(f)(x, y=1) for x in lst)
               )
def test_parallel_pickling():
    """ Check that pmap captures the errors when it is passed an object
    that cannot be pickled.
    """
    # g is a nested (non-module-level) function, so it cannot be pickled
    # for transport to a worker; Parallel must surface PickleError.
    def g(x):
        return x ** 2
    nose.tools.assert_raises(PickleError,
                             Parallel(),
                             (delayed(g)(x) for x in range(10))
                             )
def test_error_capture():
    # Check that error are captured, and that correct exceptions
    # are raised.
    if mp is not None:
        # A JoblibException will be raised only if there is indeed
        # multiprocessing
        nose.tools.assert_raises(JoblibException,
                                 Parallel(n_jobs=2),
                                 [delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))],
                                 )
        # KeyboardInterrupt must pass through as WorkerInterrupt so the
        # user can stop a parallel run.
        nose.tools.assert_raises(WorkerInterrupt,
                                 Parallel(n_jobs=2),
                                 [delayed(interrupt_raiser)(x) for x in (1, 0)],
                                 )
    else:
        # Without multiprocessing, errors propagate unwrapped.
        nose.tools.assert_raises(KeyboardInterrupt,
                                 Parallel(n_jobs=2),
                                 [delayed(interrupt_raiser)(x) for x in (1, 0)],
                                 )
        nose.tools.assert_raises(ZeroDivisionError,
                                 Parallel(n_jobs=2),
                                 [delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))],
                                 )
    # With n_jobs=1 no wrapping should occur: the raw ZeroDivisionError
    # (not a JoblibException) must reach the caller.
    try:
        ex = JoblibException()
        Parallel(n_jobs=1)(
            delayed(division)(x, y) for x, y in zip((0, 1), (1, 0)))
    except Exception:
        # Cannot use 'except as' to maintain Python 2.5 compatibility
        ex = sys.exc_info()[1]
    nose.tools.assert_false(isinstance(ex, JoblibException))
class Counter(object):
    # Callable fixture: each call appends to list1 and asserts it stays
    # the same length as list2 (used to check lockstep behavior between
    # producer and consumer sides).
    def __init__(self, list1, list2):
        self.list1 = list1
        self.list2 = list2

    def __call__(self, i):
        self.list1.append(i)
        nose.tools.assert_equal(len(self.list1), len(self.list2))
def consumer(queue, item):
    """Record the consumption of item on the shared queue/list."""
    message = 'Consumed %s' % item
    queue.append(message)
def check_dispatch_one_job(backend):
    """ Test that with only one job, Parallel does act as a iterator.
    """
    queue = list()

    def producer():
        # Interleaving check: each produced item must be consumed before
        # the next one is generated when n_jobs=1.
        for i in range(6):
            queue.append('Produced %i' % i)
            yield i

    Parallel(n_jobs=1, backend=backend)(
        delayed(consumer)(queue, x) for x in producer())
    nose.tools.assert_equal(queue,
                            ['Produced 0', 'Consumed 0',
                             'Produced 1', 'Consumed 1',
                             'Produced 2', 'Consumed 2',
                             'Produced 3', 'Consumed 3',
                             'Produced 4', 'Consumed 4',
                             'Produced 5', 'Consumed 5']
                            )
    nose.tools.assert_equal(len(queue), 12)
def test_dispatch_one_job():
    # Nose test generator: one run per backend.
    for backend in VALID_BACKENDS:
        yield check_dispatch_one_job, backend
def check_dispatch_multiprocessing(backend):
    """ Check that using pre_dispatch Parallel does indeed dispatch items
    lazily.
    """
    if mp is None:
        raise nose.SkipTest()
    # A manager list is needed so appends from worker processes are
    # visible in the parent.
    manager = mp.Manager()
    queue = manager.list()

    def producer():
        for i in range(6):
            queue.append('Produced %i' % i)
            yield i

    Parallel(n_jobs=2, pre_dispatch=3, backend=backend)(
        delayed(consumer)(queue, 'any') for _ in producer())
    # Only 3 tasks are dispatched out of 6. The 4th task is dispatched only
    # after any of the first 3 jobs have completed.
    nose.tools.assert_equal(list(queue)[:4],
                            ['Produced 0', 'Produced 1', 'Produced 2',
                             'Consumed any', ])
    nose.tools.assert_equal(len(queue), 12)
def test_dispatch_multiprocessing():
    # Nose test generator: one run per backend.
    for backend in VALID_BACKENDS:
        yield check_dispatch_multiprocessing, backend
def test_exception_dispatch():
    "Make sure that exception raised during dispatch are indeed captured"
    # exception_raiser fails on item 7, mid-way through the pre-dispatched
    # stream; the ValueError must still reach the caller.
    nose.tools.assert_raises(
        ValueError,
        Parallel(n_jobs=6, pre_dispatch=16, verbose=0),
        (delayed(exception_raiser)(i) for i in range(30)),
    )
def _reload_joblib():
    # Retrieve the path of the parallel module in a robust way
    # NOTE(review): splitting a dotted module name on os.sep looks
    # suspicious, and `reload` is a Python 2 builtin — but this helper
    # exists only to *force* an import/re-import inside spawned workers
    # (see test_multiple_spawning), so confirm intent before "fixing".
    joblib_path = Parallel.__module__.split(os.sep)
    joblib_path = joblib_path[:1]
    joblib_path.append('parallel.py')
    joblib_path = '/'.join(joblib_path)
    module = __import__(joblib_path)
    # Reload the module. This should trigger a fail
    reload(module)
def test_multiple_spawning():
    # Test that attempting to launch a new Python after spawned
    # subprocesses will raise an error, to avoid infinite loops on
    # systems that do not support fork
    if not int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)):
        raise nose.SkipTest()
    # _reload_joblib re-imports joblib inside each worker, which must be
    # rejected with ImportError.
    nose.tools.assert_raises(ImportError, Parallel(n_jobs=2),
                             [delayed(_reload_joblib)() for i in range(10)])
###############################################################################
# Test helpers
def test_joblib_exception():
    # Smoke-test the custom exception
    e = JoblibException('foobar')
    # Test the repr
    repr(e)
    # Test the pickle (must round-trip so workers can send it back)
    pickle.dumps(e)
def test_safe_function():
    # SafeFunction must wrap any exception raised by the wrapped callable
    # into a JoblibException.
    safe_division = SafeFunction(division)
    nose.tools.assert_raises(JoblibException, safe_division, 1, 0)
def test_pre_dispatch_race_condition():
    # Check that using pre-dispatch does not yield a race condition on the
    # iterable generator that is not thread-safe natively.
    # this is a non-regression test for the "Pool seems closed" class of error
    for n_tasks in [2, 10, 20]:
        for n_jobs in [2, 4, 8, 16]:
            Parallel(n_jobs=n_jobs, pre_dispatch="2 * n_jobs")(
                delayed(square)(i) for i in range(n_tasks))
|
bsd-3-clause
|
trademob/boto
|
tests/unit/vpc/test_subnet.py
|
113
|
5485
|
from tests.compat import OrderedDict
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.vpc import VPCConnection, Subnet
class TestDescribeSubnets(AWSMockServiceTestCase):
    """DescribeSubnets: check request parameter marshalling and response
    parsing against a canned EC2 XML response."""

    connection_class = VPCConnection

    def default_body(self):
        # Canned DescribeSubnets response served by the mocked service.
        return b"""
            <DescribeSubnetsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
              <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
              <subnetSet>
                <item>
                  <subnetId>subnet-9d4a7b6c</subnetId>
                  <state>available</state>
                  <vpcId>vpc-1a2b3c4d</vpcId>
                  <cidrBlock>10.0.1.0/24</cidrBlock>
                  <availableIpAddressCount>251</availableIpAddressCount>
                  <availabilityZone>us-east-1a</availabilityZone>
                  <defaultForAz>false</defaultForAz>
                  <mapPublicIpOnLaunch>false</mapPublicIpOnLaunch>
                  <tagSet/>
                </item>
                <item>
                  <subnetId>subnet-6e7f829e</subnetId>
                  <state>available</state>
                  <vpcId>vpc-1a2b3c4d</vpcId>
                  <cidrBlock>10.0.0.0/24</cidrBlock>
                  <availableIpAddressCount>251</availableIpAddressCount>
                  <availabilityZone>us-east-1a</availabilityZone>
                  <defaultForAz>false</defaultForAz>
                  <mapPublicIpOnLaunch>false</mapPublicIpOnLaunch>
                  <tagSet/>
                </item>
              </subnetSet>
            </DescribeSubnetsResponse>
        """

    def test_get_all_subnets(self):
        """Filters and subnet ids are marshalled; both subnets parse."""
        self.set_http_response(status_code=200)
        # OrderedDict keeps the filter numbering deterministic.
        api_response = self.service_connection.get_all_subnets(
            ['subnet-9d4a7b6c', 'subnet-6e7f829e'],
            filters=OrderedDict([('state', 'available'),
                                 ('vpc-id', ['subnet-9d4a7b6c', 'subnet-6e7f829e'])]))
        self.assert_request_parameters({
            'Action': 'DescribeSubnets',
            'SubnetId.1': 'subnet-9d4a7b6c',
            'SubnetId.2': 'subnet-6e7f829e',
            'Filter.1.Name': 'state',
            'Filter.1.Value.1': 'available',
            'Filter.2.Name': 'vpc-id',
            'Filter.2.Value.1': 'subnet-9d4a7b6c',
            'Filter.2.Value.2': 'subnet-6e7f829e'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        # assertEquals is a deprecated alias; use assertEqual consistently
        # (this method previously mixed both spellings).
        self.assertEqual(len(api_response), 2)
        self.assertIsInstance(api_response[0], Subnet)
        self.assertEqual(api_response[0].id, 'subnet-9d4a7b6c')
        self.assertEqual(api_response[1].id, 'subnet-6e7f829e')
class TestCreateSubnet(AWSMockServiceTestCase):
    """CreateSubnet: check request marshalling and Subnet field parsing."""

    connection_class = VPCConnection

    def default_body(self):
        # Canned CreateSubnet response served by the mocked service.
        return b"""
            <CreateSubnetResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
              <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
              <subnet>
                <subnetId>subnet-9d4a7b6c</subnetId>
                <state>pending</state>
                <vpcId>vpc-1a2b3c4d</vpcId>
                <cidrBlock>10.0.1.0/24</cidrBlock>
                <availableIpAddressCount>251</availableIpAddressCount>
                <availabilityZone>us-east-1a</availabilityZone>
                <tagSet/>
              </subnet>
            </CreateSubnetResponse>
        """

    def test_create_subnet(self):
        """All response fields end up on the returned Subnet object."""
        self.set_http_response(status_code=200)
        api_response = self.service_connection.create_subnet(
            'vpc-1a2b3c4d', '10.0.1.0/24', 'us-east-1a')
        self.assert_request_parameters({
            'Action': 'CreateSubnet',
            'VpcId': 'vpc-1a2b3c4d',
            'CidrBlock': '10.0.1.0/24',
            'AvailabilityZone': 'us-east-1a'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        self.assertIsInstance(api_response, Subnet)
        # assertEquals is a deprecated unittest alias; use assertEqual.
        self.assertEqual(api_response.id, 'subnet-9d4a7b6c')
        self.assertEqual(api_response.state, 'pending')
        self.assertEqual(api_response.vpc_id, 'vpc-1a2b3c4d')
        self.assertEqual(api_response.cidr_block, '10.0.1.0/24')
        self.assertEqual(api_response.available_ip_address_count, 251)
        self.assertEqual(api_response.availability_zone, 'us-east-1a')
class TestDeleteSubnet(AWSMockServiceTestCase):
    """Request serialization and response parsing for DeleteSubnet."""

    connection_class = VPCConnection

    def default_body(self):
        # Canned XML returned by the mocked HTTP layer.
        return b"""
            <DeleteSubnetResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
               <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
               <return>true</return>
            </DeleteSubnetResponse>
        """

    def test_delete_subnet(self):
        """delete_subnet must emit the SubnetId param and return True."""
        self.set_http_response(status_code=200)
        api_response = self.service_connection.delete_subnet('subnet-9d4a7b6c')
        self.assert_request_parameters({
            'Action': 'DeleteSubnet',
            'SubnetId': 'subnet-9d4a7b6c'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        # assertEquals is a deprecated alias for assertEqual.
        self.assertEqual(api_response, True)
# Allow running this test module directly with `python <file>.py`.
if __name__ == '__main__':
    unittest.main()
|
mit
|
neteler/QGIS
|
python/plugins/processing/modeler/CalculatorModelerAlgorithm.py
|
10
|
5860
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
CalculatorModelerAlgorithm.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import Qt, QObject, QMetaObject, SIGNAL
from PyQt4.QtGui import QDialogButtonBox, QTextEdit, QLineEdit, QVBoxLayout
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterString
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputNumber
from processing.modeler.ModelerParametersDialog import ModelerParametersDialog
from processing.modeler.ModelerAlgorithm import Algorithm
# Parameter/output keys shared by the algorithm and its custom dialog.
FORMULA = 'FORMULA'
NUMBER = 'NUMBER'  # prefix only; the actual parameters are NUMBER0..NUMBER9
RESULT = 'RESULT'
# Number of numeric inputs available; bound to letters 'a'..'j' in formulas.
AVAILABLE_VARIABLES = 10
class CalculatorModelerAlgorithm(GeoAlgorithm):
    """Modeler-only algorithm that evaluates a user-entered arithmetic
    formula over up to AVAILABLE_VARIABLES numeric inputs, which are
    referenced in the formula by the single letters 'a'..'j'."""

    def defineCharacteristics(self):
        # Exposed only inside the graphical modeler, never in the toolbox.
        self.showInModeler = True
        self.showInToolbox = False
        self.name = self.tr('Calculator', 'CalculatorModelerAlgorithm')
        self.group = self.tr('Modeler-only tools', 'CalculatorModelerAlgorithm')
        self.addParameter(ParameterString(FORMULA,
            self.tr('Formula', 'CalculatorModelerAlgorithm'), ''))
        # One numeric input slot per available single-letter variable.
        for i in range(AVAILABLE_VARIABLES):
            self.addParameter(ParameterNumber(NUMBER
                + str(i), 'dummy'))
        self.addOutput(OutputNumber(RESULT,
            self.tr('Result', 'CalculatorModelerAlgorithm')))

    def processAlgorithm(self, progress):
        formula = self.getParameterValue(FORMULA)
        # Substitute each letter 'a'..'j' (chr(97 + i)) with its value.
        for i in range(AVAILABLE_VARIABLES):
            name = NUMBER + str(i)
            num = self.getParameterValue(name)
            formula = formula.replace(chr(97 + i), str(num))
        try:
            # SECURITY NOTE: eval() executes arbitrary Python.  Acceptable
            # here only because the formula is authored by the model
            # designer; never feed it untrusted input.
            result = eval(formula)
            self.setOutputValue(RESULT, result)
        except:
            # Any evaluation failure is reported as a bad formula.
            raise GeoAlgorithmExecutionException(
                self.tr('Wrong formula: %s', 'CalculatorModelerAlgorithm') % formula)

    def getCustomModelerParametersDialog(self, modelAlg, algIndex=None):
        # The calculator uses a bespoke dialog instead of the generic one.
        return CalculatorModelerParametersDialog(self, modelAlg, algIndex)
class CalculatorModelerParametersDialog(ModelerParametersDialog):
    """Custom parameters dialog: a read-only help text listing the available
    single-letter variables plus a one-line formula editor."""

    def setupUi(self):
        self.valueItems = {}
        self.dependentItems = {}
        self.resize(650, 450)
        self.buttonBox = QDialogButtonBox()
        self.buttonBox.setOrientation(Qt.Horizontal)
        self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel
                                          | QDialogButtonBox.Ok)
        self.infoText = QTextEdit()
        numbers = self.getAvailableValuesOfType(ParameterNumber, OutputNumber)
        text = self.tr('You can refer to model values in your formula, using '
                       'single-letter variables, as follows:\n', 'CalculatorModelerParametersDialog')
        # 97 == ord('a'): successive values are labelled a, b, c, ...
        ichar = 97
        if numbers:
            for number in numbers:
                text += chr(ichar) + '->' + self.resolveValueDescription(number) + '\n'
                ichar += 1
        else:
            text += self.tr('\n - No numerical variables are available.', 'CalculatorModelerParametersDialog')
        self.infoText.setText(text)
        self.infoText.setEnabled(False)
        self.formulaText = QLineEdit()
        # setPlaceholderText only exists on newer Qt versions.
        if hasattr(self.formulaText, 'setPlaceholderText'):
            self.formulaText.setPlaceholderText(self.tr('[Enter your formula here]', 'CalculatorModelerParametersDialog'))
        self.setWindowTitle(self.tr('Calculator', 'CalculatorModelerParametersDialog'))
        self.verticalLayout = QVBoxLayout()
        self.verticalLayout.setSpacing(2)
        self.verticalLayout.setMargin(0)
        self.verticalLayout.addWidget(self.infoText)
        self.verticalLayout.addWidget(self.formulaText)
        self.verticalLayout.addWidget(self.buttonBox)
        self.setLayout(self.verticalLayout)
        # Old-style PyQt4 signal wiring.
        QObject.connect(self.buttonBox, SIGNAL('accepted()'), self.okPressed)
        QObject.connect(self.buttonBox, SIGNAL('rejected()'), self.cancelPressed)
        QMetaObject.connectSlotsByName(self)

    def createAlgorithm(self):
        """Build the model Algorithm from the dialog's formula text."""
        alg = Algorithm('modelertools:calculator')
        alg.setName(self.model)
        alg.description = self.tr('Calculator', 'CalculatorModelerParametersDialog')
        formula = self.formulaText.text()
        alg.params[FORMULA] = formula
        # Clear all numeric slots first (xrange: this module targets Python 2).
        for i in xrange(AVAILABLE_VARIABLES):
            paramname = NUMBER + str(i)
            alg.params[paramname] = None
        numbers = self.getAvailableValuesOfType(ParameterNumber, OutputNumber)
        used = []
        # Then bind, in letter order, only the values actually referenced.
        # NOTE(review): this is a plain substring test — a formula containing
        # the letter inside another token would also count; confirm intended.
        for i in range(len(numbers)):
            if str(chr(i + 97)) in formula:
                used.append(numbers[i])
        for i, variable in enumerate(used):
            paramname = NUMBER + str(i)
            alg.params[paramname] = variable
        # TODO check formula is correct
        return alg
|
gpl-2.0
|
irwinlove/django
|
tests/m2m_multiple/tests.py
|
228
|
2386
|
from __future__ import unicode_literals
from datetime import datetime
from django.test import TestCase
from .models import Article, Category
class M2MMultipleTests(TestCase):
    def test_multiple(self):
        """Two independent M2M relations from Article to Category
        (primary_categories / secondary_categories) must not interfere,
        in either direction of the relation."""
        sports, news, crime, life = [
            Category.objects.create(name=label)
            for label in ["Sports", "News", "Crime", "Life"]
        ]
        steals = Article.objects.create(
            headline="Area man steals", pub_date=datetime(2005, 11, 27)
        )
        steals.primary_categories.add(news, crime)
        steals.secondary_categories.add(life)
        runs = Article.objects.create(
            headline="Area man runs", pub_date=datetime(2005, 11, 28)
        )
        runs.primary_categories.add(sports, news)
        runs.secondary_categories.add(life)

        by_name = lambda c: c.name
        by_headline = lambda a: a.headline

        # (related manager, expected values, transform) triples, checked in
        # the same order as before: forward relations, then reverse ones.
        cases = [
            (steals.primary_categories, ["Crime", "News"], by_name),
            (runs.primary_categories, ["News", "Sports"], by_name),
            (steals.secondary_categories, ["Life"], by_name),
            (sports.primary_article_set, ["Area man runs"], by_headline),
            (sports.secondary_article_set, [], None),
            (news.primary_article_set,
             ["Area man steals", "Area man runs"], by_headline),
            (news.secondary_article_set, [], None),
            (crime.primary_article_set, ["Area man steals"], by_headline),
            (crime.secondary_article_set, [], None),
            (life.primary_article_set, [], None),
            (life.secondary_article_set,
             ["Area man steals", "Area man runs"], by_headline),
        ]
        for manager, expected, transform in cases:
            if transform is None:
                self.assertQuerysetEqual(manager.all(), expected)
            else:
                self.assertQuerysetEqual(manager.all(), expected, transform)
|
bsd-3-clause
|
Planigle/planigle
|
node_modules/node-gyp/gyp/pylib/gyp/common.py
|
1292
|
20063
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
  """Minimal memoizing decorator: caches results keyed on the positional
  arguments, which therefore must all be hashable."""
  def __init__(self, func):
    self.func = func
    self.cache = {}
  def __call__(self, *args):
    if args not in self.cache:
      self.cache[args] = self.func(*args)
    return self.cache[args]
class GypError(Exception):
  """User-facing error; the main entry point catches and displays it
  instead of printing a traceback."""
def ExceptionAppend(e, msg):
  """Append |msg| to the message of exception |e|, preserving extra args."""
  if not e.args:
    e.args = (msg,)
  else:
    # Fold the message into the first arg; keep any remaining args intact.
    e.args = ('%s %s' % (e.args[0], msg),) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
  """
  Return the entries of |qualified_list| whose target-name component
  equals |target|.
  """
  matches = []
  for qualified in qualified_list:
    if ParseQualifiedTarget(qualified)[1] == target:
      matches.append(qualified)
  return matches
def ParseQualifiedTarget(target):
  """Split a qualified target into [build_file, target, toolset].

  Missing components come back as None.  rsplit is used so a Windows
  drive-letter colon in the build-file path is not mistaken for the
  build-file/target separator.
  """
  build_file = None
  pieces = target.rsplit(':', 1)
  if len(pieces) == 2:
    build_file, target = pieces
  toolset = None
  pieces = target.rsplit('#', 1)
  if len(pieces) == 2:
    target, toolset = pieces
  return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
  # This function resolves a target into a canonical form:
  # - a fully defined build file, either absolute or relative to the current
  # directory
  # - a target name
  # - a toolset
  #
  # build_file is the file relative to which 'target' is defined.
  # target is the qualified target.
  # toolset is the default toolset for that target.
  #
  # Returns [build_file, target, toolset]; components parsed out of |target|
  # override the corresponding arguments.
  [parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)

  if parsed_build_file:
    if build_file:
      # If a relative path, parsed_build_file is relative to the directory
      # containing build_file. If build_file is not in the current directory,
      # parsed_build_file is not a usable path as-is. Resolve it by
      # interpreting it as relative to build_file. If parsed_build_file is
      # absolute, it is usable as a path regardless of the current directory,
      # and os.path.join will return it as-is.
      build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
                                                 parsed_build_file))
      # Further (to handle cases like ../cwd), make it relative to cwd)
      if not os.path.isabs(build_file):
        build_file = RelativePath(build_file, '.')
    else:
      # No base build file to resolve against: use the parsed path verbatim.
      build_file = parsed_build_file

  if parsed_toolset:
    toolset = parsed_toolset

  return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
  """Extract just the build-file component of a fully qualified target."""
  build_file, _target, _toolset = ParseQualifiedTarget(fully_qualified_target)
  return build_file
def GetEnvironFallback(var_list, default):
  """Return the value of the first set variable in |var_list|, else |default|."""
  for name in var_list:
    value = os.environ.get(name)
    if value is not None:
      return value
  return default
def QualifiedTarget(build_file, target, toolset):
  """Inverse of ParseQualifiedTarget:
  /path/to/file.gyp:target_name#toolset (the #toolset suffix only when a
  truthy toolset is given)."""
  parts = [build_file, ':', target]
  if toolset:
    parts += ['#', toolset]
  return ''.join(parts)
@memoize
def RelativePath(path, relative_to, follow_path_symlink=True):
  # Assuming both |path| and |relative_to| are relative to the current
  # directory, returns a relative path that identifies path relative to
  # relative_to.
  # If |follow_path_symlink| is true (default) and |path| is a symlink, then
  # this method returns a path to the real file represented by |path|. If it is
  # false, this method returns a path to the symlink. If |path| is not a
  # symlink, this option has no effect.
  #
  # NOTE(review): results are memoized by argument values only; the result
  # also depends on the current working directory, so a cwd change mid-run
  # would serve stale paths — confirm callers never change cwd.

  # Convert to normalized (and therefore absolute paths).
  if follow_path_symlink:
    path = os.path.realpath(path)
  else:
    path = os.path.abspath(path)
  relative_to = os.path.realpath(relative_to)

  # On Windows, we can't create a relative path to a different drive, so just
  # use the absolute path.
  if sys.platform == 'win32':
    if (os.path.splitdrive(path)[0].lower() !=
        os.path.splitdrive(relative_to)[0].lower()):
      return path

  # Split the paths into components.
  path_split = path.split(os.path.sep)
  relative_to_split = relative_to.split(os.path.sep)

  # Determine how much of the prefix the two paths share.
  prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))

  # Put enough ".." components to back up out of relative_to to the common
  # prefix, and then append the part of path_split after the common prefix.
  relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
      path_split[prefix_len:]

  if len(relative_split) == 0:
    # The paths were the same.
    return ''

  # Turn it back into a string and we're done.
  return os.path.join(*relative_split)
@memoize
def InvertRelativePath(path, toplevel_dir=None):
  """Given a path like foo/bar that is relative to toplevel_dir, return
  the inverse relative path back to the toplevel_dir.

  E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
  should always produce the empty string, unless the path contains symlinks.
  """
  if not path:
    # Empty path inverts to itself.
    return path
  toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
  # Delegate the actual relative-path computation to RelativePath.
  return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
  """Like RelativePath, but absolute paths are returned unchanged."""
  if not os.path.isabs(path):
    path = RelativePath(path, relative_to)
  return path
def UnrelativePath(path, relative_to):
  """Given |path| relative to the dirname of |relative_to| (itself relative
  to the current directory), return |path| relative to the current
  directory."""
  base_dir = os.path.dirname(relative_to)
  return os.path.normpath(os.path.join(base_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
# The trailing "|^$" alternative matches the empty string, which must be
# quoted to survive as an argument at all.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
# Capture group lets re.sub prefix each matched character with a backslash.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
  """Encodes |argument| suitably for consumption by POSIX shells.

  argument may be quoted and escaped as necessary to ensure that POSIX shells
  treat the returned value as a literal representing the argument passed to
  this function.  Parameter (variable) expansions beginning with $ are allowed
  to remain intact without escaping the $, to allow the argument to contain
  references to variables to be expanded by the shell.
  """
  if not isinstance(argument, str):
    argument = str(argument)

  needs_quoting = _quote.search(argument) is not None
  escaped = _escape.sub(r'\\\1', argument)
  if needs_quoting:
    return '"%s"' % escaped
  return escaped
def EncodePOSIXShellList(list):
  """Encode each item of |list| for a POSIX shell and join with spaces.

  NOTE: the parameter name shadows the builtin `list`; kept as-is for API
  compatibility with keyword callers.
  """
  return ' '.join(EncodePOSIXShellArgument(argument) for argument in list)
def DeepDependencyTargets(target_dicts, roots):
  """Return every target reachable from |roots| through 'dependencies' and
  'dependencies_original' edges, excluding the roots themselves."""
  root_set = set(roots)
  seen = set()
  stack = list(roots)
  while stack:
    current = stack.pop()
    if current in seen:
      continue
    seen.add(current)
    spec = target_dicts[current]
    stack.extend(spec.get('dependencies', []))
    stack.extend(spec.get('dependencies_original', []))
  return list(seen - root_set)
def BuildFileTargets(target_list, build_file):
  """From a target_list, returns the subset from the specified build_file.
  """
  matching = []
  for qualified in target_list:
    if BuildFile(qualified) == build_file:
      matching.append(qualified)
  return matching
def AllTargets(target_list, target_dicts, build_file):
  """Returns all targets (direct and dependencies) for the specified build_file.
  """
  direct = BuildFileTargets(target_list, build_file)
  return direct + DeepDependencyTargets(target_dicts, direct)
def WriteOnDiff(filename):
  """Write to a file only if the new contents differ.

  NOTE(review): this block uses Python 2-only syntax (`except OSError, e`,
  octal literals 077/0666 below) and cannot run under Python 3 as-is.

  Arguments:
    filename: name of the file to potentially write to.
  Returns:
    A file like object which will write to temporary file and only overwrite
    the target if it differs (on close).
  """

  class Writer(object):
    """Wrapper around file which only covers the target if it differs."""
    def __init__(self):
      # Pick temporary file in the same directory so the final os.rename is
      # a same-filesystem (atomic where the OS allows) replacement.
      tmp_fd, self.tmp_path = tempfile.mkstemp(
          suffix='.tmp',
          prefix=os.path.split(filename)[1] + '.gyp.',
          dir=os.path.split(filename)[0])
      try:
        self.tmp_file = os.fdopen(tmp_fd, 'wb')
      except Exception:
        # Don't leave turds behind.
        os.unlink(self.tmp_path)
        raise

    def __getattr__(self, attrname):
      # Delegate everything else to self.tmp_file
      return getattr(self.tmp_file, attrname)

    def close(self):
      try:
        # Close tmp file.
        self.tmp_file.close()
        # Determine if different.
        same = False
        try:
          same = filecmp.cmp(self.tmp_path, filename, False)
        except OSError, e:
          # ENOENT just means |filename| doesn't exist yet: not "same".
          if e.errno != errno.ENOENT:
            raise

        if same:
          # The new file is identical to the old one, just get rid of the new
          # one.
          os.unlink(self.tmp_path)
        else:
          # The new file is different from the old one, or there is no old one.
          # Rename the new file to the permanent name.
          #
          # tempfile.mkstemp uses an overly restrictive mode, resulting in a
          # file that can only be read by the owner, regardless of the umask.
          # There's no reason to not respect the umask here, which means that
          # an extra hoop is required to fetch it and reset the new file's mode.
          #
          # No way to get the umask without setting a new one?  Set a safe one
          # and then set it back to the old value.
          umask = os.umask(077)
          os.umask(umask)
          os.chmod(self.tmp_path, 0666 & ~umask)
          if sys.platform == 'win32' and os.path.exists(filename):
            # NOTE: on windows (but not cygwin) rename will not replace an
            # existing file, so it must be preceded with a remove. Sadly there
            # is no way to make the switch atomic.
            os.remove(filename)
          os.rename(self.tmp_path, filename)
      except Exception:
        # Don't leave turds behind.
        os.unlink(self.tmp_path)
        raise

  return Writer()
def EnsureDirExists(path):
  """Make sure the directory for |path| exists, creating it if necessary.

  Raises OSError for real failures (e.g. permissions); only the
  "already exists" case is silently ignored.
  """
  dir_path = os.path.dirname(path)
  if not dir_path:
    # |path| has no directory component; nothing to create.
    return
  try:
    os.makedirs(dir_path)
  except OSError as e:
    # Previously ALL OSErrors were swallowed, hiding genuine failures
    # (EACCES, ...) until some later write blew up.  Only ignore EEXIST.
    if e.errno != errno.EEXIST:
      raise
def GetFlavor(params):
  """Returns |params.flavor| if it's set, the system's default flavor else."""
  if 'flavor' in params:
    return params['flavor']
  # Exact sys.platform values first, then prefix matches, else 'linux'.
  exact = {
      'cygwin': 'win',
      'win32': 'win',
      'darwin': 'mac',
  }
  if sys.platform in exact:
    return exact[sys.platform]
  prefixes = (
      ('sunos', 'solaris'),
      ('freebsd', 'freebsd'),
      ('openbsd', 'openbsd'),
      ('netbsd', 'netbsd'),
      ('aix', 'aix'),
  )
  for prefix, flavor in prefixes:
    if sys.platform.startswith(prefix):
      return flavor
  return 'linux'
def CopyTool(flavor, out_path):
  """Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
  to |out_path|.  Flavors with no support script are silently skipped."""
  # aix and solaris just need flock emulation. mac and win use more complicated
  # support scripts.
  prefix = {
      'aix': 'flock',
      'solaris': 'flock',
      'mac': 'mac',
      'win': 'win'
      }.get(flavor, None)
  if not prefix:
    return

  # Slurp input file.
  source_path = os.path.join(
      os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
  with open(source_path) as source_file:
    source = source_file.readlines()

  # Add header and write it out.
  tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
  with open(tool_path, 'w') as tool_file:
    # Keep the shebang (source[0]) first, then the generated-file marker.
    tool_file.write(
        ''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))

  # Make file executable.
  os.chmod(tool_path, 0755)  # NOTE: Python 2 octal literal (0o755 in Py3).
# Order-preserving de-duplication, after Alex Martelli's recipe:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# (ASPN: Python Cookbook: Remove duplicates from a sequence; first comment,
# dated 2001/10/13; also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
  """Return seq's items with duplicates removed, preserving first-seen order.

  |idfun| maps an item to the (hashable) key used for duplicate detection;
  the identity is used by default.
  """
  if idfun is None:
    idfun = lambda x: x
  seen = set()
  result = []
  for item in seq:
    key = idfun(item)
    if key not in seen:
      seen.add(key)
      result.append(item)
  return result
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
  # A set that remembers insertion order, implemented as a circular doubly
  # linked list of [key, prev, next] cells rooted at the sentinel self.end,
  # plus a dict mapping key -> cell for O(1) membership.
  # NOTE(review): collections.MutableSet moved to collections.abc and the
  # old alias was removed in Python 3.10; fine for the Python 2 runtime
  # this file targets.
  def __init__(self, iterable=None):
    self.end = end = []
    end += [None, end, end]         # sentinel node for doubly linked list
    self.map = {}                   # key --> [key, prev, next]
    if iterable is not None:
      self |= iterable

  def __len__(self):
    return len(self.map)

  def __contains__(self, key):
    return key in self.map

  def add(self, key):
    # Append at the tail (just before the sentinel) if not already present.
    if key not in self.map:
      end = self.end
      curr = end[1]
      curr[2] = end[1] = self.map[key] = [key, curr, end]

  def discard(self, key):
    # Unlink the cell from the list; no-op when the key is absent.
    if key in self.map:
      key, prev_item, next_item = self.map.pop(key)
      prev_item[2] = next_item
      next_item[1] = prev_item

  def __iter__(self):
    # Walk forward from the sentinel: insertion order.
    end = self.end
    curr = end[2]
    while curr is not end:
      yield curr[0]
      curr = curr[2]

  def __reversed__(self):
    # Walk backward from the sentinel: reverse insertion order.
    end = self.end
    curr = end[1]
    while curr is not end:
      yield curr[0]
      curr = curr[1]

  # The second argument is an addition that causes a pylint warning.
  def pop(self, last=True):  # pylint: disable=W0221
    # Remove and return the newest (last=True) or oldest element.
    if not self:
      raise KeyError('set is empty')
    key = self.end[1][0] if last else self.end[2][0]
    self.discard(key)
    return key

  def __repr__(self):
    if not self:
      return '%s()' % (self.__class__.__name__,)
    return '%s(%r)' % (self.__class__.__name__, list(self))

  def __eq__(self, other):
    # Order matters only when comparing two OrderedSets.
    if isinstance(other, OrderedSet):
      return len(self) == len(other) and list(self) == list(other)
    return set(self) == set(other)

  # Extensions to the recipe.
  def update(self, iterable):
    for i in iterable:
      if i not in self:
        self.add(i)
class CycleError(Exception):
  """An exception raised when an unexpected cycle is detected."""
  def __init__(self, nodes):
    # Keep the offending nodes for programmatic inspection.
    self.nodes = nodes
  def __str__(self):
    return 'CycleError: cycle involving: %s' % (self.nodes,)
def TopologicallySorted(graph, get_edges):
  r"""Topologically sort based on a user provided edge definition.

  Args:
    graph: A list of node names.
    get_edges: A function mapping from node name to a hashable collection
               of node names which this node has outgoing edges to.
  Returns:
    A list containing all of the node in graph in topological order.
    It is assumed that calling get_edges once for each node and caching is
    cheaper than repeatedly calling get_edges.
  Raises:
    CycleError in the event of a cycle.
  Example:
    graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
    def GetEdges(node):
      return re.findall(r'\$\(([^)]*)\)', graph[node])
    print TopologicallySorted(graph.keys(), GetEdges)
    ==>
    ['a', 'c', 'b']
  """
  get_edges = memoize(get_edges)
  visited = set()
  visiting = set()
  ordered_nodes = []
  def Visit(node):
    # Depth-first visit; a node seen while still on the visiting stack
    # means a back-edge, i.e. a cycle.
    if node in visiting:
      raise CycleError(visiting)
    if node in visited:
      return
    visited.add(node)
    visiting.add(node)
    for neighbor in get_edges(node):
      Visit(neighbor)
    visiting.remove(node)
    # Prepend so that dependents end up before their dependencies' order.
    ordered_nodes.insert(0, node)
  # sorted() makes the output deterministic for a given input graph.
  for node in sorted(graph):
    Visit(node)
  return ordered_nodes
def CrossCompileRequested():
  """Return a truthy value when any cross-compile environment variable is set."""
  # TODO: figure out how to not build extra host objects in the
  # non-cross-compile case when this is enabled, and enable unconditionally.
  cross_vars = ('GYP_CROSSCOMPILE',
                'AR_host', 'CC_host', 'CXX_host',
                'AR_target', 'CC_target', 'CXX_target')
  for name in cross_vars:
    value = os.environ.get(name)
    if value:
      return value
  return None
|
gpl-2.0
|
brianmckenna/sci-wms
|
wms/migrations/0007_auto_20150424_1604.py
|
2
|
1179
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Replace the single Style.style field with four explicit fields:
    colormap, image_type, param_loc and wildcard."""

    dependencies = [
        ('wms', '0006_auto_20150424_1058'),
    ]

    operations = [
        # Drop the old combined field first; its information is split into
        # the four CharFields added below.
        migrations.RemoveField(
            model_name='style',
            name='style',
        ),
        # NOTE(review): the bytes defaults (b'jet', ...) look like Python 2
        # artifacts of the auto-generated migration — confirm before changing.
        migrations.AddField(
            model_name='style',
            name='colormap',
            field=models.CharField(default=b'jet', max_length=200),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='style',
            name='image_type',
            field=models.CharField(default=b'filledcontours', max_length=200),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='style',
            name='param_loc',
            field=models.CharField(default=b'grid', max_length=200),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='style',
            name='wildcard',
            field=models.CharField(default=b'False', max_length=200),
            preserve_default=True,
        ),
    ]
|
gpl-3.0
|
zstackio/zstack-woodpecker
|
integrationtest/vm/ha/test_mysql_stop_create_vm.py
|
2
|
2011
|
'''
Integration Test for creating KVM VM in HA mode with mysql stop on one node.
@author: Quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import test_stub
import time
import os
# Module-level handles shared with error_cleanup().
vm1 = None
vm2 = None
node_ip = None

def test():
    """Stop MySQL on one management node, verify VM create/check/destroy
    still succeed (HA should tolerate the loss), then restart MySQL."""
    global vm1
    global vm2
    global node_ip
    vm1 = test_stub.create_basic_vm()
    node_ip = os.environ.get('node1Ip')
    test_util.test_logger("stop mysql on node: %s" % (node_ip))
    cmd = "service mysql stop"
    host_username = os.environ.get('nodeUserName')
    host_password = os.environ.get('nodePassword')
    rsp = test_lib.lib_execute_ssh_cmd(node_ip, host_username, host_password, cmd, 180)
    # Give the remaining node(s) time to take over before probing.
    time.sleep(50)
    test_stub.exercise_connection(600)
    test_util.test_logger("create vm to check if it still works")
    # Existing VM must still be manageable, and a brand-new VM creatable.
    vm1.check()
    vm1.destroy()
    vm2 = test_stub.create_basic_vm()
    vm2.check()
    vm2.destroy()
    # Restore MySQL and re-verify connectivity before declaring success.
    cmd = "service mysql start"
    rsp = test_lib.lib_execute_ssh_cmd(node_ip, host_username, host_password, cmd, 180)
    time.sleep(50)
    test_stub.exercise_connection(600)
    test_util.test_pass('Create VM Test Mysql Stop on one node Success')
# Will be called only if exception happens in test().
def error_cleanup():
    """Best-effort cleanup: restart MySQL on the node and destroy any VMs
    created by test().  Failures during cleanup are deliberately ignored."""
    global vm1
    global vm2
    global node_ip
    cmd = "service mysql start"
    host_username = os.environ.get('nodeUserName')
    host_password = os.environ.get('nodePassword')
    rsp = test_lib.lib_execute_ssh_cmd(node_ip, host_username, host_password, cmd, 180)
    time.sleep(50)
    test_stub.exercise_connection(600)
    if vm1:
        try:
            vm1.destroy()
        except:
            # Best-effort: the VM may already be gone or unreachable.
            pass
    if vm2:
        try:
            vm2.destroy()
        except:
            pass
|
apache-2.0
|
FRESNA/atlite
|
atlite/utils.py
|
1
|
5135
|
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2016-2019 The Atlite Authors
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""
General utility functions for internal use.
"""
from .gis import maybe_swap_spatial_dims
import progressbar as pgb
from pathlib import Path
import pandas as pd
import xarray as xr
import textwrap
import re
import warnings
from .datasets import modules as datamodules
import logging
logger = logging.getLogger(__name__)
def make_optional_progressbar(show, prefix, max_value=None):
    """Return a progress bar (when `show`) or an identity pass-through.

    .. deprecated:: will be removed in the next version.

    Parameters
    ----------
    show : bool
        When truthy, build a real `progressbar.ProgressBar` with
        percentage, bar, timer and ETA widgets.
    prefix : str
        Text prepended to the bar; ": " is appended when missing.
    max_value : int, optional
        Upper bound forwarded to the progress bar.
    """
    # BUG FIX: the category must be the builtin DeprecationWarning.  The
    # `warnings` module has no `DeprecationWarning` attribute, so the old
    # `warnings.DeprecationWarning` raised AttributeError on every call.
    warnings.warn("make_optional_progressbar() is deprecated and will be removed "
                  "in the next version.", DeprecationWarning)

    if show:
        widgets = [
            pgb.widgets.Percentage(),
            ' ',
            pgb.widgets.SimpleProgress(
                format='(%s)' %
                pgb.widgets.SimpleProgress.DEFAULT_FORMAT),
            ' ',
            pgb.widgets.Bar(),
            ' ',
            pgb.widgets.Timer(),
            ' ',
            pgb.widgets.ETA()]
        if not prefix.endswith(": "):
            prefix = prefix.strip() + ": "
        maybe_progressbar = pgb.ProgressBar(prefix=prefix, widgets=widgets,
                                            max_value=max_value)
    else:
        # No-op wrapper with the same call shape as a ProgressBar.
        def maybe_progressbar(x):
            return x

    return maybe_progressbar
def migrate_from_cutout_directory(old_cutout_dir, path):
    """Convert an old style cutout directory to new style netcdf file"""
    old_cutout_dir = Path(old_cutout_dir)
    with xr.open_dataset(old_cutout_dir / "meta.nc") as meta:
        newname = f"{old_cutout_dir.name}.nc"
        module = meta.attrs["module"]
        # Spatial/temporal extents, reported to the user so the cutout can be
        # recreated by hand if the automated merge below fails.
        minX, maxX = meta.indexes['x'][[0, -1]]
        minY, maxY = sorted(meta.indexes['y'][[0, -1]])
        minT, maxT = meta.indexes['time'][[0, -1]].strftime("%Y-%m")
        logger.warning(textwrap.dedent(f"""
            Found an old-style directory-like cutout. It can manually be
            recreated using

            cutout = atlite.Cutout("{newname}",
                                   module="{module}",
                                   time=slice("{minT}", "{maxT}"),
                                   x=slice({minX}, {maxX}),
                                   y=slice({minY}, {maxY})
            cutout.prepare()

            but we are trying to offer an automated migration as well ...
            """))

        # Old cutouts store one file per month ([12]*.nc matches years
        # starting with 1 or 2); merge them along their coordinates.
        data = xr.open_mfdataset(str(old_cutout_dir / "[12]*.nc"),
                                 combine="by_coords")
        data.attrs.update(meta.attrs)

    try:
        data = maybe_swap_spatial_dims(data)
    except xr.MergeError:
        logger.exception(
            "Automatic migration failed. Re-create the cutout "
            "with the command above!")
        raise

    module = data.attrs["module"]
    data.attrs['prepared_features'] = list(datamodules[module].features)
    # Tag every variable with its module and (if identifiable) feature so the
    # new-style cutout knows what has been prepared.
    for v in data:
        data[v].attrs['module'] = module
        fd = datamodules[module].features.items()
        features = [k for k, l in fd if v in l]
        data[v].attrs['feature'] = features.pop() if features else 'undefined'

    path = Path(path).with_suffix(".nc")
    logger.info(f"Writing cutout data to {path}. When done, load it again using"
                f"\n\n\tatlite.Cutout('{path}')")
    data.to_netcdf(path)
    return data
def timeindex_from_slice(timeslice):
end = pd.Timestamp(timeslice.end) + pd.offsets.DateOffset(months=1)
return pd.date_range(timeslice.start, end, freq="1h", closed="left")
class arrowdict(dict):
    """
    dict subclass whose string keys can also be read via attribute
    access (``d.key``), mirroring ``d['key']``.
    """

    # Pattern for keys that look like Python identifiers (used by __dir__).
    _re_pattern = re.compile('[a-zA-Z_][a-zA-Z0-9_]*')

    def __getattr__(self, item):
        # Fall back to item lookup; translate KeyError to AttributeError so
        # attribute-protocol consumers (hasattr, getattr default) behave.
        try:
            return self[item]
        except KeyError as err:
            raise AttributeError(err.args[0])

    def __dir__(self):
        # Advertise identifier-like string keys, e.g. for tab completion.
        return [key for key in self.keys()
                if isinstance(key, str) and self._re_pattern.match(key)]
class CachedAttribute(object):
'''
Computes attribute value and caches it in the instance.
From the Python Cookbook (Denis Otkidach)
This decorator allows you to create a property which can be
computed once and accessed many times. Sort of like memoization.
'''
def __init__(self, method, name=None, doc=None):
# record the unbound-method and the name
self.method = method
self.name = name or method.__name__
self.__doc__ = doc or method.__doc__
def __get__(self, inst, cls):
if inst is None:
# instance attribute accessed on class, return self
# You get here if you write `Foo.bar`
return self
# compute, cache and return the instance's attribute value
result = self.method(inst)
# setattr redefines the instance's attribute so this doesn't get called
# again
setattr(inst, self.name, result)
return result
|
gpl-3.0
|
koyuawsmbrtn/eclock
|
windows/Python27/Lib/site-packages/docutils/parsers/rst/tableparser.py
|
112
|
21007
|
# $Id: tableparser.py 7320 2012-01-19 22:33:02Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
This module defines table parser classes, which parse plaintext-graphic tables
and produce a well-formed data structure suitable for building a CALS table.
:Classes:
- `GridTableParser`: Parse fully-formed tables represented with a grid.
- `SimpleTableParser`: Parse simple tables, delimited by top & bottom
borders.
:Exception class: `TableMarkupError`
:Function:
`update_dict_of_lists()`: Merge two dictionaries containing list values.
"""
__docformat__ = 'reStructuredText'
import re
import sys
from docutils import DataError
from docutils.utils import strip_combining_chars
class TableMarkupError(DataError):
    """
    Raise if there is any problem with table markup.
    The keyword argument `offset` denotes the offset of the problem
    from the table's start line.
    """
    def __init__(self, *args, **kwargs):
        # Pull out our keyword-only `offset` before delegating to DataError,
        # which does not accept keyword arguments.
        self.offset = kwargs.pop('offset', 0)
        DataError.__init__(self, *args)
class TableParser:
    """
    Abstract superclass for the common parts of the syntax-specific parsers.
    """
    head_body_separator_pat = None
    """Matches the row separator between head rows and body rows."""
    double_width_pad_char = '\x00'
    """Padding character for East Asian double-width text."""
    def parse(self, block):
        """
        Analyze the text `block` and return a table data structure.
        Given a plaintext-graphic table in `block` (list of lines of text; no
        whitespace padding), parse the table, construct and return the data
        necessary to construct a CALS table or equivalent.
        Raise `TableMarkupError` if there is any problem with the markup.
        """
        # Template method: setup(), parse_table() and structure_from_cells()
        # are provided by the syntax-specific subclasses.
        self.setup(block)
        self.find_head_body_sep()
        self.parse_table()
        structure = self.structure_from_cells()
        return structure
    def find_head_body_sep(self):
        """Look for a head/body row separator line; store the line index."""
        for i in range(len(self.block)):
            line = self.block[i]
            if self.head_body_separator_pat.match(line):
                # NOTE(review): a separator on line 0 leaves head_body_sep
                # falsy, so a second one would not be reported here; the
                # `== 0` check below rejects such a table anyway.
                if self.head_body_sep:
                    raise TableMarkupError(
                        'Multiple head/body row separators '
                        '(table lines %s and %s); only one allowed.'
                        % (self.head_body_sep+1, i+1), offset=i)
                else:
                    self.head_body_sep = i
                    # Normalize '=' to '-' so later parsing treats the
                    # separator as an ordinary row boundary.
                    self.block[i] = line.replace('=', '-')
        # `i` here is the last loop index; it is only used as error offset.
        if self.head_body_sep == 0 or self.head_body_sep == (len(self.block)
                                                             - 1):
            raise TableMarkupError('The head/body row separator may not be '
                                   'the first or last line of the table.',
                                   offset=i)
class GridTableParser(TableParser):
    """
    Parse a grid table using `parse()`.
    Here's an example of a grid table::
        +------------------------+------------+----------+----------+
        | Header row, column 1 | Header 2 | Header 3 | Header 4 |
        +========================+============+==========+==========+
        | body row 1, column 1 | column 2 | column 3 | column 4 |
        +------------------------+------------+----------+----------+
        | body row 2 | Cells may span columns. |
        +------------------------+------------+---------------------+
        | body row 3 | Cells may | - Table cells |
        +------------------------+ span rows. | - contain |
        | body row 4 | | - body elements. |
        +------------------------+------------+---------------------+
    Intersections use '+', row separators use '-' (except for one optional
    head/body row separator, which uses '='), and column separators use '|'.
    Passing the above table to the `parse()` method will result in the
    following data structure::
        ([24, 12, 10, 10],
         [[(0, 0, 1, ['Header row, column 1']),
           (0, 0, 1, ['Header 2']),
           (0, 0, 1, ['Header 3']),
           (0, 0, 1, ['Header 4'])]],
         [[(0, 0, 3, ['body row 1, column 1']),
           (0, 0, 3, ['column 2']),
           (0, 0, 3, ['column 3']),
           (0, 0, 3, ['column 4'])],
          [(0, 0, 5, ['body row 2']),
           (0, 2, 5, ['Cells may span columns.']),
           None,
           None],
          [(0, 0, 7, ['body row 3']),
           (1, 0, 7, ['Cells may', 'span rows.', '']),
           (1, 1, 7, ['- Table cells', '- contain', '- body elements.']),
           None],
          [(0, 0, 9, ['body row 4']), None, None, None]])
    The first item is a list containing column widths (colspecs). The second
    item is a list of head rows, and the third is a list of body rows. Each
    row contains a list of cells. Each cell is either None (for a cell unused
    because of another cell's span), or a tuple. A cell tuple contains four
    items: the number of extra rows used by the cell in a vertical span
    (morerows); the number of extra columns used by the cell in a horizontal
    span (morecols); the line offset of the first line of the cell contents;
    and the cell contents, a list of lines of text.
    """
    head_body_separator_pat = re.compile(r'\+=[=+]+=\+ *$')
    def setup(self, block):
        # `block` is a docutils StringList, not a plain list; it supports
        # disconnect() and get_2D_block().
        self.block = block[:]  # make a copy; it may be modified
        self.block.disconnect()  # don't propagate changes to parent
        self.bottom = len(block) - 1
        self.right = len(block[0]) - 1
        self.head_body_sep = None
        # done[col]: last row boundary fully processed for that text column.
        self.done = [-1] * len(block[0])
        self.cells = []
        self.rowseps = {0: [0]}
        self.colseps = {0: [0]}
    def parse_table(self):
        """
        Start with a queue of upper-left corners, containing the upper-left
        corner of the table itself. Trace out one rectangular cell, remember
        it, and add its upper-right and lower-left corners to the queue of
        potential upper-left corners of further cells. Process the queue in
        top-to-bottom order, keeping track of how much of each text column has
        been seen.
        We'll end up knowing all the row and column boundaries, cell positions
        and their dimensions.
        """
        corners = [(0, 0)]
        while corners:
            top, left = corners.pop(0)
            # Skip corners on the bottom/right border or already covered by
            # a previously traced cell.
            if top == self.bottom or left == self.right \
                  or top <= self.done[left]:
                continue
            result = self.scan_cell(top, left)
            if not result:
                continue
            bottom, right, rowseps, colseps = result
            update_dict_of_lists(self.rowseps, rowseps)
            update_dict_of_lists(self.colseps, colseps)
            self.mark_done(top, left, bottom, right)
            # Interior of the cell, borders excluded.
            cellblock = self.block.get_2D_block(top + 1, left + 1,
                                                bottom, right)
            cellblock.disconnect()  # lines in cell can't sync with parent
            cellblock.replace(self.double_width_pad_char, '')
            self.cells.append((top, left, bottom, right, cellblock))
            corners.extend([(top, right), (bottom, left)])
            corners.sort()
        if not self.check_parse_complete():
            raise TableMarkupError('Malformed table; parse incomplete.')
    def mark_done(self, top, left, bottom, right):
        """For keeping track of how much of each text column has been seen."""
        before = top - 1
        after = bottom - 1
        for col in range(left, right):
            assert self.done[col] == before
            self.done[col] = after
    def check_parse_complete(self):
        """Each text column should have been completely seen."""
        last = self.bottom - 1
        for col in range(self.right):
            if self.done[col] != last:
                return False
        return True
    def scan_cell(self, top, left):
        """Starting at the top-left corner, start tracing out a cell."""
        assert self.block[top][left] == '+'
        result = self.scan_right(top, left)
        return result
    def scan_right(self, top, left):
        """
        Look for the top-right corner of the cell, and make note of all column
        boundaries ('+').
        """
        colseps = {}
        line = self.block[top]
        for i in range(left + 1, self.right + 1):
            if line[i] == '+':
                colseps[i] = [top]
                # Candidate top-right corner: try to close the rectangle.
                result = self.scan_down(top, left, i)
                if result:
                    bottom, rowseps, newcolseps = result
                    update_dict_of_lists(colseps, newcolseps)
                    return bottom, i, rowseps, colseps
            elif line[i] != '-':
                # Anything but '-' or '+' means this is not a cell border.
                return None
        return None
    def scan_down(self, top, left, right):
        """
        Look for the bottom-right corner of the cell, making note of all row
        boundaries.
        """
        rowseps = {}
        for i in range(top + 1, self.bottom + 1):
            if self.block[i][right] == '+':
                rowseps[i] = [right]
                result = self.scan_left(top, left, i, right)
                if result:
                    newrowseps, colseps = result
                    update_dict_of_lists(rowseps, newrowseps)
                    return i, rowseps, colseps
            elif self.block[i][right] != '|':
                return None
        return None
    def scan_left(self, top, left, bottom, right):
        """
        Noting column boundaries, look for the bottom-left corner of the cell.
        It must line up with the starting point.
        """
        colseps = {}
        line = self.block[bottom]
        for i in range(right - 1, left, -1):
            if line[i] == '+':
                colseps[i] = [bottom]
            elif line[i] != '-':
                return None
        if line[left] != '+':
            return None
        result = self.scan_up(top, left, bottom, right)
        if result is not None:
            rowseps = result
            return rowseps, colseps
        return None
    def scan_up(self, top, left, bottom, right):
        """
        Noting row boundaries, see if we can return to the starting point.
        """
        rowseps = {}
        for i in range(bottom - 1, top, -1):
            if self.block[i][left] == '+':
                rowseps[i] = [left]
            elif self.block[i][left] != '|':
                return None
        return rowseps
    def structure_from_cells(self):
        """
        From the data collected by `scan_cell()`, convert to the final data
        structure.
        """
        # NOTE(review): dict.keys() returning a list with .sort() is a
        # Python 2 idiom (this file ships in a Python 2.7 site-packages);
        # under Python 3 this would need sorted(...).
        rowseps = self.rowseps.keys()  # list of row boundaries
        rowseps.sort()
        rowindex = {}
        for i in range(len(rowseps)):
            rowindex[rowseps[i]] = i  # row boundary -> row number mapping
        colseps = self.colseps.keys()  # list of column boundaries
        colseps.sort()
        colindex = {}
        for i in range(len(colseps)):
            colindex[colseps[i]] = i  # column boundary -> col number map
        colspecs = [(colseps[i] - colseps[i - 1] - 1)
                    for i in range(1, len(colseps))]  # list of column widths
        # prepare an empty table with the correct number of rows & columns
        onerow = [None for i in range(len(colseps) - 1)]
        rows = [onerow[:] for i in range(len(rowseps) - 1)]
        # keep track of # of cells remaining; should reduce to zero
        remaining = (len(rowseps) - 1) * (len(colseps) - 1)
        for top, left, bottom, right, block in self.cells:
            rownum = rowindex[top]
            colnum = colindex[left]
            assert rows[rownum][colnum] is None, (
                'Cell (row %s, column %s) already used.'
                % (rownum + 1, colnum + 1))
            morerows = rowindex[bottom] - rownum - 1
            morecols = colindex[right] - colnum - 1
            remaining -= (morerows + 1) * (morecols + 1)
            # write the cell into the table
            rows[rownum][colnum] = (morerows, morecols, top + 1, block)
        assert remaining == 0, 'Unused cells remaining.'
        if self.head_body_sep:  # separate head rows from body rows
            numheadrows = rowindex[self.head_body_sep]
            headrows = rows[:numheadrows]
            bodyrows = rows[numheadrows:]
        else:
            headrows = []
            bodyrows = rows
        return (colspecs, headrows, bodyrows)
class SimpleTableParser(TableParser):
    """
    Parse a simple table using `parse()`.
    Here's an example of a simple table::
        ===== =====
        col 1 col 2
        ===== =====
        1 Second column of row 1.
        2 Second column of row 2.
          Second line of paragraph.
        3 - Second column of row 3.
          - Second item in bullet
            list (row 3, column 2).
        4 is a span
        ------------
        5
        ===== =====
    Top and bottom borders use '=', column span underlines use '-', column
    separation is indicated with spaces.
    Passing the above table to the `parse()` method will result in the
    following data structure, whose interpretation is the same as for
    `GridTableParser`::
        ([5, 25],
         [[(0, 0, 1, ['col 1']),
           (0, 0, 1, ['col 2'])]],
         [[(0, 0, 3, ['1']),
           (0, 0, 3, ['Second column of row 1.'])],
          [(0, 0, 4, ['2']),
           (0, 0, 4, ['Second column of row 2.',
                      'Second line of paragraph.'])],
          [(0, 0, 6, ['3']),
           (0, 0, 6, ['- Second column of row 3.',
                      '',
                      '- Second item in bullet',
                      '  list (row 3, column 2).'])],
          [(0, 1, 10, ['4 is a span'])],
          [(0, 0, 12, ['5']),
           (0, 0, 12, [''])]])
    """
    head_body_separator_pat = re.compile('=[ =]*$')
    span_pat = re.compile('-[ -]*$')
    def setup(self, block):
        # `block` is a docutils StringList (supports disconnect() and
        # get_2D_block()).
        self.block = block[:]  # make a copy; it will be modified
        self.block.disconnect()  # don't propagate changes to parent
        # Convert top & bottom borders to column span underlines:
        self.block[0] = self.block[0].replace('=', '-')
        self.block[-1] = self.block[-1].replace('=', '-')
        self.head_body_sep = None
        self.columns = []
        self.border_end = None
        self.table = []
        self.done = [-1] * len(block[0])
        self.rowseps = {0: [0]}
        self.colseps = {0: [0]}
    def parse_table(self):
        """
        First determine the column boundaries from the top border, then
        process rows. Each row may consist of multiple lines; accumulate
        lines until a row is complete. Call `self.parse_row` to finish the
        job.
        """
        # Top border must fully describe all table columns.
        self.columns = self.parse_columns(self.block[0], 0)
        self.border_end = self.columns[-1][1]
        firststart, firstend = self.columns[0]
        offset = 1  # skip top border
        start = 1
        text_found = None
        while offset < len(self.block):
            line = self.block[offset]
            if self.span_pat.match(line):
                # Column span underline or border; row is complete.
                self.parse_row(self.block[start:offset], start,
                               (line.rstrip(), offset))
                start = offset + 1
                text_found = None
            elif line[firststart:firstend].strip():
                # First column not blank, therefore it's a new row.
                if text_found and offset != start:
                    self.parse_row(self.block[start:offset], start)
                start = offset
                text_found = 1
            elif not text_found:
                # Still in leading blank lines; keep advancing the row start.
                start = offset + 1
            offset += 1
    def parse_columns(self, line, offset):
        """
        Given a column span underline, return a list of (begin, end) pairs.
        """
        cols = []
        end = 0
        while True:
            begin = line.find('-', end)
            end = line.find(' ', begin)
            if begin < 0:
                break
            if end < 0:
                end = len(line)
            cols.append((begin, end))
        if self.columns:
            # Span underlines must end where the top border ended.
            if cols[-1][1] != self.border_end:
                raise TableMarkupError('Column span incomplete in table '
                                       'line %s.' % (offset+1),
                                       offset=offset)
            # Allow for an unbounded rightmost column:
            cols[-1] = (cols[-1][0], self.columns[-1][1])
        return cols
    def init_row(self, colspec, offset):
        # Build empty cell records [morerows, morecols, offset, lines],
        # aligning each colspec entry against self.columns to find spans.
        i = 0
        cells = []
        for start, end in colspec:
            morecols = 0
            try:
                assert start == self.columns[i][0]
                while end != self.columns[i][1]:
                    i += 1
                    morecols += 1
            except (AssertionError, IndexError):
                raise TableMarkupError('Column span alignment problem '
                                       'in table line %s.' % (offset+2),
                                       offset=offset+1)
            cells.append([0, morecols, offset, []])
            i += 1
        return cells
    def parse_row(self, lines, start, spanline=None):
        """
        Given the text `lines` of a row, parse it and append to `self.table`.
        The row is parsed according to the current column spec (either
        `spanline` if provided or `self.columns`). For each column, extract
        text from each line, and check for text in column margins. Finally,
        adjust for insignificant whitespace.
        """
        if not (lines or spanline):
            # No new row, just blank lines.
            return
        if spanline:
            columns = self.parse_columns(*spanline)
            # NOTE(review): span_offset is assigned but never used below.
            span_offset = spanline[1]
        else:
            columns = self.columns[:]
            span_offset = start
        self.check_columns(lines, start, columns)
        row = self.init_row(columns, start)
        for i in range(len(columns)):
            start, end = columns[i]
            cellblock = lines.get_2D_block(0, start, len(lines), end)
            cellblock.disconnect()  # lines in cell can't sync with parent
            cellblock.replace(self.double_width_pad_char, '')
            row[i][3] = cellblock
        self.table.append(row)
    def check_columns(self, lines, first_line, columns):
        """
        Check for text in column margins and text overflow in the last column.
        Raise TableMarkupError if anything but whitespace is in column margins.
        Adjust the end value for the last column if there is text overflow.
        """
        # "Infinite" value for a dummy last column's beginning, used to
        # check for text overflow:
        # NOTE(review): sys.maxint is Python 2 only (this file ships in a
        # Python 2.7 site-packages); Python 3 would need sys.maxsize.
        columns.append((sys.maxint, None))
        lastcol = len(columns) - 2
        # combining characters do not contribute to the column width
        lines = [strip_combining_chars(line) for line in lines]
        for i in range(len(columns) - 1):
            start, end = columns[i]
            nextstart = columns[i+1][0]
            offset = 0
            for line in lines:
                if i == lastcol and line[end:].strip():
                    # Text overflows the last column: widen it.
                    text = line[start:].rstrip()
                    new_end = start + len(text)
                    columns[i] = (start, new_end)
                    main_start, main_end = self.columns[-1]
                    if new_end > main_end:
                        self.columns[-1] = (main_start, new_end)
                elif line[end:nextstart].strip():
                    raise TableMarkupError('Text in column margin '
                                           'in table line %s.' % (first_line+offset+1),
                                           offset=first_line+offset)
                offset += 1
        columns.pop()
    def structure_from_cells(self):
        # Column widths plus head/body split based on head_body_sep.
        colspecs = [end - start for start, end in self.columns]
        first_body_row = 0
        if self.head_body_sep:
            for i in range(len(self.table)):
                if self.table[i][0][2] > self.head_body_sep:
                    first_body_row = i
                    break
        return (colspecs, self.table[:first_body_row],
                self.table[first_body_row:])
def update_dict_of_lists(master, newdata):
    """
    Merge `newdata` into `master`, extending each list value in place.

    Both parameters must be dictionaries whose values are lists; missing
    keys are created in `master` as needed.
    """
    for key in newdata:
        bucket = master.setdefault(key, [])
        bucket += newdata[key]
|
gpl-2.0
|
acsone/acsone-addons
|
account_analytic_project_id/__init__.py
|
1
|
1072
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Stéphane Bidoul
# Copyright (c) 2012 Acsone SA/NV (http://www.acsone.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import account_analytic_project_id
from .post_install import set_account_analytic_account_project_id
|
agpl-3.0
|
rocky/python-xdis
|
xdis/opcodes/base.py
|
1
|
14179
|
# (C) Copyright 2017, 2019-2021 by Rocky Bernstein
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Common routines for entering and classifiying opcodes. Inspired by,
limited by, and somewhat compatible with the corresponding
Python opcode.py structures
"""
from copy import deepcopy
from xdis.cross_dis import (
findlinestarts,
findlabels,
get_jump_target_maps,
get_jump_targets,
)
from xdis import wordcode
from xdis import IS_PYPY, PYTHON_VERSION
# Operand meanings of the COMPARE_OP opcode, indexed by its argument value.
cmp_op = (
    "<",
    "<=",
    "==",
    "!=",
    ">",
    ">=",
    "in",
    "not-in",
    "is",
    "is-not",
    "exception-match",
    "BAD",
)
# Opcodes greater than 90 take an instruction operand or "argument"
# as opcode.py likes to call it.
HAVE_ARGUMENT = 90
# Names of the per-version opcode tables that init_opdata() copies from a
# base opcode module into a newly initialized one.
fields2copy = """
hascompare hascondition
hasconst hasfree hasjabs hasjrel haslocal
hasname hasnargs hasstore hasvargs oppop oppush
nofollow
""".split()
def init_opdata(l, from_mod, version=None, is_pypy=False):
    """Sets up a number of the structures found in Python's
    opcode.py. Python opcode.py routines assign attributes to modules.
    In order to do this in a modular way here, the local dictionary
    for the module is passed.

    `l` is the module dictionary to populate; `from_mod` is the opcode
    module whose tables are copied as a starting point; `version` is a
    float such as 2.7 or 3.6; `is_pypy` flags a PyPy variant.
    """
    if version:
        l["python_version"] = version
    l["is_pypy"] = is_pypy
    l["cmp_op"] = cmp_op
    l["HAVE_ARGUMENT"] = HAVE_ARGUMENT
    l["findlinestarts"] = findlinestarts
    # Bytecode up to 3.5 is variable-length; 3.6+ uses 2-byte "wordcode",
    # which needs different label/jump-target scanners.
    # NOTE(review): `version <= 3.5` raises TypeError on Python 3 when the
    # default version=None is used; callers appear to always pass a version.
    if version <= 3.5:
        l["findlabels"] = findlabels
        l["get_jump_targets"] = get_jump_targets
        l["get_jump_target_maps"] = get_jump_target_maps
    else:
        l["findlabels"] = wordcode.findlabels
        l["get_jump_targets"] = wordcode.get_jump_targets
        l["get_jump_target_maps"] = wordcode.get_jump_target_maps
    # Deep-copy mutable tables so later edits for this version don't leak
    # back into the module we based it on.
    l["opmap"] = deepcopy(from_mod.opmap)
    l["opname"] = deepcopy(from_mod.opname)
    for field in fields2copy:
        l[field] = list(getattr(from_mod, field))
def compare_op(l, name, op, pop=2, push=1):
    """Register a COMPARE_OP-style opcode: pops two operands, pushes the
    comparison result, and is recorded in the "hascompare" table."""
    def_op(l, name, op, pop, push)
    l["hascompare"].append(op)
def conditional_op(l, name, op):
    """Mark `op` as an opcode that tests a condition.

    Fix: the opcode was previously appended to l["hascompare"], which is
    the COMPARE_OP table populated by compare_op() — a copy-paste slip.
    Conditional opcodes belong in l["hascondition"], the table that
    jabs_op()/jrel_op() also use for their `conditional=True` case.
    """
    l["hascondition"].append(op)
def const_op(l, name, op, pop=0, push=1):
    """Register an opcode whose operand indexes the constants table
    (recorded in "hasconst")."""
    def_op(l, name, op, pop, push)
    l["hasconst"].append(op)
def def_op(l, op_name, opcode, pop=-2, push=-2, fallthrough=True):
    """Register a generic opcode in the opcode tables `l`.

    `pop`/`push` give the stack effect (-2 means "unknown/variable").
    When `fallthrough` is false the opcode never continues to the next
    instruction (e.g. returns, unconditional jumps) and is recorded in
    the "nofollow" table.
    """
    # Forward and reverse name <-> number mappings.
    l["opname"][opcode] = op_name
    l["opmap"][op_name] = opcode
    # Stack effect tables, indexed by opcode number.
    l["oppop"][opcode] = pop
    l["oppush"][opcode] = push
    if not fallthrough:
        l["nofollow"].append(opcode)
def free_op(l, name, op, pop=0, push=1):
    """Register an opcode whose operand indexes the cell/free-variable
    table (recorded in "hasfree")."""
    def_op(l, name, op, pop, push)
    l["hasfree"].append(op)
def jabs_op(l, name, op, pop=0, push=0, conditional=False, fallthrough=True):
    """Register an absolute-jump opcode (recorded in "hasjabs");
    conditional jumps are additionally recorded in "hascondition"."""
    def_op(l, name, op, pop, push, fallthrough=fallthrough)
    l["hasjabs"].append(op)
    if conditional:
        l["hascondition"].append(op)
def jrel_op(l, name, op, pop=0, push=0, conditional=False, fallthrough=True):
    """Register a relative-jump opcode (recorded in "hasjrel");
    conditional jumps are additionally recorded in "hascondition".

    Fix: `fallthrough` was accepted but silently dropped, so a relative
    jump could never be marked no-follow; jabs_op() forwards it correctly
    and this now matches.
    """
    def_op(l, name, op, pop, push, fallthrough=fallthrough)
    l["hasjrel"].append(op)
    if conditional:
        l["hascondition"].append(op)
def local_op(l, name, op, pop=0, push=1):
    """Register an opcode whose operand indexes the local-variable table
    (recorded in "haslocal")."""
    def_op(l, name, op, pop, push)
    l["haslocal"].append(op)
def name_op(l, op_name, op_code, pop=-2, push=-2):
    """Register an opcode whose operand indexes the names table
    (recorded in "hasname")."""
    def_op(l, op_name, op_code, pop, push)
    l["hasname"].append(op_code)
def nargs_op(l, name, op, pop=-2, push=-2, fallthrough=True):
    """Register an opcode that consumes a variable number of call
    arguments (recorded in "hasnargs")."""
    def_op(l, name, op, pop, push, fallthrough=fallthrough)
    l["hasnargs"].append(op)
def rm_op(l, name, op):
    """Remove an opcode. This is used when basing a new Python release off
    of another one, and there is an opcode that is in the old release
    that was removed in the new release.
    We are pretty aggressive about removing traces of the op.
    """
    # opname is positional (indexed by opcode number), so blank the slot
    # rather than deleting it.
    l["opname"][op] = "<%s>" % op
    # Scrub the opcode from every category table it might appear in.
    for table in ("hasconst", "hascompare", "hascondition", "hasfree",
                  "hasjabs", "hasname", "hasjrel", "haslocal",
                  "hasnargs", "hasvargs", "nofollow"):
        if op in l[table]:
            l[table].remove(op)
    # The caller-supplied name must really map to this opcode number.
    assert l["opmap"][name] == op
    del l["opmap"][name]
def store_op(l, name, op, pop=0, push=1, is_type="def"):
    """Register a store-style opcode, dispatching on `is_type` ("name",
    "local", "free" or "def") so the opcode also lands in the matching
    operand-category table; always recorded in "hasstore"."""
    if is_type == "name":
        name_op(l, name, op, pop, push)
    elif is_type == "local":
        local_op(l, name, op, pop, push)
    elif is_type == "free":
        free_op(l, name, op, pop, push)
    else:
        assert is_type == "def"
        def_op(l, name, op, pop, push)
    l["hasstore"].append(op)
# This category is not in CPython's opcode.py. The instruction operand
# indicates how many items to pop from the stack; BUILD_TUPLE_UNPACK is
# like this.
def varargs_op(l, op_name, op_code, pop=-1, push=1):
    """Register an opcode that consumes a variable number of stack items
    (recorded in "hasvargs")."""
    def_op(l, op_name, op_code, pop, push)
    l["hasvargs"].append(op_code)
# Some of the convoluted code below reflects some of the
# many Python idiocies over the years.
def finalize_opcodes(l):
    """Finish building the opcode tables in module dict `l`: normalize
    opcode names, expose every opcode as a module-level attribute, build
    the derived JUMP_OPs/NOFOLLOW sets, and sanity-check against the
    running interpreter's `dis` module."""
    # Not sure why, but opcode.py address has opcode.EXTENDED_ARG
    # as well as opmap['EXTENDED_ARG']
    l["EXTENDED_ARG"] = l["opmap"]["EXTENDED_ARG"]
    # In Python 3.6+ this is 8, but we expect
    # those opcodes to set that
    if "EXTENDED_ARG_SHIFT" not in l:
        l["EXTENDED_ARG_SHIFT"] = 16
    l["ARG_MAX_VALUE"] = (1 << l["EXTENDED_ARG_SHIFT"]) - 1
    l["EXTENDED_ARG"] = l["opmap"]["EXTENDED_ARG"]
    l["opmap"] = fix_opcode_names(l["opmap"])
    # Now add in the attributes into the module
    for op in l["opmap"]:
        l[op] = l["opmap"][op]
    l["JUMP_OPs"] = frozenset(l["hasjrel"] + l["hasjabs"])
    l["NOFOLLOW"] = frozenset(l["nofollow"])
    opcode_check(l)
    return
def fix_opcode_names(opmap):
    """
    Python stupidly named some OPCODES with a + which prevents using opcode name
    directly as an attribute, e.g. SLICE+3. So we turn that into SLICE_3 so we
    can then use opcode_23.SLICE_3. Later Python's fix this.
    """
    return {name.replace("+", "_"): code for name, code in opmap.items()}
def update_pj3(g, l):
    """Install Python-3-style conditional-jump aliases (POP_JUMP_IF_*)
    into the globals dict `g`, then rebuild the derived sets of `l`."""
    g.update({"PJIF": l["opmap"]["POP_JUMP_IF_FALSE"]})
    g.update({"PJIT": l["opmap"]["POP_JUMP_IF_TRUE"]})
    update_sets(l)
def update_pj2(g, l):
    """Install Python-2-style conditional-jump aliases (JUMP_IF_*)
    into the globals dict `g`, then rebuild the derived sets of `l`."""
    g.update({"PJIF": l["opmap"]["JUMP_IF_FALSE"]})
    g.update({"PJIT": l["opmap"]["JUMP_IF_TRUE"]})
    update_sets(l)
def update_sets(l):
    """Recompute the frozen-set views of the opcode category lists in `l`."""
    l["COMPARE_OPS"] = frozenset(l["hascompare"])
    l["CONDITION_OPS"] = frozenset(l["hascondition"])
    l["CONST_OPS"] = frozenset(l["hasconst"])
    l["FREE_OPS"] = frozenset(l["hasfree"])
    l["JREL_OPS"] = frozenset(l["hasjrel"])
    l["JABS_OPS"] = frozenset(l["hasjabs"])
    # (sic) "JUMP_UNCONDITONAL" is the established, misspelled key name;
    # renaming it would break downstream consumers.
    l["JUMP_UNCONDITONAL"] = frozenset(
        [l["opmap"]["JUMP_ABSOLUTE"], l["opmap"]["JUMP_FORWARD"]]
    )
    # SETUP_LOOP disappeared in Python 3.8.
    if l["python_version"] < 3.8:
        l["LOOP_OPS"] = frozenset([l["opmap"]["SETUP_LOOP"]])
    else:
        l["LOOP_OPS"] = frozenset()
    l["LOCAL_OPS"] = frozenset(l["haslocal"])
    l["JUMP_OPS"] = (
        l["JABS_OPS"] | l["JREL_OPS"] | l["LOOP_OPS"] | l["JUMP_UNCONDITONAL"]
    )
    l["NAME_OPS"] = frozenset(l["hasname"])
    l["NARGS_OPS"] = frozenset(l["hasnargs"])
    l["VARGS_OPS"] = frozenset(l["hasvargs"])
    l["STORE_OPS"] = frozenset(l["hasstore"])
def extended_format_CALL_FUNCTION(opc, instructions):
    """call_function_inst should be a "CALL_FUNCTION_KW" instruction. Look in
    `instructions` to see if we can find a method name. If not we'll
    return None.
    """
    # From opcode description: argc indicates the total number of positional and keyword arguments.
    # Sometimes the function name is in the stack arg positions back.
    call_function_inst = instructions[0]
    assert call_function_inst.opname == "CALL_FUNCTION"
    argc = call_function_inst.arg
    (
        name_default,
        pos_args,
    ) = divmod(argc, 256)
    # Each keyword argument occupies two stack slots (name + value); the
    # +1 accounts for the callable itself.
    function_pos = pos_args + name_default * 2 + 1
    assert len(instructions) >= function_pos + 1
    # Walk the preceding instructions (most recent first), adjusting
    # function_pos by each instruction's net stack effect until we reach
    # the position where the callable was pushed.
    for i, inst in enumerate(instructions[1:]):
        if i + 1 == function_pos:
            i += 1
            break
        if inst.is_jump_target:
            # Entering another basic block; stop scanning.
            i += 1
            break
        # Make sure we are in the same basic block
        # and ... ?
        opcode = inst.opcode
        if inst.optype in ("nargs", "vargs"):
            break
        if inst.opname == "LOAD_ATTR" or inst.optype != "name":
            function_pos += (opc.oppop[opcode] - opc.oppush[opcode]) + 1
        if inst.opname in ("CALL_FUNCTION", "CALL_FUNCTION_KW"):
            break
        pass
    s = ""
    if i == function_pos:
        # Only resolve a name when the callable came from a simple load.
        if instructions[function_pos].opname in (
            "LOAD_CONST",
            "LOAD_GLOBAL",
            "LOAD_ATTR",
            "LOAD_NAME",
        ):
            s = resolved_attrs(instructions[function_pos:])
            s += ": "
            pass
        pass
    s += format_CALL_FUNCTION_pos_name_encoded(call_function_inst.arg)
    return s
def resolved_attrs(instructions):
    """Reconstruct a dotted attribute path from a run of instructions.

    Collects each instruction's `argrepr` (stripping the single quotes
    that wrap string constants) until the first non-LOAD_ATTR opcode,
    then joins the parts in reverse order, e.g. ``obj.attr``.
    """
    parts = []
    for inst in instructions:
        text = inst.argrepr or ""
        # argrepr wraps string constants in single quotes; strip them.
        if text.startswith("'") and text.endswith("'"):
            text = text[1:-1]
        parts.append(text)
        if inst.opname != "LOAD_ATTR":
            break
    return ".".join(reversed(parts))
def extended_format_ATTR(opc, instructions):
    """Render ``owner.attribute`` for an attribute access when the owner
    was pushed by a simple load instruction; otherwise return None."""
    prev = instructions[1]
    if prev.opname not in (
        "LOAD_CONST",
        "LOAD_GLOBAL",
        "LOAD_ATTR",
        "LOAD_NAME",
    ):
        return None
    return "%s.%s" % (prev.argrepr, instructions[0].argrepr)
def extended_format_MAKE_FUNCTION_older(opc, instructions):
    """make_function_inst should be a "MAKE_FUNCTION" or "MAKE_CLOSURE" instruction. TOS
    should have the function or closure name.
    """
    # From opcode description: argc indicates the total number of positional and keyword arguments.
    # Sometimes the function name is in the stack arg positions back.
    assert len(instructions) >= 2
    inst = instructions[0]
    assert inst.opname in ("MAKE_FUNCTION", "MAKE_CLOSURE")
    s = ""
    code_inst = instructions[1]
    # The code object pushed just before MAKE_FUNCTION carries the name.
    if code_inst.opname == "LOAD_CONST" and hasattr(code_inst.argval, "co_name"):
        s += "%s: " % code_inst.argval.co_name
        pass
    s += format_MAKE_FUNCTION_default_argc(inst.arg)
    return s
def extended_format_RAISE_VARARGS_older(opc, instructions):
    """Produce an extended operand description for RAISE_VARARGS.

    `instructions[0]` must be the RAISE_VARARGS instruction; the next
    instruction is inspected to resolve the raised name when possible.
    """
    raise_inst = instructions[0]
    assert raise_inst.opname == "RAISE_VARARGS"
    # Fix: the guard was `>= 1`, but instructions[1] is read below, so at
    # least two instructions are required.
    assert len(instructions) >= 2
    if instructions[1].opname in (
        "LOAD_CONST",
        "LOAD_GLOBAL",
        "LOAD_ATTR",
        "LOAD_NAME",
    ):
        return resolved_attrs(instructions[1:])
    return format_RAISE_VARARGS_older(raise_inst.argval)
def extended_format_RETURN_VALUE(opc, instructions):
    """Produce an extended operand description for RETURN_VALUE, resolving
    the returned name when the value came from a simple load; otherwise
    return None."""
    return_inst = instructions[0]
    assert return_inst.opname == "RETURN_VALUE"
    # Fix: the guard was `>= 1`, but instructions[1] is read below, so at
    # least two instructions are required.
    assert len(instructions) >= 2
    if instructions[1].opname in (
        "LOAD_CONST",
        "LOAD_GLOBAL",
        "LOAD_ATTR",
        "LOAD_NAME",
    ):
        return resolved_attrs(instructions[1:])
    return None
def format_extended_arg(arg):
    """Render the effective value of an EXTENDED_ARG operand, which is
    shifted 16 bits left before being combined with the next opcode's arg."""
    return str(arg << 16)
def format_CALL_FUNCTION_pos_name_encoded(argc):
    """Encoded positional and named args. Used up to about 3.6 where
    wordcodes are used and a different encoding occurs. Pypy36 though
    sticks to this encoded version."""
    # Low byte: positional count; remaining high bits: keyword count.
    positional = argc & 0xFF
    named = argc >> 8
    return "%d positional, %d named" % (positional, named)
# After Python 3.2
def format_MAKE_FUNCTION_arg(argc):
    """Decode a Python 3.3-3.5 MAKE_FUNCTION operand into its three
    byte-packed fields: positional defaults, keyword-only defaults, and
    annotation count."""
    pos_args = argc & 0xFF
    name_default = (argc >> 8) & 0xFF
    annotate_args = argc >> 16
    return "%d positional, %d name and default, %d annotations" % (
        pos_args,
        name_default,
        annotate_args,
    )
# Up to and including Python 3.2
def format_MAKE_FUNCTION_default_argc(argc):
    """The operand is simply the count of default parameter values on
    the stack."""
    return "%d default parameters" % argc
# Up until 3.7
def format_RAISE_VARARGS_older(argc):
    """Describe a RAISE_VARARGS operand: how many stack arguments (0-3)
    the raise consumes."""
    assert 0 <= argc <= 3
    descriptions = (
        "reraise",
        "exception",
        "exception, parameter",
        "exception, parameter, traceback",
    )
    return descriptions[argc]
def opcode_check(l):
    """When the version of Python we are running happens
    to have the same opcode set as the opcode we are
    importing, we perform checks to make sure our opcode
    set matches exactly.
    """
    # Python 2.6 reports 2.6000000000000001
    if abs(PYTHON_VERSION - l["python_version"]) <= 0.01 and IS_PYPY == l["is_pypy"]:
        try:
            import dis
            opmap = fix_opcode_names(dis.opmap)
            # print(set(opmap.items()) - set(l['opmap'].items()))
            # print(set(l['opmap'].items()) - set(opmap.items()))
            assert all(item in opmap.items() for item in l["opmap"].items())
            assert all(item in l["opmap"].items() for item in opmap.items())
        except:
            # Deliberately best-effort: this is a sanity check only, and a
            # mismatch (or asserts compiled out via -O) must never break
            # importing the opcode module.
            pass
def dump_opcodes(opmap):
    """Utility for dumping opcodes: print one "number name" line per
    opcode, ordered by opcode number."""
    by_number = {number: name for name, number in opmap.items()}
    for number in sorted(by_number):
        print("%-3s %s" % (str(number), by_number[number]))
|
gpl-2.0
|
chenc10/Spark-PAF
|
python/run-tests.py
|
4
|
7629
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import logging
from optparse import OptionParser
import os
import re
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
if sys.version < '3':
import Queue
else:
import queue as Queue
# Append `SPARK_HOME/dev` to the Python path so that we can import the sparktestsupport module
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../dev/"))
from sparktestsupport import SPARK_HOME # noqa (suppress pep8 warnings)
from sparktestsupport.shellutils import which, subprocess_check_output # noqa
from sparktestsupport.modules import all_modules # noqa
# Map of module name -> module object for every testable Python module;
# modules without python_test_goals and the aggregate 'root' are excluded.
python_modules = dict((m.name, m) for m in all_modules if m.python_test_goals if m.name != 'root')
def print_red(text):
    """Echo *text* to stdout wrapped in ANSI red escape codes."""
    red, reset = '\033[31m', '\033[0m'
    print(red + text + reset)
# All captured test output is appended to this single log file.
LOG_FILE = os.path.join(SPARK_HOME, "python/unit-tests.log")
# Serializes failure reporting across worker threads so output isn't interleaved.
FAILURE_REPORTING_LOCK = Lock()
LOGGER = logging.getLogger()
def run_individual_python_test(test_name, pyspark_python):
    """Run one PySpark test goal with the given Python executable.

    On failure the captured output is appended to LOG_FILE and the whole
    process is terminated via os._exit, so the first failing test aborts
    the entire run even when called from a worker thread.
    """
    env = dict(os.environ)
    env.update({'SPARK_TESTING': '1', 'PYSPARK_PYTHON': which(pyspark_python)})
    LOGGER.debug("Starting test(%s): %s", pyspark_python, test_name)
    start_time = time.time()
    try:
        # Combined stdout/stderr of the test goes into an unnamed temp file.
        per_test_output = tempfile.TemporaryFile()
        retcode = subprocess.Popen(
            [os.path.join(SPARK_HOME, "bin/pyspark"), test_name],
            stderr=per_test_output, stdout=per_test_output, env=env).wait()
    except:
        LOGGER.exception("Got exception while running %s with %s", test_name, pyspark_python)
        # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
        # this code is invoked from a thread other than the main thread.
        os._exit(1)
    duration = time.time() - start_time
    # Exit on the first failure.
    if retcode != 0:
        try:
            with FAILURE_REPORTING_LOCK:
                with open(LOG_FILE, 'ab') as log_file:
                    per_test_output.seek(0)
                    log_file.writelines(per_test_output)
                per_test_output.seek(0)
                for line in per_test_output:
                    decoded_line = line.decode()
                    # Only echo lines that do not start with digits
                    # (presumably filters timing/progress noise -- confirm).
                    if not re.match('[0-9]+', decoded_line):
                        print(decoded_line, end='')
                per_test_output.close()
        except:
            LOGGER.exception("Got an exception while trying to print failed test output")
        finally:
            print_red("\nHad test failures in %s with %s; see logs." % (test_name, pyspark_python))
            # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
            # this code is invoked from a thread other than the main thread.
            os._exit(-1)
    else:
        per_test_output.close()
        LOGGER.info("Finished test(%s): %s (%is)", pyspark_python, test_name, duration)
def get_default_python_executables():
    """Return the Python executables found on PATH to test against.

    Falls back to plain ``python`` when ``python2.6`` is unavailable.
    """
    candidates = ["python2.6", "python3.4", "pypy"]
    found = [exe for exe in candidates if which(exe)]
    if "python2.6" not in found:
        LOGGER.warning("Not testing against `python2.6` because it could not be found; falling"
                       " back to `python` instead")
        found.insert(0, "python")
    return found
def parse_opts():
    """Parse command-line options for the test runner.

    Exits with a usage error on unexpected positional arguments or a
    parallelism value below 1; otherwise returns the options object.
    """
    parser = OptionParser(prog="run-tests")
    parser.add_option(
        "--python-executables", type="string", default=','.join(get_default_python_executables()),
        help="A comma-separated list of Python executables to test against (default: %default)"
    )
    parser.add_option(
        "--modules", type="string",
        default=",".join(sorted(python_modules.keys())),
        help="A comma-separated list of Python modules to test (default: %default)"
    )
    parser.add_option(
        "-p", "--parallelism", type="int", default=4,
        help="The number of suites to test in parallel (default %default)"
    )
    parser.add_option(
        "--verbose", action="store_true",
        help="Enable additional debug logging"
    )
    options, leftover_args = parser.parse_args()
    if leftover_args:
        parser.error("Unsupported arguments: %s" % ' '.join(leftover_args))
    if options.parallelism < 1:
        parser.error("Parallelism cannot be less than 1")
    return options
def main():
    """Entry point: build the task queue of (executable, test goal) pairs and
    drain it with a pool of daemon worker threads."""
    opts = parse_opts()
    if (opts.verbose):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(stream=sys.stdout, level=log_level, format="%(message)s")
    LOGGER.info("Running PySpark tests. Output is in %s", LOG_FILE)
    # Start from a clean log; failing tests append their output to it.
    if os.path.exists(LOG_FILE):
        os.remove(LOG_FILE)
    python_execs = opts.python_executables.split(',')
    modules_to_test = []
    for module_name in opts.modules.split(','):
        if module_name in python_modules:
            modules_to_test.append(python_modules[module_name])
        else:
            print("Error: unrecognized module '%s'. Supported modules: %s" %
                  (module_name, ", ".join(python_modules)))
            sys.exit(-1)
    LOGGER.info("Will test against the following Python executables: %s", python_execs)
    LOGGER.info("Will test the following Python modules: %s", [x.name for x in modules_to_test])
    task_queue = Queue.Queue()
    for python_exec in python_execs:
        # Ask each interpreter which implementation it is (e.g. CPython/PyPy)
        # so modules blacklisted for that implementation can be skipped.
        python_implementation = subprocess_check_output(
            [python_exec, "-c", "import platform; print(platform.python_implementation())"],
            universal_newlines=True).strip()
        LOGGER.debug("%s python_implementation is %s", python_exec, python_implementation)
        LOGGER.debug("%s version is: %s", python_exec, subprocess_check_output(
            [python_exec, "--version"], stderr=subprocess.STDOUT, universal_newlines=True).strip())
        for module in modules_to_test:
            if python_implementation not in module.blacklisted_python_implementations:
                for test_goal in module.python_test_goals:
                    task_queue.put((python_exec, test_goal))
    def process_queue(task_queue):
        # Worker loop: drain the queue until empty; task_done() runs even if
        # the test raises (failures abort the whole process via os._exit).
        while True:
            try:
                (python_exec, test_goal) = task_queue.get_nowait()
            except Queue.Empty:
                break
            try:
                run_individual_python_test(test_goal, python_exec)
            finally:
                task_queue.task_done()
    start_time = time.time()
    for _ in range(opts.parallelism):
        worker = Thread(target=process_queue, args=(task_queue,))
        worker.daemon = True
        worker.start()
    try:
        task_queue.join()
    except (KeyboardInterrupt, SystemExit):
        print_red("Exiting due to interrupt")
        sys.exit(-1)
    total_duration = time.time() - start_time
    LOGGER.info("Tests passed in %i seconds", total_duration)
if __name__ == "__main__":
    main()
|
apache-2.0
|
akshaynathr/mailman
|
src/mailman/interfaces/preferences.py
|
3
|
2789
|
# Copyright (C) 2007-2012 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Interface for preferences."""
from __future__ import absolute_import, unicode_literals
__metaclass__ = type
__all__ = [
'IPreferences',
]
from zope.interface import Interface, Attribute
class IPreferences(Interface):
    """Delivery related information."""
    # Every preference below is tri-state: True, False, or None, where None
    # consistently means "no preference specified".
    acknowledge_posts = Attribute(
        """Send an acknowledgment for every posting?
        This preference can be True, False, or None. True means the user is
        sent a receipt for each message they send to the mailing list. False
        means that no receipt is sent. None means no preference is
        specified.""")
    # Language code string, or None.
    preferred_language = Attribute(
        """The preferred language for interacting with a mailing list.
        This is either the language code for the preferred language, or None
        meaning no preferred language is specified.""")
    # Controls duplicate suppression for explicitly-addressed recipients.
    receive_list_copy = Attribute(
        """Should an explicit recipient receive a list copy?
        When a list member is explicitly named in a message's recipients
        (e.g. the To or CC headers), and this preference is True, the
        recipient will still receive a list copy of the message. When False,
        this list copy will be suppressed. None means no preference is
        specified.""")
    receive_own_postings = Attribute(
        """Should the poster get a list copy of their own messages?
        When this preference is True, a list copy will be sent to the poster
        of all messages. When False, this list copy will be suppressed. None
        means no preference is specified.""")
    # DeliveryMode enum constant, or None.
    delivery_mode = Attribute(
        """The preferred delivery mode.
        This is an enum constant of the type DeliveryMode. It may also be
        None which means that no preference is specified.""")
    # DeliveryStatus enum constant, or None.
    delivery_status = Attribute(
        """The delivery status.
        This is an enum constant of type DeliveryStatus. It may also be None
        which means that no preference is specified.
        XXX I'm not sure this is the right place to put this.""")
|
gpl-3.0
|
justathoughtor2/atomicApe
|
cygwin/lib/python2.7/site-packages/astroid/inference.py
|
1
|
12104
|
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""this module contains a set of functions to handle inference on astroid trees
"""
from __future__ import print_function
from astroid import bases
from astroid import context as contextmod
from astroid import exceptions
from astroid import manager
from astroid import nodes
from astroid import protocols
from astroid import util
MANAGER = manager.AstroidManager()
# .infer method ###############################################################
def infer_end(self, context=None):
    """inference's end for node such as Module, ClassDef, FunctionDef,
    Const...
    """
    yield self
# These node types are their own inference result: inferring them yields
# the node itself.
nodes.Module._infer = infer_end
nodes.ClassDef._infer = infer_end
nodes.FunctionDef._infer = infer_end
nodes.Lambda._infer = infer_end
nodes.Const._infer = infer_end
nodes.List._infer = infer_end
nodes.Tuple._infer = infer_end
nodes.Dict._infer = infer_end
nodes.Set._infer = infer_end
def _higher_function_scope(node):
    """Return the first function which encloses the given scope.

    This can be used for looking up a name in that function's scope when
    lookup in a lower scope fails.

    :param node: A scope node.
    :returns:
        ``None`` if no enclosing function scope was found, otherwise the
        :class:`astroid.scoped_nodes.Function` instance enclosing *node*.
    """
    scope = node
    # Climb the parent chain until the parent is a FunctionDef (or missing).
    while scope.parent and not isinstance(scope.parent, nodes.FunctionDef):
        scope = scope.parent
    if scope and scope.parent:
        return scope.parent
    return None
def infer_name(self, context=None):
    """infer a Name: use name lookup rules"""
    frame, stmts = self.lookup(self.name)
    if not stmts:
        # Try to see if the name is enclosed in a nested function
        # and use the higher (first function) scope for searching.
        # TODO: should this be promoted to other nodes as well?
        parent_function = _higher_function_scope(self.scope())
        if parent_function:
            _, stmts = parent_function.lookup(self.name)
        if not stmts:
            raise exceptions.UnresolvableName(self.name)
    # NOTE(review): assumes context is not None here -- presumably the
    # path_wrapper applied below always supplies one; confirm.
    context = context.clone()
    context.lookupname = self.name
    return bases._infer_stmts(stmts, context, frame)
nodes.Name._infer = bases.path_wrapper(infer_name)
nodes.AssignName.infer_lhs = infer_name # won't work with a path wrapper
@bases.path_wrapper
@bases.raise_if_nothing_inferred
def infer_call(self, context=None):
    """infer a Call node by trying to guess what the function returns"""
    callcontext = context.clone()
    callcontext.callcontext = contextmod.CallContext(args=self.args,
                                                     keywords=self.keywords)
    callcontext.boundnode = None
    # Infer every possible callee, then ask each for its call result.
    for callee in self.func.infer(context):
        if callee is util.YES:
            # Unknown callee: propagate the "cannot infer" marker.
            yield callee
            continue
        try:
            if hasattr(callee, 'infer_call_result'):
                for inferred in callee.infer_call_result(self, callcontext):
                    yield inferred
        except exceptions.InferenceError:
            ## XXX log error ?
            continue
nodes.Call._infer = infer_call
@bases.path_wrapper
def infer_import(self, context=None, asname=True):
    """infer an Import node: return the imported module/object"""
    name = context.lookupname
    if name is None:
        raise exceptions.InferenceError()
    if asname:
        # `name` is the local alias; translate it back to the real module name.
        yield self.do_import_module(self.real_name(name))
    else:
        yield self.do_import_module(name)
nodes.Import._infer = infer_import
def infer_name_module(self, name):
    # Convenience wrapper: infer the module bound to `name` by this import,
    # bypassing alias translation (asname=False).
    context = contextmod.InferenceContext()
    context.lookupname = name
    return self.infer(context, asname=False)
nodes.Import.infer_name_module = infer_name_module
@bases.path_wrapper
def infer_import_from(self, context=None, asname=True):
    """infer a ImportFrom node: return the imported module/object"""
    name = context.lookupname
    if name is None:
        raise exceptions.InferenceError()
    if asname:
        name = self.real_name(name)
    module = self.do_import_module()
    try:
        context = contextmod.copy_context(context)
        context.lookupname = name
        # Ignore module locals only when looking the name up on our own root.
        stmts = module.getattr(name, ignore_locals=module is self.root())
        return bases._infer_stmts(stmts, context)
    except exceptions.NotFoundError:
        raise exceptions.InferenceError(name)
nodes.ImportFrom._infer = infer_import_from
@bases.raise_if_nothing_inferred
def infer_attribute(self, context=None):
    """infer an Attribute node by using getattr on the associated object"""
    for owner in self.expr.infer(context):
        if owner is util.YES:
            yield owner
            continue
        try:
            # Temporarily bind the owner so igetattr resolves bound access
            # correctly; reset on every exit path.
            context.boundnode = owner
            for obj in owner.igetattr(self.attrname, context):
                yield obj
            context.boundnode = None
        except (exceptions.NotFoundError, exceptions.InferenceError):
            context.boundnode = None
        except AttributeError:
            # XXX method / function
            context.boundnode = None
nodes.Attribute._infer = bases.path_wrapper(infer_attribute)
nodes.AssignAttr.infer_lhs = infer_attribute # # won't work with a path wrapper
@bases.path_wrapper
def infer_global(self, context=None):
    """Infer a Global node: look the name up in the enclosing module scope."""
    if context.lookupname is None:
        raise exceptions.InferenceError()
    try:
        module_attrs = self.root().getattr(context.lookupname)
        return bases._infer_stmts(module_attrs, context)
    except exceptions.NotFoundError:
        raise exceptions.InferenceError()
nodes.Global._infer = infer_global
@bases.raise_if_nothing_inferred
def infer_subscript(self, context=None):
    """Inference for subscripts
    We're understanding if the index is a Const
    or a slice, passing the result of inference
    to the value's `getitem` method, which should
    handle each supported index type accordingly.
    """
    value = next(self.value.infer(context))
    if value is util.YES:
        yield util.YES
        return
    index = next(self.slice.infer(context))
    if index is util.YES:
        yield util.YES
        return
    if isinstance(index, nodes.Const):
        try:
            assigned = value.getitem(index.value, context)
        except AttributeError:
            # Value does not support item access at all.
            raise exceptions.InferenceError()
        except (IndexError, TypeError):
            # Out-of-range or wrongly-typed index: result unknown.
            yield util.YES
            return
        # Prevent inferring if the infered subscript
        # is the same as the original subscripted object.
        if self is assigned or assigned is util.YES:
            yield util.YES
            return
        for infered in assigned.infer(context):
            yield infered
    else:
        raise exceptions.InferenceError()
nodes.Subscript._infer = bases.path_wrapper(infer_subscript)
nodes.Subscript.infer_lhs = infer_subscript
@bases.raise_if_nothing_inferred
def infer_unaryop(self, context=None):
    # Infer a UnaryOp by applying the operator to each inferred operand;
    # operands that cannot answer fall back to the dunder-method heuristic.
    for operand in self.operand.infer(context):
        try:
            yield operand.infer_unary_op(self.op)
        except TypeError:
            continue
        except AttributeError:
            meth = protocols.UNARY_OP_METHOD[self.op]
            if meth is None:
                yield util.YES
            else:
                try:
                    # XXX just suppose if the type implement meth, returned type
                    # will be the same
                    operand.getattr(meth)
                    yield operand
                except GeneratorExit:
                    # Never swallow generator shutdown.
                    raise
                except:
                    yield util.YES
nodes.UnaryOp._infer = bases.path_wrapper(infer_unaryop)
def _infer_binop(operator, operand1, operand2, context, failures=None):
    # Yield possible results of `operand1 <operator> operand2`. When
    # `failures` is supplied, unresolved left operands are collected there
    # instead of yielding YES, so the caller can retry with sides swapped.
    if operand1 is util.YES:
        yield operand1
        return
    try:
        for valnode in operand1.infer_binary_op(operator, operand2, context):
            yield valnode
    except AttributeError:
        try:
            # XXX just suppose if the type implement meth, returned type
            # will be the same
            operand1.getattr(protocols.BIN_OP_METHOD[operator])
            yield operand1
        except:
            if failures is None:
                yield util.YES
            else:
                failures.append(operand1)
@bases.yes_if_nothing_inferred
def infer_binop(self, context=None):
    # Try with the left-hand side as primary operand first; operands that
    # failed are retried with the sides swapped (mirrors Python's
    # reflected-operator protocol).
    failures = []
    for lhs in self.left.infer(context):
        for val in _infer_binop(self.op, lhs, self.right, context, failures):
            yield val
    for lhs in failures:
        for rhs in self.right.infer(context):
            for val in _infer_binop(self.op, rhs, lhs, context):
                yield val
nodes.BinOp._infer = bases.path_wrapper(infer_binop)
def infer_arguments(self, context=None):
    """Infer an Arguments node by delegating to argument-name inference."""
    lookup = context.lookupname
    if lookup is None:
        raise exceptions.InferenceError()
    return protocols._arguments_infer_argname(self, lookup, context)
nodes.Arguments._infer = infer_arguments
@bases.path_wrapper
def infer_assign(self, context=None):
    """infer a AssignName/AssignAttr: need to inspect the RHS part of the
    assign node
    """
    stmt = self.statement()
    if isinstance(stmt, nodes.AugAssign):
        # Augmented assignment (x += y): infer the statement itself.
        return stmt.infer(context)
    stmts = list(self.assigned_stmts(context=context))
    return bases._infer_stmts(stmts, context)
nodes.AssignName._infer = infer_assign
nodes.AssignAttr._infer = infer_assign
def infer_augassign(self, context=None):
    # Same retry-with-swapped-operands scheme as infer_binop, applied to the
    # augmented assignment's target and value.
    failures = []
    for lhs in self.target.infer_lhs(context):
        for val in _infer_binop(self.op, lhs, self.value, context, failures):
            yield val
    for lhs in failures:
        for rhs in self.value.infer(context):
            for val in _infer_binop(self.op, rhs, lhs, context):
                yield val
nodes.AugAssign._infer = bases.path_wrapper(infer_augassign)
# no infer method on DelName and DelAttr (expected InferenceError)
@bases.path_wrapper
def infer_empty_node(self, context=None):
    # EmptyNode wraps a runtime object (from a living module); infer it by
    # asking the manager to build an AST for that object.
    if not self.has_underlying_object():
        yield util.YES
    else:
        try:
            for inferred in MANAGER.infer_ast_from_something(self.object,
                                                             context=context):
                yield inferred
        except exceptions.AstroidError:
            yield util.YES
nodes.EmptyNode._infer = infer_empty_node
def infer_index(self, context=None):
    # An Index node is a thin wrapper: defer inference to the wrapped value.
    return self.value.infer(context)
nodes.Index._infer = infer_index
# TODO: move directly into bases.Instance when the dependency hell
# will be solved.
def instance_getitem(self, index, context=None):
    """Infer `instance[index]` by calling the instance's __getitem__."""
    # Rewrap index to Const for this case
    index = nodes.Const(index)
    if context:
        new_context = context.clone()
    else:
        context = new_context = contextmod.InferenceContext()
    # Create a new callcontext for providing index as an argument.
    new_context.callcontext = contextmod.CallContext(args=[index])
    new_context.boundnode = self
    method = next(self.igetattr('__getitem__', context=context))
    if not isinstance(method, bases.BoundMethod):
        raise exceptions.InferenceError
    try:
        return next(method.infer_call_result(self, new_context))
    except StopIteration:
        # __getitem__ produced no inferable result.
        raise exceptions.InferenceError
bases.Instance.getitem = instance_getitem
|
gpl-3.0
|
yatto/xteam31
|
scripts/rt-tester/rt-tester.py
|
11005
|
5307
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
# Flags set from the command line (-q, -t, -c respectively).
quiet = 0
test = 0
comments = 0
# Sysfs paths of the in-kernel rt-mutex tester device.
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
# Symbolic command name -> numeric opcode string written to the command file.
cmd_opcodes = {
    "schedother"    : "1",
    "schedfifo"     : "2",
    "lock"          : "3",
    "locknowait"    : "4",
    "lockint"       : "5",
    "lockintnowait" : "6",
    "lockcont"      : "7",
    "unlock"        : "8",
    "signal"        : "11",
    "resetevent"    : "98",
    "reset"         : "99",
}
# Test opcode name -> [status field letter, relation, fixed argument].
test_opcodes = {
    "prioeq"        : ["P" , "eq" , None],
    "priolt"        : ["P" , "lt" , None],
    "priogt"        : ["P" , "gt" , None],
    "nprioeq"       : ["N" , "eq" , None],
    "npriolt"       : ["N" , "lt" , None],
    "npriogt"       : ["N" , "gt" , None],
    "unlocked"      : ["M" , "eq" , 0],
    "trylock"       : ["M" , "eq" , 1],
    "blocked"       : ["M" , "eq" , 2],
    "blockedwake"   : ["M" , "eq" , 3],
    "locked"        : ["M" , "eq" , 4],
    "opcodeeq"      : ["O" , "eq" , None],
    "opcodelt"      : ["O" , "lt" , None],
    "opcodegt"      : ["O" , "gt" , None],
    "eventeq"       : ["E" , "eq" , None],
    "eventlt"       : ["E" , "lt" , None],
    "eventgt"       : ["E" , "gt" , None],
}
# Print usage information
def usage():
    """Print command-line usage to stdout (Python 2 print syntax)."""
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c    display comments after first command"
    print " -h    help"
    print " -q    quiet mode"
    print " -t    test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return
# Print progress when not in quiet mode
def progress(str):
    """Echo *str* unless quiet mode (-q) is active.

    NOTE(review): the parameter name shadows the builtin ``str``.
    """
    if not quiet:
        print str
# Analyse a status value
def analyse(val, top, arg):
    """Compare a status value against a test-opcode specification.

    val: raw status value read from sysfs (string or int).
    top: triple [field-letter, relation, fixed-arg] from test_opcodes.
    arg: argument from the test line (digit selector for "M" fields,
         command name or raw opcode for "O", plain integer otherwise).

    Returns 1 when the relation ("eq", "lt" or "gt") holds, else 0.
    """
    intval = int(val)
    if top[0] == "M":
        # Mutex state: select the decimal digit indexed by arg.  Use //
        # so the division stays integral under Python 3 as well (this
        # file is Python 2, where int / int already floors).
        intval = intval // (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode: accept either a symbolic command name or a raw number.
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)
    # progress("%d %s %d" %(intval, top[1], argval))
    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Parse the commandline
# (Python 2 except syntax; any getopt failure prints usage and exits.)
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    usage()
    sys.exit(1)
# Parse commandline options
# -c/-q/-t toggle the module-level flags defined above; -h prints help.
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)
# Select the input source
# Tests are read from the file named on the command line, else from stdin.
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin
linenr = 0
# Read the test patterns
# Each non-comment line has four colon-separated fields:
#   command : opcode : thread-id : data
while 1:
    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        break
    line = line.strip()
    parts = line.split(":")
    if not parts or len(parts) < 1:
        continue
    if len(parts[0]) == 0:
        continue
    if parts[0].startswith("#"):
        # Comment line: echoed only after -c and the first real command.
        if comments > 1:
            progress(line)
        continue
    if comments == 1:
        comments = 2
    progress(line)
    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()
    try:
        # Test or wait for a status value
        # "t" checks once; "w" polls the status file until the condition holds.
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]
            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                print fname
                continue
            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                if query or cmd == "t":
                    break
                progress("     " + status)
            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)
        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()
    except Exception,ex:
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
            sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
gpl-2.0
|
sternshus/arelle2.7
|
svr-2.7/arelle/plugin/TDnetLoader.py
|
1
|
9285
|
# -*- coding: utf-8 -*-
'''
TDnetLoader is a plug-in to both GUI menu and command line/web service
that loads a TDnet html index file. TDnet is Tokyo Stock Exchange's
Timely Disclosure Network.
(c) Copyright 2014 Mark V Systems Limited, All rights reserved.
'''
from lxml import html
import datetime, re, os
from arelle import FileSource
from arelle.ModelRssObject import ModelRssObject
class TDnetItem:
    """One TDnet disclosure entry, exposed with an RSS-item-like interface."""

    def __init__(self, modelXbrl, date, dateTime, filingCode, companyName,
                 title, htmlUrl, entryUrl, stockExchange):
        # Fields with no TDnet equivalent stay unset (None).
        self.cikNumber = None
        self.fileNumber = None
        self.period = None
        self.assignedSic = None
        self.fiscalYearEnd = None
        # TDnet fields mapped onto the RSS-item attribute names.
        self.accessionNumber = filingCode
        self.companyName = companyName
        self.formType = stockExchange
        self.pubDate = dateTime
        self.acceptanceDatetime = dateTime
        self.filingDate = date
        self.htmlUrl = htmlUrl
        self.url = entryUrl
        self.zippedUrl = entryUrl
        self.htmURLs = ((title, htmlUrl),)
        # Validation bookkeeping, filled in later by setResults().
        self.status = "not tested"
        self.results = None
        self.assertions = None
        # Register with the model so the item participates in object indexing.
        self.objectIndex = len(modelXbrl.modelObjects)
        modelXbrl.modelObjects.append(self)

    def setResults(self, modelXbrl):
        """Summarize modelXbrl.errors into status/results/assertions."""
        self.results = []
        self.assertionUnsuccessful = False
        self.status = "pass"
        for error in modelXbrl.errors:
            if isinstance(error, dict):
                # Assertion results dict (per the original naming, values are
                # unsuccessful counts); any positive count flips the status.
                self.assertions = error
                for _successful, unsuccessful in error.items():
                    if unsuccessful > 0:
                        self.assertionUnsuccessful = True
                        self.status = "unsuccessful"
            else:
                # Plain error code: collect it and mark the item failed.
                self.results.append(error)
                self.status = "fail"
        self.results.sort()

    def objectId(self, refId=""):
        """Returns a string surrogate representing the object index of the model document,
        prepended by the refId string.
        :param refId: A string to prefix the refId for uniqueless (such as to use in tags for tkinter)
        :type refId: str
        """
        return "_{0}_{1}".format(refId, self.objectIndex)
# Matches "YYYY年MM月DD日" dates on the TDnet index page.
datePattern = re.compile(r"\s*([0-9]+)年([0-9]+)月([0-9]+)日")
# Matches "HH:MM" times in the table rows.
timePattern = re.compile(r"\s*([0-9]+):([0-9]+)")
# Extracts the target URL from the "next screen" button's onclick handler.
nextLocationPattern = re.compile(r"location='(.+)'")
def intCol(elt, attrName, default=None):
    """Return attribute *attrName* of *elt* coerced to int, or *default*
    when the attribute is missing or not a valid integer."""
    try:
        raw = elt.get(attrName, default)
        return int(raw)
    except (TypeError, ValueError):
        return default
def descendantAttr(elt, descendantName, attrName, default=None):
    """Return the stripped *attrName* value of the first descendant of *elt*
    with tag *descendantName* that has a truthy value, else *default*."""
    for node in elt.iterdescendants(tag=descendantName):
        value = node.get(attrName)
        if value:
            return value.strip()
    return default
def tdNetLoader(modelXbrl, mappedUri, filepath, **kwargs):
    """Load a TDnet HTML index page as a pseudo-RSS feed of filings.

    Returns a ModelRssObject whose rssItems are TDnetItem entries, or None
    when the URI/document is not a TDnet index (so the ordinary loader runs).
    Follows "next screen" continuation pages until none remain.
    """
    if not (mappedUri.startswith("https://www.release.tdnet.info/inbs/I_") and
            mappedUri.endswith(".html")):
        return None # not a td net info file
    rssObject = ModelRssObject(modelXbrl, uri=mappedUri, filepath=filepath)
    hasMoreSections = True
    while hasMoreSections:
        # treat tdnet as an RSS feed object
        try:
            tdInfoDoc = html.parse(filepath)
        except (IOError, EnvironmentError):
            return None # give up, use ordinary loader
        # find date
        # The page date ("YYYY年MM月DD日") appears in some element before the
        # first table; rows only carry times, so the date comes from here.
        date = None
        for elt in tdInfoDoc.iter():
            if elt.tag == "table":
                break # no date portion, probably wrong document
            if elt.text and datePattern.match(elt.text):
                g = datePattern.match(elt.text).groups()
                date = datetime.date(int(g[0]), int(g[1]), int(g[2]))
                break
        if not date:
            return None # give up, not a TDnet index document
        urlDir = os.path.dirname(mappedUri)
        # find <table> with <a>Download in it
        for tableElt in tdInfoDoc.iter(tag="table"):
            useThisTableElt = False
            for aElt in tableElt.iterdescendants(tag="a"):
                if "download" in aElt.text.lower():
                    useThisTableElt = True
                    break
            if useThisTableElt:
                # cols maps both column-index -> field name and field name ->
                # column-index, filled from the header row ("tableh" cells).
                # NOTE(review): data rows appearing before the header row
                # would hit cols["title"] with an empty map -- presumably the
                # header always comes first; confirm.
                cols = {}
                for trElt in tableElt.iter(tag="tr"):
                    col = 0
                    rowData = {}
                    for tdElt in trElt.iter(tag="td"):
                        text = ''.join(t.strip() for t in tdElt.itertext())
                        if tdElt.get("class") == "tableh": #header
                            type = {"時刻": "time",
                                    "コード": "code",
                                    "会社名": "companyName",
                                    "表題": "title",
                                    "XBRL": "zipUrl",
                                    "上場取引所": "stockExchange",
                                    "更新履歴": "changeLog"
                                    }.get(text, None)
                            if type:
                                cols[col] = type
                                cols[type] = col
                        elif col == cols["title"]:
                            rowData["title"] = text
                            rowData["pdfUrl"] = descendantAttr(tdElt, "a", "href")
                        elif col == cols["zipUrl"]:
                            rowData["zipUrl"] = descendantAttr(tdElt, "a", "href")
                        elif col in cols: # body
                            rowData[cols[col]] = text
                        col += int(tdElt.get("colspan", 1))
                    if rowData:
                        # Combine the page date with the row's HH:MM time.
                        time = rowData.get("time", "")
                        if timePattern.match(time):
                            g = timePattern.match(time).groups()
                            dateTime = datetime.datetime(date.year, date.month, date.day,
                                                         int(g[0]), int(g[1]))
                        else:
                            dateTime = datetime.datetime.now()
                        filingCode = rowData.get("code")
                        companyName = rowData.get("companyName")
                        stockExchange = rowData.get("stockExchange")
                        title = rowData.get("title")
                        pdfUrl = rowData.get("pdfUrl")
                        if pdfUrl:
                            pdfUrl = urlDir + "/" + pdfUrl
                        zipUrl = rowData.get("zipUrl")
                        if zipUrl:
                            zipUrl = urlDir + "/" + zipUrl
                        changeLog = rowData.get("changeLog")
                        # find instance doc in file
                        # Peek inside the zip for instance documents; one
                        # TDnetItem is created per instance found.
                        instanceUrls = []
                        if zipUrl:
                            try:
                                normalizedUri = modelXbrl.modelManager.cntlr.webCache.normalizeUrl(zipUrl)
                                filepath = modelXbrl.modelManager.cntlr.webCache.getfilename(normalizedUri)
                                filesource = FileSource.FileSource(filepath)
                                dir = filesource.dir
                                filesource.close()
                                if dir:
                                    for file in dir:
                                        if "ixbrl" in file or file.endswith(".xbrl") or "instance" in file:
                                            instanceUrls.append(zipUrl + "/" + file)
                            except:
                                continue # forget this filing
                        for instanceUrl in instanceUrls:
                            rssObject.rssItems.append(
                                TDnetItem(modelXbrl, date, dateTime, filingCode, companyName,
                                          title, pdfUrl, instanceUrl, stockExchange))
        # next screen if continuation
        # When a "next screen" button exists, rebind filepath to the next
        # page and loop again; otherwise the while loop terminates.
        hasMoreSections = False
        for elt in tdInfoDoc.iter(tag="input"):
            if elt.value == "次画面": # next screen button
                nextLocation = elt.get("onclick")
                if nextLocation and nextLocationPattern.match(nextLocation):
                    hasMoreSections = True
                    nextUrl = urlDir + "/" + nextLocationPattern.match(nextLocation).groups()[0]
                    mappedUri = modelXbrl.modelManager.cntlr.webCache.normalizeUrl(nextUrl)
                    filepath = modelXbrl.modelManager.cntlr.webCache.getfilename(mappedUri)
    return rssObject
# Arelle plug-in registration: the 'ModelDocument.PullLoader' mount point
# wires tdNetLoader into Arelle's document-loading pipeline.
__pluginInfo__ = {
    'name': 'TDnet Loader',
    'version': '0.9',
    'description': "This plug-in loads Tokyo Stock Exchange Timely Disclosure Network XBRL documents. ",
    'license': 'Apache-2',
    'author': 'Mark V Systems Limited',
    'copyright': '(c) Copyright 2014 Mark V Systems Limited, All rights reserved.',
    # classes of mount points (required)
    # take out for now: 'CntlrCmdLine.Options': streamingOptionsExtender,
    'ModelDocument.PullLoader': tdNetLoader,
}
|
apache-2.0
|
jorik041/paramiko
|
tests/test_sftp_big.py
|
25
|
13712
|
# Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
some unit tests to make sure sftp works well with large files.
a real actual sftp server is contacted, and a new folder is created there to
do test file operations in (so no existing files will be harmed).
"""
import os
import random
import struct
import sys
import time
import unittest
from paramiko.common import o660
from tests.test_sftp import get_sftp
# Remote working directory for the tests; setUp() rotates the trailing
# three digits until it finds a directory name it can create.
FOLDER = os.environ.get('TEST_FOLDER', 'temp-testing000')
class BigSFTPTest (unittest.TestCase):
    """Stress tests for paramiko's SFTP client against a real server.

    Every test works inside the scratch folder created by setUp() and
    removes whatever files it created, so the server is left clean.
    Most tests also print rough timing info to stderr.
    """

    def setUp(self):
        # Rotate the numeric suffix of FOLDER until mkdir succeeds, so
        # concurrent/stale runs do not collide on the server.
        global FOLDER
        sftp = get_sftp()
        for i in range(1000):
            FOLDER = FOLDER[:-3] + '%03d' % i
            try:
                sftp.mkdir(FOLDER)
                break
            except (IOError, OSError):
                pass

    def tearDown(self):
        # The folder must be empty by now; each test cleans up after itself.
        sftp = get_sftp()
        sftp.rmdir(FOLDER)

    def test_1_lots_of_files(self):
        """
        create a bunch of files over the same session.
        """
        sftp = get_sftp()
        numfiles = 100
        try:
            for i in range(numfiles):
                # buffering=1 -> line-buffered writes
                with sftp.open('%s/file%d.txt' % (FOLDER, i), 'w', 1) as f:
                    f.write('this is file #%d.\n' % i)
                sftp.chmod('%s/file%d.txt' % (FOLDER, i), o660)

            # now make sure every file is there, by creating a list of filenmes
            # and reading them in random order.
            numlist = list(range(numfiles))
            while len(numlist) > 0:
                r = numlist[random.randint(0, len(numlist) - 1)]
                with sftp.open('%s/file%d.txt' % (FOLDER, r)) as f:
                    self.assertEqual(f.readline(), 'this is file #%d.\n' % r)
                numlist.remove(r)
        finally:
            # NOTE(review): bare except is deliberate best-effort cleanup of
            # files that may never have been created; consider narrowing to
            # (IOError, OSError).
            for i in range(numfiles):
                try:
                    sftp.remove('%s/file%d.txt' % (FOLDER, i))
                except:
                    pass

    def test_2_big_file(self):
        """
        write a 1MB file with no buffering.
        """
        sftp = get_sftp()
        kblob = (1024 * b'x')
        start = time.time()
        try:
            with sftp.open('%s/hongry.txt' % FOLDER, 'w') as f:
                for n in range(1024):
                    f.write(kblob)
                    if n % 128 == 0:
                        sys.stderr.write('.')
            sys.stderr.write(' ')

            self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)
            end = time.time()
            sys.stderr.write('%ds ' % round(end - start))

            # read it back and verify every 1 KiB chunk round-tripped
            start = time.time()
            with sftp.open('%s/hongry.txt' % FOLDER, 'r') as f:
                for n in range(1024):
                    data = f.read(1024)
                    self.assertEqual(data, kblob)

            end = time.time()
            sys.stderr.write('%ds ' % round(end - start))
        finally:
            sftp.remove('%s/hongry.txt' % FOLDER)

    def test_3_big_file_pipelined(self):
        """
        write a 1MB file, with no linefeeds, using pipelining.
        """
        sftp = get_sftp()
        # 1 KiB pattern of all 512 big-endian 16-bit values
        kblob = bytes().join([struct.pack('>H', n) for n in range(512)])
        start = time.time()
        try:
            with sftp.open('%s/hongry.txt' % FOLDER, 'wb') as f:
                f.set_pipelined(True)
                for n in range(1024):
                    f.write(kblob)
                    if n % 128 == 0:
                        sys.stderr.write('.')
            sys.stderr.write(' ')

            self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)
            end = time.time()
            sys.stderr.write('%ds ' % round(end - start))

            start = time.time()
            with sftp.open('%s/hongry.txt' % FOLDER, 'rb') as f:
                f.prefetch()
                # read on odd boundaries to make sure the bytes aren't getting scrambled
                n = 0
                k2blob = kblob + kblob
                chunk = 629
                size = 1024 * 1024
                while n < size:
                    if n + chunk > size:
                        chunk = size - n
                    data = f.read(chunk)
                    # the expected bytes for any offset can be sliced out of
                    # two consecutive copies of the repeating 1 KiB pattern
                    offset = n % 1024
                    self.assertEqual(data, k2blob[offset:offset + chunk])
                    n += chunk

            end = time.time()
            sys.stderr.write('%ds ' % round(end - start))
        finally:
            sftp.remove('%s/hongry.txt' % FOLDER)

    def test_4_prefetch_seek(self):
        # seek to random offsets inside a prefetched file and verify reads
        sftp = get_sftp()
        kblob = bytes().join([struct.pack('>H', n) for n in range(512)])
        try:
            with sftp.open('%s/hongry.txt' % FOLDER, 'wb') as f:
                f.set_pipelined(True)
                for n in range(1024):
                    f.write(kblob)
                    if n % 128 == 0:
                        sys.stderr.write('.')
            sys.stderr.write(' ')

            self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)

            start = time.time()
            k2blob = kblob + kblob
            chunk = 793
            for i in range(10):
                with sftp.open('%s/hongry.txt' % FOLDER, 'rb') as f:
                    f.prefetch()
                    base_offset = (512 * 1024) + 17 * random.randint(1000, 2000)
                    offsets = [base_offset + j * chunk for j in range(100)]
                    # randomly seek around and read them out
                    for j in range(100):
                        offset = offsets[random.randint(0, len(offsets) - 1)]
                        offsets.remove(offset)
                        f.seek(offset)
                        data = f.read(chunk)
                        n_offset = offset % 1024
                        self.assertEqual(data, k2blob[n_offset:n_offset + chunk])
                        offset += chunk
            end = time.time()
            sys.stderr.write('%ds ' % round(end - start))
        finally:
            sftp.remove('%s/hongry.txt' % FOLDER)

    def test_5_readv_seek(self):
        # same as test_4 but batches the random reads through readv()
        sftp = get_sftp()
        kblob = bytes().join([struct.pack('>H', n) for n in range(512)])
        try:
            with sftp.open('%s/hongry.txt' % FOLDER, 'wb') as f:
                f.set_pipelined(True)
                for n in range(1024):
                    f.write(kblob)
                    if n % 128 == 0:
                        sys.stderr.write('.')
            sys.stderr.write(' ')

            self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)

            start = time.time()
            k2blob = kblob + kblob
            chunk = 793
            for i in range(10):
                with sftp.open('%s/hongry.txt' % FOLDER, 'rb') as f:
                    base_offset = (512 * 1024) + 17 * random.randint(1000, 2000)
                    # make a bunch of offsets and put them in random order
                    offsets = [base_offset + j * chunk for j in range(100)]
                    readv_list = []
                    for j in range(100):
                        o = offsets[random.randint(0, len(offsets) - 1)]
                        offsets.remove(o)
                        readv_list.append((o, chunk))
                    # readv yields chunks in request order
                    ret = f.readv(readv_list)
                    for i in range(len(readv_list)):
                        offset = readv_list[i][0]
                        n_offset = offset % 1024
                        self.assertEqual(next(ret), k2blob[n_offset:n_offset + chunk])
            end = time.time()
            sys.stderr.write('%ds ' % round(end - start))
        finally:
            sftp.remove('%s/hongry.txt' % FOLDER)

    def test_6_lots_of_prefetching(self):
        """
        prefetch a 1MB file a bunch of times, discarding the file object
        without using it, to verify that paramiko doesn't get confused.
        """
        sftp = get_sftp()
        kblob = (1024 * b'x')
        try:
            with sftp.open('%s/hongry.txt' % FOLDER, 'w') as f:
                f.set_pipelined(True)
                for n in range(1024):
                    f.write(kblob)
                    if n % 128 == 0:
                        sys.stderr.write('.')
            sys.stderr.write(' ')

            self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)
            # open + prefetch + discard, repeatedly
            for i in range(10):
                with sftp.open('%s/hongry.txt' % FOLDER, 'r') as f:
                    f.prefetch()
            # the final handle is actually read to completion
            with sftp.open('%s/hongry.txt' % FOLDER, 'r') as f:
                f.prefetch()
                for n in range(1024):
                    data = f.read(1024)
                    self.assertEqual(data, kblob)
                    if n % 128 == 0:
                        sys.stderr.write('.')
            sys.stderr.write(' ')
        finally:
            sftp.remove('%s/hongry.txt' % FOLDER)

    def test_7_prefetch_readv(self):
        """
        verify that prefetch and readv don't conflict with each other.
        """
        sftp = get_sftp()
        kblob = bytes().join([struct.pack('>H', n) for n in range(512)])
        try:
            with sftp.open('%s/hongry.txt' % FOLDER, 'wb') as f:
                f.set_pipelined(True)
                for n in range(1024):
                    f.write(kblob)
                    if n % 128 == 0:
                        sys.stderr.write('.')
            sys.stderr.write(' ')

            self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)

            with sftp.open('%s/hongry.txt' % FOLDER, 'rb') as f:
                f.prefetch()
                # a normal read first, then readv on the prefetched handle
                data = f.read(1024)
                self.assertEqual(data, kblob)

                chunk_size = 793
                base_offset = 512 * 1024
                k2blob = kblob + kblob
                chunks = [(base_offset + (chunk_size * i), chunk_size) for i in range(20)]
                for data in f.readv(chunks):
                    offset = base_offset % 1024
                    self.assertEqual(chunk_size, len(data))
                    self.assertEqual(k2blob[offset:offset + chunk_size], data)
                    base_offset += chunk_size

            sys.stderr.write(' ')
        finally:
            sftp.remove('%s/hongry.txt' % FOLDER)

    def test_8_large_readv(self):
        """
        verify that a very large readv is broken up correctly and still
        returned as a single blob.
        """
        sftp = get_sftp()
        kblob = bytes().join([struct.pack('>H', n) for n in range(512)])
        try:
            with sftp.open('%s/hongry.txt' % FOLDER, 'wb') as f:
                f.set_pipelined(True)
                for n in range(1024):
                    f.write(kblob)
                    if n % 128 == 0:
                        sys.stderr.write('.')
            sys.stderr.write(' ')

            self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)

            with sftp.open('%s/hongry.txt' % FOLDER, 'rb') as f:
                # one 128 KiB request must come back as a single chunk
                data = list(f.readv([(23 * 1024, 128 * 1024)]))
                self.assertEqual(1, len(data))
                data = data[0]
                self.assertEqual(128 * 1024, len(data))

            sys.stderr.write(' ')
        finally:
            sftp.remove('%s/hongry.txt' % FOLDER)

    def test_9_big_file_big_buffer(self):
        """
        write a 1MB file, with no linefeeds, and a big buffer.
        """
        sftp = get_sftp()
        mblob = (1024 * 1024 * 'x')
        try:
            # 128 KiB client-side buffer
            with sftp.open('%s/hongry.txt' % FOLDER, 'w', 128 * 1024) as f:
                f.write(mblob)

            self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)
        finally:
            sftp.remove('%s/hongry.txt' % FOLDER)

    def test_A_big_file_renegotiate(self):
        """
        write a 1MB file, forcing key renegotiation in the middle.
        """
        sftp = get_sftp()
        t = sftp.sock.get_transport()
        # force a rekey halfway through the 1 MiB transfer
        t.packetizer.REKEY_BYTES = 512 * 1024
        k32blob = (32 * 1024 * 'x')
        try:
            with sftp.open('%s/hongry.txt' % FOLDER, 'w', 128 * 1024) as f:
                for i in range(32):
                    f.write(k32blob)

            self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)
            # a differing session hash proves a renegotiation happened
            self.assertNotEqual(t.H, t.session_id)

            # try to read it too.
            with sftp.open('%s/hongry.txt' % FOLDER, 'r', 128 * 1024) as f:
                f.prefetch()
                total = 0
                while total < 1024 * 1024:
                    total += len(f.read(32 * 1024))
        finally:
            sftp.remove('%s/hongry.txt' % FOLDER)
            # restore the (effectively infinite) default rekey threshold
            t.packetizer.REKEY_BYTES = pow(2, 30)
if __name__ == '__main__':
    # Running this module directly spins up the in-process loopback SFTP
    # server and hands control to unittest.
    from tests.test_sftp import SFTPTest
    SFTPTest.init_loopback()
    from unittest import main
    main()
|
lgpl-2.1
|
yuezhonghua/tornado
|
tornado/test/import_test.py
|
124
|
1531
|
# flake8: noqa
from __future__ import absolute_import, division, print_function, with_statement
from tornado.test.util import unittest
class ImportTest(unittest.TestCase):
    """Smoke test: importing every tornado module must not raise.

    The bodies are plain imports; their only purpose is to catch syntax
    errors and import-time failures in modules without dedicated tests.
    """

    def test_import_everything(self):
        # Some of our modules are not otherwise tested.  Import them
        # all (unless they have external dependencies) here to at
        # least ensure that there are no syntax errors.
        import tornado.auth
        import tornado.autoreload
        import tornado.concurrent
        # import tornado.curl_httpclient  # depends on pycurl
        import tornado.escape
        import tornado.gen
        import tornado.http1connection
        import tornado.httpclient
        import tornado.httpserver
        import tornado.httputil
        import tornado.ioloop
        import tornado.iostream
        import tornado.locale
        import tornado.log
        import tornado.netutil
        import tornado.options
        import tornado.process
        import tornado.simple_httpclient
        import tornado.stack_context
        import tornado.tcpserver
        import tornado.template
        import tornado.testing
        import tornado.util
        import tornado.web
        import tornado.websocket
        import tornado.wsgi

    # for modules with dependencies, if those dependencies can be loaded,
    # load them too.
    def test_import_pycurl(self):
        try:
            import pycurl
        except ImportError:
            # pycurl is optional; silently skip when absent
            pass
        else:
            import tornado.curl_httpclient
|
apache-2.0
|
jluissandovalm/smd_lammps
|
tools/moltemplate/src/nbody_alternate_symmetry/nbody_Impropers_Jcenter.py
|
19
|
3134
|
from nbody_graph_search import Ugraph
# To find 4-body "improper" interactions,
# (by default, most of the time), we would use this subgraph:
# 0
# * 1st bond connects atoms 1 and 0
# | => 2nd bond connects atoms 1 and 2
# _.*._ 3rd bond connects atoms 1 and 3
# *' 1 `*
# 2 3
#
# In OPLS, the central atom is the second atom ("1").
# This differs from other force-fields.
# We take this detail into account in the line below:
# Search subgraph for improper interactions: the central atom is vertex 1
# (OPLS convention, per the diagram above), bonded to vertices 0, 2 and 3.
bond_pattern = Ugraph([(1,0), (1,2), (1,3)])
# As with other force-fields, the improper-angle is the angle between the planes
# defined by the first three atoms (0,1,2) and last three atoms (1,2,3).
# (This is implemented in LAMMPS using an improper_style which requires
# that the atoms in the interaction will be listed in this order: 0,1,2,3.)
def canonical_order(match):
    """Return a symmetry-canonical ordering of a 4-atom improper match.

    ``match`` is a pair ``(atom_ids, bond_ids)`` holding the 4 atom IDs of
    the interaction (central atom at position 1) and the IDs of the 3
    bonds connecting them.

    The improper angle is invariant (up to a sign, which the energy
    functions ignore) under swapping the two OUTER atoms (positions 0 and
    3), so matches differing only by that swap describe the same
    interaction.  To make such duplicates comparable, every match is
    oriented so that the outer atom with the smaller ID comes first; the
    bond list is mirrored to stay consistent with the atom order.  Callers
    compare the canonicalized tuples to avoid emitting redundant
    interactions.
    """
    atom_ids, bond_ids = match
    if atom_ids[0] <= atom_ids[3]:
        # Already canonical: hand the original tuple back unchanged.
        return match
    # Swap the outer atoms and reverse the bond order to match.
    flipped_atoms = (atom_ids[3], atom_ids[1], atom_ids[2], atom_ids[0])
    flipped_bonds = (bond_ids[2], bond_ids[1], bond_ids[0])
    return (flipped_atoms, flipped_bonds)
|
gpl-2.0
|
albertomurillo/ansible
|
test/units/modules/network/f5/test_bigip_gtm_monitor_firepass.py
|
38
|
4762
|
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_gtm_monitor_firepass import ApiParameters
from library.modules.bigip_gtm_monitor_firepass import ModuleParameters
from library.modules.bigip_gtm_monitor_firepass import ModuleManager
from library.modules.bigip_gtm_monitor_firepass import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_gtm_monitor_firepass import ApiParameters
from ansible.modules.network.f5.bigip_gtm_monitor_firepass import ModuleParameters
from ansible.modules.network.f5.bigip_gtm_monitor_firepass import ModuleManager
from ansible.modules.network.f5.bigip_gtm_monitor_firepass import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
# Fixture files live next to this test module; parsed contents are memoized
# in fixture_data (keyed by absolute path) by load_fixture() below.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load (and memoize) a fixture file by name.

    The file is looked up under ``fixture_path`` and read at most once per
    test run; results are cached in ``fixture_data`` keyed by full path.
    Content that parses as JSON is returned as the parsed object,
    otherwise the raw text is returned.
    """
    path = os.path.join(fixture_path, name)
    if path not in fixture_data:
        with open(path) as handle:
            raw = handle.read()
        try:
            parsed = json.loads(raw)
        except Exception:
            # Not JSON -- keep the raw text.
            parsed = raw
        fixture_data[path] = parsed
    return fixture_data[path]
class TestParameters(unittest.TestCase):
    """Check that module-side and API-side parameter adapters normalize values."""

    def test_module_parameters(self):
        # Playbook values arrive as strings; the adapter must coerce the
        # numeric fields and derive destination from ip + port.
        raw = {
            'name': 'foo',
            'parent': '/Common/my-http',
            'max_load_average': '60',
            'concurrency_limit': '70',
            'ip': '1.1.1.1',
            'port': '80',
            'interval': '10',
            'timeout': '20',
            'ignore_down_response': True,
            'probe_timeout': '30',
        }
        params = ModuleParameters(params=raw)
        expected = {
            'name': 'foo',
            'parent': '/Common/my-http',
            'max_load_average': 60,
            'concurrency_limit': 70,
            'destination': '1.1.1.1:80',
            'ip': '1.1.1.1',
            'port': 80,
            'interval': 10,
            'timeout': 20,
            'probe_timeout': 30,
        }
        for attr, want in expected.items():
            assert getattr(params, attr) == want
        assert params.ignore_down_response is True

    def test_api_parameters(self):
        # The fixture mirrors what the BIG-IP REST API returns.
        params = ApiParameters(params=load_fixture('load_gtm_monitor_firepass_1.json'))
        expected = {
            'name': 'foo',
            'parent': '/Common/firepass_gtm',
            'max_load_average': 12,
            'concurrency_limit': 95,
            'destination': '1.1.1.1:80',
            'ip': '1.1.1.1',
            'port': 80,
            'interval': 30,
            'timeout': 90,
            'probe_timeout': 5,
        }
        for attr, want in expected.items():
            assert getattr(params, attr) == want
        assert params.ignore_down_response is True
class TestManager(unittest.TestCase):
    """Exercise ModuleManager.exec_module with device calls mocked out."""

    def setUp(self):
        self.spec = ArgumentSpec()
        # Patch module_provisioned at whichever import path is in use;
        # patch.start() raises when the 'library' path cannot be imported,
        # which drives the fallback to the in-tree Ansible path.
        try:
            self.p1 = patch('library.modules.bigip_gtm_monitor_firepass.module_provisioned')
            self.m1 = self.p1.start()
        except Exception:
            self.p1 = patch('ansible.modules.network.f5.bigip_gtm_monitor_firepass.module_provisioned')
            self.m1 = self.p1.start()
        self.m1.return_value = True

    def tearDown(self):
        self.p1.stop()

    def test_create_monitor(self, *args):
        set_module_args({
            'name': 'foo',
            'ip': '10.10.10.10',
            'port': 80,
            'interval': 20,
            'timeout': 30,
            'provider': {
                'server': 'localhost',
                'password': 'password',
                'user': 'admin',
            },
        })

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Override methods in the specific type of manager.  exists() is
        # False before creation and True afterwards.
        manager = ModuleManager(module=module)
        manager.exists = Mock(side_effect=[False, True])
        manager.create_on_device = Mock(return_value=True)
        manager.module_provisioned = Mock(return_value=True)

        results = manager.exec_module()

        assert results['changed'] is True
|
gpl-3.0
|
bitbandi/all-hash-python
|
algorithm/x13-hash/setup.py
|
1
|
1280
|
from setuptools import setup, Extension

# C extension: the x13 chained hash, built from the module glue plus the
# individual sphlib round implementations it chains together.
x13_hash_module = Extension('x13_hash',
                            sources = ['x13module.c',
                                       'x13.c',
                                       '../../sph/blake.c',
                                       '../../sph/bmw.c',
                                       '../../sph/groestl.c',
                                       '../../sph/jh.c',
                                       '../../sph/keccak.c',
                                       '../../sph/skein.c',
                                       '../../sph/cubehash.c',
                                       '../../sph/echo.c',
                                       '../../sph/luffa.c',
                                       '../../sph/simd.c',
                                       '../../sph/hamsi.c',
                                       '../../sph/hamsi_helper.c',
                                       '../../sph/fugue.c',
                                       '../../sph/shavite.c'],
                            include_dirs=['.', '../../sph'])

setup (name = 'x13_hash',
       version = '1.0',
       description = 'Bindings for proof of work used by x13 hash',
       test_suite = 'test',
       ext_modules = [x13_hash_module])
|
mit
|
ssssam/nightbus
|
nightbus/tasks.py
|
1
|
12497
|
# Copyright 2017 Codethink Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Night Bus: Simple SSH-based build automation'''
import gevent
import yaml
import collections
import itertools
import logging
import os
import time
import nightbus
from nightbus.utils import ensure_list
DEFAULT_SHELL = '/bin/bash -c'
class Task():
    '''A single task that we can run on one or more hosts.'''

    def __init__(self, attrs, name=None, defaults=None, parameters=None):
        '''Build a task from its task-file attributes.

        attrs: the task's entry from the task file (needs 'commands';
            may have 'name', 'include', 'shell').
        name: overrides attrs['name'] (used for parameterized variants).
        defaults: file-level defaults ('include', 'prologue', 'shell').
        parameters: mapping of shell variable name -> value to prepend.
        '''
        defaults = defaults or {}
        self.name = name or attrs['name']
        include_files = (ensure_list(defaults.get('include')) +
                         ensure_list(attrs.get('include')))
        self.script = self._script(attrs['commands'],
                                   prologue=defaults.get('prologue'),
                                   includes=include_files,
                                   parameters=parameters)
        # This gets passed straight to ParallelSSHClient.run_command()
        # so it's no problem for its value to be `None`.
        self.shell = attrs.get('shell', defaults.get('shell', DEFAULT_SHELL))

    def _script(self, commands, prologue=None, includes=None, parameters=None):
        '''Assemble the shell script for this task.

        Order: parameter assignments, prologue, include-file contents,
        then the task's own commands.
        '''
        pieces = []
        if parameters:
            pieces.extend('%s=%s' % (key, value)
                          for key, value in parameters.items())
        if prologue:
            pieces.append(prologue)
        for path in includes:
            with open(path) as handle:
                pieces.append(handle.read())
        pieces.append(commands)
        return '\n'.join(pieces)
class TaskList(list):
    '''Contains a user-specified list of descriptions of tasks to run.

    Accepts either a bare YAML list of task entries, or a mapping with
    'tasks' (the list) and optional 'defaults' applied to every task.
    '''
    def __init__(self, text):
        contents = yaml.safe_load(text)

        if isinstance(contents, list):
            defaults = None
            entry_list = contents
        elif isinstance(contents, dict):
            defaults = contents.get('defaults', {})
            entry_list = contents['tasks']
        else:
            raise RuntimeError("Tasks file is invalid.")

        for entry in entry_list:
            self.extend(self._create_tasks(entry, defaults=defaults))

    def _create_tasks(self, entry, defaults=None):
        '''Create one or more task objects for a given task list entry.

        There can be more than one Task object for an entry due to the
        'parameters' option.
        '''
        if 'parameters' in entry:
            tasks = []
            parameters = entry['parameters']

            # Create an iterable for each parameter containing (name, value)
            # pairs, e.g. (('param', 1), ('param', 2), ('param', 3)).
            iterables = []
            for param_name in sorted(parameters.keys()):
                param_values = parameters[param_name]
                param_pairs = list(itertools.product([param_name], param_values))
                iterables.append(param_pairs)

            # Every combination of parameter values, one tuple of
            # (name, value) pairs per combination.  product() handles any
            # number of axes, including one (1-tuples) and zero (a single
            # empty combination); the previous special-casing of
            # len(iterables) == 1 raised IndexError when 'parameters' was
            # an empty mapping.
            combos = list(itertools.product(*iterables))

            # The value of a parameter can be given literally, or given as a
            # dict with 'repr' and 'value' keys. The value used in the task may
            # not be useful when used in the name of the task, it might be an
            # empty string or contain unprintable characters, so you can set
            # the `repr` in these cases to something else.
            def param_repr(value_entry):
                if isinstance(value_entry, dict):
                    return str(value_entry.get('repr', value_entry['value']))
                else:
                    return str(value_entry)

            def param_value(value_entry):
                if isinstance(value_entry, dict):
                    return str(value_entry['value'])
                else:
                    return str(value_entry)

            # Finally generate the Task object for each parameter combination.
            task_base_name = entry['name']
            for combo in combos:
                this_parameters = {pair[0]: param_value(pair[1]) for pair in combo}
                this_parameter_reprs = [param_repr(pair[1]) for pair in combo]
                this_name = '.'.join([task_base_name] + this_parameter_reprs)
                tasks.append(Task(entry, name=this_name, defaults=defaults,
                                  parameters=this_parameters))
        else:
            tasks = [Task(entry, defaults=defaults)]
        return tasks

    def names(self):
        '''Return the (possibly parameter-expanded) names of all tasks.'''
        return [task.name for task in self]
class TaskResult():
    '''Outcome of running a single task on a single host.

    Attributes:
        name: run name of the task.
        host: host the task ran on.
        duration: wall-clock seconds taken, or None if unknown.
        exit_code: process exit status, or None if unknown.
        message_list: messages the task emitted with the '##nightbus '
            prefix (collected by run_task), or None.
    '''
    def __init__(self, name, host, duration=None, exit_code=None,
                 message_list=None):
        self.name = name
        self.host = host
        self.duration = duration
        self.exit_code = exit_code
        self.message_list = message_list
def run_task(client, hosts, task, log_directory, run_name=None, force=False):
    '''Run a single task on all the specified hosts.

    The task script is prefixed with shell variable assignments
    (task_name, and force=yes when requested) so scripts can inspect how
    they were invoked.  Per-host output is streamed into
    <log_directory>/<run_name>.<host>.log, and lines starting with
    '##nightbus ' are collected as messages.  Returns an OrderedDict of
    host -> TaskResult, sorted by host name.
    '''
    name = task.name
    run_name = run_name or name

    logging.info("%s: Starting task run", run_name)
    start_time = time.time()

    # Run the commands asynchronously on all hosts.
    cmd = 'task_name=%s\n' % name
    if force:
        cmd += 'force=yes\n'
    cmd += task.script
    shell = task.shell
    output = client.run_command(cmd, shell=shell, stop_on_errors=True)

    # ParallelSSH doesn't give us a way to run a callback when the host
    # produces output or the command completes. In order to stream the
    # output into separate log files, we run a Greenlet to monitor each
    # host.
    def watch_output(output, host):
        log_filename = safe_filename(run_name + '.' + host + '.log')
        log = os.path.join(log_directory, log_filename)
        messages = []
        with open(log, 'wb') as f:
            for line in output[host].stdout:
                # unicode-escape keeps non-printable output one log line
                f.write(line.encode('unicode-escape'))
                f.write(b'\n')
                if line.startswith('##nightbus '):
                    messages.append(line[len('##nightbus '):])
        duration = time.time() - start_time
        exit_code = output[host].exit_code
        return nightbus.tasks.TaskResult(
            run_name, host, duration=duration, exit_code=exit_code, message_list=messages)
    watchers = [gevent.spawn(watch_output, output, host) for host in hosts]
    gevent.joinall(watchers, raise_error=True)

    logging.info("%s: Started all jobs, waiting for them to finish", run_name)
    client.join(output)
    logging.info("%s: All jobs finished", run_name)

    # Collect each greenlet's TaskResult, ordered by host name.
    results = collections.OrderedDict()
    for result in sorted((watcher.value for watcher in watchers),
                         key=lambda result: result.host):
        results[result.host] = result
    return results
def safe_filename(filename):
    '''Make *filename* usable as a single path component.

    Only '/' is escaped today; switch to re.sub() if more characters
    ever need handling.
    '''
    return '_'.join(filename.split('/'))
def run_all_tasks(client, hosts, tasks, log_directory, force=False):
    '''Loop through each task sequentially.

    We only want to run one task on a host at a time, as we assume it'll
    maximize at least one of available CPU, RAM and IO. However, if fast hosts
    could move onto the next task before slow hosts have finished with the
    previous one it might be nice.

    Hosts where a task fails are dropped from subsequent tasks.  Returns
    an OrderedDict of run-name -> {host: TaskResult}; on Ctrl-C the
    results gathered so far are returned rather than lost.
    '''
    all_results = collections.OrderedDict()

    # Tasks are numbered ("1.name", "2.name", ...) so report entries and
    # log file names sort in execution order.
    number = 1
    working_hosts = list(hosts)
    for task in tasks:
        name = '%i.%s' % (number, task.name)
        try:
            result_dict = run_task(
                client, working_hosts, task, log_directory=log_directory,
                run_name=name, force=force)
            all_results[name] = result_dict

            failed_hosts = [t.host for t in result_dict.values()
                            if t.exit_code != 0]
            if failed_hosts:
                msg = ("Task %s failed on: %s. No more tasks will run on "
                       "failed hosts." % (name, ', '.join(failed_hosts)))
                logging.warning(msg)
                for host in failed_hosts:
                    working_hosts.remove(host)
                if len(working_hosts) == 0:
                    logging.warning("All hosts have failed, exiting.")
                    break

            number += 1
        except KeyboardInterrupt:
            # If any tasks finished then we should write a report, even if later
            # tasks got interrupted. Thus we must KeyboardInterrupt here so
            # that previous results are returned.
            logging.info("Received KeyboardInterrupt")
            break
    return all_results
def duration_as_string(seconds):
    '''Format a duration given in seconds as "H:MM:SS" (hours unpadded).'''
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return "%d:%02d:%02d" % (hours, minutes, secs)
def filter_messages_for_task(task_results):
    '''Separate out messages which occured on all hosts.

    Returns a tuple of (global_messages, host_messages):

      global_messages: messages which appeared in the output of every host
      host_messages: a dictionary per host of messages that appeared on that
        host but didn't appear on every host.
    '''
    host_list = list(task_results.keys())
    first_host = host_list[0]
    if len(host_list) == 1:
        # Single host: everything is trivially "global".
        message_list = task_results[first_host].message_list
        return message_list, {first_host: message_list}
    else:
        other_hosts = host_list[1:]

        # Per-host queues of messages not yet classified, consumed in order.
        unprocessed_messages = {host: collections.deque(result.message_list)
                                for host, result in task_results.items()}
        global_messages = []
        host_messages = {host:[] for host in host_list}

        # This algorithm isn't smart and will not scale well to lots of
        # messages.
        while unprocessed_messages[first_host]:
            # Take the first message, and search for it in all the other
            # message streams.
            message = unprocessed_messages[first_host].popleft()

            is_global = True
            for host in other_hosts:
                # for/else: the else runs only when the message was not found.
                for host_message in unprocessed_messages[host]:
                    if message == host_message:
                        break
                else:
                    is_global = False
                if not is_global:
                    break

            if is_global:
                global_messages.append(message)

                # Now remove this message from the other hosts' message lists,
                # plus anything we find before that (which we take to be host
                # specific messages).
                for host in other_hosts:
                    while len(unprocessed_messages[host]) > 0:
                        host_message = unprocessed_messages[host].popleft()
                        if host_message == message:
                            break
                        host_messages[host].append(host_message)
            else:
                host_messages[first_host].append(message)

        # Whatever is left in the other hosts' queues never matched a
        # global message, so it is host-specific too.
        for host in other_hosts:
            host_messages[host] += unprocessed_messages[host]

        return global_messages, host_messages
def write_report(f, all_results):
    '''Write a human-readable report of task results and durations to *f*.

    For each task run: its name, any messages every host produced, then a
    line per host with status and duration followed by that host's own
    messages.  A blank line separates consecutive tasks.
    '''
    for index, (task_name, task_results) in enumerate(all_results.items()):
        if index > 0:
            f.write("\n")
        f.write("%s:\n" % task_name)

        global_messages, host_messages = filter_messages_for_task(task_results)

        for message in global_messages:
            f.write(" %s\n" % message)

        for host, result in task_results.items():
            outcome = "succeeded" if result.exit_code == 0 else "failed"
            f.write(" - %s: %s in %s\n"
                    % (host, outcome, duration_as_string(result.duration)))
            for message in host_messages[host]:
                f.write(" %s\n" % message)
|
apache-2.0
|
Nolan330/CS292
|
pymavlink/examples/magtest.py
|
11
|
3808
|
#!/usr/bin/env python
'''
rotate APMs on bench to test magnetometers
'''
import sys, os, time
from math import radians
from pymavlink import mavutil
from optparse import OptionParser
# Command-line options: two serial/UDP mavlink endpoints, one per APM.
# NOTE(review): optparse is deprecated in favour of argparse, but the rest
# of the pymavlink examples use it too.
parser = OptionParser("rotate.py [options]")
parser.add_option("--device1", dest="device1", default=None, help="mavlink device1")
parser.add_option("--device2", dest="device2", default=None, help="mavlink device2")
parser.add_option("--baudrate", dest="baudrate", type='int',
                  help="master port baud rate", default=115200)
(opts, args) = parser.parse_args()

if opts.device1 is None or opts.device2 is None:
    print("You must specify a mavlink device")
    sys.exit(1)
def set_attitude(rc3, rc4):
    '''Send the same RC override for channels 3 and 4 to both autopilots.

    Channels are 0-indexed in the values list; the other six channels are
    set to 65535 (presumably "leave unchanged" -- confirm against the
    MAVLink RC_CHANNELS_OVERRIDE docs).
    '''
    global mav1, mav2
    override = [65535] * 8
    override[2] = rc3
    override[3] = rc4
    for link in (mav1, mav2):
        link.mav.rc_channels_override_send(
            link.target_system, link.target_component, *override)
# create a mavlink instance
mav1 = mavutil.mavlink_connection(opts.device1, baud=opts.baudrate)

# create a mavlink instance
mav2 = mavutil.mavlink_connection(opts.device2, baud=opts.baudrate)

print("Waiting for HEARTBEAT")
mav1.wait_heartbeat()
mav2.wait_heartbeat()
# BUG FIX: these lines previously printed target_system twice; the second
# %u is the component ID from the heartbeat.
print("Heartbeat from APM (system %u component %u)" % (mav1.target_system, mav1.target_component))
print("Heartbeat from APM (system %u component %u)" % (mav2.target_system, mav2.target_component))

print("Waiting for MANUAL mode")
mav1.recv_match(type='SYS_STATUS', condition='SYS_STATUS.mode==2 and SYS_STATUS.nav_mode==4', blocking=True)
mav2.recv_match(type='SYS_STATUS', condition='SYS_STATUS.mode==2 and SYS_STATUS.nav_mode==4', blocking=True)

# Fix the compass declination on both boards so headings are comparable.
print("Setting declination")
mav1.mav.param_set_send(mav1.target_system, mav1.target_component,
                        'COMPASS_DEC', radians(12.33))
mav2.mav.param_set_send(mav2.target_system, mav2.target_component,
                        'COMPASS_DEC', radians(12.33))


# Park the bench at its starting attitude.
set_attitude(1060, 1160)

# Sweep servo updates at 30 Hz; print status roughly every 3 seconds.
event = mavutil.periodic_event(30)
pevent = mavutil.periodic_event(0.3)

# Sweep limits and state for the rotation loop below.
rc3_min = 1060
rc3_max = 1850
rc4_min = 1080
rc4_max = 1500
rc3 = rc3_min
rc4 = 1160
delta3 = 2
delta4 = 1
use_pitch = 1

MAV_ACTION_CALIBRATE_GYRO = 17
mav1.mav.action_send(mav1.target_system, mav1.target_component, MAV_ACTION_CALIBRATE_GYRO)
mav2.mav.action_send(mav2.target_system, mav2.target_component, MAV_ACTION_CALIBRATE_GYRO)

print("Waiting for gyro calibration")
mav1.recv_match(type='ACTION_ACK')
mav2.recv_match(type='ACTION_ACK')

# Start the magnetometer test from zeroed offsets.
print("Resetting mag offsets")
mav1.mav.set_mag_offsets_send(mav1.target_system, mav1.target_component, 0, 0, 0)
mav2.mav.set_mag_offsets_send(mav2.target_system, mav2.target_component, 0, 0, 0)
def TrueHeading(SERVO_OUTPUT_RAW):
    '''Map servo 3 output linearly onto the bench's true heading range.

    The 172..326 endpoints come from the calibration notes at the bottom
    of this file ("314M 326G" / "160M 172G") -- presumably measured
    headings at the sweep limits; confirm on the bench.
    '''
    fraction = float(SERVO_OUTPUT_RAW.servo3_raw - rc3_min) / (rc3_max - rc3_min)
    return 172 + fraction * (326 - 172)
# Main loop: sweep the bench through yaw (rc3) and pitch (rc4) while
# periodically printing both boards' headings and learned mag offsets.
while True:
    mav1.recv_msg()
    mav2.recv_msg()
    if event.trigger():
        if not use_pitch:
            rc4 = 1160
        set_attitude(rc3, rc4)
        rc3 += delta3
        # Reverse direction at the sweep limits; alternate whether the
        # pitch channel participates on each yaw reversal.
        if rc3 > rc3_max or rc3 < rc3_min:
            delta3 = -delta3
            use_pitch ^= 1
        rc4 += delta4
        if rc4 > rc4_max or rc4 < rc4_min:
            delta4 = -delta4
    if pevent.trigger():
        # BUG FIX: this was a Python 2 `print` statement; the parenthesized
        # single-argument form below behaves identically on Python 2 and is
        # valid Python 3.
        print("hdg1: %3u hdg2: %3u  ofs1: %4u, %4u, %4u  ofs2: %4u, %4u, %4u" % (
            mav1.messages['VFR_HUD'].heading,
            mav2.messages['VFR_HUD'].heading,
            mav1.messages['SENSOR_OFFSETS'].mag_ofs_x,
            mav1.messages['SENSOR_OFFSETS'].mag_ofs_y,
            mav1.messages['SENSOR_OFFSETS'].mag_ofs_z,
            mav2.messages['SENSOR_OFFSETS'].mag_ofs_x,
            mav2.messages['SENSOR_OFFSETS'].mag_ofs_y,
            mav2.messages['SENSOR_OFFSETS'].mag_ofs_z,
            ))
    time.sleep(0.01)

# 314M 326G
# 160M 172G
|
lgpl-3.0
|
kuke/models
|
legacy/ltr/lambda_rank.py
|
4
|
1560
|
"""
LambdaRank is a listwise rank model.
https://papers.nips.cc/paper/2971-learning-to-rank-with-nonsmooth-cost-functions.pdf
"""
import paddle.v2 as paddle
def lambda_rank(input_dim, is_infer=False):
    """Build the LambdaRank network (Paddle v2 layer graph).

    The input data and label for LambdaRank must be sequences.

    Args:
        input_dim: one document's dense feature vector dimension.  The
            format of the dense_vector_sequence is
            [[f, ...], [f, ...], ...] where f is a float or an int.
        is_infer: when True, return the score output layer; otherwise
            return the listwise lambda_cost layer for training.
    """
    data = paddle.layer.data("data",
                             paddle.data_type.dense_vector_sequence(input_dim))

    # Define the hidden layer.
    # Two tanh hidden layers (128 -> 10) feeding a single linear score.
    hd1 = paddle.layer.fc(input=data,
                          size=128,
                          act=paddle.activation.Tanh(),
                          param_attr=paddle.attr.Param(initial_std=0.01))

    hd2 = paddle.layer.fc(input=hd1,
                          size=10,
                          act=paddle.activation.Tanh(),
                          param_attr=paddle.attr.Param(initial_std=0.01))
    output = paddle.layer.fc(input=hd2,
                             size=1,
                             act=paddle.activation.Linear(),
                             param_attr=paddle.attr.Param(initial_std=0.01))

    if not is_infer:
        # Labels are per-document relevance scores, as a sequence.
        label = paddle.layer.data("label",
                                  paddle.data_type.dense_vector_sequence(1))

        cost = paddle.layer.lambda_cost(
            input=output, score=label, NDCG_num=6, max_sort_size=-1)
        return cost
    else:
        return output
|
apache-2.0
|
colinnewell/odoo
|
addons/website_certification/__openerp__.py
|
320
|
1562
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Certified People',
'category': 'Website',
'website': 'https://www.odoo.com/page/website-builder',
'summary': 'Display your network of certified people on your website',
'version': '1.0',
'author': 'OpenERP S.A.',
'depends': ['marketing', 'website'],
'description': """
Display your network of certified people on your website
""",
'data': [
'security/ir.model.access.csv',
'views/website_certification_views.xml',
'views/website_certification_templates.xml',
],
'installable': True,
}
|
agpl-3.0
|
LimpidTech/buildbot-manager
|
setup.py
|
1
|
1555
|
from setuptools import setup, find_packages
import os, sys
# The repository root directory, used so metadata files resolve relative to
# this script no matter what the current working directory is.
project_root = os.path.abspath(os.path.dirname(__file__))

# Third-party distributions this project depends upon at install time.
# (Name kept spelled correctly; only referenced below in install_requires.)
dependencies = [
    'pyramid',
    'buildbot',
    'repoze.tm2>=1.0b1',
    'WebError',
    'pyramid_jinja2',
    'zope.interface',
]

# Files that contain different metadata which is used for setting up the
# project; values are filled in by the loop below (missing files -> '').
meta_files = {
    'README.md': None,
    'CHANGES.md': None,
    'setup/CLASSIFIERS.txt': None,
    'setup/ENTRY_POINTS.ini': None,
}

# Read each of our files and update their data, or set them to a blank string.
for filename in meta_files:
    try:
        # Context manager guarantees the handle is closed even when read()
        # fails partway through (the original leaked the handle there).
        with open(os.path.join(project_root, filename)) as current_file:
            meta_files[filename] = current_file.read()
    except IOError:
        meta_files[filename] = ''

setup(name='bbmanager',
      version='0.0.1-prototype',
      description='Manage your buildbot server with a nice web interface.',
      long_description=meta_files['README.md'],
      classifiers=meta_files['setup/CLASSIFIERS.txt'].split("\n"),
      author='Brandon R. Stoner',
      author_email='monokrome@limpidtech.com',
      url='http://github.com/LimpidTech/buildbot-manager',
      keywords='web pylons pyramid buildbot',
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      install_requires=dependencies,
      test_suite='bbmanager.test',
      entry_points=meta_files['setup/ENTRY_POINTS.ini'],
      paster_plugins=['pyramid'],
      )
|
mit
|
jbarriosc/ACSUFRO
|
Benchmark/components/src/perftestImpl/ErrTestComponent.py
|
4
|
2948
|
#*******************************************************************************
# ALMA - Atacama Large Millimiter Array
# (c) Associated Universities Inc., 2002
# (c) European Southern Observatory, 2002
# Copyright by ESO (in the framework of the ALMA collaboration)
# and Cosylab 2002, All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
#------------------------------------------------------------------------------
'''
TODO:
- All!!!
'''
__version__ = "$Id: ErrTestComponent.py,v 1.2 2004/09/24 21:15:46 dfugate Exp $"
#--REGULAR IMPORTS-------------------------------------------------------------
#--CORBA STUBS-----------------------------------------------------------------
import perftest__POA
#--ACS Imports-----------------------------------------------------------------
from perftestImpl.BasePerfComp import BasePerfComp
import BenchmarkErrTypeImpl
#--GLOBALS---------------------------------------------------------------------
#------------------------------------------------------------------------------
class ErrTestComponent(perftest__POA.ErrTestComponent,
                       BasePerfComp):
    '''
    CORBA servant used to benchmark ACS error/exception creation and
    propagation (implements the perftest ErrTestComponent IDL interface).
    '''
    #--------------------------------------------------------------------------
    def __init__(self):
        '''
        Standard constructor; all setup is delegated to BasePerfComp.
        '''
        BasePerfComp.__init__(self)
    #--------------------------------------------------------------------------
    def getException(self, depth, e):
        '''
        Recursively wrap exception e in `depth` additional
        BenchmarkErr0ExImpl layers and return the outermost exception.
        depth == 0 returns e unchanged.
        '''
        if depth==0:
            return e
        else:
            return self.getException(depth-1,
                                     BenchmarkErrTypeImpl.BenchmarkErr0ExImpl(exception=e, create=1))
    #------------------------------------------------------------------------------
    def testExceptions(self, depth, isError):
        '''
        void testExceptions(in long depth, in boolean isError) raises (ACSErr::ACSException, BenchmarkErrType::BenchmarkErr0Ex);

        Builds an exception chain of the given depth and raises it when
        isError == 1; a negative depth is rejected with a console message.
        '''
        if depth < 0:
            print "Bad depth"
            return
        e = self.getException(depth,
                              BenchmarkErrTypeImpl.BenchmarkErr0ExImpl())
        if isError==1:
            raise e
        return
#------------------------------------------------------------------------------
|
lgpl-2.1
|
pascalchevrel/bedrock
|
lib/l10n_utils/management/commands/process_ftl.py
|
6
|
6296
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import shutil
from subprocess import CalledProcessError
from django.core.management.base import CommandError
from django.conf import settings
from fluent.syntax.parser import FluentParser, ParseError
from lib.l10n_utils.fluent import fluent_l10n, get_metadata, write_metadata
from ._ftl_repo_base import FTLRepoCommand
GIT_COMMIT_EMAIL = 'meao-bots+mozmarrobot@mozilla.com'
GIT_COMMIT_NAME = 'MozMEAO Bot'
class NoisyFluentParser(FluentParser):
    """A parser that will raise exceptions.

    The one from fluent.syntax doesn't raise exceptions, but will
    return instances of fluent.syntax.ast.Junk instead.
    """
    def get_entry_or_junk(self, ps):
        """Allow the ParseError to bubble up"""
        # No try/except here (unlike the base class): a malformed entry
        # raises ParseError instead of being converted into ast.Junk.
        entry = self.get_entry(ps)
        ps.expect_line_end()
        return entry
class Command(FTLRepoCommand):
    """Sync, lint, and publish Fluent (.ftl) files from the l10n team repo.

    Pulls the l10n team's translations, validates each .ftl file with the
    strict parser, copies valid files (plus .toml configs) into the MEAO
    Fluent repo, recalculates per-file locale activation metadata, and
    optionally commits and pushes the result.
    """
    help = 'Processes .ftl files from l10n team for use in bedrock'
    # Strict Fluent parser; created once per run in handle().
    parser = None

    def add_arguments(self, parser):
        super().add_arguments(parser)
        parser.add_argument('--push', action='store_true', dest='push', default=False,
                            help='Push the changes to the MEAO Fluent files repo.')

    def handle(self, *args, **options):
        """Run the full sync pipeline; raise CommandError on lint failures."""
        super().handle(*args, **options)
        self.parser = NoisyFluentParser()
        self.update_fluent_files()
        self.update_l10n_team_files()
        no_errors = self.copy_ftl_files()
        self.set_activation()
        self.copy_configs()
        if options['push']:
            changes = self.commit_changes()
            if changes:
                self.push_changes()

        if not no_errors:
            # Fix: the two implicitly-concatenated fragments previously
            # rendered as "...not updated.See above..." (missing space).
            raise CommandError('Some errors were discovered in some .ftl files and they were not updated. '
                               'See above for details.')

    def config_fluent_repo(self):
        """Set user config so that committing will work"""
        self.meao_repo.git('config', 'user.email', GIT_COMMIT_EMAIL)
        self.meao_repo.git('config', 'user.name', GIT_COMMIT_NAME)

    def commit_changes(self):
        """Stage and commit everything; return True if a commit was made."""
        self.config_fluent_repo()
        self.meao_repo.git('add', '.')
        try:
            self.meao_repo.git('commit', '-m', 'Update files from l10n repo')
        except CalledProcessError:
            # git exits non-zero when there is nothing to commit.
            self.stdout.write('No changes to commit')
            return False

        self.stdout.write('Committed changes to local repo')
        return True

    def push_changes(self):
        """Push HEAD to the remote master branch and report the commit."""
        try:
            self.meao_repo.git('push', self.git_push_url, 'HEAD:master')
        except CalledProcessError:
            raise CommandError(f'There was a problem pushing to {self.meao_repo.remote_url}')

        commit = self.meao_repo.git('rev-parse', '--short', 'HEAD')
        self.stdout.write(f'Pushed {commit} to {self.meao_repo.remote_url}')

    @property
    def git_push_url(self):
        """Authenticated push URL; requires settings.FLUENT_REPO_AUTH."""
        if not settings.FLUENT_REPO_AUTH:
            raise CommandError('Git push authentication not configured')

        return self.meao_repo.remote_url_auth(settings.FLUENT_REPO_AUTH)

    def _copy_file(self, filepath):
        """Copy one file from the l10n repo into the MEAO repo, keeping the
        same relative path and creating parent directories as needed."""
        relative_filepath = filepath.relative_to(self.l10n_repo.path)
        to_filepath = self.meao_repo.path.joinpath(relative_filepath)
        to_filepath.parent.mkdir(parents=True, exist_ok=True)
        shutil.copy2(str(filepath), str(to_filepath))
        # One progress dot per copied file.
        self.stdout.write('.', ending='')
        self.stdout.flush()

    def copy_configs(self):
        """Copy all .toml config files from the l10n repo."""
        count = 0
        for filepath in self.l10n_repo.path.rglob('*.toml'):
            self._copy_file(filepath)
            count += 1

        self.stdout.write(f'\nCopied {count} .toml files')

    def copy_ftl_files(self):
        """Lint and copy .ftl files; return False if any failed to parse."""
        count = 0
        errors = []
        for filepath in self.l10n_repo.path.rglob('*.ftl'):
            if not self.lint_ftl_file(filepath):
                errors.append(filepath.relative_to(self.l10n_repo.path))
                continue

            self._copy_file(filepath)
            count += 1

        self.stdout.write(f'\nCopied {count} .ftl files')
        if errors:
            self.stdout.write('The following files had parse errors and were not copied:')
            for fpath in errors:
                self.stdout.write(f'- {fpath}')
            return False

        return True

    def lint_ftl_file(self, filepath):
        """Return True when the file parses cleanly with the strict parser."""
        with filepath.open() as ftl:
            try:
                self.parser.parse(ftl.read())
            except ParseError:
                return False

        return True

    def set_activation(self):
        """Recalculate activation metadata for every modified .ftl file."""
        updated_ftl = set()
        modified, _ = self.meao_repo.modified_files()
        for fname in modified:
            if not fname.endswith('.ftl'):
                continue

            # Paths look like "<locale>/<ftl_name>"; the locale itself is
            # irrelevant here since activation is computed per ftl file.
            _locale, ftl_name = fname.split('/', 1)
            updated_ftl.add(ftl_name)

        for ftl_name in updated_ftl:
            self.calculate_activation(ftl_name)

    def calculate_activation(self, ftl_file):
        """Activate any locale whose translation now meets the threshold."""
        translations = self.meao_repo.path.glob(f'*/{ftl_file}')
        metadata = get_metadata(ftl_file)
        active_locales = metadata.get('active_locales', [])
        inactive_locales = metadata.get('inactive_locales', [])
        percent_required = metadata.get('percent_required', settings.FLUENT_DEFAULT_PERCENT_REQUIRED)
        all_locales = {str(x.relative_to(self.meao_repo.path)).split('/', 1)[0] for x in translations}
        # Only consider locales not already decided one way or the other.
        locales_to_check = all_locales.difference(['en'], active_locales, inactive_locales)
        new_activations = []
        for locale in locales_to_check:
            l10n = fluent_l10n([locale, 'en'], [ftl_file])
            if not l10n.has_required_messages:
                continue

            percent_trans = l10n.percent_translated
            if percent_trans < percent_required:
                continue

            new_activations.append(locale)

        if new_activations:
            active_locales.extend(new_activations)
            metadata['active_locales'] = sorted(active_locales)
            write_metadata(ftl_file, metadata)
            self.stdout.write(f'Activated {len(new_activations)} new locales for {ftl_file}')
|
mpl-2.0
|
stwunsch/gnuradio
|
gr-fft/python/fft/qa_goertzel.py
|
57
|
2049
|
#!/usr/bin/env python
#
# Copyright 2006,2007,2010,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from math import pi, cos
from gnuradio import gr, gr_unittest, fft, blocks
class test_goertzel(gr_unittest.TestCase):
    """QA for the Goertzel single-bin DFT block (fft.goertzel_fc)."""

    def setUp(self):
        # Fresh flowgraph per test.
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def make_tone_data(self, rate, freq):
        """Return one second of a unit cosine tone sampled at `rate` Hz."""
        return [cos(2*pi*x*freq/rate) for x in range(rate)]

    def transform(self, src_data, rate, freq):
        """Run src_data through a Goertzel filter tuned to bin `freq`."""
        src = blocks.vector_source_f(src_data, False)
        dft = fft.goertzel_fc(rate, rate, freq)
        dst = blocks.vector_sink_c()
        self.tb.connect(src, dft, dst)
        self.tb.run()
        return dst.data()

    def test_001(self):  # Measure single tone magnitude
        rate = 8000
        freq = 100
        tone_bin = freq
        src_data = self.make_tone_data(rate, freq)
        # A unit cosine contributes 0.5 magnitude at its own bin.
        expected_result = 0.5
        actual_result = abs(self.transform(src_data, rate, tone_bin)[0])
        self.assertAlmostEqual(expected_result, actual_result, places=4)

    def test_002(self):  # Measure off frequency magnitude
        rate = 8000
        freq = 100
        # Fix: use floor division so the bin stays an integer under
        # Python 3 (the original `freq/2` relied on Python-2 integer
        # division), and avoid shadowing the `bin` builtin.
        tone_bin = freq // 2
        src_data = self.make_tone_data(rate, freq)
        expected_result = 0.0
        actual_result = abs(self.transform(src_data, rate, tone_bin)[0])
        self.assertAlmostEqual(expected_result, actual_result, places=4)
if __name__ == '__main__':
    # Run under GNU Radio's unittest wrapper; results go to test_goertzel.xml.
    gr_unittest.run(test_goertzel, "test_goertzel.xml")
|
gpl-3.0
|
barbuza/django
|
tests/sitemaps_tests/test_https.py
|
205
|
3608
|
from __future__ import unicode_literals
from datetime import date
from django.test import ignore_warnings, override_settings
from django.utils.deprecation import RemovedInDjango110Warning
from .base import SitemapTestsBase
@override_settings(ROOT_URLCONF='sitemaps_tests.urls.https')
class HTTPSSitemapTests(SitemapTestsBase):
    """Sitemaps served from explicitly-secure URLs render https links."""
    # The base class builds expected URLs with this scheme.
    protocol = 'https'

    @ignore_warnings(category=RemovedInDjango110Warning)
    def test_secure_sitemap_index(self):
        "A secure sitemap index can be rendered"
        # The URL for views.sitemap in tests/urls/https.py has been updated
        # with a name but since reversing by Python path is tried first
        # before reversing by name and works since we're giving
        # name='django.contrib.sitemaps.views.sitemap', we need to silence
        # the erroneous warning until reversing by dotted path is removed.
        # The test will work without modification when it's removed.
        response = self.client.get('/secure/index.xml')
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/secure/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
        self.assertXMLEqual(response.content.decode('utf-8'), expected_content)

    def test_secure_sitemap_section(self):
        "A secure sitemap section can be rendered"
        response = self.client.get('/secure/sitemap-simple.xml')
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
        self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
@override_settings(SECURE_PROXY_SSL_HEADER=False)
class HTTPSDetectionSitemapTests(SitemapTestsBase):
    """Sitemaps requested over HTTPS (detected from the WSGI environ,
    not a proxy header) render https links."""
    # Extra WSGI environ entries passed to the test client requests.
    extra = {'wsgi.url_scheme': 'https'}

    @ignore_warnings(category=RemovedInDjango110Warning)
    def test_sitemap_index_with_https_request(self):
        "A sitemap index requested in HTTPS is rendered with HTTPS links"
        # The URL for views.sitemap in tests/urls/https.py has been updated
        # with a name but since reversing by Python path is tried first
        # before reversing by name and works since we're giving
        # name='django.contrib.sitemaps.views.sitemap', we need to silence
        # the erroneous warning until reversing by dotted path is removed.
        # The test will work without modification when it's removed.
        response = self.client.get('/simple/index.xml', **self.extra)
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url.replace('http://', 'https://')
        self.assertXMLEqual(response.content.decode('utf-8'), expected_content)

    def test_sitemap_section_with_https_request(self):
        "A sitemap section requested in HTTPS is rendered with HTTPS links"
        response = self.client.get('/simple/sitemap-simple.xml', **self.extra)
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url.replace('http://', 'https://'), date.today())
        self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
|
bsd-3-clause
|
xq262144/hue
|
desktop/core/ext-py/cx_Oracle-5.2.1/test/uStringVar.py
|
3
|
10381
|
"""Module for testing string variables."""
class TestStringVar(BaseTestCase):
    """Tests for binding and fetching string, raw, and rowid variables.

    Requires the standard cx_Oracle test schema: the TestStrings table
    (populated with 10 known rows) and the pkg_TestStringArrays PL/SQL
    package. This is Python-2-era code (u"" literals, failUnlessEqual).
    """

    def setUp(self):
        BaseTestCase.setUp(self)
        # Row i is (IntCol, StringCol, RawCol, FixedCharCol, NullableCol);
        # NullableCol is populated only for odd i, mirroring the table data.
        self.rawData = []
        self.dataByKey = {}
        for i in range(1, 11):
            stringCol = u"String %d" % i
            fixedCharCol = (u"Fixed Char %d" % i).ljust(40)
            rawCol = "Raw %d" % i
            if i % 2:
                nullableCol = u"Nullable %d" % i
            else:
                nullableCol = None
            dataTuple = (i, stringCol, rawCol, fixedCharCol, nullableCol)
            self.rawData.append(dataTuple)
            self.dataByKey[i] = dataTuple

    def testBindString(self):
        "test binding in a string"
        self.cursor.execute(u"""
                select * from TestStrings
                where StringCol = :value""",
                value = u"String 5")
        self.failUnlessEqual(self.cursor.fetchall(), [self.dataByKey[5]])

    def testBindDifferentVar(self):
        "test binding a different variable on second execution"
        retval_1 = self.cursor.var(cx_Oracle.STRING, 30)
        retval_2 = self.cursor.var(cx_Oracle.STRING, 30)
        self.cursor.execute(u"begin :retval := 'Called'; end;",
                retval = retval_1)
        self.failUnlessEqual(retval_1.getvalue(), u"Called")
        self.cursor.execute(u"begin :retval := 'Called'; end;",
                retval = retval_2)
        self.failUnlessEqual(retval_2.getvalue(), u"Called")

    def testBindStringAfterNumber(self):
        "test binding in a string after setting input sizes to a number"
        self.cursor.setinputsizes(value = cx_Oracle.NUMBER)
        self.cursor.execute(u"""
                select * from TestStrings
                where StringCol = :value""",
                value = u"String 6")
        self.failUnlessEqual(self.cursor.fetchall(), [self.dataByKey[6]])

    def testBindStringArrayBySizes(self):
        "test binding in a string array (with setinputsizes)"
        returnValue = self.cursor.var(cx_Oracle.NUMBER)
        self.cursor.setinputsizes(array = [cx_Oracle.STRING, 10])
        array = [r[1] for r in self.rawData]
        self.cursor.execute(u"""
                begin
                  :returnValue := pkg_TestStringArrays.TestInArrays(
                      :integerValue, :array);
                end;""",
                returnValue = returnValue,
                integerValue = 6,
                array = array)
        self.failUnlessEqual(returnValue.getvalue(), 87)

    def testBindStringArrayByVar(self):
        "test binding in a string array (with arrayvar)"
        returnValue = self.cursor.var(cx_Oracle.NUMBER)
        array = self.cursor.arrayvar(cx_Oracle.STRING, 10, 20)
        array.setvalue(0, [r[1] for r in self.rawData])
        self.cursor.execute(u"""
                begin
                  :returnValue := pkg_TestStringArrays.TestInArrays(
                      :integerValue, :array);
                end;""",
                returnValue = returnValue,
                integerValue = 7,
                array = array)
        self.failUnlessEqual(returnValue.getvalue(), 88)

    def testBindInOutStringArrayByVar(self):
        "test binding in/out a string array (with arrayvar)"
        array = self.cursor.arrayvar(cx_Oracle.STRING, 10, 100)
        originalData = [r[1] for r in self.rawData]
        # The PL/SQL procedure rewrites the first numElems (5) entries.
        expectedData = [u"Converted element # %d originally had length %d" % \
                (i, len(originalData[i - 1])) for i in range(1, 6)] + \
                originalData[5:]
        array.setvalue(0, originalData)
        self.cursor.execute(u"""
                begin
                  pkg_TestStringArrays.TestInOutArrays(:numElems, :array);
                end;""",
                numElems = 5,
                array = array)
        self.failUnlessEqual(array.getvalue(), expectedData)

    def testBindOutStringArrayByVar(self):
        "test binding out a string array (with arrayvar)"
        array = self.cursor.arrayvar(cx_Oracle.STRING, 6, 100)
        expectedData = [u"Test out element # %d" % i for i in range(1, 7)]
        self.cursor.execute(u"""
                begin
                  pkg_TestStringArrays.TestOutArrays(:numElems, :array);
                end;""",
                numElems = 6,
                array = array)
        self.failUnlessEqual(array.getvalue(), expectedData)

    def testBindRaw(self):
        "test binding in a raw"
        self.cursor.setinputsizes(value = cx_Oracle.BINARY)
        self.cursor.execute(u"""
                select * from TestStrings
                where RawCol = :value""",
                value = "Raw 4")
        self.failUnlessEqual(self.cursor.fetchall(), [self.dataByKey[4]])

    def testBindAndFetchRowid(self):
        "test binding (and fetching) a rowid"
        self.cursor.execute(u"""
                select rowid
                from TestStrings
                where IntCol = 3""")
        rowid, = self.cursor.fetchone()
        self.cursor.execute(u"""
                select *
                from TestStrings
                where rowid = :value""",
                value = rowid)
        self.failUnlessEqual(self.cursor.fetchall(), [self.dataByKey[3]])

    def testBindNull(self):
        "test binding in a null"
        self.cursor.execute(u"""
                select * from TestStrings
                where StringCol = :value""",
                value = None)
        self.failUnlessEqual(self.cursor.fetchall(), [])

    def testBindOutSetInputSizesByType(self):
        "test binding out with set input sizes defined (by type)"
        vars = self.cursor.setinputsizes(value = cx_Oracle.STRING)
        self.cursor.execute(u"""
                begin
                  :value := 'TSI';
                end;""")
        self.failUnlessEqual(vars["value"].getvalue(), u"TSI")

    def testBindOutSetInputSizesByInteger(self):
        "test binding out with set input sizes defined (by integer)"
        vars = self.cursor.setinputsizes(value = 30)
        self.cursor.execute(u"""
                begin
                  :value := 'TSI (I)';
                end;""")
        self.failUnlessEqual(vars["value"].getvalue(), u"TSI (I)")

    def testBindInOutSetInputSizesByType(self):
        "test binding in/out with set input sizes defined (by type)"
        vars = self.cursor.setinputsizes(value = cx_Oracle.STRING)
        self.cursor.execute(u"""
                begin
                  :value := :value || ' TSI';
                end;""",
                value = u"InVal")
        self.failUnlessEqual(vars["value"].getvalue(), u"InVal TSI")

    def testBindInOutSetInputSizesByInteger(self):
        "test binding in/out with set input sizes defined (by integer)"
        vars = self.cursor.setinputsizes(value = 30)
        self.cursor.execute(u"""
                begin
                  :value := :value || ' TSI (I)';
                end;""",
                value = u"InVal")
        self.failUnlessEqual(vars["value"].getvalue(), u"InVal TSI (I)")

    def testBindOutVar(self):
        "test binding out with cursor.var() method"
        var = self.cursor.var(cx_Oracle.STRING)
        self.cursor.execute(u"""
                begin
                  :value := 'TSI (VAR)';
                end;""",
                value = var)
        self.failUnlessEqual(var.getvalue(), u"TSI (VAR)")

    def testBindInOutVarDirectSet(self):
        "test binding in/out with cursor.var() method"
        var = self.cursor.var(cx_Oracle.STRING)
        var.setvalue(0, u"InVal")
        self.cursor.execute(u"""
                begin
                  :value := :value || ' TSI (VAR)';
                end;""",
                value = var)
        self.failUnlessEqual(var.getvalue(), u"InVal TSI (VAR)")

    def testBindLongString(self):
        "test that binding a long string succeeds"
        self.cursor.execute(u"""
                declare
                  t_Temp varchar2(10000);
                begin
                  t_Temp := :bigString;
                end;""",
                bigString = u"X" * 10000)

    def testBindLongStringAfterSettingSize(self):
        "test that setinputsizes() returns a long variable"
        var = self.cursor.setinputsizes(test = 90000)["test"]
        inString = u"1234567890" * 9000
        var.setvalue(0, inString)
        outString = var.getvalue()
        self.failUnlessEqual(inString, outString,
                "output does not match: in was %d, out was %d" % \
                (len(inString), len(outString)))

    def testCursorDescription(self):
        "test cursor description is accurate"
        self.cursor.execute(u"select * from TestStrings")
        # Tuples are (name, type, display_size, internal_size, precision,
        # scale, null_ok) per DB-API 2.0.
        self.failUnlessEqual(self.cursor.description,
                [ (u'INTCOL', cx_Oracle.NUMBER, 10, 22, 9, 0, 0),
                  (u'STRINGCOL', cx_Oracle.STRING, 20, 20, 0, 0, 0),
                  (u'RAWCOL', cx_Oracle.BINARY, 30, 30, 0, 0, 0),
                  (u'FIXEDCHARCOL', cx_Oracle.FIXED_CHAR, 40, 40, 0, 0, 0),
                  (u'NULLABLECOL', cx_Oracle.STRING, 50, 50, 0, 0, 1) ])

    def testFetchAll(self):
        "test that fetching all of the data returns the correct results"
        self.cursor.execute(u"select * From TestStrings order by IntCol")
        self.failUnlessEqual(self.cursor.fetchall(), self.rawData)
        self.failUnlessEqual(self.cursor.fetchall(), [])

    def testFetchMany(self):
        "test that fetching data in chunks returns the correct results"
        self.cursor.execute(u"select * From TestStrings order by IntCol")
        self.failUnlessEqual(self.cursor.fetchmany(3), self.rawData[0:3])
        self.failUnlessEqual(self.cursor.fetchmany(2), self.rawData[3:5])
        self.failUnlessEqual(self.cursor.fetchmany(4), self.rawData[5:9])
        self.failUnlessEqual(self.cursor.fetchmany(3), self.rawData[9:])
        self.failUnlessEqual(self.cursor.fetchmany(3), [])

    def testFetchOne(self):
        "test that fetching a single row returns the correct results"
        self.cursor.execute(u"""
                select *
                from TestStrings
                where IntCol in (3, 4)
                order by IntCol""")
        self.failUnlessEqual(self.cursor.fetchone(), self.dataByKey[3])
        self.failUnlessEqual(self.cursor.fetchone(), self.dataByKey[4])
        self.failUnlessEqual(self.cursor.fetchone(), None)
|
apache-2.0
|
evernym/plenum
|
plenum/test/checkpoints/test_view_change_after_checkpoint.py
|
2
|
4527
|
import pytest
from plenum.test.checkpoints.helper import checkRequestCounts
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.test_node import ensureElectionsDone
from plenum.test.view_change.helper import ensure_view_change
from stp_core.loop.eventually import eventually
from plenum.test.helper import sdk_send_batches_of_random_and_check, get_pp_seq_no
CHK_FREQ = 5
@pytest.fixture(scope='function',
                params=['greater_than_checkpoint', 'lesser_than_checkpoint', 'equal_to_checkpoint'])
def sent_batches(request, chkFreqPatched):
    """Number of batches to send, parametrized relative to CHK_FREQ:
    above it, below it, and exactly equal to it."""
    batch_counts = {
        'greater_than_checkpoint': CHK_FREQ + 2,
        'lesser_than_checkpoint': CHK_FREQ - 2,
        'equal_to_checkpoint': CHK_FREQ,
    }
    # .get() mirrors the original if-chain: unknown params yield None.
    return batch_counts.get(request.param)
# Module-level state shared across parametrized runs of the test below:
# the master replica's low watermark and the running 3PC batch count.
low_watermark = 0
batches_count = 0
@pytest.mark.skip(reason="INDY-1336. For now, preprepares, prepares and commits queues are cleaned after view change")
def test_checkpoint_across_views(sent_batches, chkFreqPatched, looper, txnPoolNodeSet,
                                 sdk_pool_handle, sdk_wallet_client):
    """
    Test checkpointing across views.
    This test checks that checkpointing and garbage collection works correctly
    no matter if view change happened before a checkpoint or after a checkpoint

    `sent_batches` is parametrized to be greater than, less than, or equal
    to CHK_FREQ; the module globals carry state between parametrized runs.
    """
    global low_watermark
    global batches_count
    batches_count = get_pp_seq_no(txnPoolNodeSet)
    low_watermark = txnPoolNodeSet[0].master_replica.h
    batch_size = chkFreqPatched.Max3PCBatchSize
    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client,
                                         batch_size * sent_batches, sent_batches)
    batches_count += sent_batches
    # Check that correct garbage collection happens
    # (once CHK_FREQ batches are reached, a stable checkpoint garbage
    # collects that many batches).
    non_gced_batch_count = (batches_count - CHK_FREQ) if batches_count >= CHK_FREQ else batches_count
    looper.run(eventually(checkRequestCounts, txnPoolNodeSet, batch_size * non_gced_batch_count,
                          non_gced_batch_count, retryWait=1))
    ensure_view_change(looper, txnPoolNodeSet)
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
    # +1 accounts for the audit txn batch sent as part of the view change.
    batches_count += 1
    low_watermark = txnPoolNodeSet[0].master_replica.h
    # Check that after view change, proper clean up is done
    for node in txnPoolNodeSet:
        for r in node.replicas.values():
            # Checkpoint was started after sending audit txn
            # assert not r.checkpoints
            # No stashed checkpoint for previous view
            assert r.h == low_watermark
            assert all(cp.view_no >= r.viewNo for cp in r._checkpointer._received_checkpoints)
            # from audit txn
            assert r._ordering_service._lastPrePrepareSeqNo == batches_count
            assert r.H == r.h + chkFreqPatched.LOG_SIZE
    # All this manipulations because after view change we will send an empty batch for auditing
    checkRequestCounts(txnPoolNodeSet, 0, 1)
    if sent_batches > CHK_FREQ:
        expected_batch_count = batches_count - CHK_FREQ + 1
        additional_after_vc = 0
    elif sent_batches == CHK_FREQ:
        expected_batch_count = 0
        additional_after_vc = 0
        # Send one batch fewer so the audit batch completes the checkpoint.
        sent_batches = CHK_FREQ - 1
    else:
        expected_batch_count = sent_batches + 1
        additional_after_vc = 1
    # Even after view change, chekpointing works
    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client,
                                         batch_size * sent_batches, sent_batches)
    batches_count += sent_batches
    looper.run(eventually(checkRequestCounts, txnPoolNodeSet, batch_size * (expected_batch_count - additional_after_vc),
                          expected_batch_count, retryWait=1))
    # Send more batches so one more checkpoint happens. This is done so that
    # when this test finishes, all requests are garbage collected and the
    # next run of this test (with next param) has the calculations correct
    more = CHK_FREQ - expected_batch_count
    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client,
                                         batch_size * more, more)
    batches_count += more
    looper.run(eventually(checkRequestCounts, txnPoolNodeSet, 0, 0, retryWait=1))
|
apache-2.0
|
takeshineshiro/horizon
|
openstack_dashboard/dashboards/admin/aggregates/tests.py
|
12
|
19957
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.aggregates import constants
from openstack_dashboard.dashboards.admin.aggregates import workflows
from openstack_dashboard.test import helpers as test
class BaseAggregateWorkflowTests(test.BaseAdminViewTests):
    """Shared helpers for building workflow POST data for aggregate tests.

    Both helpers previously duplicated the loop that filters hosts down to
    nova-compute services; that logic now lives in one private helper.
    """

    # Form field name used by the add-hosts workflow step.
    _HOST_FIELD_NAME = 'add_host_to_aggregate_role_member'

    @staticmethod
    def _compute_host_names(hosts):
        """Return the host names of hosts running the 'compute' service."""
        return [h.host_name for h in hosts if h.service == 'compute']

    def _get_create_workflow_data(self, aggregate, hosts=None):
        """Build POST data for the create-aggregate workflow.

        `hosts`, when given, adds the compute hosts to the add-hosts step.
        """
        aggregate_info = {"name": aggregate.name,
                          "availability_zone": aggregate.availability_zone}
        if hosts:
            aggregate_info[self._HOST_FIELD_NAME] = \
                self._compute_host_names(hosts)

        return aggregate_info

    def _get_manage_workflow_data(self, aggregate, hosts=None):
        """Build POST data for the manage-hosts workflow (keyed by id)."""
        aggregate_info = {"id": aggregate.id}
        if hosts:
            aggregate_info[self._HOST_FIELD_NAME] = \
                self._compute_host_names(hosts)

        return aggregate_info
class CreateAggregateWorkflowTests(BaseAggregateWorkflowTests):
    """Tests for the create-aggregate workflow view (mox-stubbed nova API)."""

    @test.create_stubs({api.nova: ('host_list', ), })
    def test_workflow_get(self):
        # GET renders the workflow with its two expected steps.
        api.nova.host_list(IsA(http.HttpRequest)).AndReturn(self.hosts.list())
        self.mox.ReplayAll()
        url = reverse(constants.AGGREGATES_CREATE_URL)
        res = self.client.get(url)
        workflow = res.context['workflow']
        self.assertTemplateUsed(res, constants.AGGREGATES_CREATE_VIEW_TEMPLATE)
        self.assertEqual(workflow.name, workflows.CreateAggregateWorkflow.name)
        self.assertQuerysetEqual(
            workflow.steps,
            ['<SetAggregateInfoStep: set_aggregate_info>',
             '<AddHostsToAggregateStep: add_host_to_aggregate>'])

    @test.create_stubs({api.nova: ('host_list', 'aggregate_details_list',
                                   'aggregate_create'), })
    def _test_generic_create_aggregate(self, workflow_data, aggregate,
                                       error_count=0,
                                       expected_error_message=None):
        # Shared driver: POSTs workflow_data and asserts either a redirect
        # (success) or the given number of form errors with the message.
        api.nova.host_list(IsA(http.HttpRequest)).AndReturn(self.hosts.list())
        api.nova.aggregate_details_list(IsA(http.HttpRequest)).AndReturn([])
        if not expected_error_message:
            # aggregate_create is only reached when the form validates.
            api.nova.aggregate_create(
                IsA(http.HttpRequest),
                name=workflow_data['name'],
                availability_zone=workflow_data['availability_zone'],
            ).AndReturn(aggregate)
        self.mox.ReplayAll()
        url = reverse(constants.AGGREGATES_CREATE_URL)
        res = self.client.post(url, workflow_data)
        if not expected_error_message:
            self.assertNoFormErrors(res)
            self.assertRedirectsNoFollow(
                res, reverse(constants.AGGREGATES_INDEX_URL))
        else:
            self.assertFormErrors(res, error_count, expected_error_message)

    def test_create_aggregate(self):
        aggregate = self.aggregates.first()
        workflow_data = self._get_create_workflow_data(aggregate)
        self._test_generic_create_aggregate(workflow_data, aggregate)

    def test_create_aggregate_fails_missing_fields(self):
        aggregate = self.aggregates.first()
        workflow_data = self._get_create_workflow_data(aggregate)
        # Blank out the required fields to trigger validation errors.
        workflow_data['name'] = ''
        workflow_data['availability_zone'] = ''
        self._test_generic_create_aggregate(workflow_data, aggregate, 1,
                                            u'This field is required')

    @test.create_stubs({api.nova: ('host_list',
                                   'aggregate_details_list',
                                   'aggregate_create',
                                   'add_host_to_aggregate'), })
    def test_create_aggregate_with_hosts(self):
        # Creating with hosts should add each compute host to the aggregate.
        aggregate = self.aggregates.first()
        hosts = self.hosts.list()
        api.nova.host_list(IsA(http.HttpRequest)).AndReturn(self.hosts.list())
        api.nova.aggregate_details_list(IsA(http.HttpRequest)).AndReturn([])
        workflow_data = self._get_create_workflow_data(aggregate, hosts)
        api.nova.aggregate_create(
            IsA(http.HttpRequest),
            name=workflow_data['name'],
            availability_zone=workflow_data['availability_zone'],
        ).AndReturn(aggregate)
        compute_hosts = []
        for host in hosts:
            if host.service == 'compute':
                compute_hosts.append(host)
        for host in compute_hosts:
            # InAnyOrder: the view does not guarantee the add order.
            api.nova.add_host_to_aggregate(
                IsA(http.HttpRequest),
                aggregate.id, host.host_name).InAnyOrder()
        self.mox.ReplayAll()
        url = reverse(constants.AGGREGATES_CREATE_URL)
        res = self.client.post(url, workflow_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res,
                                     reverse(constants.AGGREGATES_INDEX_URL))

    @test.create_stubs({api.nova: ('host_list', 'aggregate_details_list', ), })
    def test_host_list_nova_compute(self):
        # The add-hosts step should only offer nova-compute hosts.
        hosts = self.hosts.list()
        compute_hosts = []
        for host in hosts:
            if host.service == 'compute':
                compute_hosts.append(host)
        api.nova.host_list(IsA(http.HttpRequest)).AndReturn(self.hosts.list())
        self.mox.ReplayAll()
        url = reverse(constants.AGGREGATES_CREATE_URL)
        res = self.client.get(url)
        workflow = res.context['workflow']
        step = workflow.get_step("add_host_to_aggregate")
        field_name = step.get_member_field_name('member')
        self.assertEqual(len(step.action.fields[field_name].choices),
                         len(compute_hosts))
class AggregatesViewTests(test.BaseAdminViewTests):
    """Tests for the admin host-aggregates panel views (index and update)."""

    @mock.patch('openstack_dashboard.api.nova.extension_supported',
                mock.Mock(return_value=False))
    @test.create_stubs({api.nova: ('aggregate_details_list',
                                   'availability_zone_list',),
                        api.cinder: ('tenant_absolute_limits',)})
    def test_panel_not_available(self):
        # When the nova extension is reported unsupported the aggregates
        # panel must not show up on the admin overview page.
        api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)). \
            MultipleTimes().AndReturn(self.cinder_limits['absolute'])
        self.mox.ReplayAll()
        self.patchers['aggregates'].stop()
        res = self.client.get(reverse('horizon:admin:overview:index'))
        self.assertNotIn('Host Aggregates', res.content)

    @test.create_stubs({api.nova: ('aggregate_details_list',
                                   'availability_zone_list',)})
    def test_index(self):
        # Index view renders both the aggregates table and the
        # availability-zones table from the corresponding API calls.
        api.nova.aggregate_details_list(IsA(http.HttpRequest)) \
            .AndReturn(self.aggregates.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest), detailed=True) \
            .AndReturn(self.availability_zones.list())
        self.mox.ReplayAll()
        res = self.client.get(reverse(constants.AGGREGATES_INDEX_URL))
        self.assertTemplateUsed(res, constants.AGGREGATES_INDEX_VIEW_TEMPLATE)
        self.assertItemsEqual(res.context['host_aggregates_table'].data,
                              self.aggregates.list())
        self.assertItemsEqual(res.context['availability_zones_table'].data,
                              self.availability_zones.list())

    @test.create_stubs({api.nova: ('aggregate_update', 'aggregate_get',), })
    def _test_generic_update_aggregate(self, form_data, aggregate,
                                       error_count=0,
                                       expected_error_message=None):
        # Shared driver: posts `form_data` to the update view; when
        # `expected_error_message` is None the update API call is expected
        # to succeed, otherwise `error_count` form errors are asserted.
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id))\
            .AndReturn(aggregate)
        if not expected_error_message:
            az = form_data['availability_zone']
            aggregate_data = {'name': form_data['name'],
                              'availability_zone': az}
            api.nova.aggregate_update(IsA(http.HttpRequest), str(aggregate.id),
                                      aggregate_data)
        self.mox.ReplayAll()
        res = self.client.post(reverse(constants.AGGREGATES_UPDATE_URL,
                                       args=[aggregate.id]),
                               form_data)
        if not expected_error_message:
            self.assertNoFormErrors(res)
            self.assertRedirectsNoFollow(
                res, reverse(constants.AGGREGATES_INDEX_URL))
        else:
            self.assertFormErrors(res, error_count, expected_error_message)

    def test_update_aggregate(self):
        aggregate = self.aggregates.first()
        form_data = {'id': aggregate.id,
                     'name': 'my_new_name',
                     'availability_zone': 'my_new_zone'}
        self._test_generic_update_aggregate(form_data, aggregate)

    def test_update_aggregate_fails_missing_fields(self):
        # Missing name/availability_zone must raise one "required" error.
        aggregate = self.aggregates.first()
        form_data = {'id': aggregate.id}
        self._test_generic_update_aggregate(form_data, aggregate, 1,
                                            u'This field is required')
class ManageHostsTests(test.BaseAdminViewTests):
    """Tests for the manage-hosts view/workflow of a host aggregate."""

    @test.create_stubs({api.nova: ('aggregate_get', 'host_list')})
    def test_manage_hosts(self):
        # GET renders the manage-hosts template for an existing aggregate.
        aggregate = self.aggregates.first()
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .AndReturn(aggregate)
        api.nova.host_list(IsA(http.HttpRequest)) \
            .AndReturn(self.hosts.list())
        self.mox.ReplayAll()
        res = self.client.get(reverse(constants.AGGREGATES_MANAGE_HOSTS_URL,
                                      args=[aggregate.id]))
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res,
                                constants.AGGREGATES_MANAGE_HOSTS_TEMPLATE)

    @test.create_stubs({api.nova: ('aggregate_get', 'add_host_to_aggregate',
                                   'remove_host_from_aggregate',
                                   'host_list')})
    def test_manage_hosts_update_add_remove_not_empty_aggregate(self):
        # Selecting a new host while deselecting the two existing ones must
        # remove both old hosts (any order) and add the new host.
        aggregate = self.aggregates.first()
        aggregate.hosts = ['host1', 'host2']
        host = self.hosts.list()[0]
        form_data = {'manageaggregatehostsaction_role_member':
                     [host.host_name]}
        api.nova.remove_host_from_aggregate(IsA(http.HttpRequest),
                                            str(aggregate.id),
                                            'host2').InAnyOrder()
        api.nova.remove_host_from_aggregate(IsA(http.HttpRequest),
                                            str(aggregate.id),
                                            'host1').InAnyOrder()
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .AndReturn(aggregate)
        api.nova.host_list(IsA(http.HttpRequest)) \
            .AndReturn(self.hosts.list())
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .AndReturn(aggregate)
        api.nova.add_host_to_aggregate(IsA(http.HttpRequest),
                                       str(aggregate.id), host.host_name)
        self.mox.ReplayAll()
        res = self.client.post(reverse(constants.AGGREGATES_MANAGE_HOSTS_URL,
                                       args=[aggregate.id]),
                               form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res,
                                     reverse(constants.AGGREGATES_INDEX_URL))

    @test.create_stubs({api.nova: ('aggregate_get', 'add_host_to_aggregate',
                                   'remove_host_from_aggregate',
                                   'host_list')})
    def test_manage_hosts_update_add_not_empty_aggregate_should_fail(self):
        # A nova failure while adding a host must surface as error messages
        # (two: API error + workflow error) but still redirect to the index.
        aggregate = self.aggregates.first()
        aggregate.hosts = ['devstack001']
        host1 = self.hosts.list()[0]
        host3 = self.hosts.list()[2]
        form_data = {'manageaggregatehostsaction_role_member':
                     [host1.host_name, host3.host_name]}
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .InAnyOrder().AndReturn(aggregate)
        api.nova.host_list(IsA(http.HttpRequest)) \
            .InAnyOrder().AndReturn(self.hosts.list())
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .InAnyOrder().AndReturn(aggregate)
        api.nova.add_host_to_aggregate(IsA(http.HttpRequest),
                                       str(aggregate.id), host3.host_name) \
            .InAnyOrder().AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        res = self.client.post(reverse(constants.AGGREGATES_MANAGE_HOSTS_URL,
                                       args=[aggregate.id]),
                               form_data)
        self.assertNoFormErrors(res)
        self.assertMessageCount(error=2)
        self.assertRedirectsNoFollow(res,
                                     reverse(constants.AGGREGATES_INDEX_URL))

    @test.create_stubs({api.nova: ('aggregate_get', 'add_host_to_aggregate',
                                   'remove_host_from_aggregate',
                                   'host_list')})
    def test_manage_hosts_update_clean_not_empty_aggregate_should_fail(self):
        # A nova failure while removing the last host must also surface as
        # error messages while still redirecting to the index.
        aggregate = self.aggregates.first()
        aggregate.hosts = ['host2']
        form_data = {'manageaggregatehostsaction_role_member':
                     []}
        api.nova.remove_host_from_aggregate(IsA(http.HttpRequest),
                                            str(aggregate.id),
                                            'host2')\
            .AndRaise(self.exceptions.nova)
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .AndReturn(aggregate)
        api.nova.host_list(IsA(http.HttpRequest)) \
            .AndReturn(self.hosts.list())
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .AndReturn(aggregate)
        self.mox.ReplayAll()
        res = self.client.post(reverse(constants.AGGREGATES_MANAGE_HOSTS_URL,
                                       args=[aggregate.id]),
                               form_data)
        self.assertNoFormErrors(res)
        self.assertMessageCount(error=2)
        self.assertRedirectsNoFollow(res,
                                     reverse(constants.AGGREGATES_INDEX_URL))

    @test.create_stubs({api.nova: ('aggregate_get', 'add_host_to_aggregate',
                                   'remove_host_from_aggregate',
                                   'host_list')})
    def _test_manage_hosts_update(self,
                                  host,
                                  aggregate,
                                  form_data,
                                  addAggregate=False,
                                  cleanAggregates=False):
        # Shared driver: records the expected remove/add calls according to
        # the flags, posts `form_data`, and asserts a clean redirect.
        if cleanAggregates:
            api.nova.remove_host_from_aggregate(IsA(http.HttpRequest),
                                                str(aggregate.id),
                                                'host3').InAnyOrder()
            api.nova.remove_host_from_aggregate(IsA(http.HttpRequest),
                                                str(aggregate.id),
                                                'host2').InAnyOrder()
            api.nova.remove_host_from_aggregate(IsA(http.HttpRequest),
                                                str(aggregate.id),
                                                'host1').InAnyOrder()
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .AndReturn(aggregate)
        api.nova.host_list(IsA(http.HttpRequest)) \
            .AndReturn(self.hosts.list())
        api.nova.aggregate_get(IsA(http.HttpRequest), str(aggregate.id)) \
            .AndReturn(aggregate)
        if addAggregate:
            api.nova.add_host_to_aggregate(IsA(http.HttpRequest),
                                           str(aggregate.id),
                                           host.host_name)
        self.mox.ReplayAll()
        res = self.client.post(reverse(constants.AGGREGATES_MANAGE_HOSTS_URL,
                                       args=[aggregate.id]),
                               form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res,
                                     reverse(constants.AGGREGATES_INDEX_URL))

    def test_manage_hosts_update_nothing_not_empty_aggregate(self):
        # Re-submitting the current membership triggers no add/remove.
        aggregate = self.aggregates.first()
        host = self.hosts.list()[0]
        aggregate.hosts = [host.host_name]
        form_data = {'manageaggregatehostsaction_role_member':
                     [host.host_name]}
        self._test_manage_hosts_update(host,
                                       aggregate,
                                       form_data,
                                       addAggregate=False)

    def test_manage_hosts_update_nothing_empty_aggregate(self):
        # Empty selection on an empty aggregate is a no-op.
        aggregate = self.aggregates.first()
        aggregate.hosts = []
        form_data = {'manageaggregatehostsaction_role_member':
                     []}
        self._test_manage_hosts_update(None,
                                       aggregate,
                                       form_data,
                                       addAggregate=False)

    def test_manage_hosts_update_add_empty_aggregate(self):
        # Selecting one host on an empty aggregate adds exactly that host.
        aggregate = self.aggregates.first()
        aggregate.hosts = []
        host = self.hosts.list()[0]
        form_data = {'manageaggregatehostsaction_role_member':
                     [host.host_name]}
        self._test_manage_hosts_update(host,
                                       aggregate,
                                       form_data,
                                       addAggregate=True)

    def test_manage_hosts_update_add_not_empty_aggregate(self):
        # Adding a second host keeps the existing one and adds the new one.
        aggregate = self.aggregates.first()
        aggregate.hosts = ['devstack001']
        host1 = self.hosts.list()[0]
        host3 = self.hosts.list()[2]
        form_data = {'manageaggregatehostsaction_role_member':
                     [host1.host_name, host3.host_name]}
        self._test_manage_hosts_update(host3,
                                       aggregate,
                                       form_data,
                                       addAggregate=True)

    def test_manage_hosts_update_clean_not_empty_aggregate(self):
        # Deselecting everything removes all three current hosts.
        aggregate = self.aggregates.first()
        aggregate.hosts = ['host1', 'host2', 'host3']
        form_data = {'manageaggregatehostsaction_role_member':
                     []}
        self._test_manage_hosts_update(None,
                                       aggregate,
                                       form_data,
                                       addAggregate=False,
                                       cleanAggregates=True)
|
apache-2.0
|
EKT1/valence
|
valence/bayes.py
|
1
|
1505
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import codecs
import re
import pkg_resources
from nltk.classify import NaiveBayesClassifier

# Tokenizer: split on quotes, punctuation, parentheses and whitespace.
space = re.compile('[\'".,!?\\s\\(\\)]+')
# Valence categories (Estonian): positive, negative, neutral, ambivalent.
cats = ('positiivne', 'negatiivne', 'neutraalne', 'vastuoluline')
# Lazily-trained NaiveBayesClassifier singleton; populated by get_classifier().
classifier = None
# Bundled training corpus: CSV lines of "<category>,<text>".
corpus_name = pkg_resources.resource_filename("valence", "korpus.csv")
def load_corpus(corpus_name=corpus_name):
    """Read the CSV corpus and return (feature-dict, label) training pairs.

    Each line is "<label>,<text>"; the text is tokenized with the module's
    `space` regex and every token becomes a boolean presence feature.
    """
    print("Load corpus:", corpus_name, file=sys.stderr)
    features = []
    with codecs.open(corpus_name, 'r', encoding='utf-8') as handle:
        for line in handle:
            row = line.split(',', 1)
            feats = {word: True for word in space.split(row[1])}
            features.append((feats, row[0]))
    return features
def get_classifier():
    """Train the module-level Naive Bayes classifier on first use."""
    global classifier
    if classifier:
        return
    corpus = load_corpus()
    if not corpus:
        print("No corpus!", file=sys.stderr)
        return
    print("Train", file=sys.stderr)
    classifier = NaiveBayesClassifier.train(corpus)
    print(classifier.show_most_informative_features(100), file=sys.stderr)
def classify(words):
    """Return the most likely valence category for the given tokens."""
    get_classifier()
    return classifier.classify({word: True for word in words})
def prob_classify(words):
    """Return the probability distribution over valence categories."""
    get_classifier()
    return classifier.prob_classify({word: True for word in words})
|
gpl-3.0
|
pudo/opennames
|
opensanctions/crawlers/eu_cor_members.py
|
1
|
4495
|
from urllib.parse import urljoin
from normality import stringify, collapse_spaces, slugify
from lxml import html
from opensanctions.util import date_formats, DAY
def parse_date(text):
    # CoR member pages show dates as day/month/year, e.g. "24/01/2020".
    return date_formats(text, [("%d/%m/%Y", DAY)])
def crawl_person(context, name, url):
    """Scrape a single Committee of the Regions member page.

    Emits a Person entity (plus Organization/Membership entities for the
    member's political group) built from the labelled rows of the page's
    details list. `name` is expected as "Last, First".
    """
    context.log.debug("Crawling member", name=name, url=url)
    res = context.http.get(url)
    doc = html.fromstring(res.text)
    _, person_id = url.rsplit("/", 1)
    person = context.make("Person")
    person.make_slug(person_id)
    person.add("sourceUrl", url)
    person.add("name", name)
    person.add("topics", "role.pep")
    last_name, first_name = name.split(", ", 1)
    person.add("firstName", first_name)
    person.add("lastName", last_name)
    address = {}
    details = doc.find('.//div[@class="regular-details"]')
    for row in details.findall('.//ul[@class="no-bullet"]/li'):
        children = row.getchildren()
        title = children[0]
        title_text = collapse_spaces(stringify(title.text_content()))
        # Some rows have an icon instead of a text label; fall back to the
        # icon element's class so the branches below can still match.
        title_text = title_text or title.get("class")
        value = collapse_spaces(title.tail)
        if title_text in ("Full name:", "Address:", "Declaration of interests"):
            # ignore these.
            continue
        if title_text == "Emails:":
            emails = [e.text for e in row.findall(".//a")]
            person.add("email", emails)
            continue
        if "glyphicon-phone" in title_text:
            person.add("phone", value.split(","))
            continue
        if "fa-fax" in title_text:
            # TODO: yeah, no
            # person.add("phone", value)
            continue
        if title_text in ("Web sites:", "list-inline"):
            sites = [e.get("href") for e in row.findall(".//a")]
            person.add("website", sites)
            continue
        if title_text == "Represented Country:":
            person.add("country", value)
            continue
        if title_text == "Languages:":
            # TODO: missing in FtM
            # person.add("languages", value.split(','))
            continue
        if "Regions since:" in title_text:
            date = parse_date(value)
            person.context["created_at"] = date
            continue
        if "Date of birth:" in title_text:
            person.add("birthDate", parse_date(value))
            continue
        if "Commissions:" in title_text:
            for com in row.findall(".//li"):
                text = collapse_spaces(com.text_content())
                sep = "Mandate - "
                if sep in text:
                    _, text = text.split(sep, 1)
                person.add("sector", text)
            continue
        if "Areas of interest:" in title_text:
            for area in row.findall(".//li"):
                person.add("keywords", area.text_content())
            continue
        if title.tag == "i" and value is None:
            person.add("position", title_text)
            continue
        # BUG FIX: `title_text in ("Country:")` was a substring test against
        # the string "Country:" (parentheses without a comma do not make a
        # tuple); use an exact comparison instead.
        if title_text == "Country:":
            person.add("country", value)
        if title_text in ("Street:", "Postal code:", "City:", "Country:"):
            address[title_text.replace(":", "")] = value
            continue
        if title_text == "Political group:":
            group = context.make("Organization")
            group.add("name", value)
            slug = value
            if "(" in slug:
                _, slug = slug.rsplit("(", 1)
            slug = slugify(slug, sep="-")
            group.id = f"eu-cor-group-{slug}"
            context.emit(group)
            member = context.make("Membership")
            member.make_id("Membership", person.id, group.id)
            member.add("member", person)
            member.add("organization", group)
            context.emit(member)
            continue
    # BUG FIX: the key was misspelled "Posal code", so the postal code was
    # silently dropped from every address. The rows above store it under
    # "Postal code".
    address = (
        address.get("Street"),
        address.get("City"),
        address.get("Postal code"),
        address.get("Country"),
    )
    address = ", ".join([a for a in address if a is not None])
    person.add("address", address)
    context.emit(person, target=True)
def crawl(context):
    """Crawl the CoR member listing and scrape each unique member page."""
    res = context.http.get(context.dataset.data.url)
    doc = html.fromstring(res.text)
    seen = set()
    for link in doc.findall('.//div[@class="people"]//li//a[@class="_fullname"]'):
        url = urljoin(res.url, link.get("href"))
        # Strip any query string. Indexing [0] is safe whether or not a
        # "?" is present; the previous `url, _ = url.split("?", 1)` raised
        # ValueError for hrefs without a query string.
        url = url.split("?", 1)[0]
        if url in seen:
            continue
        seen.add(url)
        crawl_person(context, link.text, url)
|
mit
|
computersalat/ansible
|
lib/ansible/plugins/callback/__init__.py
|
4
|
16578
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import difflib
import json
import os
import sys
import warnings
from copy import deepcopy
from ansible import constants as C
from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.module_utils.six import PY3
from ansible.module_utils._text import to_text
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins import AnsiblePlugin, get_plugin_class
from ansible.utils.color import stringc
from ansible.utils.display import Display
from ansible.vars.clean import strip_internal_keys, module_response_deepcopy
if PY3:
# OrderedDict is needed for a backwards compat shim on Python3.x only
# https://github.com/ansible/ansible/pull/49512
from collections import OrderedDict
else:
OrderedDict = None
# Shared fallback Display, used when a callback is constructed without one.
global_display = Display()
__all__ = ["CallbackBase"]
# Keys the 'debug' action is allowed to show alongside 'msg'.
_DEBUG_ALLOWED_KEYS = frozenset(('msg', 'exception', 'warnings', 'deprecations'))
class CallbackBase(AnsiblePlugin):

    '''
    This is a base ansible callback class that does nothing. New callbacks should
    use this class as a base and override any callback methods they wish to execute
    custom actions.
    '''

    def __init__(self, display=None, options=None):
        if display:
            self._display = display
        else:
            self._display = global_display
        if self._display.verbosity >= 4:
            name = getattr(self, 'CALLBACK_NAME', 'unnamed')
            ctype = getattr(self, 'CALLBACK_TYPE', 'old')
            version = getattr(self, 'CALLBACK_VERSION', '1.0')
            self._display.vvvv('Loading callback plugin %s of type %s, v%s from %s' % (name, ctype, version, sys.modules[self.__module__].__file__))
        self.disabled = False
        self._plugin_options = {}
        if options is not None:
            self.set_options(options)
        # Result keys hidden from 'debug' output when no 'msg' is present.
        self._hide_in_debug = ('changed', 'failed', 'skipped', 'invocation', 'skip_reason')

    ''' helper for callbacks, so they don't all have to include deepcopy '''
    _copy_result = deepcopy

    def set_option(self, k, v):
        self._plugin_options[k] = v

    def get_option(self, k):
        return self._plugin_options[k]

    def set_options(self, task_keys=None, var_options=None, direct=None):
        ''' This is different than the normal plugin method as callbacks get called early and really don't accept keywords.
            Also _options was already taken for CLI args and callbacks use _plugin_options instead.
        '''

        # load from config
        self._plugin_options = C.config.get_plugin_options(get_plugin_class(self), self._load_name, keys=task_keys, variables=var_options, direct=direct)

    def _run_is_verbose(self, result, verbosity=0):
        # True when the result should be shown verbosely: either display
        # verbosity exceeds the threshold or the module forced it, and the
        # module did not explicitly override verbosity off.
        return ((self._display.verbosity > verbosity or result._result.get('_ansible_verbose_always', False) is True)
                and result._result.get('_ansible_verbose_override', False) is False)

    def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
        # Serialize a (plain dict) result to JSON for display, stripping
        # internal keys and verbosity-gated sections first.
        if not indent and (result.get('_ansible_verbose_always') or self._display.verbosity > 2):
            indent = 4

        # All result keys stating with _ansible_ are internal, so remove them from the result before we output anything.
        abridged_result = strip_internal_keys(module_response_deepcopy(result))

        # remove invocation unless specifically wanting it
        if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
            del abridged_result['invocation']

        # remove diff information from screen output
        if self._display.verbosity < 3 and 'diff' in result:
            del abridged_result['diff']

        # remove exception from screen output
        if 'exception' in abridged_result:
            del abridged_result['exception']

        try:
            jsonified_results = json.dumps(abridged_result, cls=AnsibleJSONEncoder, indent=indent, ensure_ascii=False, sort_keys=sort_keys)
        except TypeError:
            # Python3 bug: throws an exception when keys are non-homogenous types:
            # https://bugs.python.org/issue25457
            # sort into an OrderedDict and then json.dumps() that instead
            if not OrderedDict:
                raise
            jsonified_results = json.dumps(OrderedDict(sorted(abridged_result.items(), key=to_text)),
                                           cls=AnsibleJSONEncoder, indent=indent,
                                           ensure_ascii=False, sort_keys=False)
        return jsonified_results

    def _handle_warnings(self, res):
        ''' display warnings, if enabled and any exist in the result '''
        if C.ACTION_WARNINGS:
            if 'warnings' in res and res['warnings']:
                for warning in res['warnings']:
                    self._display.warning(warning)
                del res['warnings']
            if 'deprecations' in res and res['deprecations']:
                for warning in res['deprecations']:
                    self._display.deprecated(**warning)
                del res['deprecations']

    def _handle_exception(self, result, use_stderr=False):
        # Show a task exception: abbreviated (last traceback line) at low
        # verbosity, full traceback at -vvv and above. Mutates `result`.
        if 'exception' in result:
            msg = "An exception occurred during task execution. "
            if self._display.verbosity < 3:
                # extract just the actual error message from the exception text
                error = result['exception'].strip().split('\n')[-1]
                msg += "To see the full traceback, use -vvv. The error was: %s" % error
            else:
                msg = "The full traceback is:\n" + result['exception']
                del result['exception']

            self._display.display(msg, color=C.COLOR_ERROR, stderr=use_stderr)

    def _serialize_diff(self, diff):
        return json.dumps(diff, sort_keys=True, indent=4, separators=(u',', u': ')) + u'\n'

    def _get_diff(self, difflist):
        # Render one or more diff dicts into a colorized unified-diff string.
        if not isinstance(difflist, list):
            difflist = [difflist]

        ret = []
        for diff in difflist:
            if 'dst_binary' in diff:
                ret.append(u"diff skipped: destination file appears to be binary\n")
            if 'src_binary' in diff:
                ret.append(u"diff skipped: source file appears to be binary\n")
            if 'dst_larger' in diff:
                ret.append(u"diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
            if 'src_larger' in diff:
                ret.append(u"diff skipped: source file size is greater than %d\n" % diff['src_larger'])
            if 'before' in diff and 'after' in diff:
                # format complex structures into 'files'
                for x in ['before', 'after']:
                    if isinstance(diff[x], MutableMapping):
                        diff[x] = self._serialize_diff(diff[x])
                    elif diff[x] is None:
                        diff[x] = ''
                if 'before_header' in diff:
                    before_header = u"before: %s" % diff['before_header']
                else:
                    before_header = u'before'
                if 'after_header' in diff:
                    after_header = u"after: %s" % diff['after_header']
                else:
                    after_header = u'after'
                before_lines = diff['before'].splitlines(True)
                after_lines = diff['after'].splitlines(True)
                if before_lines and not before_lines[-1].endswith(u'\n'):
                    before_lines[-1] += u'\n\\ No newline at end of file\n'
                if after_lines and not after_lines[-1].endswith('\n'):
                    after_lines[-1] += u'\n\\ No newline at end of file\n'
                differ = difflib.unified_diff(before_lines,
                                              after_lines,
                                              fromfile=before_header,
                                              tofile=after_header,
                                              fromfiledate=u'',
                                              tofiledate=u'',
                                              n=C.DIFF_CONTEXT)
                difflines = list(differ)
                if len(difflines) >= 3 and sys.version_info[:2] == (2, 6):
                    # difflib in Python 2.6 adds trailing spaces after
                    # filenames in the -- before/++ after headers.
                    difflines[0] = difflines[0].replace(u' \n', u'\n')
                    difflines[1] = difflines[1].replace(u' \n', u'\n')
                    # it also treats empty files differently
                    difflines[2] = difflines[2].replace(u'-1,0', u'-0,0').replace(u'+1,0', u'+0,0')
                has_diff = False
                for line in difflines:
                    has_diff = True
                    if line.startswith(u'+'):
                        line = stringc(line, C.COLOR_DIFF_ADD)
                    elif line.startswith(u'-'):
                        line = stringc(line, C.COLOR_DIFF_REMOVE)
                    elif line.startswith(u'@@'):
                        line = stringc(line, C.COLOR_DIFF_LINES)
                    ret.append(line)
                if has_diff:
                    ret.append('\n')
            if 'prepared' in diff:
                ret.append(diff['prepared'])
        return u''.join(ret)

    def _get_item_label(self, result):
        ''' retrieves the value to be displayed as a label for an item entry from a result object'''
        if result.get('_ansible_no_log', False):
            item = "(censored due to no_log)"
        else:
            item = result.get('_ansible_item_label', result.get('item'))
        return item

    def _process_items(self, result):
        # just remove them as now they get handled by individual callbacks
        del result._result['results']

    def _clean_results(self, result, task_name):
        ''' removes data from results for display '''

        # mostly controls that debug only outputs what it was meant to
        if task_name == 'debug':
            if 'msg' in result:
                # msg should be alone
                for key in list(result.keys()):
                    if key not in _DEBUG_ALLOWED_KEYS and not key.startswith('_'):
                        result.pop(key)
            else:
                # 'var' value as field, so eliminate others and what is left should be varname
                for hidme in self._hide_in_debug:
                    result.pop(hidme, None)

    def set_play_context(self, play_context):
        pass

    def on_any(self, *args, **kwargs):
        pass

    def runner_on_failed(self, host, res, ignore_errors=False):
        pass

    def runner_on_ok(self, host, res):
        pass

    def runner_on_skipped(self, host, item=None):
        pass

    def runner_on_unreachable(self, host, res):
        pass

    def runner_on_no_hosts(self):
        pass

    def runner_on_async_poll(self, host, res, jid, clock):
        pass

    def runner_on_async_ok(self, host, res, jid):
        pass

    def runner_on_async_failed(self, host, res, jid):
        pass

    def playbook_on_start(self):
        pass

    def playbook_on_notify(self, host, handler):
        pass

    def playbook_on_no_hosts_matched(self):
        pass

    def playbook_on_no_hosts_remaining(self):
        pass

    def playbook_on_task_start(self, name, is_conditional):
        pass

    def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
        pass

    def playbook_on_setup(self):
        pass

    def playbook_on_import_for_host(self, host, imported_file):
        pass

    def playbook_on_not_import_for_host(self, host, missing_file):
        pass

    def playbook_on_play_start(self, name):
        pass

    def playbook_on_stats(self, stats):
        pass

    def on_file_diff(self, host, diff):
        pass

    # V2 METHODS, by default they call v1 counterparts if possible
    def v2_on_any(self, *args, **kwargs):
        self.on_any(args, kwargs)

    def v2_runner_on_failed(self, result, ignore_errors=False):
        host = result._host.get_name()
        self.runner_on_failed(host, result._result, ignore_errors)

    def v2_runner_on_ok(self, result):
        host = result._host.get_name()
        self.runner_on_ok(host, result._result)

    def v2_runner_on_skipped(self, result):
        if C.DISPLAY_SKIPPED_HOSTS:
            host = result._host.get_name()
            self.runner_on_skipped(host, self._get_item_label(getattr(result._result, 'results', {})))

    def v2_runner_on_unreachable(self, result):
        host = result._host.get_name()
        self.runner_on_unreachable(host, result._result)

    # FIXME: not called
    def v2_runner_on_async_poll(self, result):
        host = result._host.get_name()
        jid = result._result.get('ansible_job_id')
        # FIXME, get real clock
        clock = 0
        self.runner_on_async_poll(host, result._result, jid, clock)

    # FIXME: not called
    def v2_runner_on_async_ok(self, result):
        host = result._host.get_name()
        jid = result._result.get('ansible_job_id')
        self.runner_on_async_ok(host, result._result, jid)

    # FIXME: not called
    def v2_runner_on_async_failed(self, result):
        host = result._host.get_name()
        jid = result._result.get('ansible_job_id')
        self.runner_on_async_failed(host, result._result, jid)

    def v2_playbook_on_start(self, playbook):
        self.playbook_on_start()

    def v2_playbook_on_notify(self, handler, host):
        self.playbook_on_notify(host, handler)

    def v2_playbook_on_no_hosts_matched(self):
        self.playbook_on_no_hosts_matched()

    def v2_playbook_on_no_hosts_remaining(self):
        self.playbook_on_no_hosts_remaining()

    def v2_playbook_on_task_start(self, task, is_conditional):
        self.playbook_on_task_start(task.name, is_conditional)

    # FIXME: not called
    def v2_playbook_on_cleanup_task_start(self, task):
        pass  # no v1 correspondence

    def v2_playbook_on_handler_task_start(self, task):
        pass  # no v1 correspondence

    def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
        self.playbook_on_vars_prompt(varname, private, prompt, encrypt, confirm, salt_size, salt, default, unsafe)

    # FIXME: not called
    def v2_playbook_on_import_for_host(self, result, imported_file):
        host = result._host.get_name()
        self.playbook_on_import_for_host(host, imported_file)

    # FIXME: not called
    def v2_playbook_on_not_import_for_host(self, result, missing_file):
        host = result._host.get_name()
        self.playbook_on_not_import_for_host(host, missing_file)

    def v2_playbook_on_play_start(self, play):
        self.playbook_on_play_start(play.name)

    def v2_playbook_on_stats(self, stats):
        self.playbook_on_stats(stats)

    def v2_on_file_diff(self, result):
        if 'diff' in result._result:
            host = result._host.get_name()
            self.on_file_diff(host, result._result['diff'])

    def v2_playbook_on_include(self, included_file):
        pass  # no v1 correspondence

    def v2_runner_item_on_ok(self, result):
        pass

    def v2_runner_item_on_failed(self, result):
        pass

    def v2_runner_item_on_skipped(self, result):
        pass

    def v2_runner_retry(self, result):
        pass

    def v2_runner_on_start(self, host, task):
        """Event used when host begins execution of a task

        .. versionadded:: 2.8
        """
        pass
|
gpl-3.0
|
fmcingvale/cherrypy_gae
|
test/test_httpauth.py
|
1
|
5799
|
from cherrypy.test import test
# Prefer the in-tree cherrypy package over any installed copy before the
# remaining imports run.
test.prefer_parent_path()

# Python 2 hash modules (this test suite predates hashlib).
import md5, sha

import cherrypy
from cherrypy.lib import httpauth
def setup_server():
    """Mount a test app with digest-auth and two basic-auth protected paths."""
    class Root:
        def index(self):
            return "This is public."
        index.exposed = True

    class DigestProtected:
        def index(self):
            return "Hello %s, you've been authorized." % cherrypy.request.login
        index.exposed = True

    class BasicProtected:
        def index(self):
            return "Hello %s, you've been authorized." % cherrypy.request.login
        index.exposed = True

    class BasicProtected2:
        def index(self):
            return "Hello %s, you've been authorized." % cherrypy.request.login
        index.exposed = True

    def fetch_users():
        # users dict for digest auth: plaintext passwords.
        return {'test': 'test'}

    def sha_password_encrypter(password):
        return sha.new(password).hexdigest()

    def fetch_password(username):
        # /basic2 stores SHA1-hashed passwords regardless of username.
        return sha.new('test').hexdigest()

    conf = {'/digest': {'tools.digest_auth.on': True,
                        'tools.digest_auth.realm': 'localhost',
                        'tools.digest_auth.users': fetch_users},
            '/basic': {'tools.basic_auth.on': True,
                       'tools.basic_auth.realm': 'localhost',
                       'tools.basic_auth.users': {'test': md5.new('test').hexdigest()}},
            '/basic2': {'tools.basic_auth.on': True,
                        'tools.basic_auth.realm': 'localhost',
                        'tools.basic_auth.users': fetch_password,
                        'tools.basic_auth.encrypt': sha_password_encrypter}}

    root = Root()
    root.digest = DigestProtected()
    root.basic = BasicProtected()
    root.basic2 = BasicProtected2()
    cherrypy.tree.mount(root, config=conf)
    cherrypy.config.update({'environment': 'test_suite'})
from cherrypy.test import helper
class HTTPAuthTest(helper.CPWebCase):
    """End-to-end tests for the basic_auth and digest_auth tools."""

    def testPublic(self):
        self.getPage("/")
        self.assertStatus('200 OK')
        self.assertHeader('Content-Type', 'text/html')
        self.assertBody('This is public.')

    def testBasic(self):
        # No credentials -> 401 with a Basic challenge.
        self.getPage("/basic/")
        self.assertStatus(401)
        self.assertHeader('WWW-Authenticate', 'Basic realm="localhost"')

        # Wrong password (base64 of "test:te\xf4") -> still 401.
        self.getPage('/basic/', [('Authorization', 'Basic dGVzdDp0ZX60')])
        self.assertStatus(401)

        # Correct credentials (base64 of "test:test") -> 200.
        self.getPage('/basic/', [('Authorization', 'Basic dGVzdDp0ZXN0')])
        self.assertStatus('200 OK')
        self.assertBody("Hello test, you've been authorized.")

    def testBasic2(self):
        # Same flow as testBasic, but against the SHA1-encrypted handler.
        self.getPage("/basic2/")
        self.assertStatus(401)
        self.assertHeader('WWW-Authenticate', 'Basic realm="localhost"')

        self.getPage('/basic2/', [('Authorization', 'Basic dGVzdDp0ZX60')])
        self.assertStatus(401)

        self.getPage('/basic2/', [('Authorization', 'Basic dGVzdDp0ZXN0')])
        self.assertStatus('200 OK')
        self.assertBody("Hello test, you've been authorized.")

    def testDigest(self):
        self.getPage("/digest/")
        self.assertStatus(401)

        # Find the Digest challenge among the response headers.
        value = None
        for k, v in self.headers:
            if k.lower() == "www-authenticate":
                if v.startswith("Digest"):
                    value = v
                    break

        if value is None:
            self._handlewebError("Digest authentification scheme was not found")

        # Strip the "Digest " prefix and parse the challenge parameters.
        value = value[7:]
        items = value.split(', ')
        tokens = {}
        for item in items:
            key, value = item.split('=')
            tokens[key.lower()] = value

        missing_msg = "%s is missing"
        bad_value_msg = "'%s' was expecting '%s' but found '%s'"
        nonce = None
        if 'realm' not in tokens:
            self._handlewebError(missing_msg % 'realm')
        elif tokens['realm'] != '"localhost"':
            self._handlewebError(bad_value_msg % ('realm', '"localhost"', tokens['realm']))
        if 'nonce' not in tokens:
            self._handlewebError(missing_msg % 'nonce')
        else:
            nonce = tokens['nonce'].strip('"')
        if 'algorithm' not in tokens:
            self._handlewebError(missing_msg % 'algorithm')
        elif tokens['algorithm'] != '"MD5"':
            self._handlewebError(bad_value_msg % ('algorithm', '"MD5"', tokens['algorithm']))
        if 'qop' not in tokens:
            self._handlewebError(missing_msg % 'qop')
        elif tokens['qop'] != '"auth"':
            self._handlewebError(bad_value_msg % ('qop', '"auth"', tokens['qop']))

        # Test a wrong 'realm' value
        base_auth = 'Digest username="test", realm="wrong realm", nonce="%s", uri="/digest/", algorithm=MD5, response="%s", qop=auth, nc=%s, cnonce="1522e61005789929"'

        auth = base_auth % (nonce, '', '00000001')
        params = httpauth.parseAuthorization(auth)
        response = httpauth._computeDigestResponse(params, 'test')

        auth = base_auth % (nonce, response, '00000001')
        self.getPage('/digest/', [('Authorization', auth)])
        self.assertStatus('401 Unauthorized')

        # Test that must pass
        base_auth = 'Digest username="test", realm="localhost", nonce="%s", uri="/digest/", algorithm=MD5, response="%s", qop=auth, nc=%s, cnonce="1522e61005789929"'

        auth = base_auth % (nonce, '', '00000001')
        params = httpauth.parseAuthorization(auth)
        response = httpauth._computeDigestResponse(params, 'test')

        auth = base_auth % (nonce, response, '00000001')
        self.getPage('/digest/', [('Authorization', auth)])
        self.assertStatus('200 OK')
        self.assertBody("Hello test, you've been authorized.")
# Allow running this test module directly.
if __name__ == "__main__":
    setup_server()
    helper.testmain()
|
bsd-3-clause
|
smallyear/linuxLearn
|
salt/salt/returners/sqlite3_return.py
|
1
|
7987
|
# -*- coding: utf-8 -*-
'''
Insert minion return data into a sqlite3 database
:maintainer: Mickey Malone <mickey.malone@gmail.com>
:maturity: New
:depends: None
:platform: All
Sqlite3 is a serverless database that lives in a single file.
In order to use this returner the database file must exist,
have the appropriate schema defined, and be accessible to the
user whom the minion process is running as. This returner
requires the following values configured in the master or
minion config:
.. code-block:: yaml
returner.sqlite3.database: /usr/lib/salt/salt.db
returner.sqlite3.timeout: 5.0
Alternative configuration values can be used by prefacing the configuration.
Any values not found in the alternative configuration will be pulled from
the default location:
.. code-block:: yaml
alternative.returner.sqlite3.database: /usr/lib/salt/salt.db
alternative.returner.sqlite3.timeout: 5.0
Use the commands to create the sqlite3 database and tables:
.. code-block:: sql
sqlite3 /usr/lib/salt/salt.db << EOF
--
-- Table structure for table 'jids'
--
CREATE TABLE jids (
jid TEXT PRIMARY KEY,
load TEXT NOT NULL
);
--
-- Table structure for table 'salt_returns'
--
CREATE TABLE salt_returns (
fun TEXT KEY,
jid TEXT KEY,
id TEXT KEY,
fun_args TEXT,
date TEXT NOT NULL,
full_ret TEXT NOT NULL,
success TEXT NOT NULL
);
EOF
To use the sqlite returner, append '--return sqlite3' to the salt command.
.. code-block:: bash
salt '*' test.ping --return sqlite3
To use the alternative configuration, append '--return_config alternative' to the salt command.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' test.ping --return sqlite3 --return_config alternative
'''
from __future__ import absolute_import
# Import python libs
import logging
import json
import datetime
# Import Salt libs
import salt.utils.jid
import salt.returners
# Better safe than sorry here. Even though sqlite3 is included in python
try:
import sqlite3
HAS_SQLITE3 = True
except ImportError:
HAS_SQLITE3 = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'sqlite3'
def __virtual__():
    '''
    Only expose this returner when the sqlite3 module imported successfully.
    '''
    return __virtualname__ if HAS_SQLITE3 else False
def _get_options(ret=None):
    '''
    Fetch the sqlite3 returner options (database path and timeout) from
    the salt master/minion configuration, honouring alternative profiles.
    '''
    wanted = {'database': 'database',
              'timeout': 'timeout'}
    return salt.returners.get_returner_options(__virtualname__,
                                               ret,
                                               wanted,
                                               __salt__=__salt__,
                                               __opts__=__opts__)
def _get_conn(ret=None):
    '''
    Open and return a sqlite3 connection using the configured database
    path and timeout.

    Raises an Exception if either config value is missing.
    '''
    # Possible todo: support detect_types, isolation_level, check_same_thread,
    # factory, cached_statements. Do we really need to though?
    opts = _get_options(ret)
    database = opts.get('database')
    timeout = opts.get('timeout')
    # Guard clauses: both settings are mandatory.
    if not database:
        raise Exception(
            'sqlite3 config option "returner.sqlite3.database" is missing')
    if not timeout:
        raise Exception(
            'sqlite3 config option "returner.sqlite3.timeout" is missing')
    log.debug('Connecting the sqlite3 database: {0} timeout: {1}'.format(
        database,
        timeout))
    return sqlite3.connect(database, timeout=float(timeout))
def _close_conn(conn):
    '''
    Commit any pending transaction and close the sqlite3 connection.
    '''
    log.debug('Closing the sqlite3 database connection')
    conn.commit()
    conn.close()
def returner(ret):
    '''
    Insert minion return data into the sqlite3 database
    '''
    log.debug('sqlite3 returner <returner> called with data: {0}'.format(ret))
    conn = _get_conn(ret)
    cur = conn.cursor()
    insert_sql = '''INSERT INTO salt_returns
                 (fun, jid, id, fun_args, date, full_ret, success)
                 VALUES (:fun, :jid, :id, :fun_args, :date, :full_ret, :success)'''
    # Bind by name; fun_args is stringified, the full return is serialized
    # to JSON, and the insertion timestamp is taken now.
    params = {'fun': ret['fun'],
              'jid': ret['jid'],
              'id': ret['id'],
              'fun_args': str(ret['fun_args']) if ret['fun_args'] else None,
              'date': str(datetime.datetime.now()),
              'full_ret': json.dumps(ret['return']),
              'success': ret['success']}
    cur.execute(insert_sql, params)
    _close_conn(conn)
def save_load(jid, load):
    '''
    Save the load to the specified jid
    '''
    log.debug('sqlite3 returner <save_load> called jid:{0} load:{1}'
              .format(jid, load))
    conn = _get_conn(ret=None)
    cur = conn.cursor()
    cur.execute('''INSERT INTO jids (jid, load) VALUES (:jid, :load)''',
                {'jid': jid, 'load': json.dumps(load)})
    _close_conn(conn)
def save_minions(jid, minions):  # pylint: disable=unused-argument
    '''
    Included for API consistency; this returner does not track minions
    per jid, so this is intentionally a no-op.
    '''
def get_load(jid):
    '''
    Return the load from a specified jid

    Returns the deserialized load dict, or ``{}`` when the jid is unknown.
    '''
    log.debug('sqlite3 returner <get_load> called jid: {0}'.format(jid))
    conn = _get_conn(ret=None)
    try:
        cur = conn.cursor()
        sql = '''SELECT load FROM jids WHERE jid = :jid'''
        cur.execute(sql,
                    {'jid': jid})
        data = cur.fetchone()
        if data:
            # fetchone() returns a row *tuple*; the serialized load is
            # column 0. (The original passed the whole tuple to json.loads.)
            return json.loads(data[0])
        return {}
    finally:
        # The original leaked the connection on the found path by returning
        # before _close_conn(); always close it.
        _close_conn(conn)
def get_jid(jid):
    '''
    Return the information returned from a specified jid
    '''
    log.debug('sqlite3 returner <get_jid> called jid: {0}'.format(jid))
    conn = _get_conn(ret=None)
    cur = conn.cursor()
    cur.execute('''SELECT id, full_ret FROM salt_returns WHERE jid = :jid''',
                {'jid': jid})
    row = cur.fetchone()
    log.debug('query result: {0}'.format(row))
    ret = {}
    # row is (id, full_ret); build {minion_id: {'return': <deserialized>}}.
    if row and len(row) > 1:
        ret = {str(row[0]): {u'return': json.loads(row[1])}}
    log.debug("ret: {0}".format(ret))
    _close_conn(conn)
    return ret
def get_fun(fun):
    '''
    Return a dict of the last function called for all minions

    Maps minion id to the deserialized full return of the most recent
    invocation of ``fun`` on that minion.
    '''
    log.debug('sqlite3 returner <get_fun> called fun: {0}'.format(fun))
    conn = _get_conn(ret=None)
    cur = conn.cursor()
    sql = '''SELECT s.id, s.full_ret, s.jid
             FROM salt_returns s
             JOIN ( SELECT MAX(jid) AS jid FROM salt_returns GROUP BY fun, id) max
             ON s.jid = max.jid
             WHERE s.fun = :fun
             '''
    cur.execute(sql,
                {'fun': fun})
    data = cur.fetchall()
    ret = {}
    # Each row is a 3-tuple (id, full_ret, jid). The original code popped
    # the last *row* off the list (dropping a result) and then unpacked
    # each 3-tuple into two names, which raises ValueError. The jid column
    # is only needed by the JOIN above and is ignored here.
    for minion, full_ret, _jid in data:
        ret[minion] = json.loads(full_ret)
    _close_conn(conn)
    return ret
def get_jids():
    '''
    Return a list of all job ids
    '''
    # Fixed log message: it previously said '<get_fun>'.
    log.debug('sqlite3 returner <get_jids> called')
    conn = _get_conn(ret=None)
    cur = conn.cursor()
    sql = '''SELECT jid FROM jids'''
    cur.execute(sql)
    data = cur.fetchall()
    _close_conn(conn)
    # Each row is a 1-tuple (jid,).
    return [row[0] for row in data]
def get_minions():
    '''
    Return a list of minions
    '''
    log.debug('sqlite3 returner <get_minions> called')
    conn = _get_conn(ret=None)
    cur = conn.cursor()
    cur.execute('''SELECT DISTINCT id FROM salt_returns''')
    rows = cur.fetchall()
    _close_conn(conn)
    # Each row is a 1-tuple (id,).
    return [row[0] for row in rows]
def prep_jid(nocache=False, passed_jid=None):  # pylint: disable=unused-argument
    '''
    Do any work necessary to prepare a JID, including sending a custom id
    '''
    # Honour a caller-supplied jid; otherwise generate a fresh one.
    if passed_jid is not None:
        return passed_jid
    return salt.utils.jid.gen_jid()
|
apache-2.0
|
elpaso/QGIS
|
python/plugins/processing/algs/grass7/ext/r_li_padsd_ascii.py
|
45
|
1440
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
r_li_padsd_ascii.py
-------------------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
from .r_li import checkMovingWindow, configFile, moveOutputTxtFile
def checkParameterValuesBeforeExecuting(alg, parameters, context):
    # Delegate validation to the shared r.li helper; the trailing True
    # presumably selects the ASCII-output variant of the moving-window
    # check — confirm against r_li.checkMovingWindow's signature.
    return checkMovingWindow(alg, parameters, context, True)
def processCommand(alg, parameters, context, feedback):
    # Write the r.li configuration file before the GRASS command runs; the
    # trailing True mirrors the flag used in the pre-execution check above.
    configFile(alg, parameters, context, feedback, True)
def processOutputs(alg, parameters, context, feedback):
    # Move the text file produced by r.li.padsd to the declared output
    # location once the algorithm has finished.
    moveOutputTxtFile(alg, parameters, context)
|
gpl-2.0
|
russellthackston/comp-chem-util
|
myriad/v1/bootstrap-myriad.py
|
2
|
2618
|
import argparse
import glob
import importlib
import libmyriad
import logging
import myriad
import os
import requests
import shutil
import subprocess
import sys
import time
class Bootstrap:
    """Fetches the latest Myriad sources from GitHub and runs one job cycle."""

    def __init__(self):
        self.server = 'https://raw.githubusercontent.com/russellthackston/comp-chem-util/master/myriad'
        self.version = 'stable'

    def downloadMyriad(self):
        """Re-download both Myriad modules and hot-reload them."""
        for module_file in ('libmyriad.py', 'myriad.py'):
            self.downloadMyriadFile(module_file)
        importlib.reload(libmyriad)
        importlib.reload(myriad)

    def downloadMyriadFile(self, filename):
        """Download a single source file from the configured server/version."""
        url = "/".join([self.server, self.version, filename])
        response = requests.get(url)
        with open(filename, 'w') as out:
            out.write(response.text)

    def run(self, jobGroup=None, jobCategory=None, server='https://raw.githubusercontent.com/russellthackston/comp-chem-util/master/myriad/', version='stable'):
        """Update Myriad, run a single job, and update Myriad again."""
        for label, value in (('Job group', jobGroup),
                             ('Job category', jobCategory),
                             ('Server', server),
                             ('Version', version)):
            logging.info(label + " = " + str(value))
        # Download a (potentially) updated copy of Myriad
        self.server = server
        self.version = version
        self.downloadMyriad()
        # run a myriad job
        runner = myriad.Myriad()
        outcome = runner.runOnce(jobGroup, jobCategory)
        if outcome == libmyriad.ResultCode.shutdown or os.path.isfile('shutdown.myriad'):
            logging.info('Shutting down myriad...')
            return
        if outcome == libmyriad.ResultCode.failure:
            logging.info('Job failed.')
        elif outcome == libmyriad.ResultCode.noaction:
            logging.info('No more jobs. Sleeping for 60 seconds...')
            # TO DO: Move this sleep to ec2-user-script-myriad.sh (Needs return codes)
            time.sleep(60)
        # Download a (potentially) updated copy of Myriad
        self.downloadMyriad()
# Command-line entry point for the bootstrap script.
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--group', dest='group', action='store', type=str, help='Optional group for filtering the requested job')
parser.add_argument('--subGroup', dest='subGroup', action='store', type=str, help='Optional sub group for filtering the requested job')
parser.add_argument('--server', dest='server', action='store', type=str, help='Optional server address for updating Myriad')
parser.add_argument('--version', dest='version', action='store', type=str, help='Optional version number of Myriad to update to or "stable"')
args = parser.parse_args()
# NOTE(review): assumes a 'logs/' directory already exists in the working
# directory — basicConfig will fail to open the file otherwise; confirm the
# deployment creates it.
logging.basicConfig(filename='logs/myriad.log',level=logging.INFO,format='%(asctime)s %(message)s')
logging.info("Bootstrapping Myriad...")
b = Bootstrap()
# The CLI's '--subGroup' is forwarded as Bootstrap.run()'s jobCategory.
b.run(jobGroup=args.group, jobCategory=args.subGroup, server=args.server, version=args.version)
|
mit
|
alsrgv/tensorflow
|
tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py
|
3
|
32746
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.deprecation import deprecated
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([(k,
out_el_shape(v, n_classes[k]
if n_classes is not None and
k in n_classes else None))
for k, v in list(y_shape.items())])
return input_shape, output_shape, batch_size
def _data_type_filter(x, y):
  """Extract plain arrays from dask/pandas containers when available."""
  if HAS_DASK:
    x = extract_dask_data(x)
    y = extract_dask_labels(y) if y is not None else None
  if HAS_PANDAS:
    x = extract_pandas_data(x)
    y = extract_pandas_labels(y) if y is not None else None
  return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_train_data_feeder(x,
                            y,
                            n_classes,
                            batch_size=None,
                            shuffle=True,
                            epochs=None):
  """Create data feeder, to sample inputs from dataset.

  If `x` and `y` are iterators, use `StreamingDataFeeder`.

  Args:
    x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
      supports iterables.
    y: numpy, pandas or Dask array or dictionary of aforementioned. Also
      supports iterables.
    n_classes: number of classes. Must be None or same type as y. In case, `y`
      is `dict` (or iterable which returns dict) such that
      `n_classes[key] = n_classes for y[key]`
    batch_size: size to split data into parts. Must be >= 1.
    shuffle: Whether to shuffle the inputs.
    epochs: Number of epochs to run.

  Returns:
    DataFeeder object that returns training data.

  Raises:
    ValueError: if one of `x` and `y` is iterable and the other is not.
  """
  x, y = _data_type_filter(x, y)

  # Default feeder; upgraded to DaskDataFeeder when both inputs are dask.
  feeder_cls = DataFeeder
  if HAS_DASK:
    # pylint: disable=g-import-not-at-top
    import dask.dataframe as dd
    x_is_dask = isinstance(x, (dd.Series, dd.DataFrame))
    y_is_dask = y is None or isinstance(y, (dd.Series, dd.DataFrame))
    if x_is_dask and y_is_dask:
      feeder_cls = DaskDataFeeder

  # Iterator inputs always go through the streaming feeder.
  if _is_iterable(x):
    if y is not None and not _is_iterable(y):
      raise ValueError('Both x and y should be iterators for '
                       'streaming learning to work.')
    return StreamingDataFeeder(x, y, n_classes, batch_size)

  return feeder_cls(
      x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
def _batch_data(x, batch_size=None):
  # Generator: groups elements of iterator `x` into np.matrix batches of
  # `batch_size` (or a dict of np.matrix when elements are dicts). With
  # batch_size None, everything is accumulated into one final batch.
  if (batch_size is not None) and (batch_size <= 0):
    raise ValueError('Invalid batch_size %d.' % batch_size)

  # Peek at the first element to learn its structure, then chain it back
  # so the loop below still sees it.
  x_first_el = six.next(x)
  x = itertools.chain([x_first_el], x)
  chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
      x_first_el, dict) else []
  chunk_filled = False
  for data in x:
    if isinstance(data, dict):
      for k, v in list(data.items()):
        chunk[k].append(v)
        if (batch_size is not None) and (len(chunk[k]) >= batch_size):
          chunk[k] = np.matrix(chunk[k])
          chunk_filled = True
      if chunk_filled:
        yield chunk
        chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
            x_first_el, dict) else []
        chunk_filled = False
    else:
      chunk.append(data)
      if (batch_size is not None) and (len(chunk) >= batch_size):
        yield np.matrix(chunk)
        chunk = []

  # Flush the trailing partial chunk. NOTE(review): `data` below is the
  # loop variable leaked from the for-loop above, so this assumes the
  # iterator was non-empty; a trailing chunk is yielded even when it is
  # empty (e.g. when the element count is an exact multiple of
  # batch_size) — confirm downstream consumers tolerate that.
  if isinstance(x_first_el, dict):
    for k, v in list(data.items()):
      chunk[k] = np.matrix(chunk[k])
    yield chunk
  else:
    yield np.matrix(chunk)
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_predict_data_feeder(x, batch_size=None):
  """Returns an iterable for feeding into predict step.

  Args:
    x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
      iterable.
    batch_size: Size of batches to split data into. If `None`, returns one
      batch of full size.

  Returns:
    List or iterator (or dictionary thereof) of parts of data to predict on.

  Raises:
    ValueError: if `batch_size` <= 0.
  """
  if HAS_DASK:
    x = extract_dask_data(x)
  if HAS_PANDAS:
    x = extract_pandas_data(x)
  # Iterator inputs are batched lazily.
  if _is_iterable(x):
    return _batch_data(x, batch_size)
  # Promote rank-1 arrays to column vectors.
  if len(x.shape) == 1:
    x = np.reshape(x, (-1, 1))
  if batch_size is None:
    return [x]
  if batch_size <= 0:
    raise ValueError('Invalid batch_size %d.' % batch_size)
  num_batches = int(math.ceil(float(len(x)) / batch_size))
  return [x[b * batch_size:(b + 1) * batch_size] for b in xrange(num_batches)]
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_processor_data_feeder(x):
  """Sets up processor iterable.

  Args:
    x: numpy, pandas or iterable.

  Returns:
    Iterable of data to process.
  """
  return extract_pandas_matrix(x) if HAS_PANDAS else x
@deprecated(None, 'Please convert numpy dtypes explicitly.')
def check_array(array, dtype):
  """Checks array on dtype and converts it if different.

  Args:
    array: Input array.
    dtype: Expected dtype.

  Returns:
    Original array or converted.
  """
  # skip check if array is instance of other classes, e.g. h5py.Dataset
  # to avoid copying array and loading whole data into memory
  if isinstance(array, (np.ndarray, list)):
    # np.asarray copies only when a conversion is required. This is the
    # NumPy 2.x-safe equivalent of np.array(..., copy=False), whose
    # "copy only if needed" semantics were removed in NumPy 2.0 (it now
    # raises when a copy would be required).
    array = np.asarray(array, dtype=dtype, order=None)
  return array
def _access(data, iloc):
  """Accesses an element from collection, using integer location based indexing.

  Args:
    data: array-like. The collection to access
    iloc: `int` or `list` of `int`s. Location(s) to access in `collection`

  Returns:
    The element of `a` found at location(s) `iloc`.
  """
  if HAS_PANDAS:
    import pandas as pd  # pylint: disable=g-import-not-at-top
    # pandas containers need positional .iloc indexing.
    if isinstance(data, (pd.Series, pd.DataFrame)):
      return data.iloc[iloc]
  return data[iloc]
def _check_dtype(dtype):
  """Warns on float64 input and returns `dtype` unchanged."""
  if dtypes.as_dtype(dtype) == dtypes.float64:
    logging.warn(
        'float64 is not supported by many models, consider casting to float32.')
  return dtype
class DataFeeder(object):
  """Data feeder is an example class to sample data for TF trainer.

  THIS CLASS IS DEPRECATED. See
  [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
  for general migration instructions.
  """

  @deprecated(None, 'Please use tensorflow/transform or tf.data.')
  def __init__(self,
               x,
               y,
               n_classes,
               batch_size=None,
               shuffle=True,
               random_state=None,
               epochs=None):
    """Initializes a DataFeeder instance.

    Args:
      x: One feature sample which can either Nd numpy matrix of shape
        `[n_samples, n_features, ...]` or dictionary of Nd numpy matrix.
      y: label vector, either floats for regression or class id for
        classification. If matrix, will consider as a sequence of labels.
        Can be `None` for unsupervised setting. Also supports dictionary of
        labels.
      n_classes: Number of classes, 0 and 1 are considered regression, `None`
        will pass through the input labels without one-hot conversion. Also, if
        `y` is `dict`, then `n_classes` must be `dict` such that
        `n_classes[key] = n_classes for label y[key]`, `None` otherwise.
      batch_size: Mini-batch size to accumulate samples in one mini batch.
      shuffle: Whether to shuffle `x`.
      random_state: Numpy `RandomState` object to reproduce sampling.
      epochs: Number of times to iterate over input data before raising
        `StopIteration` exception.

    Attributes:
      x: Input features (ndarray or dictionary of ndarrays).
      y: Input label (ndarray or dictionary of ndarrays).
      n_classes: Number of classes (if `None`, pass through indices without
        one-hot conversion).
      batch_size: Mini-batch size to accumulate.
      input_shape: Shape of the input (or dictionary of shapes).
      output_shape: Shape of the output (or dictionary of shapes).
      input_dtype: DType of input (or dictionary of shapes).
      output_dtype: DType of output (or dictionary of shapes.
    """
    x_is_dict, y_is_dict = isinstance(
        x, dict), y is not None and isinstance(y, dict)
    if isinstance(y, list):
      y = np.array(y)

    self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
                   ]) if x_is_dict else check_array(x, x.dtype)
    self._y = None if y is None else (dict(
        [(k, check_array(v, v.dtype)) for k, v in list(y.items())])
                                      if y_is_dict else check_array(y, y.dtype))

    # self.n_classes is not None means we're converting raw target indices
    # to one-hot.
    if n_classes is not None:
      if not y_is_dict:
        y_dtype = (
            np.int64 if n_classes is not None and n_classes > 1 else np.float32)
        self._y = (None if y is None else check_array(y, dtype=y_dtype))

    self.n_classes = n_classes
    self.max_epochs = epochs

    x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
                   ]) if x_is_dict else self._x.shape
    y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
                   ]) if y_is_dict else None if y is None else self._y.shape

    self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
        x_shape, y_shape, n_classes, batch_size)

    # Input dtype matches dtype of x.
    self._input_dtype = (
        dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())])
        if x_is_dict else _check_dtype(self._x.dtype))

    # self._output_dtype == np.float32 when y is None
    self._output_dtype = (
        dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())])
        if y_is_dict else (_check_dtype(self._y.dtype)
                           if y is not None else np.float32))

    # self.n_classes is None means we're passing in raw target indices:
    # keys slated for one-hot conversion get float32 outputs.
    if n_classes is not None and y_is_dict:
      for key in list(n_classes.keys()):
        if key in self._output_dtype:
          self._output_dtype[key] = np.float32

    self._shuffle = shuffle
    # Fixed seed 42 by default so sampling is reproducible across runs.
    self.random_state = np.random.RandomState(
        42) if random_state is None else random_state

    if x_is_dict:
      num_samples = list(self._x.values())[0].shape[0]
    elif tensor_util.is_tensor(self._x):
      num_samples = self._x.shape[
          0].value  # shape will be a Dimension, extract an int
    else:
      num_samples = self._x.shape[0]

    # `indices` is the (possibly shuffled) sampling order; `offset` walks
    # through it batch by batch and `epoch` counts full passes.
    if self._shuffle:
      self.indices = self.random_state.permutation(num_samples)
    else:
      self.indices = np.array(range(num_samples))
    self.offset = 0
    self.epoch = 0
    self._epoch_placeholder = None

  @property
  def x(self):
    # Input features (ndarray or dict of ndarrays).
    return self._x

  @property
  def y(self):
    # Input labels (ndarray, dict of ndarrays, or None).
    return self._y

  @property
  def shuffle(self):
    return self._shuffle

  @property
  def input_dtype(self):
    return self._input_dtype

  @property
  def output_dtype(self):
    return self._output_dtype

  @property
  def batch_size(self):
    return self._batch_size

  def make_epoch_variable(self):
    """Adds a placeholder variable for the epoch to the graph.

    Returns:
      The epoch placeholder.
    """
    self._epoch_placeholder = array_ops.placeholder(
        dtypes.int32, [1], name='epoch')
    return self._epoch_placeholder

  def input_builder(self):
    """Builds inputs in the graph.

    Returns:
      Two placeholders for inputs and outputs.
    """

    def get_placeholder(shape, dtype, name_prepend):
      # Batch dimension is left as None so any batch size can be fed.
      if shape is None:
        return None
      if isinstance(shape, dict):
        placeholder = {}
        for key in list(shape.keys()):
          placeholder[key] = array_ops.placeholder(
              dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
              name=name_prepend + '_' + key)
      else:
        placeholder = array_ops.placeholder(
            dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
      return placeholder

    self._input_placeholder = get_placeholder(self.input_shape,
                                              self._input_dtype, 'input')
    self._output_placeholder = get_placeholder(self.output_shape,
                                               self._output_dtype, 'output')
    return self._input_placeholder, self._output_placeholder

  def set_placeholders(self, input_placeholder, output_placeholder):
    """Sets placeholders for this data feeder.

    Args:
      input_placeholder: Placeholder for `x` variable. Should match shape
        of the examples in the x dataset.
      output_placeholder: Placeholder for `y` variable. Should match
        shape of the examples in the y dataset. Can be `None`.
    """
    self._input_placeholder = input_placeholder
    self._output_placeholder = output_placeholder

  def get_feed_params(self):
    """Function returns a `dict` with data feed params while training.

    Returns:
      A `dict` with data feed params while training.
    """
    return {
        'epoch': self.epoch,
        'offset': self.offset,
        'batch_size': self._batch_size
    }

  def get_feed_dict_fn(self):
    """Returns a function that samples data into given placeholders.

    Returns:
      A function that when called samples a random subset of batch size
      from `x` and `y`.
    """
    x_is_dict, y_is_dict = isinstance(
        self._x, dict), self._y is not None and isinstance(self._y, dict)

    # Assign input features from random indices.
    def extract(data, indices):
      # Rank-1 data is reshaped to a column vector per batch.
      return (np.array(_access(data, indices)).reshape((indices.shape[0], 1))
              if len(data.shape) == 1 else _access(data, indices))

    # assign labels from random indices
    def assign_label(data, shape, dtype, n_classes, indices):
      shape[0] = indices.shape[0]
      out = np.zeros(shape, dtype=dtype)
      for i in xrange(out.shape[0]):
        sample = indices[i]
        # self.n_classes is None means we're passing in raw target indices
        if n_classes is None:
          out[i] = _access(data, sample)
        else:
          if n_classes > 1:
            # One-hot encode: 2-D shape means scalar class ids, otherwise
            # a sequence of class ids per sample.
            if len(shape) == 2:
              out.itemset((i, int(_access(data, sample))), 1.0)
            else:
              for idx, value in enumerate(_access(data, sample)):
                out.itemset(tuple([i, idx, value]), 1.0)
          else:
            out[i] = _access(data, sample)
      return out

    def _feed_dict_fn():
      """Function that samples data into given placeholders."""
      if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
        raise StopIteration
      assert self._input_placeholder is not None
      feed_dict = {}
      if self._epoch_placeholder is not None:
        feed_dict[self._epoch_placeholder.name] = [self.epoch]

      # Take next batch of indices.
      x_len = list(
          self._x.values())[0].shape[0] if x_is_dict else self._x.shape[0]
      end = min(x_len, self.offset + self._batch_size)
      batch_indices = self.indices[self.offset:end]

      # adding input placeholder
      feed_dict.update(
          dict([(self._input_placeholder[k].name, extract(v, batch_indices))
                for k, v in list(self._x.items())]) if x_is_dict else {
                    self._input_placeholder.name:
                        extract(self._x, batch_indices)
                })

      # move offset and reset it if necessary: reshuffle (or re-sequence)
      # indices and bump the epoch counter at the end of each pass.
      self.offset += self._batch_size
      if self.offset >= x_len:
        self.indices = self.random_state.permutation(
            x_len) if self._shuffle else np.array(range(x_len))
        self.offset = 0
        self.epoch += 1

      # return early if there are no labels
      if self._output_placeholder is None:
        return feed_dict

      # adding output placeholders
      if y_is_dict:
        for k, v in list(self._y.items()):
          n_classes = (self.n_classes[k] if k in self.n_classes else
                       None) if self.n_classes is not None else None
          shape, dtype = self.output_shape[k], self._output_dtype[k]
          feed_dict.update({
              self._output_placeholder[k].name:
                  assign_label(v, shape, dtype, n_classes, batch_indices)
          })
      else:
        shape, dtype, n_classes = (self.output_shape, self._output_dtype,
                                   self.n_classes)
        feed_dict.update({
            self._output_placeholder.name:
                assign_label(self._y, shape, dtype, n_classes, batch_indices)
        })

      return feed_dict

    return _feed_dict_fn
class StreamingDataFeeder(DataFeeder):
  """Data feeder for TF trainer that reads data from iterator.

  THIS CLASS IS DEPRECATED. See
  [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
  for general migration instructions.

  Streaming data feeder allows to read data as it comes it from disk or
  somewhere else. It's custom to have this iterators rotate infinetly over
  the dataset, to allow control of how much to learn on the trainer side.
  """

  def __init__(self, x, y, n_classes, batch_size):
    """Initializes a StreamingDataFeeder instance.

    Args:
      x: iterator each element of which returns one feature sample. Sample can
        be a Nd numpy matrix or dictionary of Nd numpy matrices.
      y: iterator each element of which returns one label sample. Sample can be
        a Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many
        classes regression values.
      n_classes: indicator of how many classes the corresponding label sample
        has for the purposes of one-hot conversion of label. In case where `y`
        is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
        of how many classes there are in each label in `y`. If key is
        present in `y` and missing in `n_classes`, the value is assumed `None`
        and no one-hot conversion will be applied to the label with that key.
      batch_size: Mini batch size to accumulate samples in one batch. If set
        `None`, then assumes that iterator to return already batched element.

    Attributes:
      x: input features (or dictionary of input features).
      y: input label (or dictionary of output features).
      n_classes: number of classes.
      batch_size: mini batch size to accumulate.
      input_shape: shape of the input (can be dictionary depending on `x`).
      output_shape: shape of the output (can be dictionary depending on `y`).
      input_dtype: dtype of input (can be dictionary depending on `x`).
      output_dtype: dtype of output (can be dictionary depending on `y`).
    """
    # pylint: disable=invalid-name,super-init-not-called
    # Peek at the first element of each iterator to learn shapes/dtypes,
    # then chain it back so it is not lost.
    x_first_el = six.next(x)
    self._x = itertools.chain([x_first_el], x)
    if y is not None:
      y_first_el = six.next(y)
      self._y = itertools.chain([y_first_el], y)
    else:
      y_first_el = None
      self._y = None
    self.n_classes = n_classes

    x_is_dict = isinstance(x_first_el, dict)
    y_is_dict = y is not None and isinstance(y_first_el, dict)
    if y_is_dict and n_classes is not None:
      assert isinstance(n_classes, dict)

    # extract shapes for first_elements: prepend a singleton sample dim.
    if x_is_dict:
      x_first_el_shape = dict(
          [(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
    else:
      x_first_el_shape = [1] + list(x_first_el.shape)

    if y_is_dict:
      y_first_el_shape = dict(
          [(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
    elif y is None:
      y_first_el_shape = None
    else:
      y_first_el_shape = (
          [1] + list(y_first_el[0].shape
                     if isinstance(y_first_el, list) else y_first_el.shape))

    self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
        x_first_el_shape, y_first_el_shape, n_classes, batch_size)

    # Input dtype of x_first_el.
    if x_is_dict:
      self._input_dtype = dict(
          [(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
    else:
      self._input_dtype = _check_dtype(x_first_el.dtype)

    # Output dtype of y_first_el.
    def check_y_dtype(el):
      # Recurse into lists; scalars are mapped through their numpy dtype.
      if isinstance(el, np.ndarray):
        return el.dtype
      elif isinstance(el, list):
        return check_y_dtype(el[0])
      else:
        return _check_dtype(np.dtype(type(el)))

    # Output types are floats, due to both softmaxes and regression req.
    if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
      self._output_dtype = np.float32
    elif y_is_dict:
      self._output_dtype = dict(
          [(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
    elif y is None:
      self._output_dtype = None
    else:
      self._output_dtype = check_y_dtype(y_first_el)

  def get_feed_params(self):
    """Function returns a `dict` with data feed params while training.

    Returns:
      A `dict` with data feed params while training.
    """
    return {'batch_size': self._batch_size}

  def get_feed_dict_fn(self):
    """Returns a function, that will sample data and provide it to placeholders.

    Returns:
      A function that when called samples a random subset of batch size
      from x and y.
    """
    self.stopped = False

    def _feed_dict_fn():
      """Samples data and provides it to placeholders.

      Returns:
        `dict` of input and output tensors.
      """

      def init_array(shape, dtype):
        """Initialize array of given shape or dict of shapes and dtype."""
        if shape is None:
          return None
        elif isinstance(shape, dict):
          return dict(
              [(k, np.zeros(shape[k], dtype[k])) for k in list(shape.keys())])
        else:
          return np.zeros(shape, dtype=dtype)

      def put_data_array(dest, index, source=None, n_classes=None):
        """Puts data array into container."""
        # source=None means the iterator was exhausted: truncate the
        # batch to the rows filled so far.
        if source is None:
          dest = dest[:index]
        elif n_classes is not None and n_classes > 1:
          # One-hot encode class ids into the output array.
          if len(self.output_shape) == 2:
            dest.itemset((index, source), 1.0)
          else:
            for idx, value in enumerate(source):
              dest.itemset(tuple([index, idx, value]), 1.0)
        else:
          if len(dest.shape) > 1:
            dest[index, :] = source
          else:
            dest[index] = source[0] if isinstance(source, list) else source
        return dest

      def put_data_array_or_dict(holder, index, data=None, n_classes=None):
        """Puts data array or data dictionary into container."""
        if holder is None:
          return None
        if isinstance(holder, dict):
          if data is None:
            data = {k: None for k in holder.keys()}
          assert isinstance(data, dict)
          for k in holder.keys():
            num_classes = n_classes[k] if (n_classes is not None and
                                           k in n_classes) else None
            holder[k] = put_data_array(holder[k], index, data[k], num_classes)
        else:
          holder = put_data_array(holder, index, data, n_classes)
        return holder

      if self.stopped:
        raise StopIteration

      inp = init_array(self.input_shape, self._input_dtype)
      out = init_array(self.output_shape, self._output_dtype)

      for i in xrange(self._batch_size):
        # Add handling when queue ends.
        try:
          next_inp = six.next(self._x)
          inp = put_data_array_or_dict(inp, i, next_inp, None)
        except StopIteration:
          # Source exhausted: flag the feeder stopped, re-raise if this
          # batch is empty, otherwise emit the truncated partial batch.
          self.stopped = True
          if i == 0:
            raise
          inp = put_data_array_or_dict(inp, i, None, None)
          out = put_data_array_or_dict(out, i, None, None)
          break

        if self._y is not None:
          next_out = six.next(self._y)
          out = put_data_array_or_dict(out, i, next_out, self.n_classes)

      # creating feed_dict
      if isinstance(inp, dict):
        feed_dict = dict([(self._input_placeholder[k].name, inp[k])
                          for k in list(self._input_placeholder.keys())])
      else:
        feed_dict = {self._input_placeholder.name: inp}
      if self._y is not None:
        if isinstance(out, dict):
          feed_dict.update(
              dict([(self._output_placeholder[k].name, out[k])
                    for k in list(self._output_placeholder.keys())]))
        else:
          feed_dict.update({self._output_placeholder.name: out})

      return feed_dict

    return _feed_dict_fn
class DaskDataFeeder(object):
  """Data feeder for that reads data from dask.Series and dask.DataFrame.

  THIS CLASS IS DEPRECATED. See
  [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
  for general migration instructions.

  Numpy arrays can be serialized to disk and it's possible to do random seeks
  into them. DaskDataFeeder will remove requirement to have full dataset in the
  memory and still do random seeks for sampling of batches.
  """

  @deprecated(None, 'Please feed input to tf.data to support dask.')
  def __init__(self,
               x,
               y,
               n_classes,
               batch_size,
               shuffle=True,
               random_state=None,
               epochs=None):
    """Initializes a DaskDataFeeder instance.

    Args:
      x: iterator that returns for each element, returns features.
      y: iterator that returns for each element, returns 1 or many classes /
        regression values.
      n_classes: indicator of how many classes the label has.
      batch_size: Mini batch size to accumulate.
      shuffle: Whether to shuffle the inputs.
      random_state: random state for RNG. Note that it will mutate so use a
        int value for this if you want consistent sized batches.
      epochs: Number of epochs to run.

    Attributes:
      x: input features.
      y: input label.
      n_classes: number of classes.
      batch_size: mini batch size to accumulate.
      input_shape: shape of the input.
      output_shape: shape of the output.
      input_dtype: dtype of input.
      output_dtype: dtype of output.

    Raises:
      ValueError: if `x` or `y` are `dict`, as they are not supported currently.
    """
    if isinstance(x, dict) or isinstance(y, dict):
      raise ValueError(
          'DaskDataFeeder does not support dictionaries at the moment.')

    # pylint: disable=invalid-name,super-init-not-called
    import dask.dataframe as dd  # pylint: disable=g-import-not-at-top

    # TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
    self._x = x
    self._y = y
    # save column names
    self._x_columns = list(x.columns)
    if isinstance(y.columns[0], str):
      self._y_columns = list(y.columns)
    else:
      # deal with cases where two DFs have overlapped default numeric colnames
      self._y_columns = len(self._x_columns) + 1
      self._y = self._y.rename(columns={y.columns[0]: self._y_columns})

    # TODO(terrytangyuan): deal with unsupervised cases
    # combine into a data frame
    self.df = dd.multi.concat([self._x, self._y], axis=1)
    self.n_classes = n_classes

    # Materialize the row count once; used for shapes and sample fraction.
    x_count = x.count().compute()[0]
    x_shape = (x_count, len(self._x.columns))
    y_shape = (x_count, len(self._y.columns))
    # TODO(terrytangyuan): Add support for shuffle and epochs.
    self._shuffle = shuffle
    self.epochs = epochs
    self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
        x_shape, y_shape, n_classes, batch_size)
    # Fraction of the combined frame that one mini-batch represents; fed to
    # dask's random_split() in get_feed_dict_fn.
    self.sample_fraction = self._batch_size / float(x_count)
    self._input_dtype = _check_dtype(self._x.dtypes[0])
    self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
    if random_state is None:
      # Fixed default seed so batches are reproducible unless overridden.
      self.random_state = 66
    else:
      self.random_state = random_state

  def get_feed_params(self):
    """Function returns a `dict` with data feed params while training.

    Returns:
      A `dict` with data feed params while training.
    """
    return {'batch_size': self._batch_size}

  def get_feed_dict_fn(self, input_placeholder, output_placeholder):
    """Returns a function, that will sample data and provide it to placeholders.

    Args:
      input_placeholder: tf.compat.v1.placeholder for input features mini batch.
      output_placeholder: tf.compat.v1.placeholder for output labels.

    Returns:
      A function that when called samples a random subset of batch size
      from x and y.
    """

    def _feed_dict_fn():
      """Samples data and provides it to placeholders."""
      # TODO(ipolosukhin): option for with/without replacement (dev version of
      # dask)
      sample = self.df.random_split(
          [self.sample_fraction, 1 - self.sample_fraction],
          random_state=self.random_state)
      inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
      out = extract_pandas_matrix(sample[0][self._y_columns].compute())
      # convert to correct dtype
      inp = np.array(inp, dtype=self._input_dtype)
      # one-hot encode out for each class for cross entropy loss
      if HAS_PANDAS:
        import pandas as pd  # pylint: disable=g-import-not-at-top
        if not isinstance(out, pd.Series):
          out = out.flatten()
      # NOTE(review): assumes labels are non-negative ints bounded by the
      # column max — verify for regression targets.
      out_max = self._y.max().compute().values[0]
      encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
      encoded_out[np.arange(out.size), out] = 1
      return {input_placeholder.name: inp, output_placeholder.name: encoded_out}

    return _feed_dict_fn
|
apache-2.0
|
nthall/pip
|
pip/_vendor/progress/helpers.py
|
521
|
2854
|
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import print_function
HIDE_CURSOR = '\x1b[?25l'
SHOW_CURSOR = '\x1b[?25h'
class WriteMixin(object):
    """Mixin that renders progress in place on a TTY by backspacing over
    the previously written text."""

    hide_cursor = False

    def __init__(self, message=None, **kwargs):
        super(WriteMixin, self).__init__(**kwargs)
        self._width = 0
        if message:
            self.message = message

        if not self.file.isatty():
            return
        if self.hide_cursor:
            print(HIDE_CURSOR, end='', file=self.file)
        print(self.message, end='', file=self.file)
        self.file.flush()

    def write(self, s):
        """Overwrite the previously written text with *s* (TTY only)."""
        if self.file.isatty():
            backspaces = '\b' * self._width
            padded = s.ljust(self._width)
            print(backspaces + padded, end='', file=self.file)
            self._width = max(self._width, len(s))
            self.file.flush()

    def finish(self):
        """Restore the cursor if it was hidden."""
        if self.file.isatty() and self.hide_cursor:
            print(SHOW_CURSOR, end='', file=self.file)
class WritelnMixin(object):
    """Mixin that rewrites an entire output line at a time, erasing the
    previous line with a carriage return plus ANSI erase-line sequence."""

    hide_cursor = False

    def __init__(self, message=None, **kwargs):
        super(WritelnMixin, self).__init__(**kwargs)
        if message:
            self.message = message

        if self.file.isatty() and self.hide_cursor:
            print(HIDE_CURSOR, end='', file=self.file)

    def clearln(self):
        """Erase the current line (CR + ANSI EL) on a TTY."""
        if self.file.isatty():
            print('\r\x1b[K', end='', file=self.file)

    def writeln(self, line):
        """Replace the current output line with *line*."""
        if not self.file.isatty():
            return
        self.clearln()
        print(line, end='', file=self.file)
        self.file.flush()

    def finish(self):
        """Advance to the next line and restore the cursor if hidden."""
        if not self.file.isatty():
            return
        print(file=self.file)
        if self.hide_cursor:
            print(SHOW_CURSOR, end='', file=self.file)
from signal import signal, SIGINT
from sys import exit
class SigIntMixin(object):
    """Registers a signal handler that calls finish on SIGINT"""

    def __init__(self, *args, **kwargs):
        super(SigIntMixin, self).__init__(*args, **kwargs)
        # Install the handler at construction time so an interrupted progress
        # bar still gets a chance to restore the terminal (cursor/line state).
        # NOTE: this replaces any previously installed SIGINT handler
        # process-wide.
        signal(SIGINT, self._sigint_handler)

    def _sigint_handler(self, signum, frame):
        # Let the concrete bar clean up its output, then exit with status 0.
        self.finish()
        exit(0)
|
mit
|
thedep2/CouchPotatoServer
|
libs/rsa/randnum.py
|
194
|
2414
|
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Functions for generating random numbers.'''
# Source inspired by code by Yesudeep Mangalapilly <yesudeep@gmail.com>
import os
from rsa import common, transform
from rsa._compat import byte
def read_random_bits(nbits):
    '''Reads 'nbits' random bits.

    If nbits isn't a whole number of bytes, an extra byte will be appended with
    only the lower bits set.
    '''

    whole_bytes, leftover_bits = divmod(nbits, 8)

    # Bulk of the randomness: whole bytes straight from the OS CSPRNG.
    randomdata = os.urandom(whole_bytes)

    # A partial byte is prepended, keeping only `leftover_bits` low bits.
    if leftover_bits > 0:
        partial = ord(os.urandom(1)) >> (8 - leftover_bits)
        randomdata = byte(partial) + randomdata

    return randomdata
def read_random_int(nbits):
    '''Reads a random integer of approximately nbits bits.
    '''

    value = transform.bytes2int(read_random_bits(nbits))

    # Force the most significant bit on so the result genuinely spans the
    # requested number of bits.
    return value | (1 << (nbits - 1))
def randint(maxvalue):
    '''Returns a random integer x with 1 <= x <= maxvalue

    May take a very long time in specific situations. If maxvalue needs N bits
    to store, the closer maxvalue is to (2 ** N) - 1, the faster this function
    is.
    '''

    bit_size = common.bit_size(maxvalue)

    misses = 0
    while True:
        candidate = read_random_int(bit_size)
        if candidate <= maxvalue:
            return candidate
        if misses and misses % 10 == 0:
            # After a lot of tries to get the right number of bits but still
            # smaller than maxvalue, decrease the number of bits by 1. That'll
            # dramatically increase the chances to get a large enough number.
            bit_size -= 1
        misses += 1
|
gpl-3.0
|
dippatel1994/oppia
|
scripts/common.py
|
30
|
1406
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions and classes used by multiple Python scripts."""
import os
def ensure_directory_exists(d):
    """Creates the given directory (and parents) if it does not already exist.

    Args:
        d: str. Path of the directory to create.

    The original check-then-create was racy: another process could create the
    directory between os.path.exists() and os.makedirs(), turning a benign
    situation into an OSError. Attempt the creation unconditionally and only
    re-raise when the path still is not a directory afterwards.
    """
    try:
        os.makedirs(d)
    except OSError:
        # Swallow only "already exists"; re-raise genuine failures
        # (permissions, a file occupying the path, etc.).
        if not os.path.isdir(d):
            raise
def require_cwd_to_be_oppia():
    """Ensures that the current working directory ends in 'oppia'."""
    cwd = os.getcwd()
    if cwd.endswith('oppia'):
        return
    raise Exception('Please run this script from the oppia/ directory.')
class CD(object):
    """Context manager for changing the current working directory.

    Restores the previous working directory on exit, even if the body of
    the ``with`` block raised.
    """

    def __init__(self, new_path):
        # Target directory; not validated until __enter__ chdirs into it.
        self.new_path = new_path

    def __enter__(self):
        self.saved_path = os.getcwd()
        os.chdir(self.new_path)

    def __exit__(self, etype, value, traceback):
        # Unconditional restore; exceptions propagate (nothing is returned).
        os.chdir(self.saved_path)
|
apache-2.0
|
ThiagoGarciaAlves/intellij-community
|
python/helpers/py2only/docutils/parsers/rst/languages/ca.py
|
128
|
4467
|
# $Id: ca.py 7119 2011-09-02 13:00:23Z milde $
# Author: Ivan Vilata i Balaguer <ivan@selidor.net>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Catalan-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
# Translation table: Catalan directive name -> canonical English directive
# name. Entries marked "(translation required)" still await a Catalan term.
directives = {
    # language-dependent: fixed
    u'atenci\u00F3': 'attention',
    u'compte': 'caution',
    u'code (translation required)': 'code',
    u'perill': 'danger',
    u'error': 'error',
    u'suggeriment': 'hint',
    u'important': 'important',
    u'nota': 'note',
    u'consell': 'tip',
    u'av\u00EDs': 'warning',
    u'advertiment': 'admonition',
    u'nota-al-marge': 'sidebar',
    u'nota-marge': 'sidebar',
    u'tema': 'topic',
    u'bloc-de-l\u00EDnies': 'line-block',
    u'bloc-l\u00EDnies': 'line-block',
    u'literal-analitzat': 'parsed-literal',
    u'r\u00FAbrica': 'rubric',
    u'ep\u00EDgraf': 'epigraph',
    u'sumari': 'highlights',
    u'cita-destacada': 'pull-quote',
    u'compost': 'compound',
    u'container (translation required)': 'container',
    #'questions': 'questions',
    u'taula': 'table',
    u'taula-csv': 'csv-table',
    u'taula-llista': 'list-table',
    #'qa': 'questions',
    #'faq': 'questions',
    u'math (translation required)': 'math',
    u'meta': 'meta',
    #'imagemap': 'imagemap',
    u'imatge': 'image',
    u'figura': 'figure',
    u'inclou': 'include',
    u'incloure': 'include',
    u'cru': 'raw',
    u'reempla\u00E7a': 'replace',
    u'reempla\u00E7ar': 'replace',
    u'unicode': 'unicode',
    u'data': 'date',
    u'classe': 'class',
    u'rol': 'role',
    u'default-role (translation required)': 'default-role',
    u'title (translation required)': 'title',
    u'contingut': 'contents',
    u'numsec': 'sectnum',
    u'numeraci\u00F3-de-seccions': 'sectnum',
    u'numeraci\u00F3-seccions': 'sectnum',
    u'cap\u00E7alera': 'header',
    u'peu-de-p\u00E0gina': 'footer',
    u'peu-p\u00E0gina': 'footer',
    #'footnotes': 'footnotes',
    #'citations': 'citations',
    u'notes-amb-destinacions': 'target-notes',
    u'notes-destinacions': 'target-notes',
    u'directiva-de-prova-de-restructuredtext': 'restructuredtext-test-directive'}
"""Catalan name to registered (in directives/__init__.py) directive name
mapping."""
# Translation table: Catalan role name -> canonical English role name.
# Several short aliases (e.g. 'ab', 'ac', 't', 'i') map to the same role.
roles = {
    # language-dependent: fixed
    u'abreviatura': 'abbreviation',
    u'abreviaci\u00F3': 'abbreviation',
    u'abrev': 'abbreviation',
    u'ab': 'abbreviation',
    u'acr\u00F2nim': 'acronym',
    u'ac': 'acronym',
    u'code (translation required)': 'code',
    u'\u00EDndex': 'index',
    u'i': 'index',
    u'sub\u00EDndex': 'subscript',
    u'sub': 'subscript',
    u'super\u00EDndex': 'superscript',
    u'sup': 'superscript',
    u'refer\u00E8ncia-a-t\u00EDtol': 'title-reference',
    u'refer\u00E8ncia-t\u00EDtol': 'title-reference',
    u't\u00EDtol': 'title-reference',
    u't': 'title-reference',
    u'refer\u00E8ncia-a-pep': 'pep-reference',
    u'refer\u00E8ncia-pep': 'pep-reference',
    u'pep': 'pep-reference',
    u'refer\u00E8ncia-a-rfc': 'rfc-reference',
    u'refer\u00E8ncia-rfc': 'rfc-reference',
    u'rfc': 'rfc-reference',
    u'\u00E8mfasi': 'emphasis',
    u'destacat': 'strong',
    u'literal': 'literal',
    u'math (translation required)': 'math',
    u'refer\u00E8ncia-amb-nom': 'named-reference',
    u'refer\u00E8ncia-nom': 'named-reference',
    u'refer\u00E8ncia-an\u00F2nima': 'anonymous-reference',
    u'refer\u00E8ncia-a-nota-al-peu': 'footnote-reference',
    u'refer\u00E8ncia-nota-al-peu': 'footnote-reference',
    u'refer\u00E8ncia-a-cita': 'citation-reference',
    u'refer\u00E8ncia-cita': 'citation-reference',
    u'refer\u00E8ncia-a-substituci\u00F3': 'substitution-reference',
    u'refer\u00E8ncia-substituci\u00F3': 'substitution-reference',
    u'destinaci\u00F3': 'target',
    u'refer\u00E8ncia-a-uri': 'uri-reference',
    u'refer\u00E8ncia-uri': 'uri-reference',
    u'uri': 'uri-reference',
    u'url': 'uri-reference',
    u'cru': 'raw',}
"""Mapping of Catalan role names to canonical role names for interpreted text.
"""
|
apache-2.0
|
tsiegleauq/OpenSlides
|
server/openslides/utils/models.py
|
6
|
7291
|
import time
from typing import Any, Dict, List, Optional
from django.db import models
from . import logging
from .autoupdate import AutoupdateElement, inform_changed_data, inform_elements
from .rest_api import model_serializer_classes
from .utils import convert_camel_case_to_pseudo_snake_case, get_element_id
logger = logging.getLogger(__name__)
class MinMaxIntegerField(models.IntegerField):
    """
    IntegerField with options to set a min- and a max-value.
    """

    def __init__(
        self, min_value: int = None, max_value: int = None, *args: Any, **kwargs: Any
    ) -> None:
        # Bounds are stored on the field and enforced at the form layer only.
        self.min_value = min_value
        self.max_value = max_value
        super(MinMaxIntegerField, self).__init__(*args, **kwargs)

    def formfield(self, **kwargs: Any) -> Any:
        # Caller-supplied kwargs take precedence over the stored bounds.
        options = {"min_value": self.min_value, "max_value": self.max_value}
        options.update(kwargs)
        return super(MinMaxIntegerField, self).formfield(**options)
class RESTModelMixin:
    """
    Mixin for Django models which are used in our REST API.
    """

    def get_root_rest_element(self) -> models.Model:
        """
        Returns the root rest instance.

        Uses self as default.
        """
        return self

    @classmethod
    def get_collection_string(cls) -> str:
        """
        Returns the string representing the name of the collection. Returns
        None if this is not a so called root rest instance.
        """
        # TODO Check if this is a root rest element class and return None if not.
        app_label = cls._meta.app_label  # type: ignore
        object_name = cls._meta.object_name  # type: ignore
        return "/".join(
            (
                convert_camel_case_to_pseudo_snake_case(app_label),
                convert_camel_case_to_pseudo_snake_case(object_name),
            )
        )

    def get_rest_pk(self) -> int:
        """
        Returns the primary key used in the REST API. By default this is
        the database pk.
        """
        return self.pk  # type: ignore

    def get_element_id(self) -> str:
        """Returns the element id: collection string plus rest pk."""
        return get_element_id(self.get_collection_string(), self.get_rest_pk())

    def save(
        self,
        skip_autoupdate: bool = False,
        disable_history: bool = False,
        *args: Any,
        **kwargs: Any,
    ) -> Any:
        """
        Calls Django's save() method and afterwards hits the autoupdate system.

        If skip_autoupdate is set to True, then the autoupdate system is not
        informed about the model changed. This also means, that the model cache
        is not updated. You have to do this manually by calling
        inform_changed_data().
        """
        # We don't know how to fix this circular import
        from .autoupdate import inform_changed_data

        return_value = super().save(*args, **kwargs)  # type: ignore
        if not skip_autoupdate:
            # Notify about the root element: saving an embedded element counts
            # as a change of its root.
            inform_changed_data(
                self.get_root_rest_element(),
                disable_history=disable_history,
            )
        return return_value

    def delete(self, skip_autoupdate: bool = False, *args: Any, **kwargs: Any) -> Any:
        """
        Calls Django's delete() method and afterwards hits the autoupdate system.

        If skip_autoupdate is set to True, then the autoupdate system is not
        informed about the model changed. This also means, that the model cache
        is not updated. You have to do this manually by calling
        inform_deleted_data().
        """
        # We don't know how to fix this circular import
        from .autoupdate import inform_changed_data, inform_deleted_data

        # Capture the pk before Django clears it during deletion.
        instance_pk = self.pk  # type: ignore
        return_value = super().delete(*args, **kwargs)  # type: ignore
        if not skip_autoupdate:
            if self != self.get_root_rest_element():
                # The deletion of a included element is a change of the root element.
                inform_changed_data(self.get_root_rest_element())
            else:
                inform_deleted_data([(self.get_collection_string(), instance_pk)])
        return return_value

    @classmethod
    def get_elements(cls, ids: Optional[List[int]] = None) -> List[Dict[str, Any]]:
        """
        Returns all elements as full_data.
        """
        # Only log progress for full-collection loads, not targeted lookups.
        do_logging = not bool(ids)

        if do_logging:
            logger.info(f"Loading {cls.get_collection_string()}")

        # Get the query to receive all data from the database.
        try:
            query = cls.objects.get_prefetched_queryset(ids=ids)  # type: ignore
        except AttributeError:
            # If the model does not have the method get_prefetched_queryset(), then use
            # the default queryset from django.
            query = cls.objects  # type: ignore
            if ids:
                query = query.filter(pk__in=ids)

        # Build a dict from the instance id to the full_data
        instances = query.all()
        full_data = []

        # For logging the progress
        last_time = time.time()
        instances_length = len(instances)  # this evaluates the query
        for i, instance in enumerate(instances):
            # Append full data from this instance
            full_data.append(instance.get_full_data())
            if do_logging:
                # log progress every 5 seconds
                current_time = time.time()
                if current_time > last_time + 5:
                    last_time = current_time
                    logger.info(f"  {i+1}/{instances_length}...")
        return full_data

    def get_full_data(self) -> Dict[str, Any]:
        """
        Returns the full_data of the instance.
        """
        try:
            serializer_class = model_serializer_classes[type(self)]
        except KeyError:
            # Because of the order of imports, it can happen, that the serializer
            # for a model is not imported yet. Try to guess the name of the
            # module and import it.
            module_name = type(self).__module__.rsplit(".", 1)[0] + ".serializers"
            __import__(module_name)
            serializer_class = model_serializer_classes[type(self)]
        return serializer_class(self).data
def SET_NULL_AND_AUTOUPDATE(
    collector: Any, field: Any, sub_objs: Any, using: Any
) -> None:
    """
    Like models.SET_NULL, but additionally informs the autoupdate system
    about every instance whose reference was nulled out.
    """
    changed = []
    for referencing_obj in sub_objs:
        setattr(referencing_obj, field.name, None)
        changed.append(referencing_obj)
    inform_changed_data(changed)
    # Delegate the actual on_delete handling to Django.
    models.SET_NULL(collector, field, sub_objs, using)
def CASCADE_AND_AUTOUPDATE(
    collector: Any, field: Any, sub_objs: Any, using: Any
) -> None:
    """
    Like models.CASCADE, but additionally informs the autoupdate system
    about the root rest element of each cascade-deleted instance.
    """
    elements = [
        AutoupdateElement(
            collection_string=root.get_collection_string(),
            id=root.get_rest_pk(),
        )
        for root in (obj.get_root_rest_element() for obj in sub_objs)
    ]
    inform_elements(elements)
    # Delegate the actual cascading delete to Django.
    models.CASCADE(collector, field, sub_objs, using)
|
mit
|
turbokongen/home-assistant
|
homeassistant/components/zestimate/sensor.py
|
19
|
4377
|
"""Support for zestimate data from zillow.com."""
from datetime import timedelta
import logging
import requests
import voluptuous as vol
import xmltodict
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_API_KEY, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)

# Zillow "GetZestimate" web service endpoint queried by the sensor.
_RESOURCE = "http://www.zillow.com/webservice/GetZestimate.htm"
ATTRIBUTION = "Data provided by Zillow.com"
CONF_ZPID = "zpid"
DEFAULT_NAME = "Zestimate"
NAME = "zestimate"
# Key of the top-level element in the parsed XML response
# ("Zestimate:zestimate"); used by ZestimateDataSensor.update().
ZESTIMATE = f"{DEFAULT_NAME}:{NAME}"
ICON = "mdi:home-variant"

# Attribute names exposed via device_state_attributes.
ATTR_AMOUNT = "amount"
ATTR_CHANGE = "amount_change_30_days"
ATTR_CURRENCY = "amount_currency"
ATTR_LAST_UPDATED = "amount_last_updated"
ATTR_VAL_HI = "valuation_range_high"
ATTR_VAL_LOW = "valuation_range_low"

# Config: an API key plus one or more Zillow property ids (zpids).
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_API_KEY): cv.string,
        vol.Required(CONF_ZPID): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    }
)

SCAN_INTERVAL = timedelta(minutes=30)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Zestimate sensor."""
    name = config.get(CONF_NAME)
    api_key = config[CONF_API_KEY]
    # One sensor entity per configured Zillow property id.
    sensors = [
        ZestimateDataSensor(name, {"zws-id": api_key, "zpid": zpid})
        for zpid in config[CONF_ZPID]
    ]
    add_entities(sensors, True)
class ZestimateDataSensor(Entity):
    """Implementation of a Zestimate sensor."""

    def __init__(self, name, params):
        """Initialize the sensor.

        Args:
            name: Base entity name; the street address is appended once known.
            params: Query parameters for the Zillow API, containing "zws-id"
                (API key) and "zpid" (property id).
        """
        self._name = name
        self.params = params
        self.data = None
        self.address = None
        self._state = None

    @property
    def unique_id(self):
        """Return the ZPID."""
        return self.params["zpid"]

    @property
    def name(self):
        """Return the name of the sensor."""
        return f"{self._name} {self.address}"

    @property
    def state(self):
        """Return the state of the sensor."""
        try:
            return round(float(self._state), 1)
        except (TypeError, ValueError):
            # TypeError: no data fetched yet (_state is None) — the original
            # only caught ValueError and crashed on float(None).
            # ValueError: the API returned a non-numeric amount.
            return None

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        attributes = {}
        if self.data is not None:
            # Copy so repeated attribute reads don't mutate the cached data.
            attributes = dict(self.data)
        attributes["address"] = self.address
        attributes[ATTR_ATTRIBUTION] = ATTRIBUTION
        return attributes

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return ICON

    def update(self):
        """Get the latest data and update the states."""
        try:
            response = requests.get(_RESOURCE, params=self.params, timeout=5)
            data = response.content.decode("utf-8")
            data_dict = xmltodict.parse(data).get(ZESTIMATE)
            error_code = int(data_dict["message"]["code"])
            if error_code != 0:
                _LOGGER.error("The API returned: %s", data_dict["message"]["text"])
                return
        except requests.exceptions.ConnectionError:
            _LOGGER.error("Unable to retrieve data from %s", _RESOURCE)
            return
        data = data_dict["response"][NAME]
        details = {}
        if "amount" in data and data["amount"] is not None:
            details[ATTR_AMOUNT] = data["amount"]["#text"]
            details[ATTR_CURRENCY] = data["amount"]["@currency"]
        if "last-updated" in data and data["last-updated"] is not None:
            details[ATTR_LAST_UPDATED] = data["last-updated"]
        if "valueChange" in data and data["valueChange"] is not None:
            details[ATTR_CHANGE] = int(data["valueChange"]["#text"])
        if "valuationRange" in data and data["valuationRange"] is not None:
            details[ATTR_VAL_HI] = int(data["valuationRange"]["high"]["#text"])
            details[ATTR_VAL_LOW] = int(data["valuationRange"]["low"]["#text"])
        self.address = data_dict["response"]["address"]["street"]
        self.data = details
        # Bug fix: the original tested `self.data is not None`, which is
        # always true here (details is always a dict), so an empty response
        # raised KeyError on self.data[ATTR_AMOUNT] and the error branch was
        # unreachable. Test truthiness and use .get() instead; also fixes the
        # "parase" typo in the log message.
        if self.data:
            self._state = self.data.get(ATTR_AMOUNT)
        else:
            self._state = None
            _LOGGER.error("Unable to parse Zestimate data from response")
|
apache-2.0
|
tmpgit/intellij-community
|
python/helpers/docutils/parsers/rst/roles.py
|
54
|
13187
|
# $Id: roles.py 6121 2009-09-10 12:05:04Z milde $
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Copyright: This module has been placed in the public domain.
"""
This module defines standard interpreted text role functions, a registry for
interpreted text roles, and an API for adding to and retrieving from the
registry.
The interface for interpreted role functions is as follows::
def role_fn(name, rawtext, text, lineno, inliner,
options={}, content=[]):
code...
# Set function attributes for customization:
role_fn.options = ...
role_fn.content = ...
Parameters:
- ``name`` is the local name of the interpreted text role, the role name
actually used in the document.
- ``rawtext`` is a string containing the entire interpreted text construct.
Return it as a ``problematic`` node linked to a system message if there is a
problem.
- ``text`` is the interpreted text content, with backslash escapes converted
to nulls (``\x00``).
- ``lineno`` is the line number where the interpreted text beings.
- ``inliner`` is the Inliner object that called the role function.
It defines the following useful attributes: ``reporter``,
``problematic``, ``memo``, ``parent``, ``document``.
- ``options``: A dictionary of directive options for customization, to be
interpreted by the role function. Used for additional attributes for the
generated elements and other functionality.
- ``content``: A list of strings, the directive content for customization
("role" directive). To be interpreted by the role function.
Function attributes for customization, interpreted by the "role" directive:
- ``options``: A dictionary, mapping known option names to conversion
functions such as `int` or `float`. ``None`` or an empty dict implies no
options to parse. Several directive option conversion functions are defined
in the `directives` module.
All role functions implicitly support the "class" option, unless disabled
with an explicit ``{'class': None}``.
- ``content``: A boolean; true if content is allowed. Client code must handle
the case where content is required but not supplied (an empty content list
will be supplied).
Note that unlike directives, the "arguments" function attribute is not
supported for role customization. Directive arguments are handled by the
"role" directive itself.
Interpreted role functions return a tuple of two values:
- A list of nodes which will be inserted into the document tree at the
point where the interpreted role was encountered (can be an empty
list).
- A list of system messages, which will be inserted into the document tree
immediately after the end of the current inline block (can also be empty).
"""
__docformat__ = 'reStructuredText'
from docutils import nodes, utils
from docutils.parsers.rst import directives
from docutils.parsers.rst.languages import en as _fallback_language_module
DEFAULT_INTERPRETED_ROLE = 'title-reference'
"""
The canonical name of the default interpreted role.  This role is used
when no role is specified for a piece of interpreted text.
"""

_role_registry = {}
"""Mapping of canonical role names to role functions.  Language-dependent role
names are defined in the ``language`` subpackage."""

_roles = {}
"""Mapping of local or language-dependent interpreted text role names to role
functions."""

def role(role_name, language_module, lineno, reporter):
    """
    Locate and return a role function from its language-dependent name, along
    with a list of system messages.  If the role is not found in the current
    language, check English.  Return a 2-tuple: role function (``None`` if the
    named role cannot be found) and a list of system messages.
    """
    normname = role_name.lower()
    messages = []
    msg_text = []

    if normname in _roles:
        return _roles[normname], messages

    if role_name:
        canonicalname = None
        try:
            canonicalname = language_module.roles[normname]
        # Bug fix: the original used the Python-2-only form
        # ``except AttributeError, error:``, a SyntaxError on Python 3.
        # ``as`` is valid on Python 2.6+ and 3.x alike.
        except AttributeError as error:
            msg_text.append('Problem retrieving role entry from language '
                            'module %r: %s.' % (language_module, error))
        except KeyError:
            msg_text.append('No role entry for "%s" in module "%s".'
                            % (role_name, language_module.__name__))
    else:
        canonicalname = DEFAULT_INTERPRETED_ROLE

    # If we didn't find it, try English as a fallback.
    if not canonicalname:
        try:
            canonicalname = _fallback_language_module.roles[normname]
            msg_text.append('Using English fallback for role "%s".'
                            % role_name)
        except KeyError:
            msg_text.append('Trying "%s" as canonical role name.'
                            % role_name)
            # The canonical name should be an English name, but just in case:
            canonicalname = normname

    # Collect any messages that we generated.
    if msg_text:
        message = reporter.info('\n'.join(msg_text), line=lineno)
        messages.append(message)

    # Look the role up in the registry, and return it.
    if canonicalname in _role_registry:
        role_fn = _role_registry[canonicalname]
        register_local_role(normname, role_fn)
        return role_fn, messages
    else:
        return None, messages  # Error message will be generated by caller.
def register_canonical_role(name, role_fn):
    """
    Register an interpreted text role under its canonical (English) name.

    :Parameters:
      - `name`: The canonical name of the interpreted role.
      - `role_fn`: The role function.  See the module docstring.
    """
    # Give the function its implicit "class" option before exposing it.
    set_implicit_options(role_fn)
    _role_registry[name] = role_fn
def register_local_role(name, role_fn):
    """
    Register an interpreted text role under a local or language-dependent name.

    :Parameters:
      - `name`: The local or language-dependent name of the interpreted role.
      - `role_fn`: The role function.  See the module docstring.
    """
    # Give the function its implicit "class" option before exposing it.
    set_implicit_options(role_fn)
    _roles[name] = role_fn
def set_implicit_options(role_fn):
    """
    Add customization options to role functions, unless explicitly set or
    disabled.
    """
    # getattr covers both "attribute missing" and "attribute is None".
    if getattr(role_fn, 'options', None) is None:
        role_fn.options = {'class': directives.class_option}
    else:
        role_fn.options.setdefault('class', directives.class_option)
def register_generic_role(canonical_name, node_class):
    """For roles which simply wrap a given `node_class` around the text."""
    register_canonical_role(canonical_name,
                            GenericRole(canonical_name, node_class))
class GenericRole:
    """
    Generic interpreted text role, where the interpreted text is simply
    wrapped with the provided node class.
    """

    def __init__(self, role_name, node_class):
        # Remember what node class the text will be wrapped in.
        self.name = role_name
        self.node_class = node_class

    def __call__(self, role, rawtext, text, lineno, inliner,
                 options={}, content=[]):
        set_classes(options)
        node = self.node_class(rawtext, utils.unescape(text), **options)
        return [node], []
class CustomRole:
    """
    Wrapper for custom interpreted text roles.
    """

    def __init__(self, role_name, base_role, options={}, content=[]):
        self.name = role_name
        self.base_role = base_role
        # Inherit the base role's customization attributes when present
        # (None when the base role does not define them).
        self.options = getattr(base_role, 'options', None)
        self.content = getattr(base_role, 'content', None)
        self.supplied_options = options
        self.supplied_content = content

    def __call__(self, role, rawtext, text, lineno, inliner,
                 options={}, content=[]):
        # Call-time options override the options supplied at creation.
        merged_options = self.supplied_options.copy()
        merged_options.update(options)
        merged_content = list(self.supplied_content)
        if merged_content and content:
            # Quirk preserved from the original: ``+= '\n'`` extends the list
            # with the string's characters, i.e. appends one '\n' element.
            merged_content += '\n'
        merged_content.extend(content)
        return self.base_role(role, rawtext, text, lineno, inliner,
                              options=merged_options, content=merged_content)
def generic_custom_role(role, rawtext, text, lineno, inliner,
                        options={}, content=[]):
    """Base for custom roles if no other base role is specified."""
    # Once nested inline markup is implemented, this and other methods should
    # recursively call inliner.nested_parse().
    set_classes(options)
    return [nodes.inline(rawtext, utils.unescape(text), **options)], []
generic_custom_role.options = {'class': directives.class_option}
######################################################################
# Define and register the standard roles:
######################################################################
# Each registration maps a canonical role name to a GenericRole that
# wraps the interpreted text in the corresponding docutils node class
# (e.g. ``:emphasis:`text``` produces a ``nodes.emphasis`` node).
register_generic_role('abbreviation', nodes.abbreviation)
register_generic_role('acronym', nodes.acronym)
register_generic_role('emphasis', nodes.emphasis)
register_generic_role('literal', nodes.literal)
register_generic_role('strong', nodes.strong)
register_generic_role('subscript', nodes.subscript)
register_generic_role('superscript', nodes.superscript)
register_generic_role('title-reference', nodes.title_reference)
def pep_reference_role(role, rawtext, text, lineno, inliner,
                       options={}, content=[]):
    """Role for references to PEP documents, e.g. ``:pep:`8```."""
    # The PEP number must be an integer between 0 and 9999 inclusive.
    try:
        pepnum = int(text)
        valid = 0 <= pepnum <= 9999
    except ValueError:
        valid = False
    if not valid:
        msg = inliner.reporter.error(
            'PEP number must be a number from 0 to 9999; "%s" is invalid.'
            % text, line=lineno)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]
    # Base URL mainly used by inliner.pep_reference; so this is correct:
    ref = (inliner.document.settings.pep_base_url
           + inliner.document.settings.pep_file_url_template % pepnum)
    set_classes(options)
    node = nodes.reference(rawtext, 'PEP ' + utils.unescape(text),
                           refuri=ref, **options)
    return [node], []
register_canonical_role('pep-reference', pep_reference_role)
def rfc_reference_role(role, rawtext, text, lineno, inliner,
                       options={}, content=[]):
    """Role for references to RFC documents, e.g. ``:rfc:`2822```."""
    # The RFC number must be a positive integer.
    try:
        rfcnum = int(text)
    except ValueError:
        rfcnum = 0
    if rfcnum <= 0:
        msg = inliner.reporter.error(
            'RFC number must be a number greater than or equal to 1; '
            '"%s" is invalid.' % text, line=lineno)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]
    # Base URL mainly used by inliner.rfc_reference, so this is correct:
    ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum
    set_classes(options)
    node = nodes.reference(rawtext, 'RFC ' + utils.unescape(text),
                           refuri=ref, **options)
    return [node], []
register_canonical_role('rfc-reference', rfc_reference_role)
def raw_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    """Role for inline raw output, passed through for a specific writer."""
    # Raw roles can be disabled globally via document settings.
    if not inliner.document.settings.raw_enabled:
        msg = inliner.reporter.warning('raw (and derived) roles disabled')
        return [inliner.problematic(rawtext, rawtext, msg)], [msg]
    # A derived role must have bound a "format" option; the bare "raw"
    # role has none and therefore cannot be used directly.
    if 'format' not in options:
        msg = inliner.reporter.error(
            'No format (Writer name) is associated with this role: "%s".\n'
            'The "raw" role cannot be used directly.\n'
            'Instead, use the "role" directive to create a new role with '
            'an associated format.' % role, line=lineno)
        return [inliner.problematic(rawtext, rawtext, msg)], [msg]
    set_classes(options)
    return [nodes.raw(rawtext, utils.unescape(text, 1), **options)], []
raw_role.options = {'format': directives.unchanged}
register_canonical_role('raw', raw_role)
######################################################################
# Register roles that are currently unimplemented.
######################################################################
def unimplemented_role(role, rawtext, text, lineno, inliner, attributes={}):
    """Stub for roles that are recognized but not yet implemented."""
    report = inliner.reporter.error(
        'Interpreted text role "%s" not implemented.' % role, line=lineno)
    problem = inliner.problematic(rawtext, rawtext, report)
    return [problem], [report]
register_canonical_role('index', unimplemented_role)
register_canonical_role('named-reference', unimplemented_role)
register_canonical_role('anonymous-reference', unimplemented_role)
register_canonical_role('uri-reference', unimplemented_role)
register_canonical_role('footnote-reference', unimplemented_role)
register_canonical_role('citation-reference', unimplemented_role)
register_canonical_role('substitution-reference', unimplemented_role)
register_canonical_role('target', unimplemented_role)
# This should remain unimplemented, for testing purposes:
register_canonical_role('restructuredtext-unimplemented-role',
                        unimplemented_role)
def set_classes(options):
    """
    Auxiliary function: move options['class'] to options['classes'],
    deleting the 'class' key in the process.
    """
    if 'class' in options:
        # Both keys present at once would indicate a caller bug.
        assert 'classes' not in options
        options['classes'] = options.pop('class')
|
apache-2.0
|
nycholas/ask-undrgz
|
src/ask-undrgz/django/contrib/messages/storage/cookie.py
|
65
|
5862
|
import hmac
from django.conf import settings
from django.contrib.messages import constants
from django.contrib.messages.storage.base import BaseStorage, Message
from django.http import CompatCookie
from django.utils import simplejson as json
from django.utils.hashcompat import sha_hmac
class MessageEncoder(json.JSONEncoder):
    """
    Compactly serializes instances of the ``Message`` class as JSON.
    """
    # Marker placed first in the serialized list so the decoder can tell
    # a Message apart from an ordinary JSON list.
    message_key = '__json_message'

    def default(self, obj):
        if not isinstance(obj, Message):
            # Not a Message: defer to the stock encoder (which raises).
            return super(MessageEncoder, self).default(obj)
        encoded = [self.message_key, obj.level, obj.message]
        if obj.extra_tags:
            encoded.append(obj.extra_tags)
        return encoded
class MessageDecoder(json.JSONDecoder):
    """
    Decodes JSON that includes serialized ``Message`` instances.
    """

    def process_messages(self, obj):
        """
        Recursively walk a decoded JSON structure, rebuilding ``Message``
        instances from lists tagged with ``MessageEncoder.message_key``.
        """
        if isinstance(obj, list) and obj:
            if obj[0] == MessageEncoder.message_key:
                return Message(*obj[1:])
            return [self.process_messages(item) for item in obj]
        if isinstance(obj, dict):
            # Use items() rather than iteritems() so this works under both
            # Python 2 (where it returns a list) and Python 3.
            return dict([(key, self.process_messages(value))
                         for key, value in obj.items()])
        return obj

    def decode(self, s, **kwargs):
        decoded = super(MessageDecoder, self).decode(s, **kwargs)
        return self.process_messages(decoded)
class CookieStorage(BaseStorage):
    """
    Stores messages in a cookie.

    The cookie payload is a JSON-encoded message list prefixed with an
    HMAC/SHA1 signature (``"<hash>$<json>"``) because the data round-trips
    through the (untrusted) client.
    """
    cookie_name = 'messages'
    # We should be able to store 4K in a cookie, but Internet Explorer
    # imposes 4K as the *total* limit for a domain. To allow other
    # cookies, we go for 3/4 of 4K.
    max_cookie_size = 3072
    # Sentinel appended to the stored list when not every message fit.
    not_finished = '__messagesnotfinished__'

    def _get(self, *args, **kwargs):
        """
        Retrieves a list of messages from the messages cookie. If the
        not_finished sentinel value is found at the end of the message list,
        remove it and return a result indicating that not all messages were
        retrieved by this storage.

        Returns a ``(messages, all_retrieved)`` tuple.
        """
        data = self.request.COOKIES.get(self.cookie_name)
        messages = self._decode(data)
        all_retrieved = not (messages and messages[-1] == self.not_finished)
        if messages and not all_retrieved:
            # Remove the sentinel value; it is bookkeeping, not a message.
            messages.pop()
        return messages, all_retrieved

    def _update_cookie(self, encoded_data, response):
        """
        Either sets the cookie with the encoded data if there is any data to
        store, or deletes the cookie.
        """
        if encoded_data:
            response.set_cookie(self.cookie_name, encoded_data)
        else:
            response.delete_cookie(self.cookie_name)

    def _store(self, messages, response, remove_oldest=True, *args, **kwargs):
        """
        Stores the messages to a cookie, returning a list of any messages
        which could not be stored.

        If the encoded data is larger than ``max_cookie_size``, removes
        messages until the data fits (these are the messages which are
        returned), and add the not_finished sentinel value to indicate as
        much.
        """
        unstored_messages = []
        encoded_data = self._encode(messages)
        if self.max_cookie_size:
            # data is going to be stored eventually by CompatCookie, which
            # adds its own overhead, which we must account for.
            cookie = CompatCookie()  # create outside the loop

            def stored_length(val):
                # Size of ``val`` as it will actually appear in the cookie.
                return len(cookie.value_encode(val)[1])

            while encoded_data and stored_length(encoded_data) > self.max_cookie_size:
                if remove_oldest:
                    unstored_messages.append(messages.pop(0))
                else:
                    unstored_messages.insert(0, messages.pop())
                # encode_empty keeps the sentinel in the cookie even when
                # every message was evicted, so _get() can report that not
                # all messages were retrieved.
                encoded_data = self._encode(messages + [self.not_finished],
                                            encode_empty=unstored_messages)
        self._update_cookie(encoded_data, response)
        return unstored_messages

    def _hash(self, value):
        """
        Creates an HMAC/SHA1 hash based on the value and the project
        setting's SECRET_KEY, modified to make it unique for the present
        purpose.
        """
        key = 'django.contrib.messages' + settings.SECRET_KEY
        return hmac.new(key, value, sha_hmac).hexdigest()

    def _compare_hashes(self, val1, val2):
        """
        Compares two hex digests in constant time.

        A plain ``==`` short-circuits on the first differing character,
        leaking timing information that could let an attacker forge a
        valid signature for the client-supplied cookie data.
        """
        if len(val1) != len(val2):
            return False
        result = 0
        for x, y in zip(val1, val2):
            result |= ord(x) ^ ord(y)
        return result == 0

    def _encode(self, messages, encode_empty=False):
        """
        Returns an encoded version of the messages list which can be stored
        as plain text, or ``None`` when there is nothing to encode.

        Since the data will be retrieved from the client-side, the encoded
        data also contains a hash to ensure that the data was not tampered
        with.
        """
        if messages or encode_empty:
            encoder = MessageEncoder(separators=(',', ':'))
            value = encoder.encode(messages)
            return '%s$%s' % (self._hash(value), value)

    def _decode(self, data):
        """
        Safely decodes a encoded text stream back into a list of messages.

        If the encoded text stream contained an invalid hash or was in an
        invalid format, ``None`` is returned.
        """
        if not data:
            return None
        bits = data.split('$', 1)
        if len(bits) == 2:
            digest, value = bits
            # Constant-time comparison: the digest comes straight from the
            # client, so avoid leaking how much of it matched.
            if self._compare_hashes(digest, self._hash(value)):
                try:
                    # If we get here (and the JSON decode works), everything
                    # is good. In any other case, drop back and return None.
                    return json.loads(value, cls=MessageDecoder)
                except ValueError:
                    pass
        # Mark the data as used (so it gets removed) since something was
        # wrong with the data.
        self.used = True
        return None
|
bsd-3-clause
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.